From 4ab6c995e16139965c1ae635cbd39de9d74d2529 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 16:46:15 +0000 Subject: [PATCH] Release v2.5.0 --- client/client.go | 81 ++- client/options.go | 9 + connectors.go | 229 +------ connectors/client.go | 55 +- core/client_option.go | 7 +- datasets.go | 160 +++++ datasets/client.go | 228 +++++++ embed_jobs.go | 53 ++ embedjobs/client.go | 250 +++++++ environments.go | 2 +- types.go | 1455 ++++++++++++++++++++++++++++++++++------- 11 files changed, 2042 insertions(+), 487 deletions(-) create mode 100644 datasets.go create mode 100644 datasets/client.go create mode 100644 embed_jobs.go create mode 100644 embedjobs/client.go diff --git a/client/client.go b/client/client.go index 834e2b2..d22db43 100644 --- a/client/client.go +++ b/client/client.go @@ -10,6 +10,8 @@ import ( v2 "github.com/cohere-ai/cohere-go/v2" connectors "github.com/cohere-ai/cohere-go/v2/connectors" core "github.com/cohere-ai/cohere-go/v2/core" + datasets "github.com/cohere-ai/cohere-go/v2/datasets" + embedjobs "github.com/cohere-ai/cohere-go/v2/embedjobs" io "io" http "net/http" ) @@ -19,7 +21,9 @@ type Client struct { caller *core.Caller header http.Header + Datasets *datasets.Client Connectors *connectors.Client + EmbedJobs *embedjobs.Client } func NewClient(opts ...core.ClientOption) *Client { @@ -31,14 +35,17 @@ func NewClient(opts ...core.ClientOption) *Client { baseURL: options.BaseURL, caller: core.NewCaller(options.HTTPClient), header: options.ToHeader(), + Datasets: datasets.NewClient(opts...), Connectors: connectors.NewClient(opts...), + EmbedJobs: embedjobs.NewClient(opts...), } } // The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. 
-// The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". +// +// The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta). func (c *Client) ChatStream(ctx context.Context, request *v2.ChatStreamRequest) (*core.Stream[v2.StreamedChatResponse], error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -57,9 +64,10 @@ func (c *Client) ChatStream(ctx context.Context, request *v2.ChatStreamRequest) } // The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. -// The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". +// +// The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". 
For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta). func (c *Client) Chat(ctx context.Context, request *v2.ChatRequest) (*v2.NonStreamedChatResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -81,9 +89,56 @@ func (c *Client) Chat(ctx context.Context, request *v2.ChatRequest) (*v2.NonStre return response, nil } +// This endpoint generates realistic text conditioned on a given input. +func (c *Client) GenerateStream(ctx context.Context, request *v2.GenerateStreamRequest) (*core.Stream[v2.GenerateStreamedResponse], error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/generate" + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 400: + value := new(v2.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 500: + value := new(v2.InternalServerError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + streamer := core.NewStreamer[v2.GenerateStreamedResponse](c.caller) + return streamer.Stream( + ctx, + &core.StreamParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + ErrorDecoder: errorDecoder, + }, + ) +} + // This endpoint generates realistic text conditioned on a given input. 
func (c *Client) Generate(ctx context.Context, request *v2.GenerateRequest) (*v2.Generation, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -138,7 +193,7 @@ func (c *Client) Generate(ctx context.Context, request *v2.GenerateRequest) (*v2 // // If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). func (c *Client) Embed(ctx context.Context, request *v2.EmbedRequest) (*v2.EmbedResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -189,7 +244,7 @@ func (c *Client) Embed(ctx context.Context, request *v2.EmbedRequest) (*v2.Embed // This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. func (c *Client) Rerank(ctx context.Context, request *v2.RerankRequest) (*v2.RerankResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -212,9 +267,9 @@ func (c *Client) Rerank(ctx context.Context, request *v2.RerankRequest) (*v2.Rer } // This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. -// Note: [Custom Models](/training-representation-models) trained on classification examples don't require the `examples` parameter to be passed in explicitly. +// Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. 
func (c *Client) Classify(ctx context.Context, request *v2.ClassifyRequest) (*v2.ClassifyResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -265,7 +320,7 @@ func (c *Client) Classify(ctx context.Context, request *v2.ClassifyRequest) (*v2 // This endpoint identifies which language each of the provided texts is written in. func (c *Client) DetectLanguage(ctx context.Context, request *v2.DetectLanguageRequest) (*v2.DetectLanguageResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -289,7 +344,7 @@ func (c *Client) DetectLanguage(ctx context.Context, request *v2.DetectLanguageR // This endpoint generates a summary in English for a given text. func (c *Client) Summarize(ctx context.Context, request *v2.SummarizeRequest) (*v2.SummarizeResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -313,7 +368,7 @@ func (c *Client) Summarize(ctx context.Context, request *v2.SummarizeRequest) (* // This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. func (c *Client) Tokenize(ctx context.Context, request *v2.TokenizeRequest) (*v2.TokenizeResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -364,7 +419,7 @@ func (c *Client) Tokenize(ctx context.Context, request *v2.TokenizeRequest) (*v2 // This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. 
func (c *Client) Detokenize(ctx context.Context, request *v2.DetokenizeRequest) (*v2.DetokenizeResponse, error) { - baseURL := "https://api.cohere.ai" + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } diff --git a/client/options.go b/client/options.go index 1e3a6d1..aa652dd 100644 --- a/client/options.go +++ b/client/options.go @@ -37,3 +37,12 @@ func WithToken(token string) core.ClientOption { opts.Token = token } } + +// WithClientName sets the clientName header on every request. +// +// The name of the project that is making the request. +func WithClientName(clientName *string) core.ClientOption { + return func(opts *core.ClientOptions) { + opts.ClientName = clientName + } +} diff --git a/connectors.go b/connectors.go index 6ca63db..fe12998 100644 --- a/connectors.go +++ b/connectors.go @@ -2,13 +2,7 @@ package api -import ( - json "encoding/json" - fmt "fmt" - core "github.com/cohere-ai/cohere-go/v2/core" -) - -type CreateRequest struct { +type CreateConnectorRequest struct { // A human-readable name for the connector. Name string `json:"name"` // A description of the connector. @@ -34,225 +28,12 @@ type ConnectorsListRequest struct { Offset *float64 `json:"-"` } -type CreateConnectorOAuth struct { - // The OAuth 2.0 client ID. This fields is encrypted at rest. - ClientId string `json:"clientId"` - // The OAuth 2.0 client Secret. This field is encrypted at rest and never returned in a response. - ClientSecret string `json:"clientSecret"` - // The OAuth 2.0 /authorize endpoint to use when users authorize the connector. - AuthorizeUrl string `json:"authorizeUrl"` - // The OAuth 2.0 /token endpoint to use when users authorize the connector. - TokenUrl string `json:"tokenUrl"` - // The OAuth scopes to request when users authorize the connector. 
- Scope *string `json:"scope,omitempty"` - - _rawJSON json.RawMessage -} - -func (c *CreateConnectorOAuth) UnmarshalJSON(data []byte) error { - type unmarshaler CreateConnectorOAuth - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *c = CreateConnectorOAuth(value) - c._rawJSON = json.RawMessage(data) - return nil -} - -func (c *CreateConnectorOAuth) String() string { - if len(c._rawJSON) > 0 { - if value, err := core.StringifyJSON(c._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(c); err == nil { - return value - } - return fmt.Sprintf("%#v", c) -} - -type CreateConnectorServiceAuth struct { - // The token_type specifies the way the token is passed in the Authorization header. Valid values are "bearer", "basic", and "noscheme". - Type string `json:"type"` - // The token that will be used in the HTTP Authorization header when making requests to the connector. This field is encrypted at rest and never returned in a response. 
- Token string `json:"token"` - - _rawJSON json.RawMessage -} - -func (c *CreateConnectorServiceAuth) UnmarshalJSON(data []byte) error { - type unmarshaler CreateConnectorServiceAuth - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *c = CreateConnectorServiceAuth(value) - c._rawJSON = json.RawMessage(data) - return nil -} - -func (c *CreateConnectorServiceAuth) String() string { - if len(c._rawJSON) > 0 { - if value, err := core.StringifyJSON(c._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(c); err == nil { - return value - } - return fmt.Sprintf("%#v", c) -} - -type CreateResponse struct { - Connector *Connector `json:"connector,omitempty"` - - _rawJSON json.RawMessage -} - -func (c *CreateResponse) UnmarshalJSON(data []byte) error { - type unmarshaler CreateResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *c = CreateResponse(value) - c._rawJSON = json.RawMessage(data) - return nil -} - -func (c *CreateResponse) String() string { - if len(c._rawJSON) > 0 { - if value, err := core.StringifyJSON(c._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(c); err == nil { - return value - } - return fmt.Sprintf("%#v", c) -} - -type DeleteResponse = map[string]interface{} - -type GetResponse struct { - Connector *Connector `json:"connector,omitempty"` - - _rawJSON json.RawMessage -} - -func (g *GetResponse) UnmarshalJSON(data []byte) error { - type unmarshaler GetResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *g = GetResponse(value) - g._rawJSON = json.RawMessage(data) - return nil -} - -func (g *GetResponse) String() string { - if len(g._rawJSON) > 0 { - if value, err := core.StringifyJSON(g._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(g); err == nil { - return value - } - return fmt.Sprintf("%#v", 
g) -} - -type ListResponse struct { - Connectors []*Connector `json:"connectors,omitempty"` - - _rawJSON json.RawMessage -} - -func (l *ListResponse) UnmarshalJSON(data []byte) error { - type unmarshaler ListResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *l = ListResponse(value) - l._rawJSON = json.RawMessage(data) - return nil -} - -func (l *ListResponse) String() string { - if len(l._rawJSON) > 0 { - if value, err := core.StringifyJSON(l._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(l); err == nil { - return value - } - return fmt.Sprintf("%#v", l) -} - -type OAuthAuthorizeResponse struct { - // The OAuth 2.0 redirect url. Redirect the user to this url to authorize the connector. - RedirectUrl *string `json:"redirect_url,omitempty"` - - _rawJSON json.RawMessage -} - -func (o *OAuthAuthorizeResponse) UnmarshalJSON(data []byte) error { - type unmarshaler OAuthAuthorizeResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *o = OAuthAuthorizeResponse(value) - o._rawJSON = json.RawMessage(data) - return nil -} - -func (o *OAuthAuthorizeResponse) String() string { - if len(o._rawJSON) > 0 { - if value, err := core.StringifyJSON(o._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(o); err == nil { - return value - } - return fmt.Sprintf("%#v", o) -} - -type UpdateResponse struct { - Connector *Connector `json:"connector,omitempty"` - - _rawJSON json.RawMessage -} - -func (u *UpdateResponse) UnmarshalJSON(data []byte) error { - type unmarshaler UpdateResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *u = UpdateResponse(value) - u._rawJSON = json.RawMessage(data) - return nil -} - -func (u *UpdateResponse) String() string { - if len(u._rawJSON) > 0 { - if value, err := core.StringifyJSON(u._rawJSON); err == nil { - return value - } - 
} - if value, err := core.StringifyJSON(u); err == nil { - return value - } - return fmt.Sprintf("%#v", u) +type ConnectorsOAuthAuthorizeRequest struct { + // The URL to redirect to after the connector has been authorized. + AfterTokenRedirect *string `json:"-"` } -type UpdateRequest struct { +type UpdateConnectorRequest struct { // A human-readable name for the connector. Name *string `json:"name,omitempty"` // The URL of the connector that will be used to search for documents. diff --git a/connectors/client.go b/connectors/client.go index 745ed12..bf3d61b 100644 --- a/connectors/client.go +++ b/connectors/client.go @@ -33,9 +33,9 @@ func NewClient(opts ...core.ClientOption) *Client { } } -// Returns a list of connectors ordered by descending creation date (newer first). -func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (*v2.ListResponse, error) { - baseURL := "https://api.cohere.ai" +// Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. +func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (*v2.ListConnectorsResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -78,7 +78,7 @@ func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (* return apiError } - var response *v2.ListResponse + var response *v2.ListConnectorsResponse if err := c.caller.Call( ctx, &core.CallParams{ @@ -94,10 +94,9 @@ func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (* return response, nil } -// Creates a new connector. The connector is tested during registration -// and will cancel registration when the test is unsuccessful. -func (c *Client) Create(ctx context.Context, request *v2.CreateRequest) (*v2.CreateResponse, error) { - baseURL := "https://api.cohere.ai" +// Creates a new connector. 
The connector is tested during registration and will cancel registration when the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/docs/creating-and-deploying-a-connector) for more information. +func (c *Client) Create(ctx context.Context, request *v2.CreateConnectorRequest) (*v2.CreateConnectorResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -136,7 +135,7 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateRequest) (*v2.Cre return apiError } - var response *v2.CreateResponse + var response *v2.CreateConnectorResponse if err := c.caller.Call( ctx, &core.CallParams{ @@ -153,11 +152,11 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateRequest) (*v2.Cre return response, nil } -// Retrieve a connector by ID. +// Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. // // The ID of the connector to retrieve. -func (c *Client) Get(ctx context.Context, id string) (*v2.GetResponse, error) { - baseURL := "https://api.cohere.ai" +func (c *Client) Get(ctx context.Context, id string) (*v2.GetConnectorResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -196,7 +195,7 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.GetResponse, error) { return apiError } - var response *v2.GetResponse + var response *v2.GetConnectorResponse if err := c.caller.Call( ctx, &core.CallParams{ @@ -212,11 +211,11 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.GetResponse, error) { return response, nil } -// Delete a connector by ID. +// Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. // // The ID of the connector to delete. 
-func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteResponse, error) { - baseURL := "https://api.cohere.ai" +func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteConnectorResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -262,7 +261,7 @@ func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteResponse, erro return apiError } - var response v2.DeleteResponse + var response v2.DeleteConnectorResponse if err := c.caller.Call( ctx, &core.CallParams{ @@ -278,11 +277,11 @@ func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteResponse, erro return response, nil } -// Update a connector by ID. Omitted fields will not be updated. +// Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. // // The ID of the connector to update. -func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateRequest) (*v2.UpdateResponse, error) { - baseURL := "https://api.cohere.ai" +func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateConnectorRequest) (*v2.UpdateConnectorResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } @@ -328,7 +327,7 @@ func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateReques return apiError } - var response *v2.UpdateResponse + var response *v2.UpdateConnectorResponse if err := c.caller.Call( ctx, &core.CallParams{ @@ -345,16 +344,24 @@ func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateReques return response, nil } -// Authorize the connector with the given ID for the connector oauth app. +// Authorize the connector with the given ID for the connector oauth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information. // // The ID of the connector to authorize. 
-func (c *Client) OAuthAuthorize(ctx context.Context, id string) (*v2.OAuthAuthorizeResponse, error) { - baseURL := "https://api.cohere.ai" +func (c *Client) OAuthAuthorize(ctx context.Context, id string, request *v2.ConnectorsOAuthAuthorizeRequest) (*v2.OAuthAuthorizeResponse, error) { + baseURL := "https://api.cohere.ai/v1" if c.baseURL != "" { baseURL = c.baseURL } endpointURL := fmt.Sprintf(baseURL+"/"+"v1/connectors/%v/oauth/authorize", id) + queryParams := make(url.Values) + if request.AfterTokenRedirect != nil { + queryParams.Add("after_token_redirect", fmt.Sprintf("%v", *request.AfterTokenRedirect)) + } + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) if err != nil { diff --git a/core/client_option.go b/core/client_option.go index 4459b58..38b0bd6 100644 --- a/core/client_option.go +++ b/core/client_option.go @@ -3,6 +3,7 @@ package core import ( + fmt "fmt" http "net/http" ) @@ -17,6 +18,7 @@ type ClientOptions struct { HTTPClient HTTPClient HTTPHeader http.Header Token string + ClientName *string } // NewClientOptions returns a new *ClientOptions value. @@ -36,6 +38,9 @@ func (c *ClientOptions) ToHeader() http.Header { if c.Token != "" { header.Set("Authorization", "Bearer "+c.Token) } + if c.ClientName != nil { + header.Set("X-Client-Name", fmt.Sprintf("%v", *c.ClientName)) + } return header } @@ -43,6 +48,6 @@ func (c *ClientOptions) cloneHeader() http.Header { headers := c.HTTPHeader.Clone() headers.Set("X-Fern-Language", "Go") headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2") - headers.Set("X-Fern-SDK-Version", "v2.4.1") + headers.Set("X-Fern-SDK-Version", "v2.5.0") return headers } diff --git a/datasets.go b/datasets.go new file mode 100644 index 0000000..284829d --- /dev/null +++ b/datasets.go @@ -0,0 +1,160 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package api + +import ( + json "encoding/json" + fmt "fmt" + core "github.com/cohere-ai/cohere-go/v2/core" + time "time" +) + +type DatasetsCreateRequest struct { + // The name of the uploaded dataset. + Name *string `json:"-"` + // The dataset type, which is used to validate the data. + Type *DatasetType `json:"-"` + // Indicates if the original file should be stored. + KeepOriginalFile *bool `json:"-"` + // Indicates whether rows with malformed input should be dropped (instead of failing the validation check). Dropped rows will be returned in the warnings field. + SkipMalformedInput *bool `json:"-"` + // List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `keep_fields` are missing from the uploaded file, Dataset validation will fail. + KeepFields []*string `json:"-"` + // List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, Datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `optional_fields` are missing from the uploaded file, Dataset validation will pass. + OptionalFields []*string `json:"-"` + // Raw .txt uploads will be split into entries using the text_separator value. + TextSeparator *string `json:"-"` + // The delimiter used for .csv uploads. 
+ CsvDelimiter *string `json:"-"` +} + +type DatasetsListRequest struct { + // optional filter by dataset type + DatasetType *string `json:"-"` + // optional filter before a date + Before *time.Time `json:"-"` + // optional filter after a date + After *time.Time `json:"-"` + // optional limit to number of results + Limit *string `json:"-"` + // optional offset to start of results + Offset *string `json:"-"` +} + +type DatasetsCreateResponse struct { + // The dataset ID + Id *string `json:"id,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DatasetsCreateResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DatasetsCreateResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DatasetsCreateResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DatasetsCreateResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type DatasetsGetResponse struct { + Dataset *Dataset `json:"dataset,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DatasetsGetResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DatasetsGetResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DatasetsGetResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DatasetsGetResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type DatasetsGetUsageResponse struct { + // The total number of bytes used by the organization. 
+ OrganizationUsage *string `json:"organization_usage,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DatasetsGetUsageResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DatasetsGetUsageResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DatasetsGetUsageResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DatasetsGetUsageResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type DatasetsListResponse struct { + Datasets []*Dataset `json:"datasets,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DatasetsListResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DatasetsListResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DatasetsListResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DatasetsListResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} diff --git a/datasets/client.go b/datasets/client.go new file mode 100644 index 0000000..8e9fb70 --- /dev/null +++ b/datasets/client.go @@ -0,0 +1,228 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package datasets + +import ( + bytes "bytes" + context "context" + fmt "fmt" + v2 "github.com/cohere-ai/cohere-go/v2" + core "github.com/cohere-ai/cohere-go/v2/core" + io "io" + multipart "mime/multipart" + http "net/http" + url "net/url" + time "time" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// List datasets that have been created. +func (c *Client) List(ctx context.Context, request *v2.DatasetsListRequest) (*v2.DatasetsListResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/datasets" + + queryParams := make(url.Values) + if request.DatasetType != nil { + queryParams.Add("datasetType", fmt.Sprintf("%v", *request.DatasetType)) + } + if request.Before != nil { + queryParams.Add("before", fmt.Sprintf("%v", request.Before.Format(time.RFC3339))) + } + if request.After != nil { + queryParams.Add("after", fmt.Sprintf("%v", request.After.Format(time.RFC3339))) + } + if request.Limit != nil { + queryParams.Add("limit", fmt.Sprintf("%v", *request.Limit)) + } + if request.Offset != nil { + queryParams.Add("offset", fmt.Sprintf("%v", *request.Offset)) + } + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + var response *v2.DatasetsListResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information. 
+func (c *Client) Create(ctx context.Context, data io.Reader, evalData io.Reader, request *v2.DatasetsCreateRequest) (*v2.DatasetsCreateResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/datasets" + + queryParams := make(url.Values) + if request.Name != nil { + queryParams.Add("name", fmt.Sprintf("%v", *request.Name)) + } + if request.Type != nil { + queryParams.Add("type", fmt.Sprintf("%v", *request.Type)) + } + if request.KeepOriginalFile != nil { + queryParams.Add("keep_original_file", fmt.Sprintf("%v", *request.KeepOriginalFile)) + } + if request.SkipMalformedInput != nil { + queryParams.Add("skip_malformed_input", fmt.Sprintf("%v", *request.SkipMalformedInput)) + } + for _, value := range request.KeepFields { + queryParams.Add("keep_fields", fmt.Sprintf("%v", *value)) + } + for _, value := range request.OptionalFields { + queryParams.Add("optional_fields", fmt.Sprintf("%v", *value)) + } + if request.TextSeparator != nil { + queryParams.Add("text_separator", fmt.Sprintf("%v", *request.TextSeparator)) + } + if request.CsvDelimiter != nil { + queryParams.Add("csv_delimiter", fmt.Sprintf("%v", *request.CsvDelimiter)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + var response *v2.DatasetsCreateResponse + requestBuffer := bytes.NewBuffer(nil) + writer := multipart.NewWriter(requestBuffer) + dataFilename := "data_filename" + if named, ok := data.(interface{ Name() string }); ok { + dataFilename = named.Name() + } + dataPart, err := writer.CreateFormFile("data", dataFilename) + if err != nil { + return nil, err + } + if _, err := io.Copy(dataPart, data); err != nil { + return nil, err + } + evalDataFilename := "evalData_filename" + if named, ok := evalData.(interface{ Name() string }); ok { + evalDataFilename = named.Name() + } + evalDataPart, err := writer.CreateFormFile("eval_data", evalDataFilename) + if err != nil { + return nil, err + } + if _, err := io.Copy(evalDataPart, evalData); err != nil { + return nil, err + } + if err := writer.Close(); err != nil { + return nil, err + } + c.header.Set("Content-Type", writer.FormDataContentType()) + + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: requestBuffer, + Response: &response, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all their users. +func (c *Client) GetUsage(ctx context.Context) (*v2.DatasetsGetUsageResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/datasets/usage" + + var response *v2.DatasetsGetUsageResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information. 
+func (c *Client) Get(ctx context.Context, id string) (*v2.DatasetsGetResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/datasets/%v", id) + + var response *v2.DatasetsGetResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually. +func (c *Client) Delete(ctx context.Context, id string) (map[string]interface{}, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/datasets/%v", id) + + var response map[string]interface{} + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodDelete, + Headers: c.header, + Response: &response, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/embed_jobs.go b/embed_jobs.go new file mode 100644 index 0000000..05504fd --- /dev/null +++ b/embed_jobs.go @@ -0,0 +1,53 @@ +// This file was auto-generated by Fern from our API Definition. + +package api + +import ( + fmt "fmt" +) + +type CreateEmbedJobRequest struct { + // ID of the embedding model. + // + // Available models and corresponding embedding dimensions: + // + // - `embed-english-v3.0` : 1024 + // - `embed-multilingual-v3.0` : 1024 + // - `embed-english-light-v3.0` : 384 + // - `embed-multilingual-light-v3.0` : 384 + Model string `json:"model"` + // ID of a [Dataset](https://docs.cohere.com/docs/datasets). The Dataset must be of type `embed-input` and must have a validation status `Validated` + DatasetId string `json:"dataset_id"` + InputType EmbedInputType `json:"input_type,omitempty"` + // The name of the embed job. 
+ Name *string `json:"name,omitempty"` + // One of `START|END` to specify how the API will handle inputs longer than the maximum token length. + // + // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + Truncate *CreateEmbedJobRequestTruncate `json:"truncate,omitempty"` +} + +// One of `START|END` to specify how the API will handle inputs longer than the maximum token length. +// +// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. +type CreateEmbedJobRequestTruncate string + +const ( + CreateEmbedJobRequestTruncateStart CreateEmbedJobRequestTruncate = "START" + CreateEmbedJobRequestTruncateEnd CreateEmbedJobRequestTruncate = "END" +) + +func NewCreateEmbedJobRequestTruncateFromString(s string) (CreateEmbedJobRequestTruncate, error) { + switch s { + case "START": + return CreateEmbedJobRequestTruncateStart, nil + case "END": + return CreateEmbedJobRequestTruncateEnd, nil + } + var t CreateEmbedJobRequestTruncate + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (c CreateEmbedJobRequestTruncate) Ptr() *CreateEmbedJobRequestTruncate { + return &c +} diff --git a/embedjobs/client.go b/embedjobs/client.go new file mode 100644 index 0000000..7ffbb44 --- /dev/null +++ b/embedjobs/client.go @@ -0,0 +1,250 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package embedjobs + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + v2 "github.com/cohere-ai/cohere-go/v2" + core "github.com/cohere-ai/cohere-go/v2/core" + io "io" + http "net/http" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// The list embed job endpoint allows users to view all embed jobs history for that specific user. +func (c *Client) List(ctx context.Context) (*v2.ListEmbedJobResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/embed-jobs" + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 400: + value := new(v2.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 500: + value := new(v2.InternalServerError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v2.ListEmbedJobResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. 
The result of a completed embed job is new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings. +func (c *Client) Create(ctx context.Context, request *v2.CreateEmbedJobRequest) (*v2.CreateEmbedJobResponse, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/embed-jobs" + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 400: + value := new(v2.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 500: + value := new(v2.InternalServerError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v2.CreateEmbedJobResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// This API retrieves the details about an embed job started by the same user. +// +// The ID of the embed job to retrieve. 
+func (c *Client) Get(ctx context.Context, id string) (*v2.EmbedJob, error) { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/embed-jobs/%v", id) + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 400: + value := new(v2.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(v2.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 500: + value := new(v2.InternalServerError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v2.EmbedJob + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation. +// +// The ID of the embed job to cancel. 
+func (c *Client) Cancel(ctx context.Context, id string) error { + baseURL := "https://api.cohere.ai/v1" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/embed-jobs/%v/cancel", id) + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 400: + value := new(v2.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(v2.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 500: + value := new(v2.InternalServerError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return err + } + return nil +} diff --git a/environments.go b/environments.go index bb280a3..c2ad074 100644 --- a/environments.go +++ b/environments.go @@ -9,5 +9,5 @@ package api var Environments = struct { Production string }{ - Production: "https://api.cohere.ai", + Production: "https://api.cohere.ai/v1", } diff --git a/types.go b/types.go index c7f3ddd..5b7e2e1 100644 --- a/types.go +++ b/types.go @@ -14,7 +14,9 @@ type ChatRequest struct { // The chat message from the user to the model. Message string `json:"message"` // Defaults to `command`. - // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models). 
+ // + // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [fine-tuned custom model](https://docs.cohere.com/docs/chat-fine-tuning). + // // Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). Model *string `json:"model,omitempty"` // When specified, the default Cohere preamble will be replaced with the provided one. @@ -22,55 +24,35 @@ type ChatRequest struct { // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. ChatHistory []*ChatMessage `json:"chat_history,omitempty"` // An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation. + // // If a conversation with this id does not already exist, a new conversation will be created. ConversationId *string `json:"conversation_id,omitempty"` // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. + // // Dictates how the prompt will be constructed. + // // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. + // // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. PromptTruncation *ChatRequestPromptTruncation `json:"prompt_truncation,omitempty"` // Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one. 
+ // // When specified, the model's reply will be enriched with information found by quering each of the connectors (RAG). Connectors []*ChatConnector `json:"connectors,omitempty"` // Defaults to `false`. + // // When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. SearchQueriesOnly *bool `json:"search_queries_only,omitempty"` // A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. Documents []ChatDocument `json:"documents,omitempty"` // Defaults to `"accurate"`. + // // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty"` - // Defaults to `0.3` + // Defaults to `0.3`. + // // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. Temperature *float64 `json:"temperature,omitempty"` - stream bool -} - -func (c *ChatRequest) Stream() bool { - return c.stream -} - -func (c *ChatRequest) UnmarshalJSON(data []byte) error { - type unmarshaler ChatRequest - var body unmarshaler - if err := json.Unmarshal(data, &body); err != nil { - return err - } - *c = ChatRequest(body) - c.stream = false - return nil -} - -func (c *ChatRequest) MarshalJSON() ([]byte, error) { - type embed ChatRequest - var marshaler = struct { - embed - Stream bool `json:"stream"` - }{ - embed: embed(*c), - Stream: false, - } - return json.Marshal(marshaler) } type ChatStreamRequest struct { @@ -78,7 +60,9 @@ type ChatStreamRequest struct { // The chat message from the user to the model. 
Message string `json:"message"` // Defaults to `command`. - // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models). + // + // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [fine-tuned custom model](https://docs.cohere.com/docs/chat-fine-tuning). + // // Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). Model *string `json:"model,omitempty"` // When specified, the default Cohere preamble will be replaced with the provided one. @@ -86,64 +70,46 @@ type ChatStreamRequest struct { // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. ChatHistory []*ChatMessage `json:"chat_history,omitempty"` // An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation. + // // If a conversation with this id does not already exist, a new conversation will be created. ConversationId *string `json:"conversation_id,omitempty"` // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. + // // Dictates how the prompt will be constructed. + // // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. + // // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. 
PromptTruncation *ChatStreamRequestPromptTruncation `json:"prompt_truncation,omitempty"` // Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one. + // // When specified, the model's reply will be enriched with information found by quering each of the connectors (RAG). Connectors []*ChatConnector `json:"connectors,omitempty"` // Defaults to `false`. + // // When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. SearchQueriesOnly *bool `json:"search_queries_only,omitempty"` // A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. Documents []ChatDocument `json:"documents,omitempty"` // Defaults to `"accurate"`. + // // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. CitationQuality *ChatStreamRequestCitationQuality `json:"citation_quality,omitempty"` - // Defaults to `0.3` + // Defaults to `0.3`. + // // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. 
Temperature *float64 `json:"temperature,omitempty"` - stream bool -} - -func (c *ChatStreamRequest) Stream() bool { - return c.stream -} - -func (c *ChatStreamRequest) UnmarshalJSON(data []byte) error { - type unmarshaler ChatStreamRequest - var body unmarshaler - if err := json.Unmarshal(data, &body); err != nil { - return err - } - *c = ChatStreamRequest(body) - c.stream = true - return nil -} - -func (c *ChatStreamRequest) MarshalJSON() ([]byte, error) { - type embed ChatStreamRequest - var marshaler = struct { - embed - Stream bool `json:"stream"` - }{ - embed: embed(*c), - Stream: true, - } - return json.Marshal(marshaler) } type ClassifyRequest struct { - // Represents a list of queries to be classified, each entry must not be empty. The maximum is 96 inputs. + // A list of up to 96 texts to be classified. Each one must be a non-empty string. + // There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models). + // Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts. Inputs []string `json:"inputs,omitempty"` // An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`. 
- // Note: [Custom Models](/training-representation-models) trained on classification examples don't require the `examples` parameter to be passed in explicitly. + // Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. Examples []*ClassifyRequestExamplesItem `json:"examples,omitempty"` - // The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. + // The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID. Model *string `json:"model,omitempty"` // The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.ai/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters. Preset *string `json:"preset,omitempty"` @@ -184,14 +150,8 @@ type EmbedRequest struct { // * `embed-english-v2.0` 4096 // * `embed-english-light-v2.0` 1024 // * `embed-multilingual-v2.0` 768 - Model *string `json:"model,omitempty"` - // Specifies the type of input you're giving to the model. Not required for older versions of the embedding models (i.e. anything lower than v3), but is required for more recent versions (i.e. anything bigger than v2). - // - // * `"search_document"`: Use this when you encode documents for embeddings that you store in a vector database for search use-cases. 
- // * `"search_query"`: Use this when you query your vector DB to find relevant documents. - // * `"classification"`: Use this when you use the embeddings as an input to a text classifier. - // * `"clustering"`: Use this when you want to cluster the embeddings. - InputType *string `json:"input_type,omitempty"` + Model *string `json:"model,omitempty"` + InputType *EmbedInputType `json:"input_type,omitempty"` // Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. // // * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. @@ -217,14 +177,6 @@ type GenerateRequest struct { Model *string `json:"model,omitempty"` // The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. NumGenerations *int `json:"num_generations,omitempty"` - // When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. - // - // The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following: - // - `COMPLETE` - the model sent back a finished reply - // - `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length - // - `ERROR` - something went wrong when generating the reply - // - `ERROR_TOXIC` - the model generated a reply that was deemed toxic - Stream *bool `json:"stream,omitempty"` // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. // // This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. 
See [BPE Tokens](/bpe-tokens-wiki) for more details. @@ -271,6 +223,61 @@ type GenerateRequest struct { LogitBias map[string]float64 `json:"logit_bias,omitempty"` } +type GenerateStreamRequest struct { + // The input text that serves as the starting point for generating the response. + // Note: The prompt will be pre-processed and modified before reaching the model. + Prompt string `json:"prompt"` + // The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). + // Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. + Model *string `json:"model,omitempty"` + // The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. + NumGenerations *int `json:"num_generations,omitempty"` + // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + // + // This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. + // + // Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. + MaxTokens *int `json:"max_tokens,omitempty"` + // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. + // + // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. + // + // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. 
+	Truncate *GenerateStreamRequestTruncate `json:"truncate,omitempty"`
+	// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
+	// Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
+	Temperature *float64 `json:"temperature,omitempty"`
+	// Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
+	// When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
+	Preset *string `json:"preset,omitempty"`
+	// The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
+	EndSequences []string `json:"end_sequences,omitempty"`
+	// The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
+	StopSequences []string `json:"stop_sequences,omitempty"`
+	// Ensures only the top `k` most likely tokens are considered for generation at each step.
+	// Defaults to `0`, min value of `0`, max value of `500`.
+	K *int `json:"k,omitempty"`
+	// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
+	// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
+	P *float64 `json:"p,omitempty"`
+	// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
+	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
+	// Defaults to `0.0`, min value of `0.0`, max value of `1.0`. 
Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + PresencePenalty *float64 `json:"presence_penalty,omitempty"` + // One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. + // + // If `GENERATION` is selected, the token likelihoods will only be provided for generated text. + // + // If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text. + ReturnLikelihoods *GenerateStreamRequestReturnLikelihoods `json:"return_likelihoods,omitempty"` + // Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. The format is `{token_id: bias}` where bias is a float between -10 and 10. Tokens can be obtained from text using [Tokenize](/reference/tokenize). + // + // For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. + // + // Note: logit bias may not be supported for all custom models. 
+ LogitBias map[string]float64 `json:"logit_bias,omitempty"` +} + type RerankRequest struct { // The identifier of the model to use, one of : `rerank-english-v2.0`, `rerank-multilingual-v2.0` Model *string `json:"model,omitempty"` @@ -317,8 +324,9 @@ type TokenizeRequest struct { } type ApiMeta struct { - ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty"` - Warnings []string `json:"warnings,omitempty"` + ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty"` + BilledUnits *ApiMetaBilledUnits `json:"billed_units,omitempty"` + Warnings []string `json:"warnings,omitempty"` _rawJSON json.RawMessage } @@ -347,10 +355,9 @@ func (a *ApiMeta) String() string { } type ApiMetaApiVersion struct { - Version string `json:"version"` - IsDeprecated *bool `json:"is_deprecated,omitempty"` - IsExperimental *bool `json:"is_experimental,omitempty"` - BilledUnits *ApiMetaApiVersionBilledUnits `json:"billed_units,omitempty"` + Version string `json:"version"` + IsDeprecated *bool `json:"is_deprecated,omitempty"` + IsExperimental *bool `json:"is_experimental,omitempty"` _rawJSON json.RawMessage } @@ -378,7 +385,7 @@ func (a *ApiMetaApiVersion) String() string { return fmt.Sprintf("%#v", a) } -type ApiMetaApiVersionBilledUnits struct { +type ApiMetaBilledUnits struct { // The number of billed input tokens. InputTokens *float64 `json:"input_tokens,omitempty"` // The number of billed output tokens. 
@@ -391,18 +398,18 @@ type ApiMetaApiVersionBilledUnits struct { _rawJSON json.RawMessage } -func (a *ApiMetaApiVersionBilledUnits) UnmarshalJSON(data []byte) error { - type unmarshaler ApiMetaApiVersionBilledUnits +func (a *ApiMetaBilledUnits) UnmarshalJSON(data []byte) error { + type unmarshaler ApiMetaBilledUnits var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *a = ApiMetaApiVersionBilledUnits(value) + *a = ApiMetaBilledUnits(value) a._rawJSON = json.RawMessage(data) return nil } -func (a *ApiMetaApiVersionBilledUnits) String() string { +func (a *ApiMetaBilledUnits) String() string { if len(a._rawJSON) > 0 { if value, err := core.StringifyJSON(a._rawJSON); err == nil { return value @@ -414,6 +421,32 @@ func (a *ApiMetaApiVersionBilledUnits) String() string { return fmt.Sprintf("%#v", a) } +// The token_type specifies the way the token is passed in the Authorization header. Valid values are "bearer", "basic", and "noscheme". +type AuthTokenType string + +const ( + AuthTokenTypeBearer AuthTokenType = "bearer" + AuthTokenTypeBasic AuthTokenType = "basic" + AuthTokenTypeNoscheme AuthTokenType = "noscheme" +) + +func NewAuthTokenTypeFromString(s string) (AuthTokenType, error) { + switch s { + case "bearer": + return AuthTokenTypeBearer, nil + case "basic": + return AuthTokenTypeBasic, nil + case "noscheme": + return AuthTokenTypeNoscheme, nil + } + var t AuthTokenType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (a AuthTokenType) Ptr() *AuthTokenType { + return &a +} + // A section of the generated reply which cites external knowledge. type ChatCitation struct { // The index of text that the citation starts at, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have a start value of `7`. This is because the citation starts at `w`, which is the seventh character. 
@@ -497,8 +530,9 @@ type ChatConnector struct { // // **site** - The web search results will be restricted to this domain (and TLD) when specified. Only a single domain is specified, and subdomains are also accepted. // Examples: - // * `{"options": {"site": "cohere.com"}}` would restrict the results to all subdomains at cohere.com - // * `{"options": {"site": "txt.cohere.com"}}` would restrict the results to `txt.cohere.com` + // + // - `{"options": {"site": "cohere.com"}}` would restrict the results to all subdomains at cohere.com + // - `{"options": {"site": "txt.cohere.com"}}` would restrict the results to `txt.cohere.com` Options map[string]interface{} `json:"options,omitempty"` _rawJSON json.RawMessage @@ -588,6 +622,7 @@ func (c ChatMessageRole) Ptr() *ChatMessageRole { } // Defaults to `"accurate"`. +// // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. type ChatRequestCitationQuality string @@ -612,8 +647,11 @@ func (c ChatRequestCitationQuality) Ptr() *ChatRequestCitationQuality { } // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. +// // Dictates how the prompt will be constructed. +// // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. +// // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. type ChatRequestPromptTruncation string @@ -923,6 +961,7 @@ func (c *ChatStreamEvent) String() string { } // Defaults to `"accurate"`. +// // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. 
type ChatStreamRequestCitationQuality string @@ -947,8 +986,11 @@ func (c ChatStreamRequestCitationQuality) Ptr() *ChatStreamRequestCitationQualit } // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. +// // Dictates how the prompt will be constructed. +// // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. +// // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. type ChatStreamRequestPromptTruncation string @@ -1299,9 +1341,9 @@ func (c ConnectorAuthStatus) Ptr() *ConnectorAuthStatus { type ConnectorOAuth struct { // The OAuth 2.0 /authorize endpoint to use when users authorize the connector. - AuthorizeUrl *string `json:"authorizeUrl,omitempty"` + AuthorizeUrl string `json:"authorize_url"` // The OAuth 2.0 /token endpoint to use when users authorize the connector. - TokenUrl *string `json:"tokenUrl,omitempty"` + TokenUrl string `json:"token_url"` // The OAuth scopes to request when users authorize the connector. Scope *string `json:"scope,omitempty"` @@ -1331,219 +1373,651 @@ func (c *ConnectorOAuth) String() string { return fmt.Sprintf("%#v", c) } -type DetectLanguageResponse struct { - // List of languages, one per input text - Results []*DetectLanguageResponseResultsItem `json:"results,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` +type CreateConnectorOAuth struct { + // The OAuth 2.0 client ID. This fields is encrypted at rest. + ClientId *string `json:"client_id,omitempty"` + // The OAuth 2.0 client Secret. This field is encrypted at rest and never returned in a response. + ClientSecret *string `json:"client_secret,omitempty"` + // The OAuth 2.0 /authorize endpoint to use when users authorize the connector. 
+ AuthorizeUrl *string `json:"authorize_url,omitempty"` + // The OAuth 2.0 /token endpoint to use when users authorize the connector. + TokenUrl *string `json:"token_url,omitempty"` + // The OAuth scopes to request when users authorize the connector. + Scope *string `json:"scope,omitempty"` _rawJSON json.RawMessage } -func (d *DetectLanguageResponse) UnmarshalJSON(data []byte) error { - type unmarshaler DetectLanguageResponse +func (c *CreateConnectorOAuth) UnmarshalJSON(data []byte) error { + type unmarshaler CreateConnectorOAuth var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *d = DetectLanguageResponse(value) - d._rawJSON = json.RawMessage(data) + *c = CreateConnectorOAuth(value) + c._rawJSON = json.RawMessage(data) return nil } -func (d *DetectLanguageResponse) String() string { - if len(d._rawJSON) > 0 { - if value, err := core.StringifyJSON(d._rawJSON); err == nil { +func (c *CreateConnectorOAuth) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { return value } } - if value, err := core.StringifyJSON(d); err == nil { + if value, err := core.StringifyJSON(c); err == nil { return value } - return fmt.Sprintf("%#v", d) + return fmt.Sprintf("%#v", c) } -type DetectLanguageResponseResultsItem struct { - LanguageName *string `json:"language_name,omitempty"` - LanguageCode *string `json:"language_code,omitempty"` +type CreateConnectorResponse struct { + Connector *Connector `json:"connector,omitempty"` _rawJSON json.RawMessage } -func (d *DetectLanguageResponseResultsItem) UnmarshalJSON(data []byte) error { - type unmarshaler DetectLanguageResponseResultsItem +func (c *CreateConnectorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler CreateConnectorResponse var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *d = DetectLanguageResponseResultsItem(value) - d._rawJSON = json.RawMessage(data) + *c = 
CreateConnectorResponse(value) + c._rawJSON = json.RawMessage(data) return nil } -func (d *DetectLanguageResponseResultsItem) String() string { - if len(d._rawJSON) > 0 { - if value, err := core.StringifyJSON(d._rawJSON); err == nil { +func (c *CreateConnectorResponse) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { return value } } - if value, err := core.StringifyJSON(d); err == nil { + if value, err := core.StringifyJSON(c); err == nil { return value } - return fmt.Sprintf("%#v", d) + return fmt.Sprintf("%#v", c) } -type DetokenizeResponse struct { - // A string representing the list of tokens. - Text string `json:"text"` - Meta *ApiMeta `json:"meta,omitempty"` +type CreateConnectorServiceAuth struct { + Type AuthTokenType `json:"type,omitempty"` + // The token that will be used in the HTTP Authorization header when making requests to the connector. This field is encrypted at rest and never returned in a response. + Token string `json:"token"` _rawJSON json.RawMessage } -func (d *DetokenizeResponse) UnmarshalJSON(data []byte) error { - type unmarshaler DetokenizeResponse +func (c *CreateConnectorServiceAuth) UnmarshalJSON(data []byte) error { + type unmarshaler CreateConnectorServiceAuth var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *d = DetokenizeResponse(value) - d._rawJSON = json.RawMessage(data) + *c = CreateConnectorServiceAuth(value) + c._rawJSON = json.RawMessage(data) return nil } -func (d *DetokenizeResponse) String() string { - if len(d._rawJSON) > 0 { - if value, err := core.StringifyJSON(d._rawJSON); err == nil { +func (c *CreateConnectorServiceAuth) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { return value } } - if value, err := core.StringifyJSON(d); err == nil { + if value, err := core.StringifyJSON(c); err == nil { return value } - return fmt.Sprintf("%#v", d) + return 
fmt.Sprintf("%#v", c) } -type EmbedByTypeResponse struct { - Id string `json:"id"` - // An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. - Embeddings *EmbedByTypeResponseEmbeddings `json:"embeddings,omitempty"` - // The text entries for which embeddings were returned. - Texts []string `json:"texts,omitempty"` +// Response from creating an embed job. +type CreateEmbedJobResponse struct { + JobId string `json:"job_id"` Meta *ApiMeta `json:"meta,omitempty"` _rawJSON json.RawMessage } -func (e *EmbedByTypeResponse) UnmarshalJSON(data []byte) error { - type unmarshaler EmbedByTypeResponse +func (c *CreateEmbedJobResponse) UnmarshalJSON(data []byte) error { + type unmarshaler CreateEmbedJobResponse var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *e = EmbedByTypeResponse(value) - e._rawJSON = json.RawMessage(data) + *c = CreateEmbedJobResponse(value) + c._rawJSON = json.RawMessage(data) return nil } -func (e *EmbedByTypeResponse) String() string { - if len(e._rawJSON) > 0 { - if value, err := core.StringifyJSON(e._rawJSON); err == nil { +func (c *CreateEmbedJobResponse) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { return value } } - if value, err := core.StringifyJSON(e); err == nil { + if value, err := core.StringifyJSON(c); err == nil { return value } - return fmt.Sprintf("%#v", e) + return fmt.Sprintf("%#v", c) } -// An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. -type EmbedByTypeResponseEmbeddings struct { - // An array of float embeddings. - Float [][]float64 `json:"float,omitempty"` - // An array of signed int8 embeddings. Each value is between -128 and 127. - Int8 [][]float64 `json:"int8,omitempty"` - // An array of unsigned int8 embeddings. 
Each value is between 0 and 255. - Uint8 [][]float64 `json:"uint8,omitempty"` - // An array of packed signed binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between -128 and 127. - Binary [][]float64 `json:"binary,omitempty"` - // An array of packed unsigned binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between 0 and 255. - Ubinary [][]float64 `json:"ubinary,omitempty"` +type Dataset struct { + // The dataset ID + Id string `json:"id"` + // The name of the dataset + Name string `json:"name"` + // The creation date + CreatedAt time.Time `json:"created_at"` + // The last update date + UpdatedAt time.Time `json:"updated_at"` + // Errors found during validation + ValidationError *string `json:"validation_error,omitempty"` + // the avro schema of the dataset + Schema *string `json:"schema,omitempty"` + RequiredFields []string `json:"required_fields,omitempty"` + PreserveFields []string `json:"preserve_fields,omitempty"` + // the underlying files that make up the dataset + DatasetParts []*DatasetPart `json:"dataset_parts,omitempty"` + // warnings found during validation + ValidationWarnings []string `json:"validation_warnings,omitempty"` _rawJSON json.RawMessage } -func (e *EmbedByTypeResponseEmbeddings) UnmarshalJSON(data []byte) error { - type unmarshaler EmbedByTypeResponseEmbeddings +func (d *Dataset) UnmarshalJSON(data []byte) error { + type unmarshaler Dataset var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *e = EmbedByTypeResponseEmbeddings(value) - e._rawJSON = json.RawMessage(data) + *d = Dataset(value) + d._rawJSON = json.RawMessage(data) return nil } -func (e *EmbedByTypeResponseEmbeddings) String() string { - if len(e._rawJSON) > 0 { - if value, err := core.StringifyJSON(e._rawJSON); err == nil { +func (d *Dataset) String() string { + if 
len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { return value } } - if value, err := core.StringifyJSON(e); err == nil { + if value, err := core.StringifyJSON(d); err == nil { return value } - return fmt.Sprintf("%#v", e) + return fmt.Sprintf("%#v", d) } -type EmbedFloatsResponse struct { +type DatasetPart struct { + // The dataset part ID Id string `json:"id"` - // An array of embeddings, where each embedding is an array of floats. The length of the `embeddings` array will be the same as the length of the original `texts` array. - Embeddings [][]float64 `json:"embeddings,omitempty"` - // The text entries for which embeddings were returned. - Texts []string `json:"texts,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + // The name of the dataset part + Name string `json:"name"` + // The download url of the file + Url *string `json:"url,omitempty"` + // The index of the file + Index *int `json:"index,omitempty"` + // The size of the file in bytes + SizeBytes *int `json:"size_bytes,omitempty"` + // The number of rows in the file + NumRows *int `json:"num_rows,omitempty"` + // The download url of the original file + OriginalUrl *string `json:"original_url,omitempty"` _rawJSON json.RawMessage } -func (e *EmbedFloatsResponse) UnmarshalJSON(data []byte) error { - type unmarshaler EmbedFloatsResponse +func (d *DatasetPart) UnmarshalJSON(data []byte) error { + type unmarshaler DatasetPart var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *e = EmbedFloatsResponse(value) - e._rawJSON = json.RawMessage(data) + *d = DatasetPart(value) + d._rawJSON = json.RawMessage(data) return nil } -func (e *EmbedFloatsResponse) String() string { - if len(e._rawJSON) > 0 { - if value, err := core.StringifyJSON(e._rawJSON); err == nil { +func (d *DatasetPart) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { return value } } - if value, err := 
core.StringifyJSON(e); err == nil { + if value, err := core.StringifyJSON(d); err == nil { return value } - return fmt.Sprintf("%#v", e) + return fmt.Sprintf("%#v", d) } -// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. -// -// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. -// -// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. -type EmbedRequestTruncate string +// The type of the dataset +type DatasetType string const ( - EmbedRequestTruncateNone EmbedRequestTruncate = "NONE" - EmbedRequestTruncateStart EmbedRequestTruncate = "START" - EmbedRequestTruncateEnd EmbedRequestTruncate = "END" + DatasetTypeEmbedInput DatasetType = "embed-input" + DatasetTypeEmbedResult DatasetType = "embed-result" + DatasetTypeClusterResult DatasetType = "cluster-result" + DatasetTypeClusterOutliers DatasetType = "cluster-outliers" + DatasetTypeRerankerFinetuneInput DatasetType = "reranker-finetune-input" + DatasetTypePromptCompletionFinetuneInput DatasetType = "prompt-completion-finetune-input" + DatasetTypeSingleLabelClassificationFinetuneInput DatasetType = "single-label-classification-finetune-input" + DatasetTypeChatFinetuneInput DatasetType = "chat-finetune-input" + DatasetTypeMultiLabelClassificationFinetuneInput DatasetType = "multi-label-classification-finetune-input" ) -func NewEmbedRequestTruncateFromString(s string) (EmbedRequestTruncate, error) { +func NewDatasetTypeFromString(s string) (DatasetType, error) { + switch s { + case "embed-input": + return DatasetTypeEmbedInput, nil + case "embed-result": + return DatasetTypeEmbedResult, nil + case "cluster-result": + return DatasetTypeClusterResult, nil + case "cluster-outliers": + return DatasetTypeClusterOutliers, nil + case "reranker-finetune-input": 
+ return DatasetTypeRerankerFinetuneInput, nil + case "prompt-completion-finetune-input": + return DatasetTypePromptCompletionFinetuneInput, nil + case "single-label-classification-finetune-input": + return DatasetTypeSingleLabelClassificationFinetuneInput, nil + case "chat-finetune-input": + return DatasetTypeChatFinetuneInput, nil + case "multi-label-classification-finetune-input": + return DatasetTypeMultiLabelClassificationFinetuneInput, nil + } + var t DatasetType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (d DatasetType) Ptr() *DatasetType { + return &d +} + +// The validation status of the dataset +type DatasetValidationStatus string + +const ( + DatasetValidationStatusUnknown DatasetValidationStatus = "unknown" + DatasetValidationStatusQueued DatasetValidationStatus = "queued" + DatasetValidationStatusProcessing DatasetValidationStatus = "processing" + DatasetValidationStatusFailed DatasetValidationStatus = "failed" + DatasetValidationStatusValidated DatasetValidationStatus = "validated" + DatasetValidationStatusSkipped DatasetValidationStatus = "skipped" +) + +func NewDatasetValidationStatusFromString(s string) (DatasetValidationStatus, error) { + switch s { + case "unknown": + return DatasetValidationStatusUnknown, nil + case "queued": + return DatasetValidationStatusQueued, nil + case "processing": + return DatasetValidationStatusProcessing, nil + case "failed": + return DatasetValidationStatusFailed, nil + case "validated": + return DatasetValidationStatusValidated, nil + case "skipped": + return DatasetValidationStatusSkipped, nil + } + var t DatasetValidationStatus + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (d DatasetValidationStatus) Ptr() *DatasetValidationStatus { + return &d +} + +type DeleteConnectorResponse = map[string]interface{} + +type DetectLanguageResponse struct { + // List of languages, one per input text + Results []*DetectLanguageResponseResultsItem `json:"results,omitempty"` + Meta *ApiMeta 
`json:"meta,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DetectLanguageResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DetectLanguageResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DetectLanguageResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DetectLanguageResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type DetectLanguageResponseResultsItem struct { + LanguageName *string `json:"language_name,omitempty"` + LanguageCode *string `json:"language_code,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DetectLanguageResponseResultsItem) UnmarshalJSON(data []byte) error { + type unmarshaler DetectLanguageResponseResultsItem + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DetectLanguageResponseResultsItem(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DetectLanguageResponseResultsItem) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type DetokenizeResponse struct { + // A string representing the list of tokens. 
+ Text string `json:"text"` + Meta *ApiMeta `json:"meta,omitempty"` + + _rawJSON json.RawMessage +} + +func (d *DetokenizeResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DetokenizeResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DetokenizeResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DetokenizeResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type EmbedByTypeResponse struct { + Id string `json:"id"` + // An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. + Embeddings *EmbedByTypeResponseEmbeddings `json:"embeddings,omitempty"` + // The text entries for which embeddings were returned. + Texts []string `json:"texts,omitempty"` + Meta *ApiMeta `json:"meta,omitempty"` + + _rawJSON json.RawMessage +} + +func (e *EmbedByTypeResponse) UnmarshalJSON(data []byte) error { + type unmarshaler EmbedByTypeResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *e = EmbedByTypeResponse(value) + e._rawJSON = json.RawMessage(data) + return nil +} + +func (e *EmbedByTypeResponse) String() string { + if len(e._rawJSON) > 0 { + if value, err := core.StringifyJSON(e._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(e); err == nil { + return value + } + return fmt.Sprintf("%#v", e) +} + +// An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. +type EmbedByTypeResponseEmbeddings struct { + // An array of float embeddings. + Float [][]float64 `json:"float,omitempty"` + // An array of signed int8 embeddings. 
Each value is between -128 and 127. + Int8 [][]float64 `json:"int8,omitempty"` + // An array of unsigned int8 embeddings. Each value is between 0 and 255. + Uint8 [][]float64 `json:"uint8,omitempty"` + // An array of packed signed binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between -128 and 127. + Binary [][]float64 `json:"binary,omitempty"` + // An array of packed unsigned binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between 0 and 255. + Ubinary [][]float64 `json:"ubinary,omitempty"` + + _rawJSON json.RawMessage +} + +func (e *EmbedByTypeResponseEmbeddings) UnmarshalJSON(data []byte) error { + type unmarshaler EmbedByTypeResponseEmbeddings + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *e = EmbedByTypeResponseEmbeddings(value) + e._rawJSON = json.RawMessage(data) + return nil +} + +func (e *EmbedByTypeResponseEmbeddings) String() string { + if len(e._rawJSON) > 0 { + if value, err := core.StringifyJSON(e._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(e); err == nil { + return value + } + return fmt.Sprintf("%#v", e) +} + +type EmbedFloatsResponse struct { + Id string `json:"id"` + // An array of embeddings, where each embedding is an array of floats. The length of the `embeddings` array will be the same as the length of the original `texts` array. + Embeddings [][]float64 `json:"embeddings,omitempty"` + // The text entries for which embeddings were returned. 
+ Texts []string `json:"texts,omitempty"` + Meta *ApiMeta `json:"meta,omitempty"` + + _rawJSON json.RawMessage +} + +func (e *EmbedFloatsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler EmbedFloatsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *e = EmbedFloatsResponse(value) + e._rawJSON = json.RawMessage(data) + return nil +} + +func (e *EmbedFloatsResponse) String() string { + if len(e._rawJSON) > 0 { + if value, err := core.StringifyJSON(e._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(e); err == nil { + return value + } + return fmt.Sprintf("%#v", e) +} + +// Specifies the type of input passed to the model. Required for embedding models v3 and higher. +// +// - `"search_document"`: Used for embeddings stored in a vector database for search use-cases. +// - `"search_query"`: Used for embeddings of search queries run against a vector DB to find relevant documents. +// - `"classification"`: Used for embeddings passed through a text classifier. +// - `"clustering"`: Used for the embeddings run through a clustering algorithm. 
+type EmbedInputType string + +const ( + EmbedInputTypeSearchDocument EmbedInputType = "search_document" + EmbedInputTypeSearchQuery EmbedInputType = "search_query" + EmbedInputTypeClassification EmbedInputType = "classification" + EmbedInputTypeClustering EmbedInputType = "clustering" +) + +func NewEmbedInputTypeFromString(s string) (EmbedInputType, error) { + switch s { + case "search_document": + return EmbedInputTypeSearchDocument, nil + case "search_query": + return EmbedInputTypeSearchQuery, nil + case "classification": + return EmbedInputTypeClassification, nil + case "clustering": + return EmbedInputTypeClustering, nil + } + var t EmbedInputType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EmbedInputType) Ptr() *EmbedInputType { + return &e +} + +type EmbedJob struct { + // ID of the embed job + JobId string `json:"job_id"` + // The name of the embed job + Name *string `json:"name,omitempty"` + // The status of the embed job + Status EmbedJobStatus `json:"status,omitempty"` + // The creation date of the embed job + CreatedAt time.Time `json:"created_at"` + // ID of the input dataset + InputDatasetId string `json:"input_dataset_id"` + // ID of the resulting output dataset + OutputDatasetId *string `json:"output_dataset_id,omitempty"` + // ID of the model used to embed + Model string `json:"model"` + // The truncation option used + Truncate EmbedJobTruncate `json:"truncate,omitempty"` + Meta *ApiMeta `json:"meta,omitempty"` + + _rawJSON json.RawMessage +} + +func (e *EmbedJob) UnmarshalJSON(data []byte) error { + type unmarshaler EmbedJob + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *e = EmbedJob(value) + e._rawJSON = json.RawMessage(data) + return nil +} + +func (e *EmbedJob) String() string { + if len(e._rawJSON) > 0 { + if value, err := core.StringifyJSON(e._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(e); err == nil { + return value + } + 
return fmt.Sprintf("%#v", e) +} + +// The status of the embed job +type EmbedJobStatus string + +const ( + EmbedJobStatusProcessing EmbedJobStatus = "processing" + EmbedJobStatusComplete EmbedJobStatus = "complete" + EmbedJobStatusCancelling EmbedJobStatus = "cancelling" + EmbedJobStatusCancelled EmbedJobStatus = "cancelled" + EmbedJobStatusFailed EmbedJobStatus = "failed" +) + +func NewEmbedJobStatusFromString(s string) (EmbedJobStatus, error) { + switch s { + case "processing": + return EmbedJobStatusProcessing, nil + case "complete": + return EmbedJobStatusComplete, nil + case "cancelling": + return EmbedJobStatusCancelling, nil + case "cancelled": + return EmbedJobStatusCancelled, nil + case "failed": + return EmbedJobStatusFailed, nil + } + var t EmbedJobStatus + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EmbedJobStatus) Ptr() *EmbedJobStatus { + return &e +} + +// The truncation option used +type EmbedJobTruncate string + +const ( + EmbedJobTruncateStart EmbedJobTruncate = "START" + EmbedJobTruncateEnd EmbedJobTruncate = "END" +) + +func NewEmbedJobTruncateFromString(s string) (EmbedJobTruncate, error) { + switch s { + case "START": + return EmbedJobTruncateStart, nil + case "END": + return EmbedJobTruncateEnd, nil + } + var t EmbedJobTruncate + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EmbedJobTruncate) Ptr() *EmbedJobTruncate { + return &e +} + +// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. +// +// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. +// +// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. 
+type EmbedRequestTruncate string + +const ( + EmbedRequestTruncateNone EmbedRequestTruncate = "NONE" + EmbedRequestTruncateStart EmbedRequestTruncate = "START" + EmbedRequestTruncateEnd EmbedRequestTruncate = "END" +) + +func NewEmbedRequestTruncateFromString(s string) (EmbedRequestTruncate, error) { switch s { case "NONE": return EmbedRequestTruncateNone, nil @@ -1624,20 +2098,237 @@ func (e EmbedResponse) MarshalJSON() ([]byte, error) { } } -type EmbedResponseVisitor interface { - VisitEmbeddingsFloats(*EmbedFloatsResponse) error - VisitEmbeddingsByType(*EmbedByTypeResponse) error +type EmbedResponseVisitor interface { + VisitEmbeddingsFloats(*EmbedFloatsResponse) error + VisitEmbeddingsByType(*EmbedByTypeResponse) error +} + +func (e *EmbedResponse) Accept(visitor EmbedResponseVisitor) error { + switch e.ResponseType { + default: + return fmt.Errorf("invalid type %s in %T", e.ResponseType, e) + case "embeddings_floats": + return visitor.VisitEmbeddingsFloats(e.EmbeddingsFloats) + case "embeddings_by_type": + return visitor.VisitEmbeddingsByType(e.EmbeddingsByType) + } +} + +type FinishReason string + +const ( + FinishReasonComplete FinishReason = "COMPLETE" + FinishReasonError FinishReason = "ERROR" + FinishReasonErrorToxic FinishReason = "ERROR_TOXIC" + FinishReasonErrorLimit FinishReason = "ERROR_LIMIT" + FinishReasonUserCancel FinishReason = "USER_CANCEL" + FinishReasonMaxTokens FinishReason = "MAX_TOKENS" +) + +func NewFinishReasonFromString(s string) (FinishReason, error) { + switch s { + case "COMPLETE": + return FinishReasonComplete, nil + case "ERROR": + return FinishReasonError, nil + case "ERROR_TOXIC": + return FinishReasonErrorToxic, nil + case "ERROR_LIMIT": + return FinishReasonErrorLimit, nil + case "USER_CANCEL": + return FinishReasonUserCancel, nil + case "MAX_TOKENS": + return FinishReasonMaxTokens, nil + } + var t FinishReason + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (f FinishReason) Ptr() *FinishReason { + return &f 
+} + +// One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. +// +// If `GENERATION` is selected, the token likelihoods will only be provided for generated text. +// +// If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text. +type GenerateRequestReturnLikelihoods string + +const ( + GenerateRequestReturnLikelihoodsGeneration GenerateRequestReturnLikelihoods = "GENERATION" + GenerateRequestReturnLikelihoodsAll GenerateRequestReturnLikelihoods = "ALL" + GenerateRequestReturnLikelihoodsNone GenerateRequestReturnLikelihoods = "NONE" +) + +func NewGenerateRequestReturnLikelihoodsFromString(s string) (GenerateRequestReturnLikelihoods, error) { + switch s { + case "GENERATION": + return GenerateRequestReturnLikelihoodsGeneration, nil + case "ALL": + return GenerateRequestReturnLikelihoodsAll, nil + case "NONE": + return GenerateRequestReturnLikelihoodsNone, nil + } + var t GenerateRequestReturnLikelihoods + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (g GenerateRequestReturnLikelihoods) Ptr() *GenerateRequestReturnLikelihoods { + return &g +} + +// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. +// +// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. +// +// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. 
+type GenerateRequestTruncate string + +const ( + GenerateRequestTruncateNone GenerateRequestTruncate = "NONE" + GenerateRequestTruncateStart GenerateRequestTruncate = "START" + GenerateRequestTruncateEnd GenerateRequestTruncate = "END" +) + +func NewGenerateRequestTruncateFromString(s string) (GenerateRequestTruncate, error) { + switch s { + case "NONE": + return GenerateRequestTruncateNone, nil + case "START": + return GenerateRequestTruncateStart, nil + case "END": + return GenerateRequestTruncateEnd, nil + } + var t GenerateRequestTruncate + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (g GenerateRequestTruncate) Ptr() *GenerateRequestTruncate { + return &g +} + +type GenerateStreamEnd struct { + IsFinished bool `json:"is_finished"` + FinishReason *FinishReason `json:"finish_reason,omitempty"` + Response *GenerateStreamEndResponse `json:"response,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GenerateStreamEnd) UnmarshalJSON(data []byte) error { + type unmarshaler GenerateStreamEnd + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GenerateStreamEnd(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GenerateStreamEnd) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type GenerateStreamEndResponse struct { + Id string `json:"id"` + Prompt *string `json:"prompt,omitempty"` + Generations []*SingleGenerationInStream `json:"generations,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GenerateStreamEndResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GenerateStreamEndResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GenerateStreamEndResponse(value) + g._rawJSON = json.RawMessage(data) + return nil 
+} + +func (g *GenerateStreamEndResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type GenerateStreamError struct { + // Refers to the nth generation. Only present when `num_generations` is greater than zero. + Index *int `json:"index,omitempty"` + IsFinished bool `json:"is_finished"` + FinishReason FinishReason `json:"finish_reason,omitempty"` + // Error message + Err string `json:"err"` + + _rawJSON json.RawMessage +} + +func (g *GenerateStreamError) UnmarshalJSON(data []byte) error { + type unmarshaler GenerateStreamError + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GenerateStreamError(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GenerateStreamError) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type GenerateStreamEvent struct { + _rawJSON json.RawMessage } -func (e *EmbedResponse) Accept(visitor EmbedResponseVisitor) error { - switch e.ResponseType { - default: - return fmt.Errorf("invalid type %s in %T", e.ResponseType, e) - case "embeddings_floats": - return visitor.VisitEmbeddingsFloats(e.EmbeddingsFloats) - case "embeddings_by_type": - return visitor.VisitEmbeddingsByType(e.EmbeddingsByType) +func (g *GenerateStreamEvent) UnmarshalJSON(data []byte) error { + type unmarshaler GenerateStreamEvent + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GenerateStreamEvent(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GenerateStreamEvent) String() string { + if len(g._rawJSON) > 0 { + if value, err := 
core.StringifyJSON(g._rawJSON); err == nil { + return value + } } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) } // One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. @@ -1645,28 +2336,28 @@ func (e *EmbedResponse) Accept(visitor EmbedResponseVisitor) error { // If `GENERATION` is selected, the token likelihoods will only be provided for generated text. // // If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text. -type GenerateRequestReturnLikelihoods string +type GenerateStreamRequestReturnLikelihoods string const ( - GenerateRequestReturnLikelihoodsGeneration GenerateRequestReturnLikelihoods = "GENERATION" - GenerateRequestReturnLikelihoodsAll GenerateRequestReturnLikelihoods = "ALL" - GenerateRequestReturnLikelihoodsNone GenerateRequestReturnLikelihoods = "NONE" + GenerateStreamRequestReturnLikelihoodsGeneration GenerateStreamRequestReturnLikelihoods = "GENERATION" + GenerateStreamRequestReturnLikelihoodsAll GenerateStreamRequestReturnLikelihoods = "ALL" + GenerateStreamRequestReturnLikelihoodsNone GenerateStreamRequestReturnLikelihoods = "NONE" ) -func NewGenerateRequestReturnLikelihoodsFromString(s string) (GenerateRequestReturnLikelihoods, error) { +func NewGenerateStreamRequestReturnLikelihoodsFromString(s string) (GenerateStreamRequestReturnLikelihoods, error) { switch s { case "GENERATION": - return GenerateRequestReturnLikelihoodsGeneration, nil + return GenerateStreamRequestReturnLikelihoodsGeneration, nil case "ALL": - return GenerateRequestReturnLikelihoodsAll, nil + return GenerateStreamRequestReturnLikelihoodsAll, nil case "NONE": - return GenerateRequestReturnLikelihoodsNone, nil + return GenerateStreamRequestReturnLikelihoodsNone, nil } - var t GenerateRequestReturnLikelihoods + var t GenerateStreamRequestReturnLikelihoods return "", fmt.Errorf("%s is not a valid %T", 
s, t) } -func (g GenerateRequestReturnLikelihoods) Ptr() *GenerateRequestReturnLikelihoods { +func (g GenerateStreamRequestReturnLikelihoods) Ptr() *GenerateStreamRequestReturnLikelihoods { return &g } @@ -1675,31 +2366,168 @@ func (g GenerateRequestReturnLikelihoods) Ptr() *GenerateRequestReturnLikelihood // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. // // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. -type GenerateRequestTruncate string +type GenerateStreamRequestTruncate string const ( - GenerateRequestTruncateNone GenerateRequestTruncate = "NONE" - GenerateRequestTruncateStart GenerateRequestTruncate = "START" - GenerateRequestTruncateEnd GenerateRequestTruncate = "END" + GenerateStreamRequestTruncateNone GenerateStreamRequestTruncate = "NONE" + GenerateStreamRequestTruncateStart GenerateStreamRequestTruncate = "START" + GenerateStreamRequestTruncateEnd GenerateStreamRequestTruncate = "END" ) -func NewGenerateRequestTruncateFromString(s string) (GenerateRequestTruncate, error) { +func NewGenerateStreamRequestTruncateFromString(s string) (GenerateStreamRequestTruncate, error) { switch s { case "NONE": - return GenerateRequestTruncateNone, nil + return GenerateStreamRequestTruncateNone, nil case "START": - return GenerateRequestTruncateStart, nil + return GenerateStreamRequestTruncateStart, nil case "END": - return GenerateRequestTruncateEnd, nil + return GenerateStreamRequestTruncateEnd, nil } - var t GenerateRequestTruncate + var t GenerateStreamRequestTruncate return "", fmt.Errorf("%s is not a valid %T", s, t) } -func (g GenerateRequestTruncate) Ptr() *GenerateRequestTruncate { +func (g GenerateStreamRequestTruncate) Ptr() *GenerateStreamRequestTruncate { return &g } +type GenerateStreamText struct { + // A segment of text of the 
generation. + Text string `json:"text"` + // Refers to the nth generation. Only present when `num_generations` is greater than zero, and only when text responses are being streamed. + Index *int `json:"index,omitempty"` + IsFinished bool `json:"is_finished"` + + _rawJSON json.RawMessage +} + +func (g *GenerateStreamText) UnmarshalJSON(data []byte) error { + type unmarshaler GenerateStreamText + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GenerateStreamText(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GenerateStreamText) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +// Response in content type stream when `stream` is `true` in the request parameters. Generation tokens are streamed with the GenerationStream response. The final response is of type GenerationFinalResponse. 
+type GenerateStreamedResponse struct { + EventType string + TextGeneration *GenerateStreamText + StreamEnd *GenerateStreamEnd + StreamError *GenerateStreamError +} + +func NewGenerateStreamedResponseFromTextGeneration(value *GenerateStreamText) *GenerateStreamedResponse { + return &GenerateStreamedResponse{EventType: "text-generation", TextGeneration: value} +} + +func NewGenerateStreamedResponseFromStreamEnd(value *GenerateStreamEnd) *GenerateStreamedResponse { + return &GenerateStreamedResponse{EventType: "stream-end", StreamEnd: value} +} + +func NewGenerateStreamedResponseFromStreamError(value *GenerateStreamError) *GenerateStreamedResponse { + return &GenerateStreamedResponse{EventType: "stream-error", StreamError: value} +} + +func (g *GenerateStreamedResponse) UnmarshalJSON(data []byte) error { + var unmarshaler struct { + EventType string `json:"event_type"` + } + if err := json.Unmarshal(data, &unmarshaler); err != nil { + return err + } + g.EventType = unmarshaler.EventType + switch unmarshaler.EventType { + case "text-generation": + value := new(GenerateStreamText) + if err := json.Unmarshal(data, &value); err != nil { + return err + } + g.TextGeneration = value + case "stream-end": + value := new(GenerateStreamEnd) + if err := json.Unmarshal(data, &value); err != nil { + return err + } + g.StreamEnd = value + case "stream-error": + value := new(GenerateStreamError) + if err := json.Unmarshal(data, &value); err != nil { + return err + } + g.StreamError = value + } + return nil +} + +func (g GenerateStreamedResponse) MarshalJSON() ([]byte, error) { + switch g.EventType { + default: + return nil, fmt.Errorf("invalid type %s in %T", g.EventType, g) + case "text-generation": + var marshaler = struct { + EventType string `json:"event_type"` + *GenerateStreamText + }{ + EventType: g.EventType, + GenerateStreamText: g.TextGeneration, + } + return json.Marshal(marshaler) + case "stream-end": + var marshaler = struct { + EventType string `json:"event_type"` + 
*GenerateStreamEnd + }{ + EventType: g.EventType, + GenerateStreamEnd: g.StreamEnd, + } + return json.Marshal(marshaler) + case "stream-error": + var marshaler = struct { + EventType string `json:"event_type"` + *GenerateStreamError + }{ + EventType: g.EventType, + GenerateStreamError: g.StreamError, + } + return json.Marshal(marshaler) + } +} + +type GenerateStreamedResponseVisitor interface { + VisitTextGeneration(*GenerateStreamText) error + VisitStreamEnd(*GenerateStreamEnd) error + VisitStreamError(*GenerateStreamError) error +} + +func (g *GenerateStreamedResponse) Accept(visitor GenerateStreamedResponseVisitor) error { + switch g.EventType { + default: + return fmt.Errorf("invalid type %s in %T", g.EventType, g) + case "text-generation": + return visitor.VisitTextGeneration(g.TextGeneration) + case "stream-end": + return visitor.VisitStreamEnd(g.StreamEnd) + case "stream-error": + return visitor.VisitStreamError(g.StreamError) + } +} + type Generation struct { Id string `json:"id"` // Prompt used for generations. 
@@ -1734,6 +2562,93 @@ func (g *Generation) String() string { return fmt.Sprintf("%#v", g) } +type GetConnectorResponse struct { + Connector *Connector `json:"connector,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetConnectorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetConnectorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetConnectorResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetConnectorResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type ListConnectorsResponse struct { + Connectors []*Connector `json:"connectors,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *ListConnectorsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler ListConnectorsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = ListConnectorsResponse(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *ListConnectorsResponse) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type ListEmbedJobResponse struct { + EmbedJobs []*EmbedJob `json:"embed_jobs,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *ListEmbedJobResponse) UnmarshalJSON(data []byte) error { + type unmarshaler ListEmbedJobResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = ListEmbedJobResponse(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *ListEmbedJobResponse) String() string { + if len(l._rawJSON) > 0 { + if value, err := 
core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + type NonStreamedChatResponse struct { // Contents of the reply generated by the model. Text string `json:"text"` @@ -1774,6 +2689,36 @@ func (n *NonStreamedChatResponse) String() string { return fmt.Sprintf("%#v", n) } +type OAuthAuthorizeResponse struct { + // The OAuth 2.0 redirect url. Redirect the user to this url to authorize the connector. + RedirectUrl *string `json:"redirect_url,omitempty"` + + _rawJSON json.RawMessage +} + +func (o *OAuthAuthorizeResponse) UnmarshalJSON(data []byte) error { + type unmarshaler OAuthAuthorizeResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *o = OAuthAuthorizeResponse(value) + o._rawJSON = json.RawMessage(data) + return nil +} + +func (o *OAuthAuthorizeResponse) String() string { + if len(o._rawJSON) > 0 { + if value, err := core.StringifyJSON(o._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(o); err == nil { + return value + } + return fmt.Sprintf("%#v", o) +} + type RerankRequestDocumentsItem struct { typeName string String string @@ -2023,6 +2968,39 @@ func (s *SingleGeneration) String() string { return fmt.Sprintf("%#v", s) } +type SingleGenerationInStream struct { + Id string `json:"id"` + // Full text of the generation. + Text string `json:"text"` + // Refers to the nth generation. Only present when `num_generations` is greater than zero. 
+ Index *int `json:"index,omitempty"` + + _rawJSON json.RawMessage +} + +func (s *SingleGenerationInStream) UnmarshalJSON(data []byte) error { + type unmarshaler SingleGenerationInStream + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *s = SingleGenerationInStream(value) + s._rawJSON = json.RawMessage(data) + return nil +} + +func (s *SingleGenerationInStream) String() string { + if len(s._rawJSON) > 0 { + if value, err := core.StringifyJSON(s._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(s); err == nil { + return value + } + return fmt.Sprintf("%#v", s) +} + type SingleGenerationTokenLikelihoodsItem struct { Token string `json:"token"` Likelihood float64 `json:"likelihood"` @@ -2365,3 +3343,32 @@ func (t *TokenizeResponse) String() string { } return fmt.Sprintf("%#v", t) } + +type UpdateConnectorResponse struct { + Connector *Connector `json:"connector,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpdateConnectorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpdateConnectorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpdateConnectorResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpdateConnectorResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +}