diff --git a/package.json b/package.json index ee1fb8e..39a5d23 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "cohere-ai", - "version": "7.2.0", + "version": "7.3.0", "private": false, "repository": "https://github.com/cohere-ai/cohere-typescript", "main": "./index.js", diff --git a/src/Client.ts b/src/Client.ts index a377814..368b1cf 100644 --- a/src/Client.ts +++ b/src/Client.ts @@ -25,52 +25,93 @@ export class CohereClient { constructor(protected readonly _options: CohereClient.Options) {} /** - * This endpoint generates realistic text conditioned on a given input. - * @throws {@link Cohere.BadRequestError} - * @throws {@link Cohere.InternalServerError} + * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. + * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". + * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API. + * */ - public async generate( - request: Cohere.GenerateRequest, + public async chatStream( + request: Cohere.ChatStreamRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise> { + const _response = await core.streamingFetcher({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? 
environments.CohereEnvironment.Production, + "v1/chat" + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "cohere-ai", + "X-Fern-SDK-Version": "7.3.0", + }, + body: { + ...(await serializers.ChatStreamRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), + stream: true, + }, + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + }); + return new core.Stream({ + stream: _response.data, + terminator: "\n", + parse: async (data) => { + return await serializers.StreamedChatResponse.parseOrThrow(data, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + }, + }); + } + + /** + * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. + * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". + * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API. + * + */ + public async chat( + request: Cohere.ChatRequest, + requestOptions?: CohereClient.RequestOptions + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? 
environments.CohereEnvironment.Production, - "v1/generate" + "v1/chat" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.GenerateRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: { + ...(await serializers.ChatRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), + stream: false, + }, timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.Generation.parseOrThrow(_response.body, { + return await serializers.NonStreamedChatResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } if (_response.error.reason === "status-code") { - switch (_response.error.statusCode) { - case 400: - throw new Cohere.BadRequestError(_response.error.body); - case 500: - throw new Cohere.InternalServerError(_response.error.body); - default: - throw new errors.CohereError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); - } + throw new errors.CohereError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); } switch (_response.error.reason) { @@ -89,40 +130,37 @@ export class CohereClient { } /** - * This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. - * - * Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. 
- * - * If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). + * This endpoint generates realistic text conditioned on a given input. * @throws {@link Cohere.BadRequestError} * @throws {@link Cohere.InternalServerError} */ - public async embed( - request: Cohere.EmbedRequest, + public async generate( + request: Cohere.GenerateRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/embed" + "v1/generate" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.EmbedRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.GenerateRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.EmbedResponse.parseOrThrow(_response.body, { + return await serializers.Generation.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -157,37 +195,41 @@ export class CohereClient { } /** - * This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. 
- * Note: [Custom Models](/training-representation-models) trained on classification examples don't require the `examples` parameter to be passed in explicitly. + * This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents. + * + * Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. + * + * If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). * @throws {@link Cohere.BadRequestError} * @throws {@link Cohere.InternalServerError} */ - public async classify( - request: Cohere.ClassifyRequest, + public async embed( + request: Cohere.EmbedRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/classify" + "v1/embed" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.ClassifyRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.EmbedRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.ClassifyResponse.parseOrThrow(_response.body, { + return await serializers.EmbedResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -222,82 +264,35 @@ export class CohereClient { } /** - * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. - * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". - * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API. - * - */ - public async chatStream( - request: Cohere.ChatStreamRequest, - requestOptions?: CohereClient.RequestOptions - ): Promise> { - const _response = await core.streamingFetcher({ - url: urlJoin( - (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/chat" - ), - method: "POST", - headers: { - Authorization: await this._getAuthorizationHeader(), - "X-Fern-Language": "JavaScript", - "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", - }, - body: { - ...(await serializers.ChatStreamRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), - stream: true, - }, - timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, - }); - return new core.Stream({ - stream: _response.data, - terminator: "\n", - parse: async (data) => { - return await serializers.StreamedChatResponse.parseOrThrow(data, { - unrecognizedObjectKeys: "passthrough", - allowUnrecognizedUnionMembers: true, - allowUnrecognizedEnumValues: true, - breadcrumbsPrefix: ["response"], - }); - }, - }); - } - - /** - * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. - * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". - * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API. - * + * This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. */ - public async chat( - request: Cohere.ChatRequest, + public async rerank( + request: Cohere.RerankRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? 
environments.CohereEnvironment.Production, - "v1/chat" + "v1/rerank" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: { - ...(await serializers.ChatRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), - stream: false, - }, + body: await serializers.RerankRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.NonStreamedChatResponse.parseOrThrow(_response.body, { + return await serializers.RerankResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -325,36 +320,38 @@ export class CohereClient { } /** - * This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. + * This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. + * Note: [Custom Models](/training-representation-models) trained on classification examples don't require the `examples` parameter to be passed in explicitly. 
* @throws {@link Cohere.BadRequestError} * @throws {@link Cohere.InternalServerError} */ - public async tokenize( - request: Cohere.TokenizeRequest, + public async classify( + request: Cohere.ClassifyRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/tokenize" + "v1/classify" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.TokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.ClassifyRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.TokenizeResponse.parseOrThrow(_response.body, { + return await serializers.ClassifyResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -389,34 +386,35 @@ export class CohereClient { } /** - * This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. + * This endpoint identifies which language each of the provided texts is written in. 
*/ - public async detokenize( - request: Cohere.DetokenizeRequest, + public async detectLanguage( + request: Cohere.DetectLanguageRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/detokenize" + "v1/detect-language" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.DetokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.DetectLanguageRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.DetokenizeResponse.parseOrThrow(_response.body, { + return await serializers.DetectLanguageResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -444,34 +442,35 @@ export class CohereClient { } /** - * This endpoint identifies which language each of the provided texts is written in. + * This endpoint generates a summary in English for a given text. */ - public async detectLanguage( - request: Cohere.DetectLanguageRequest, + public async summarize( + request: Cohere.SummarizeRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? 
environments.CohereEnvironment.Production, - "v1/detect-language" + "v1/summarize" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.DetectLanguageRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.SummarizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.DetectLanguageResponse.parseOrThrow(_response.body, { + return await serializers.SummarizeResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } @@ -499,43 +498,53 @@ export class CohereClient { } /** - * This endpoint generates a summary in English for a given text. + * This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. + * @throws {@link Cohere.BadRequestError} + * @throws {@link Cohere.InternalServerError} */ - public async summarize( - request: Cohere.SummarizeRequest, + public async tokenize( + request: Cohere.TokenizeRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? 
environments.CohereEnvironment.Production, - "v1/summarize" + "v1/tokenize" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.SummarizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.TokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.SummarizeResponse.parseOrThrow(_response.body, { + return await serializers.TokenizeResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } if (_response.error.reason === "status-code") { - throw new errors.CohereError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Cohere.BadRequestError(_response.error.body); + case 500: + throw new Cohere.InternalServerError(_response.error.body); + default: + throw new errors.CohereError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -554,34 +563,35 @@ export class CohereClient { } /** - * This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. + * This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. 
*/ - public async rerank( - request: Cohere.RerankRequest, + public async detokenize( + request: Cohere.DetokenizeRequest, requestOptions?: CohereClient.RequestOptions - ): Promise { + ): Promise { const _response = await core.fetcher({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/rerank" + "v1/detokenize" ), method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.2.0", + "X-Fern-SDK-Version": "7.3.0", }, contentType: "application/json", - body: await serializers.RerankRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + body: await serializers.DetokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { - return await serializers.RerankResponse.parseOrThrow(_response.body, { + return await serializers.DetokenizeResponse.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, + skipValidation: true, breadcrumbsPrefix: ["response"], }); } diff --git a/src/api/client/requests/GenerateRequest.ts b/src/api/client/requests/GenerateRequest.ts index c4d463b..69b732a 100644 --- a/src/api/client/requests/GenerateRequest.ts +++ b/src/api/client/requests/GenerateRequest.ts @@ -61,9 +61,9 @@ export interface GenerateRequest { * */ preset?: string; - /** The generated text will be cut at the beginning of the earliest occurence of an end sequence. The sequence will be excluded from the text. */ + /** The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. 
*/ endSequences?: string[]; - /** The generated text will be cut at the end of the earliest occurence of a stop sequence. The sequence will be included the text. */ + /** The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text. */ stopSequences?: string[]; /** * Ensures only the top `k` most likely tokens are considered for generation at each step. diff --git a/src/api/client/requests/index.ts b/src/api/client/requests/index.ts index 2b5b6eb..f881ade 100644 --- a/src/api/client/requests/index.ts +++ b/src/api/client/requests/index.ts @@ -1,10 +1,10 @@ +export { ChatStreamRequest } from "./ChatStreamRequest"; +export { ChatRequest } from "./ChatRequest"; export { GenerateRequest } from "./GenerateRequest"; export { EmbedRequest } from "./EmbedRequest"; +export { RerankRequest } from "./RerankRequest"; export { ClassifyRequest } from "./ClassifyRequest"; -export { ChatStreamRequest } from "./ChatStreamRequest"; -export { ChatRequest } from "./ChatRequest"; -export { TokenizeRequest } from "./TokenizeRequest"; -export { DetokenizeRequest } from "./DetokenizeRequest"; export { DetectLanguageRequest } from "./DetectLanguageRequest"; export { SummarizeRequest } from "./SummarizeRequest"; -export { RerankRequest } from "./RerankRequest"; +export { TokenizeRequest } from "./TokenizeRequest"; +export { DetokenizeRequest } from "./DetokenizeRequest"; diff --git a/src/api/types/index.ts b/src/api/types/index.ts index 54d6ef4..7dbcc58 100644 --- a/src/api/types/index.ts +++ b/src/api/types/index.ts @@ -1,19 +1,21 @@ +export * from "./ChatStreamRequestPromptTruncation"; +export * from "./ChatStreamRequestCitationQuality"; +export * from "./ChatRequestPromptTruncation"; +export * from "./ChatRequestCitationQuality"; export * from "./GenerateRequestTruncate"; export * from "./GenerateRequestReturnLikelihoods"; export * from "./EmbedRequestTruncate"; export * from "./EmbedResponse"; +export * from 
"./RerankRequestDocumentsItem"; +export * from "./RerankResponse"; +export * from "./RerankResponseResultsItem"; +export * from "./RerankResponseResultsItemDocument"; export * from "./ClassifyRequestExamplesItem"; export * from "./ClassifyRequestTruncate"; export * from "./ClassifyResponse"; export * from "./ClassifyResponseClassificationsItem"; export * from "./ClassifyResponseClassificationsItemLabelsValue"; export * from "./ClassifyResponseClassificationsItemClassificationType"; -export * from "./ChatStreamRequestPromptTruncation"; -export * from "./ChatStreamRequestCitationQuality"; -export * from "./ChatRequestPromptTruncation"; -export * from "./ChatRequestCitationQuality"; -export * from "./TokenizeResponse"; -export * from "./DetokenizeResponse"; export * from "./DetectLanguageResponse"; export * from "./DetectLanguageResponseResultsItem"; export * from "./SummarizeRequestLength"; @@ -21,22 +23,8 @@ export * from "./SummarizeRequestFormat"; export * from "./SummarizeRequestExtractiveness"; export * from "./SummarizeResponse"; export * from "./SummarizeResponseResultsItem"; -export * from "./RerankRequestDocumentsItem"; -export * from "./RerankResponse"; -export * from "./RerankResponseResultsItem"; -export * from "./RerankResponseResultsItemDocument"; -export * from "./SingleGeneration"; -export * from "./SingleGenerationTokenLikelihoodsItem"; -export * from "./ApiMeta"; -export * from "./ApiMetaApiVersion"; -export * from "./Generation"; -export * from "./GenerationStream"; -export * from "./FinishReason"; -export * from "./SingleGenerationInStream"; -export * from "./GenerationFinalResponse"; -export * from "./GenerationFinalResponseResponse"; -export * from "./StreamedGeneration"; -export * from "./StreamedGenerationItem"; +export * from "./TokenizeResponse"; +export * from "./DetokenizeResponse"; export * from "./ChatMessage"; export * from "./ChatMessageRole"; export * from "./ChatConnector"; @@ -56,3 +44,15 @@ export * from "./ChatStreamEndEvent"; 
export * from "./ChatStreamEndEventFinishReason"; export * from "./ChatStreamEndEventResponse"; export * from "./StreamedChatResponse"; +export * from "./SingleGeneration"; +export * from "./SingleGenerationTokenLikelihoodsItem"; +export * from "./ApiMeta"; +export * from "./ApiMetaApiVersion"; +export * from "./Generation"; +export * from "./GenerationStream"; +export * from "./FinishReason"; +export * from "./SingleGenerationInStream"; +export * from "./GenerationFinalResponse"; +export * from "./GenerationFinalResponseResponse"; +export * from "./StreamedGeneration"; +export * from "./StreamedGenerationItem"; diff --git a/src/serialization/client/requests/index.ts b/src/serialization/client/requests/index.ts index 2b5b6eb..f881ade 100644 --- a/src/serialization/client/requests/index.ts +++ b/src/serialization/client/requests/index.ts @@ -1,10 +1,10 @@ +export { ChatStreamRequest } from "./ChatStreamRequest"; +export { ChatRequest } from "./ChatRequest"; export { GenerateRequest } from "./GenerateRequest"; export { EmbedRequest } from "./EmbedRequest"; +export { RerankRequest } from "./RerankRequest"; export { ClassifyRequest } from "./ClassifyRequest"; -export { ChatStreamRequest } from "./ChatStreamRequest"; -export { ChatRequest } from "./ChatRequest"; -export { TokenizeRequest } from "./TokenizeRequest"; -export { DetokenizeRequest } from "./DetokenizeRequest"; export { DetectLanguageRequest } from "./DetectLanguageRequest"; export { SummarizeRequest } from "./SummarizeRequest"; -export { RerankRequest } from "./RerankRequest"; +export { TokenizeRequest } from "./TokenizeRequest"; +export { DetokenizeRequest } from "./DetokenizeRequest"; diff --git a/src/serialization/types/index.ts b/src/serialization/types/index.ts index 54d6ef4..7dbcc58 100644 --- a/src/serialization/types/index.ts +++ b/src/serialization/types/index.ts @@ -1,19 +1,21 @@ +export * from "./ChatStreamRequestPromptTruncation"; +export * from "./ChatStreamRequestCitationQuality"; +export * 
from "./ChatRequestPromptTruncation"; +export * from "./ChatRequestCitationQuality"; export * from "./GenerateRequestTruncate"; export * from "./GenerateRequestReturnLikelihoods"; export * from "./EmbedRequestTruncate"; export * from "./EmbedResponse"; +export * from "./RerankRequestDocumentsItem"; +export * from "./RerankResponse"; +export * from "./RerankResponseResultsItem"; +export * from "./RerankResponseResultsItemDocument"; export * from "./ClassifyRequestExamplesItem"; export * from "./ClassifyRequestTruncate"; export * from "./ClassifyResponse"; export * from "./ClassifyResponseClassificationsItem"; export * from "./ClassifyResponseClassificationsItemLabelsValue"; export * from "./ClassifyResponseClassificationsItemClassificationType"; -export * from "./ChatStreamRequestPromptTruncation"; -export * from "./ChatStreamRequestCitationQuality"; -export * from "./ChatRequestPromptTruncation"; -export * from "./ChatRequestCitationQuality"; -export * from "./TokenizeResponse"; -export * from "./DetokenizeResponse"; export * from "./DetectLanguageResponse"; export * from "./DetectLanguageResponseResultsItem"; export * from "./SummarizeRequestLength"; @@ -21,22 +23,8 @@ export * from "./SummarizeRequestFormat"; export * from "./SummarizeRequestExtractiveness"; export * from "./SummarizeResponse"; export * from "./SummarizeResponseResultsItem"; -export * from "./RerankRequestDocumentsItem"; -export * from "./RerankResponse"; -export * from "./RerankResponseResultsItem"; -export * from "./RerankResponseResultsItemDocument"; -export * from "./SingleGeneration"; -export * from "./SingleGenerationTokenLikelihoodsItem"; -export * from "./ApiMeta"; -export * from "./ApiMetaApiVersion"; -export * from "./Generation"; -export * from "./GenerationStream"; -export * from "./FinishReason"; -export * from "./SingleGenerationInStream"; -export * from "./GenerationFinalResponse"; -export * from "./GenerationFinalResponseResponse"; -export * from "./StreamedGeneration"; -export * 
from "./StreamedGenerationItem"; +export * from "./TokenizeResponse"; +export * from "./DetokenizeResponse"; export * from "./ChatMessage"; export * from "./ChatMessageRole"; export * from "./ChatConnector"; @@ -56,3 +44,15 @@ export * from "./ChatStreamEndEvent"; export * from "./ChatStreamEndEventFinishReason"; export * from "./ChatStreamEndEventResponse"; export * from "./StreamedChatResponse"; +export * from "./SingleGeneration"; +export * from "./SingleGenerationTokenLikelihoodsItem"; +export * from "./ApiMeta"; +export * from "./ApiMetaApiVersion"; +export * from "./Generation"; +export * from "./GenerationStream"; +export * from "./FinishReason"; +export * from "./SingleGenerationInStream"; +export * from "./GenerationFinalResponse"; +export * from "./GenerationFinalResponseResponse"; +export * from "./StreamedGeneration"; +export * from "./StreamedGenerationItem";