Skip to content

Commit

Permalink
Merge pull request #100 from cohere-ai/fern-bot/11-09-2023-0314PM
Browse files Browse the repository at this point in the history
🌿 Fern Regeneration -- November 9, 2023
  • Loading branch information
billytrend-cohere authored Nov 9, 2023
2 parents c071cb9 + ca73a47 commit f52a15e
Show file tree
Hide file tree
Showing 39 changed files with 547 additions and 179 deletions.
3 changes: 2 additions & 1 deletion .fernignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# Specify files that shouldn't be modified by Fern
README.md
banner.png
banner.png
.npmignore
7 changes: 4 additions & 3 deletions package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "cohere-ai",
"version": "7.1.1",
"version": "7.2.0",
"private": false,
"repository": "https://github.com/cohere-ai/cohere-typescript",
"main": "./index.js",
Expand All @@ -14,12 +14,13 @@
"url-join": "4.0.1",
"@types/url-join": "4.0.1",
"axios": "0.27.2",
"@ungap/url-search-params": "0.2.2",
"qs": "6.11.2",
"@types/qs": "6.9.8",
"js-base64": "3.7.2"
},
"devDependencies": {
"@types/node": "17.0.33",
"prettier": "2.7.1",
"typescript": "4.6.4"
}
}
}
103 changes: 61 additions & 42 deletions src/Client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@ import * as Cohere from "./api";
import * as serializers from "./serialization";
import urlJoin from "url-join";
import * as errors from "./errors";
import { Stream } from "core/streaming-fetcher/StreamingFetcher";

export declare namespace CohereClient {
interface Options {
Expand All @@ -18,6 +17,7 @@ export declare namespace CohereClient {

interface RequestOptions {
timeoutInSeconds?: number;
maxRetries?: number;
}
}

Expand All @@ -43,11 +43,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.GenerateRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.Generation.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -110,11 +111,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.EmbedRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.EmbedResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -174,11 +176,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.ClassifyRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.ClassifyResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -218,6 +221,48 @@ export class CohereClient {
}
}

/**
 * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
 * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG".
 * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API.
 *
 */
public async chatStream(
    request: Cohere.ChatStreamRequest,
    requestOptions?: CohereClient.RequestOptions
): Promise<core.Stream<Cohere.StreamedChatResponse>> {
    // Resolve the base URL from the configured environment, falling back to production.
    const baseUrl =
        (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production;
    // Serialize the request and force `stream: true` so the server emits a stream of events.
    const serializedBody = {
        ...(await serializers.ChatStreamRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })),
        stream: true,
    };
    // Caller-supplied timeout (seconds) takes precedence; default is 60s.
    const timeoutMs =
        requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000;
    const response = await core.streamingFetcher({
        url: urlJoin(baseUrl, "v1/chat"),
        method: "POST",
        headers: {
            Authorization: await this._getAuthorizationHeader(),
            "X-Fern-Language": "JavaScript",
            "X-Fern-SDK-Name": "cohere-ai",
            "X-Fern-SDK-Version": "7.2.0",
        },
        body: serializedBody,
        timeoutMs,
    });
    // Newline-delimited chunks are parsed leniently so unrecognized fields,
    // union members, or enum values from newer API versions don't break streaming.
    return new core.Stream({
        stream: response.data,
        terminator: "\n",
        parse: async (data) =>
            await serializers.StreamedChatResponse.parseOrThrow(data, {
                unrecognizedObjectKeys: "passthrough",
                allowUnrecognizedUnionMembers: true,
                allowUnrecognizedEnumValues: true,
                breadcrumbsPrefix: ["response"],
            }),
    });
}

/**
* The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter.
* The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG".
Expand All @@ -238,14 +283,15 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: {
...(await serializers.ChatRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })),
stream: false,
},
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.NonStreamedChatResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -297,11 +343,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.TokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.TokenizeResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -358,11 +405,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.DetokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.DetokenizeResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -412,11 +460,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.DetectLanguageRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.DetectLanguageResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -466,11 +515,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.SummarizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.SummarizeResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -520,11 +570,12 @@ export class CohereClient {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
"X-Fern-SDK-Version": "7.0.0",
"X-Fern-SDK-Version": "7.2.0",
},
contentType: "application/json",
body: await serializers.RerankRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }),
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
maxRetries: requestOptions?.maxRetries,
});
if (_response.ok) {
return await serializers.RerankResponse.parseOrThrow(_response.body, {
Expand Down Expand Up @@ -557,38 +608,6 @@ export class CohereClient {
}
}

// NOTE(review): this is the pre-7.2.0 streaming implementation (removed in this
// diff in favor of the `Cohere.ChatStreamRequest`-typed version above). It returns
// the raw `streamingFetcher` result rather than wrapping it in `core.Stream`.
public async chatStream(
request: Cohere.ChatRequest,
requestOptions?: CohereClient.RequestOptions
): Promise<Stream<Cohere.StreamedChatResponse>> {
return await core.streamingFetcher({
url: urlJoin(
// Fall back to the production environment when none is configured.
(await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production,
"v1/chat"
),
method: "POST",
headers: {
Authorization: await this._getAuthorizationHeader(),
"X-Fern-Language": "JavaScript",
"X-Fern-SDK-Name": "cohere-ai",
// Stale SDK version header ("7.0.0") — superseded by "7.2.0" in the new code.
"X-Fern-SDK-Version": "7.0.0",
},
body: {
...(await serializers.ChatRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })),
// Force streaming mode regardless of the request payload.
stream: true,
},
// Caller-supplied timeout (seconds) takes precedence; default is 60s.
timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000,
// Lenient parse: unknown fields/union members/enum values pass through.
parse: async (data) => {
return await serializers.StreamedChatResponse.parseOrThrow(data, {
unrecognizedObjectKeys: "passthrough",
allowUnrecognizedUnionMembers: true,
allowUnrecognizedEnumValues: true,
breadcrumbsPrefix: ["response"],
});
},
});
}

/** Resolves the configured token supplier into a `Bearer …` Authorization header value. */
protected async _getAuthorizationHeader() {
    const token = await core.Supplier.get(this._options.token);
    return `Bearer ${token}`;
}
Expand Down
9 changes: 1 addition & 8 deletions src/api/client/requests/ChatRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,6 @@ export interface ChatRequest {
*
*/
model?: string;
/**
* Defaults to `false`.
* When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`.
* Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
*
*/
stream?: boolean;
/**
* When specified, the default Cohere preamble will be replaced with the provided one.
*
Expand Down Expand Up @@ -62,7 +55,7 @@ export interface ChatRequest {
*/
searchQueriesOnly?: boolean;
/**
* A list of relevant documents that the model can use to enrich its reply (RAG).
* A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
*
*/
documents?: Cohere.ChatDocument[];
Expand Down
74 changes: 74 additions & 0 deletions src/api/client/requests/ChatStreamRequest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
/**
 * This file was auto-generated by Fern from our API Definition.
 */

import * as Cohere from "../..";

/** Request payload for the streaming variant of the `v1/chat` endpoint. */
export interface ChatStreamRequest {
/**
 * Accepts a string.
 * The chat message from the user to the model.
 *
 */
message: string;
/**
 * Defaults to `command`.
 * The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models).
 * Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
 *
 */
model?: string;
/**
 * When specified, the default Cohere preamble will be replaced with the provided one.
 *
 */
preambleOverride?: string;
/**
 * A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
 *
 */
chatHistory?: Cohere.ChatMessage[];
/**
 * An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation.
 * If a conversation with this id does not already exist, a new conversation will be created.
 *
 */
conversationId?: string;
/**
 * Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
 * Dictates how the prompt will be constructed.
 * With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
 * With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
 *
 */
promptTruncation?: Cohere.ChatStreamRequestPromptTruncation;
/**
 * Currently only accepts `{"id": "web-search"}`.
 * When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
 *
 */
connectors?: Cohere.ChatConnector[];
/**
 * Defaults to `false`.
 * When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
 *
 */
searchQueriesOnly?: boolean;
/**
 * A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
 *
 */
documents?: Cohere.ChatDocument[];
/**
 * Defaults to `"accurate"`.
 * Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
 *
 */
citationQuality?: Cohere.ChatStreamRequestCitationQuality;
/**
 * Defaults to `0.3`
 * A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
 *
 */
temperature?: number;
}
2 changes: 2 additions & 0 deletions src/api/client/requests/DetectLanguageRequest.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,6 @@
/** Request payload for the `v1/detect-language` endpoint. */
export interface DetectLanguageRequest {
/** List of strings to run the detection on. */
texts: string[];
/** The identifier of the model to generate with. */
model?: string;
}
Loading

0 comments on commit f52a15e

Please sign in to comment.