diff --git a/.fernignore b/.fernignore index 033d5aa..80c15f3 100644 --- a/.fernignore +++ b/.fernignore @@ -1,3 +1,4 @@ # Specify files that shouldn't be modified by Fern README.md -banner.png \ No newline at end of file +banner.png +.npmignore \ No newline at end of file diff --git a/package.json b/package.json index 4d18c56..ee1fb8e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "cohere-ai", - "version": "7.1.1", + "version": "7.2.0", "private": false, "repository": "https://github.com/cohere-ai/cohere-typescript", "main": "./index.js", @@ -14,7 +14,8 @@ "url-join": "4.0.1", "@types/url-join": "4.0.1", "axios": "0.27.2", - "@ungap/url-search-params": "0.2.2", + "qs": "6.11.2", + "@types/qs": "6.9.8", "js-base64": "3.7.2" }, "devDependencies": { @@ -22,4 +23,4 @@ "prettier": "2.7.1", "typescript": "4.6.4" } -} +} \ No newline at end of file diff --git a/src/Client.ts b/src/Client.ts index 2aa605e..a377814 100644 --- a/src/Client.ts +++ b/src/Client.ts @@ -8,7 +8,6 @@ import * as Cohere from "./api"; import * as serializers from "./serialization"; import urlJoin from "url-join"; import * as errors from "./errors"; -import { Stream } from "core/streaming-fetcher/StreamingFetcher"; export declare namespace CohereClient { interface Options { @@ -18,6 +17,7 @@ export declare namespace CohereClient { interface RequestOptions { timeoutInSeconds?: number; + maxRetries?: number; } } @@ -43,11 +43,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.GenerateRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.Generation.parseOrThrow(_response.body, { @@ -110,11 +111,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.EmbedRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.EmbedResponse.parseOrThrow(_response.body, { @@ -174,11 +176,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.ClassifyRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.ClassifyResponse.parseOrThrow(_response.body, { @@ -218,6 +221,48 @@ export class CohereClient { } } + /** + * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. + * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". 
+ * If you have questions or require support, we're here to help! Reach out to your Cohere partner to enable access to this API. + * + */ + public async chatStream( + request: Cohere.ChatStreamRequest, + requestOptions?: CohereClient.RequestOptions + ): Promise> { + const _response = await core.streamingFetcher({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, + "v1/chat" + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "cohere-ai", + "X-Fern-SDK-Version": "7.2.0", + }, + body: { + ...(await serializers.ChatStreamRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), + stream: true, + }, + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + }); + return new core.Stream({ + stream: _response.data, + terminator: "\n", + parse: async (data) => { + return await serializers.StreamedChatResponse.parseOrThrow(data, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }); + }, + }); + } + /** * The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. * The endpoint features additional parameters such as `connectors` and `documents` that enable conversations enriched by external knowledge. We call this "Retrieval Augmented Generation", or "RAG". 
@@ -238,7 +283,7 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: { @@ -246,6 +291,7 @@ export class CohereClient { stream: false, }, timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.NonStreamedChatResponse.parseOrThrow(_response.body, { @@ -297,11 +343,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.TokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.TokenizeResponse.parseOrThrow(_response.body, { @@ -358,11 +405,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.DetokenizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.DetokenizeResponse.parseOrThrow(_response.body, { @@ -412,11 +460,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.DetectLanguageRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.DetectLanguageResponse.parseOrThrow(_response.body, { @@ -466,11 +515,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.SummarizeRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.SummarizeResponse.parseOrThrow(_response.body, { @@ -520,11 +570,12 @@ export class CohereClient { Authorization: await this._getAuthorizationHeader(), "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", + "X-Fern-SDK-Version": "7.2.0", }, contentType: "application/json", body: await serializers.RerankRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, }); if (_response.ok) { return await serializers.RerankResponse.parseOrThrow(_response.body, { @@ -557,38 +608,6 @@ export class CohereClient { } } - public async chatStream( - request: Cohere.ChatRequest, - requestOptions?: CohereClient.RequestOptions - ): Promise> { - return await core.streamingFetcher({ - url: urlJoin( - (await core.Supplier.get(this._options.environment)) ?? environments.CohereEnvironment.Production, - "v1/chat" - ), - method: "POST", - headers: { - Authorization: await this._getAuthorizationHeader(), - "X-Fern-Language": "JavaScript", - "X-Fern-SDK-Name": "cohere-ai", - "X-Fern-SDK-Version": "7.0.0", - }, - body: { - ...(await serializers.ChatRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" })), - stream: true, - }, - timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, - parse: async (data) => { - return await serializers.StreamedChatResponse.parseOrThrow(data, { - unrecognizedObjectKeys: "passthrough", - allowUnrecognizedUnionMembers: true, - allowUnrecognizedEnumValues: true, - breadcrumbsPrefix: ["response"], - }); - }, - }); - } - protected async _getAuthorizationHeader() { return `Bearer ${await core.Supplier.get(this._options.token)}`; } diff --git a/src/api/client/requests/ChatRequest.ts b/src/api/client/requests/ChatRequest.ts index d6d6973..6e073fa 100644 --- a/src/api/client/requests/ChatRequest.ts +++ b/src/api/client/requests/ChatRequest.ts @@ -18,13 +18,6 @@ export interface ChatRequest { * */ model?: string; - /** - * Defaults to `false`. - * When `true`, the response will be a JSON stream of events. The final event will contain the complete response, and will have an `event_type` of `"stream-end"`. - * Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated. 
- * - */ - stream?: boolean; /** * When specified, the default Cohere preamble will be replaced with the provided one. * @@ -62,7 +55,7 @@ export interface ChatRequest { */ searchQueriesOnly?: boolean; /** - * A list of relevant documents that the model can use to enrich its reply (RAG). + * A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. * */ documents?: Cohere.ChatDocument[]; diff --git a/src/api/client/requests/ChatStreamRequest.ts b/src/api/client/requests/ChatStreamRequest.ts new file mode 100644 index 0000000..4fb1a67 --- /dev/null +++ b/src/api/client/requests/ChatStreamRequest.ts @@ -0,0 +1,74 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Cohere from "../.."; + +export interface ChatStreamRequest { + /** + * Accepts a string. + * The chat message from the user to the model. + * + */ + message: string; + /** + * Defaults to `command`. + * The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models). + * Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). + * + */ + model?: string; + /** + * When specified, the default Cohere preamble will be replaced with the provided one. + * + */ + preambleOverride?: string; + /** + * A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. + * + */ + chatHistory?: Cohere.ChatMessage[]; + /** + * An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. 
The contents of `message` and the model's response will be stored as part of this conversation. + * If a conversation with this id does not already exist, a new conversation will be created. + * + */ + conversationId?: string; + /** + * Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. + * Dictates how the prompt will be constructed. + * With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. + * With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. + * + */ + promptTruncation?: Cohere.ChatStreamRequestPromptTruncation; + /** + * Currently only accepts `{"id": "web-search"}`. + * When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG). + * + */ + connectors?: Cohere.ChatConnector[]; + /** + * Defaults to `false`. + * When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. + * + */ + searchQueriesOnly?: boolean; + /** + * A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. + * + */ + documents?: Cohere.ChatDocument[]; + /** + * Defaults to `"accurate"`. + * Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. + * + */ + citationQuality?: Cohere.ChatStreamRequestCitationQuality; + /** + * Defaults to `0.3` + * A non-negative float that tunes the degree of randomness in generation. 
Lower temperatures mean less random generations, and higher temperatures mean more random generations. + * + */ + temperature?: number; +} diff --git a/src/api/client/requests/DetectLanguageRequest.ts b/src/api/client/requests/DetectLanguageRequest.ts index 3ccaf55..3043030 100644 --- a/src/api/client/requests/DetectLanguageRequest.ts +++ b/src/api/client/requests/DetectLanguageRequest.ts @@ -5,4 +5,6 @@ export interface DetectLanguageRequest { /** List of strings to run the detection on. */ texts: string[]; + /** The identifier of the model to generate with. */ + model?: string; } diff --git a/src/api/client/requests/EmbedRequest.ts b/src/api/client/requests/EmbedRequest.ts index 1a60aa9..b13c188 100644 --- a/src/api/client/requests/EmbedRequest.ts +++ b/src/api/client/requests/EmbedRequest.ts @@ -8,14 +8,31 @@ export interface EmbedRequest { /** An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality. */ texts: string[]; /** + * Defaults to embed-english-v2.0 + * * The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. * * Available models and corresponding embedding dimensions: - * * `embed-english-v2.0` (default) 4096 - * * `embed-english-light-v2.0` 1024 - * * `embed-multilingual-v2.0` 768 + * + * * `embed-english-v3.0` 1024 + * * `embed-multilingual-v3.0` 1024 + * * `embed-english-light-v3.0` 384 + * * `embed-multilingual-light-v3.0` 384 + * + * * `embed-english-v2.0` 4096 + * * `embed-english-light-v2.0` 1024 + * * `embed-multilingual-v2.0` 768 */ model?: string; + /** + * Specifies the type of input you're giving to the model. Not required for older versions of the embedding models (i.e. anything lower than v3), but is required for more recent versions (i.e. anything bigger than v2). 
+ * + * * `"search_document"`: Use this when you encode documents for embeddings that you store in a vector database for search use-cases. + * * `"search_query"`: Use this when you query your vector DB to find relevant documents. + * * `"classification"`: Use this when you use the embeddings as an input to a text classifier. + * * `"clustering"`: Use this when you want to cluster the embeddings. + */ + inputType?: string; /** * One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. * diff --git a/src/api/client/requests/GenerateRequest.ts b/src/api/client/requests/GenerateRequest.ts index cef54a5..c4d463b 100644 --- a/src/api/client/requests/GenerateRequest.ts +++ b/src/api/client/requests/GenerateRequest.ts @@ -34,7 +34,8 @@ export interface GenerateRequest { stream?: boolean; /** * The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. - * Defaults to `20`. See [BPE Tokens](/bpe-tokens-wiki) for more details. + * + * This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. * * Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. * diff --git a/src/api/client/requests/SummarizeRequest.ts b/src/api/client/requests/SummarizeRequest.ts index 8d4ea13..73bf791 100644 --- a/src/api/client/requests/SummarizeRequest.ts +++ b/src/api/client/requests/SummarizeRequest.ts @@ -12,7 +12,7 @@ export interface SummarizeRequest { /** One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. 
*/ format?: Cohere.SummarizeRequestFormat; /** The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. */ - model?: Cohere.SummarizeRequestModel; + model?: string; /** One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. */ extractiveness?: Cohere.SummarizeRequestExtractiveness; /** Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. 
*/ diff --git a/src/api/client/requests/index.ts b/src/api/client/requests/index.ts index dc6691f..2b5b6eb 100644 --- a/src/api/client/requests/index.ts +++ b/src/api/client/requests/index.ts @@ -1,6 +1,7 @@ export { GenerateRequest } from "./GenerateRequest"; export { EmbedRequest } from "./EmbedRequest"; export { ClassifyRequest } from "./ClassifyRequest"; +export { ChatStreamRequest } from "./ChatStreamRequest"; export { ChatRequest } from "./ChatRequest"; export { TokenizeRequest } from "./TokenizeRequest"; export { DetokenizeRequest } from "./DetokenizeRequest"; diff --git a/src/api/errors/BadRequestError.ts b/src/api/errors/BadRequestError.ts index d53bf5f..df0a942 100644 --- a/src/api/errors/BadRequestError.ts +++ b/src/api/errors/BadRequestError.ts @@ -7,6 +7,7 @@ import * as errors from "../../errors"; export class BadRequestError extends errors.CohereError { constructor(body?: unknown) { super({ + message: "BadRequestError", statusCode: 400, body: body, }); diff --git a/src/api/errors/InternalServerError.ts b/src/api/errors/InternalServerError.ts index e52c7fc..631bcf0 100644 --- a/src/api/errors/InternalServerError.ts +++ b/src/api/errors/InternalServerError.ts @@ -7,6 +7,7 @@ import * as errors from "../../errors"; export class InternalServerError extends errors.CohereError { constructor(body?: unknown) { super({ + message: "InternalServerError", statusCode: 500, body: body, }); diff --git a/src/api/types/ChatResponse.ts b/src/api/types/ChatResponse.ts deleted file mode 100644 index dd590fb..0000000 --- a/src/api/types/ChatResponse.ts +++ /dev/null @@ -1,10 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. 
- */ - -import * as Cohere from ".."; - -export type ChatResponse = - | Cohere.NonStreamedChatResponse - | Cohere.StreamedChatResponse - | Cohere.SearchQueriesOnlyResponse; diff --git a/src/api/types/ChatStreamRequestCitationQuality.ts b/src/api/types/ChatStreamRequestCitationQuality.ts new file mode 100644 index 0000000..0da1a17 --- /dev/null +++ b/src/api/types/ChatStreamRequestCitationQuality.ts @@ -0,0 +1,15 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Defaults to `"accurate"`. + * Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. + * + */ +export type ChatStreamRequestCitationQuality = "fast" | "accurate"; + +export const ChatStreamRequestCitationQuality = { + Fast: "fast", + Accurate: "accurate", +} as const; diff --git a/src/api/types/ChatStreamRequestPromptTruncation.ts b/src/api/types/ChatStreamRequestPromptTruncation.ts new file mode 100644 index 0000000..432b2fc --- /dev/null +++ b/src/api/types/ChatStreamRequestPromptTruncation.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. + * Dictates how the prompt will be constructed. + * With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. + * With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. 
+ * + */ +export type ChatStreamRequestPromptTruncation = "OFF" | "AUTO"; + +export const ChatStreamRequestPromptTruncation = { + Off: "OFF", + Auto: "AUTO", +} as const; diff --git a/src/api/types/SummarizeRequestModel.ts b/src/api/types/SummarizeRequestModel.ts deleted file mode 100644 index 285db94..0000000 --- a/src/api/types/SummarizeRequestModel.ts +++ /dev/null @@ -1,13 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. - */ - -/** - * The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. - */ -export type SummarizeRequestModel = "command" | "command-light"; - -export const SummarizeRequestModel = { - Command: "command", - CommandLight: "command-light", -} as const; diff --git a/src/api/types/index.ts b/src/api/types/index.ts index 7eb610f..54d6ef4 100644 --- a/src/api/types/index.ts +++ b/src/api/types/index.ts @@ -8,16 +8,16 @@ export * from "./ClassifyResponse"; export * from "./ClassifyResponseClassificationsItem"; export * from "./ClassifyResponseClassificationsItemLabelsValue"; export * from "./ClassifyResponseClassificationsItemClassificationType"; +export * from "./ChatStreamRequestPromptTruncation"; +export * from "./ChatStreamRequestCitationQuality"; export * from "./ChatRequestPromptTruncation"; export * from "./ChatRequestCitationQuality"; -export * from "./ChatResponse"; export * from "./TokenizeResponse"; export * from "./DetokenizeResponse"; export * from "./DetectLanguageResponse"; export * from "./DetectLanguageResponseResultsItem"; export * from "./SummarizeRequestLength"; export * from "./SummarizeRequestFormat"; -export * from "./SummarizeRequestModel"; export * from "./SummarizeRequestExtractiveness"; export * from "./SummarizeResponse"; export * from "./SummarizeResponseResultsItem"; 
diff --git a/src/core/fetcher/Fetcher.ts b/src/core/fetcher/Fetcher.ts index 6af0fb1..96c7004 100644 --- a/src/core/fetcher/Fetcher.ts +++ b/src/core/fetcher/Fetcher.ts @@ -1,5 +1,5 @@ -import { default as URLSearchParams } from "@ungap/url-search-params"; -import axios, { AxiosAdapter, AxiosError } from "axios"; +import axios, { AxiosAdapter, AxiosError, AxiosResponse } from "axios"; +import qs from "qs"; import { APIResponse } from "./APIResponse"; export type FetchFunction = (args: Fetcher.Args) => Promise>; @@ -10,9 +10,10 @@ export declare namespace Fetcher { method: string; contentType?: string; headers?: Record; - queryParameters?: URLSearchParams; + queryParameters?: Record; body?: unknown; timeoutMs?: number; + maxRetries?: number; withCredentials?: boolean; responseType?: "json" | "blob"; adapter?: AxiosAdapter; @@ -43,6 +44,10 @@ export declare namespace Fetcher { } } +const INITIAL_RETRY_DELAY = 1; +const MAX_RETRY_DELAY = 60; +const DEFAULT_MAX_RETRIES = 2; + async function fetcherImpl(args: Fetcher.Args): Promise> { const headers: Record = {}; if (args.body !== undefined && args.contentType != null) { @@ -57,10 +62,13 @@ async function fetcherImpl(args: Fetcher.Args): Promise => + await axios({ url: args.url, params: args.queryParameters, + paramsSerializer: (params) => { + return qs.stringify(params, { arrayFormat: "repeat" }); + }, method: args.method, headers, data: args.body, @@ -78,6 +86,23 @@ async function fetcherImpl(args: Fetcher.Args): Promise= 500 + ) { + const delay = Math.min(INITIAL_RETRY_DELAY * Math.pow(i, 2), MAX_RETRY_DELAY); + response = await new Promise((resolve) => setTimeout(resolve, delay)); + } else { + break; + } + } + let body: unknown; if (args.responseType === "blob") { body = response.data; diff --git a/src/core/index.ts b/src/core/index.ts index ecd8e8e..e36b05e 100644 --- a/src/core/index.ts +++ b/src/core/index.ts @@ -1,4 +1,4 @@ export * from "./fetcher"; -export * from "./streaming-fetcher"; export * from "./auth"; 
+export * from "./streaming-fetcher"; export * as serialization from "./schemas"; diff --git a/src/core/schemas/Schema.ts b/src/core/schemas/Schema.ts index 3211fa4..870f373 100644 --- a/src/core/schemas/Schema.ts +++ b/src/core/schemas/Schema.ts @@ -17,6 +17,7 @@ export const SchemaType = { ENUM: "enum", LIST: "list", STRING_LITERAL: "stringLiteral", + BOOLEAN_LITERAL: "booleanLiteral", OBJECT: "object", ANY: "any", BOOLEAN: "boolean", diff --git a/src/core/schemas/builders/literals/booleanLiteral.ts b/src/core/schemas/builders/literals/booleanLiteral.ts new file mode 100644 index 0000000..a83d22c --- /dev/null +++ b/src/core/schemas/builders/literals/booleanLiteral.ts @@ -0,0 +1,29 @@ +import { Schema, SchemaType } from "../../Schema"; +import { createIdentitySchemaCreator } from "../../utils/createIdentitySchemaCreator"; +import { getErrorMessageForIncorrectType } from "../../utils/getErrorMessageForIncorrectType"; + +export function booleanLiteral(literal: V): Schema { + const schemaCreator = createIdentitySchemaCreator( + SchemaType.BOOLEAN_LITERAL, + (value, { breadcrumbsPrefix = [] } = {}) => { + if (value === literal) { + return { + ok: true, + value: literal, + }; + } else { + return { + ok: false, + errors: [ + { + path: breadcrumbsPrefix, + message: getErrorMessageForIncorrectType(value, `${literal.toString()}`), + }, + ], + }; + } + } + ); + + return schemaCreator(); +} diff --git a/src/core/schemas/builders/literals/index.ts b/src/core/schemas/builders/literals/index.ts index a4cd05c..d2bf08f 100644 --- a/src/core/schemas/builders/literals/index.ts +++ b/src/core/schemas/builders/literals/index.ts @@ -1 +1,2 @@ export { stringLiteral } from "./stringLiteral"; +export { booleanLiteral } from "./booleanLiteral"; diff --git a/src/core/schemas/builders/object/index.ts b/src/core/schemas/builders/object/index.ts index e6db5b5..e3f4388 100644 --- a/src/core/schemas/builders/object/index.ts +++ b/src/core/schemas/builders/object/index.ts @@ -1,4 +1,9 @@ 
export { getObjectUtils, object } from "./object"; +export { objectWithoutOptionalProperties } from "./objectWithoutOptionalProperties"; +export type { + inferObjectWithoutOptionalPropertiesSchemaFromPropertySchemas, + inferParsedObjectWithoutOptionalPropertiesFromPropertySchemas, +} from "./objectWithoutOptionalProperties"; export { isProperty, property } from "./property"; export type { Property } from "./property"; export type { diff --git a/src/core/schemas/builders/object/objectWithoutOptionalProperties.ts b/src/core/schemas/builders/object/objectWithoutOptionalProperties.ts new file mode 100644 index 0000000..a0951f4 --- /dev/null +++ b/src/core/schemas/builders/object/objectWithoutOptionalProperties.ts @@ -0,0 +1,18 @@ +import { object } from "./object"; +import { inferParsedPropertySchema, inferRawObjectFromPropertySchemas, ObjectSchema, PropertySchemas } from "./types"; + +export function objectWithoutOptionalProperties>( + schemas: T +): inferObjectWithoutOptionalPropertiesSchemaFromPropertySchemas { + return object(schemas) as unknown as inferObjectWithoutOptionalPropertiesSchemaFromPropertySchemas; +} + +export type inferObjectWithoutOptionalPropertiesSchemaFromPropertySchemas> = + ObjectSchema< + inferRawObjectFromPropertySchemas, + inferParsedObjectWithoutOptionalPropertiesFromPropertySchemas + >; + +export type inferParsedObjectWithoutOptionalPropertiesFromPropertySchemas> = { + [K in keyof T]: inferParsedPropertySchema; +}; diff --git a/src/core/streaming-fetcher/Stream.ts b/src/core/streaming-fetcher/Stream.ts new file mode 100644 index 0000000..216be48 --- /dev/null +++ b/src/core/streaming-fetcher/Stream.ts @@ -0,0 +1,43 @@ +import { Readable } from "stream"; + +export class Stream implements AsyncIterable { + private stream: Readable; + private parse: (val: unknown) => Promise; + private terminator: string; + + constructor({ + stream, + parse, + terminator, + }: { + stream: Readable; + parse: (val: unknown) => Promise; + terminator: string; + }) 
{ + this.stream = stream; + this.parse = parse; + this.terminator = terminator; + } + + private async *iterMessages(): AsyncGenerator { + let previous = ""; + for await (const chunk of this.stream) { + const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk); + previous += bufferChunk; + let terminatorIndex: number; + + while ((terminatorIndex = previous.indexOf(this.terminator)) >= 0) { + const line = previous.slice(0, terminatorIndex).trimEnd(); + const message = await this.parse(JSON.parse(line)); + yield message; + previous = previous.slice(terminatorIndex + 1); + } + } + } + + async *[Symbol.asyncIterator](): AsyncIterator { + for await (const message of this.iterMessages()) { + yield message; + } + } +} diff --git a/src/core/streaming-fetcher/StreamingFetcher.ts b/src/core/streaming-fetcher/StreamingFetcher.ts index 9a6abdc..d08f9fc 100644 --- a/src/core/streaming-fetcher/StreamingFetcher.ts +++ b/src/core/streaming-fetcher/StreamingFetcher.ts @@ -1,22 +1,23 @@ -import { default as URLSearchParams } from "@ungap/url-search-params"; -import axios, { AxiosAdapter, AxiosResponse } from "axios"; +import axios, { AxiosAdapter } from "axios"; +import qs from "qs"; import { Readable } from "stream"; -export type StreamingFetchFunction = (args: StreamingFetcher.Args) => Promise>; +export type StreamingFetchFunction = (args: StreamingFetcher.Args) => Promise; export declare namespace StreamingFetcher { - export interface Args { + export interface Args { url: string; method: string; headers?: Record; - queryParameters?: URLSearchParams; + queryParameters?: Record; body?: unknown; timeoutMs?: number; withCredentials?: boolean; adapter?: AxiosAdapter; - parse: (val: unknown) => Promise; + onUploadProgress?: (event: ProgressEvent) => void; + onDownloadProgress?: (event: ProgressEvent) => void; - terminator?: string; + abortController?: AbortController; } export interface Response { @@ -25,7 +26,7 @@ export declare namespace StreamingFetcher { } } -export 
async function streamingFetcher(args: StreamingFetcher.Args): Promise> { +export const streamingFetcher: StreamingFetchFunction = async (args) => { const headers: Record = {}; if (args.body !== undefined) { headers["Content-Type"] = "application/json"; @@ -41,6 +42,9 @@ export async function streamingFetcher(args: StreamingFetcher.Args): Promi const response = await axios({ url: args.url, params: args.queryParameters, + paramsSerializer: (params) => { + return qs.stringify(params, { arrayFormat: "repeat" }); + }, method: args.method, headers, data: args.body, @@ -51,40 +55,15 @@ export async function streamingFetcher(args: StreamingFetcher.Args): Promi withCredentials: args.withCredentials, maxBodyLength: Infinity, maxContentLength: Infinity, + onUploadProgress: args.onUploadProgress, + onDownloadProgress: args.onDownloadProgress, + signal: args.abortController?.signal, responseType: "stream", adapter: args.adapter, }); - return new Stream(response, args.parse); -} - -export class Stream implements AsyncIterable { - private response: AxiosResponse; - private parse: (val: unknown) => Promise; - - constructor(response: AxiosResponse, parse: (val: unknown) => Promise) { - this.response = response; - this.parse = parse; - } - - private async *iterMessages(): AsyncGenerator { - let previous = ""; - for await (const chunk of this.response.data) { - const bufferChunk = Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(chunk); - previous += bufferChunk; - let eolIndex: number; - while ((eolIndex = previous.indexOf("\n")) >= 0) { - const line = previous.slice(0, eolIndex).trimEnd(); - const message = await this.parse(JSON.parse(line)); - yield message; - previous = previous.slice(eolIndex + 1); - } - } - } - - async *[Symbol.asyncIterator](): AsyncIterator { - for await (const message of this.iterMessages()) { - yield message; - } - } -} + return { + data: response.data, + headers: response.headers, + }; +}; diff --git a/src/core/streaming-fetcher/index.ts b/src/core/streaming-fetcher/index.ts index da8e465..b4188da 100644 --- a/src/core/streaming-fetcher/index.ts +++ b/src/core/streaming-fetcher/index.ts @@ -1,3 +1,4 @@ export { getHeader } from "./getHeader"; +export { Stream } from "./Stream"; export { streamingFetcher } from "./StreamingFetcher"; export type { StreamingFetcher, StreamingFetchFunction } from "./StreamingFetcher"; diff --git a/src/serialization/client/requests/ChatRequest.ts b/src/serialization/client/requests/ChatRequest.ts index 2438697..797a2f0 100644 --- a/src/serialization/client/requests/ChatRequest.ts +++ b/src/serialization/client/requests/ChatRequest.ts @@ -10,7 +10,6 @@ export const ChatRequest: core.serialization.Schema = + core.serialization.object({ + message: core.serialization.string(), + model: core.serialization.string().optional(), + preambleOverride: core.serialization.property("preamble_override", core.serialization.string().optional()), + chatHistory: core.serialization.property( + "chat_history", + core.serialization + .list(core.serialization.lazyObject(async () => (await import("../..")).ChatMessage)) + .optional() + ), + conversationId: core.serialization.property("conversation_id", core.serialization.string().optional()), + promptTruncation: core.serialization.property( + "prompt_truncation", + core.serialization.lazy(async () => (await import("../..")).ChatStreamRequestPromptTruncation).optional() + ), + connectors: 
core.serialization + .list(core.serialization.lazyObject(async () => (await import("../..")).ChatConnector)) + .optional(), + searchQueriesOnly: core.serialization.property("search_queries_only", core.serialization.boolean().optional()), + documents: core.serialization + .list(core.serialization.lazyObject(async () => (await import("../..")).ChatDocument)) + .optional(), + citationQuality: core.serialization.property( + "citation_quality", + core.serialization.lazy(async () => (await import("../..")).ChatStreamRequestCitationQuality).optional() + ), + temperature: core.serialization.number().optional(), + }); + +export declare namespace ChatStreamRequest { + interface Raw { + message: string; + model?: string | null; + preamble_override?: string | null; + chat_history?: serializers.ChatMessage.Raw[] | null; + conversation_id?: string | null; + prompt_truncation?: serializers.ChatStreamRequestPromptTruncation.Raw | null; + connectors?: serializers.ChatConnector.Raw[] | null; + search_queries_only?: boolean | null; + documents?: serializers.ChatDocument.Raw[] | null; + citation_quality?: serializers.ChatStreamRequestCitationQuality.Raw | null; + temperature?: number | null; + } +} diff --git a/src/serialization/client/requests/DetectLanguageRequest.ts b/src/serialization/client/requests/DetectLanguageRequest.ts index 1c3c280..0d7dd13 100644 --- a/src/serialization/client/requests/DetectLanguageRequest.ts +++ b/src/serialization/client/requests/DetectLanguageRequest.ts @@ -11,10 +11,12 @@ export const DetectLanguageRequest: core.serialization.Schema< Cohere.DetectLanguageRequest > = core.serialization.object({ texts: core.serialization.list(core.serialization.string()), + model: core.serialization.string().optional(), }); export declare namespace DetectLanguageRequest { interface Raw { texts: string[]; + model?: string | null; } } diff --git a/src/serialization/client/requests/EmbedRequest.ts b/src/serialization/client/requests/EmbedRequest.ts index 38e6a0b..65a7144 
100644 --- a/src/serialization/client/requests/EmbedRequest.ts +++ b/src/serialization/client/requests/EmbedRequest.ts @@ -10,6 +10,7 @@ export const EmbedRequest: core.serialization.Schema (await import("../..")).EmbedRequestTruncate).optional(), }); @@ -17,6 +18,7 @@ export declare namespace EmbedRequest { interface Raw { texts: string[]; model?: string | null; + input_type?: string | null; truncate?: serializers.EmbedRequestTruncate.Raw | null; } } diff --git a/src/serialization/client/requests/SummarizeRequest.ts b/src/serialization/client/requests/SummarizeRequest.ts index f73f63f..0f28798 100644 --- a/src/serialization/client/requests/SummarizeRequest.ts +++ b/src/serialization/client/requests/SummarizeRequest.ts @@ -11,7 +11,7 @@ export const SummarizeRequest: core.serialization.Schema (await import("../..")).SummarizeRequestLength).optional(), format: core.serialization.lazy(async () => (await import("../..")).SummarizeRequestFormat).optional(), - model: core.serialization.lazy(async () => (await import("../..")).SummarizeRequestModel).optional(), + model: core.serialization.string().optional(), extractiveness: core.serialization .lazy(async () => (await import("../..")).SummarizeRequestExtractiveness) .optional(), @@ -24,7 +24,7 @@ export declare namespace SummarizeRequest { text: string; length?: serializers.SummarizeRequestLength.Raw | null; format?: serializers.SummarizeRequestFormat.Raw | null; - model?: serializers.SummarizeRequestModel.Raw | null; + model?: string | null; extractiveness?: serializers.SummarizeRequestExtractiveness.Raw | null; temperature?: number | null; additional_command?: string | null; diff --git a/src/serialization/client/requests/index.ts b/src/serialization/client/requests/index.ts index dc6691f..2b5b6eb 100644 --- a/src/serialization/client/requests/index.ts +++ b/src/serialization/client/requests/index.ts @@ -1,6 +1,7 @@ export { GenerateRequest } from "./GenerateRequest"; export { EmbedRequest } from "./EmbedRequest"; 
export { ClassifyRequest } from "./ClassifyRequest"; +export { ChatStreamRequest } from "./ChatStreamRequest"; export { ChatRequest } from "./ChatRequest"; export { TokenizeRequest } from "./TokenizeRequest"; export { DetokenizeRequest } from "./DetokenizeRequest"; diff --git a/src/serialization/types/ChatResponse.ts b/src/serialization/types/ChatResponse.ts deleted file mode 100644 index 689f647..0000000 --- a/src/serialization/types/ChatResponse.ts +++ /dev/null @@ -1,21 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. - */ - -import * as serializers from ".."; -import * as Cohere from "../../api"; -import * as core from "../../core"; - -export const ChatResponse: core.serialization.Schema = - core.serialization.undiscriminatedUnion([ - core.serialization.lazyObject(async () => (await import("..")).NonStreamedChatResponse), - core.serialization.lazy(async () => (await import("..")).StreamedChatResponse), - core.serialization.lazyObject(async () => (await import("..")).SearchQueriesOnlyResponse), - ]); - -export declare namespace ChatResponse { - type Raw = - | serializers.NonStreamedChatResponse.Raw - | serializers.StreamedChatResponse.Raw - | serializers.SearchQueriesOnlyResponse.Raw; -} diff --git a/src/serialization/types/ChatStreamRequestCitationQuality.ts b/src/serialization/types/ChatStreamRequestCitationQuality.ts new file mode 100644 index 0000000..10cc647 --- /dev/null +++ b/src/serialization/types/ChatStreamRequestCitationQuality.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from ".."; +import * as Cohere from "../../api"; +import * as core from "../../core"; + +export const ChatStreamRequestCitationQuality: core.serialization.Schema< + serializers.ChatStreamRequestCitationQuality.Raw, + Cohere.ChatStreamRequestCitationQuality +> = core.serialization.enum_(["fast", "accurate"]); + +export declare namespace ChatStreamRequestCitationQuality { + type Raw = "fast" | "accurate"; +} diff --git a/src/serialization/types/ChatStreamRequestPromptTruncation.ts b/src/serialization/types/ChatStreamRequestPromptTruncation.ts new file mode 100644 index 0000000..062af07 --- /dev/null +++ b/src/serialization/types/ChatStreamRequestPromptTruncation.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from ".."; +import * as Cohere from "../../api"; +import * as core from "../../core"; + +export const ChatStreamRequestPromptTruncation: core.serialization.Schema< + serializers.ChatStreamRequestPromptTruncation.Raw, + Cohere.ChatStreamRequestPromptTruncation +> = core.serialization.enum_(["OFF", "AUTO"]); + +export declare namespace ChatStreamRequestPromptTruncation { + type Raw = "OFF" | "AUTO"; +} diff --git a/src/serialization/types/SummarizeRequestModel.ts b/src/serialization/types/SummarizeRequestModel.ts deleted file mode 100644 index 35d25ec..0000000 --- a/src/serialization/types/SummarizeRequestModel.ts +++ /dev/null @@ -1,16 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. 
- */ - -import * as serializers from ".."; -import * as Cohere from "../../api"; -import * as core from "../../core"; - -export const SummarizeRequestModel: core.serialization.Schema< - serializers.SummarizeRequestModel.Raw, - Cohere.SummarizeRequestModel -> = core.serialization.enum_(["command", "command-light"]); - -export declare namespace SummarizeRequestModel { - type Raw = "command" | "command-light"; -} diff --git a/src/serialization/types/index.ts b/src/serialization/types/index.ts index 7eb610f..54d6ef4 100644 --- a/src/serialization/types/index.ts +++ b/src/serialization/types/index.ts @@ -8,16 +8,16 @@ export * from "./ClassifyResponse"; export * from "./ClassifyResponseClassificationsItem"; export * from "./ClassifyResponseClassificationsItemLabelsValue"; export * from "./ClassifyResponseClassificationsItemClassificationType"; +export * from "./ChatStreamRequestPromptTruncation"; +export * from "./ChatStreamRequestCitationQuality"; export * from "./ChatRequestPromptTruncation"; export * from "./ChatRequestCitationQuality"; -export * from "./ChatResponse"; export * from "./TokenizeResponse"; export * from "./DetokenizeResponse"; export * from "./DetectLanguageResponse"; export * from "./DetectLanguageResponseResultsItem"; export * from "./SummarizeRequestLength"; export * from "./SummarizeRequestFormat"; -export * from "./SummarizeRequestModel"; export * from "./SummarizeRequestExtractiveness"; export * from "./SummarizeResponse"; export * from "./SummarizeResponseResultsItem"; diff --git a/yarn.lock b/yarn.lock index e3d7ca2..fa08e94 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7,16 +7,16 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.33.tgz#3c1879b276dc63e73030bb91165e62a4509cd506" integrity sha512-miWq2m2FiQZmaHfdZNcbpp9PuXg34W5JZ5CrJ/BaS70VuhoJENBEQybeiYSaPBRNq6KQGnjfEnc/F3PN++D+XQ== +"@types/qs@6.9.8": + version "6.9.8" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.8.tgz#f2a7de3c107b89b441e071d5472e6b726b4adf45" + 
integrity sha512-u95svzDlTysU5xecFNTgfFG5RUWu1A9P0VzgpcIiGZA9iraHOdSzcxMxQ55DyeRaGCSxQi7LxXDI4rzq/MYfdg== + "@types/url-join@4.0.1": version "4.0.1" resolved "https://registry.yarnpkg.com/@types/url-join/-/url-join-4.0.1.tgz#4989c97f969464647a8586c7252d97b449cdc045" integrity sha512-wDXw9LEEUHyV+7UWy7U315nrJGJ7p1BzaCxDpEoLr789Dk1WDVMMlf3iBfbG2F8NdWnYyFbtTxUn2ZNbm1Q4LQ== -"@ungap/url-search-params@0.2.2": - version "0.2.2" - resolved "https://registry.yarnpkg.com/@ungap/url-search-params/-/url-search-params-0.2.2.tgz#2de3bdec21476a9b70ef11fd7b794752f9afa04c" - integrity sha512-qQsguKXZVKdCixOHX9jqnX/K/1HekPDpGKyEcXHT+zR6EjGA7S4boSuelL4uuPv6YfhN0n8c4UxW+v/Z3gM2iw== - asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -30,6 +30,15 @@ axios@0.27.2: follow-redirects "^1.14.9" form-data "^4.0.0" +call-bind@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" + integrity sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ== + dependencies: + function-bind "^1.1.2" + get-intrinsic "^1.2.1" + set-function-length "^1.1.1" + combined-stream@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" @@ -37,6 +46,15 @@ combined-stream@^1.0.8: dependencies: delayed-stream "~1.0.0" +define-data-property@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.1.tgz#c35f7cd0ab09883480d12ac5cb213715587800b3" + integrity sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ== + dependencies: + get-intrinsic "^1.2.1" + gopd "^1.0.1" + has-property-descriptors "^1.0.0" + delayed-stream@~1.0.0: version "1.0.0" resolved 
"https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" @@ -56,6 +74,52 @@ form-data@^4.0.0: combined-stream "^1.0.8" mime-types "^2.1.12" +function-bind@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== + +get-intrinsic@^1.0.2, get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.2.tgz#281b7622971123e1ef4b3c90fd7539306da93f3b" + integrity sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA== + dependencies: + function-bind "^1.1.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" + hasown "^2.0.0" + +gopd@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" + integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== + dependencies: + get-intrinsic "^1.1.3" + +has-property-descriptors@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz#52ba30b6c5ec87fd89fa574bc1c39125c6f65340" + integrity sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg== + dependencies: + get-intrinsic "^1.2.2" + +has-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" + integrity sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== + +has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity 
sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +hasown@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" + integrity sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA== + dependencies: + function-bind "^1.1.2" + js-base64@3.7.2: version "3.7.2" resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-3.7.2.tgz#816d11d81a8aff241603d19ce5761e13e41d7745" @@ -73,11 +137,42 @@ mime-types@^2.1.12: dependencies: mime-db "1.52.0" +object-inspect@^1.9.0: + version "1.13.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" + integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== + prettier@2.7.1: version "2.7.1" resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.7.1.tgz#e235806850d057f97bb08368a4f7d899f7760c64" integrity sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g== +qs@6.11.2: + version "6.11.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.2.tgz#64bea51f12c1f5da1bc01496f48ffcff7c69d7d9" + integrity sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA== + dependencies: + side-channel "^1.0.4" + +set-function-length@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.1.1.tgz#4bc39fafb0307224a33e106a7d35ca1218d659ed" + integrity sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ== + dependencies: + define-data-property "^1.1.1" + get-intrinsic "^1.2.1" + gopd "^1.0.1" + has-property-descriptors "^1.0.0" + +side-channel@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" 
+ integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== + dependencies: + call-bind "^1.0.0" + get-intrinsic "^1.0.2" + object-inspect "^1.9.0" + typescript@4.6.4: version "4.6.4" resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.6.4.tgz#caa78bbc3a59e6a5c510d35703f6a09877ce45e9"