From 84611f3692463a73b75f36857d7d14e1cf536c2a Mon Sep 17 00:00:00 2001 From: Avaer Kazmer Date: Sun, 29 Dec 2024 17:54:12 -0800 Subject: [PATCH] Update elizaos-core-proxy files --- .../packages/elizaos-core-proxy/context.ts | 104 + .../elizaos-core-proxy/elizaos-core.ts | 21 - .../packages/elizaos-core-proxy/generation.ts | 1763 +++++++++++++++++ .../packages/elizaos-core-proxy/memory.ts | 243 +++ .../packages/elizaos-core-proxy/parsing.ts | 207 ++ .../packages/elizaos-core-proxy/types.ts | 1280 ++++++++++++ 6 files changed, 3597 insertions(+), 21 deletions(-) create mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/context.ts delete mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/elizaos-core.ts create mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/generation.ts create mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/memory.ts create mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/parsing.ts create mode 100644 packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/types.ts diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/context.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/context.ts new file mode 100644 index 000000000..a682e6794 --- /dev/null +++ b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/context.ts @@ -0,0 +1,104 @@ +import handlebars from "handlebars"; +import { type State } from "./types.ts"; +import { names, uniqueNamesGenerator } from "unique-names-generator"; + +/** + * Composes a context string by replacing placeholders in a template with corresponding values from the state. + * + * This function takes a template string with placeholders in the format `{{placeholder}}` and a state object. + * It replaces each placeholder with the value from the state object that matches the placeholder's name. + * If a matching key is not found in the state object for a given placeholder, the placeholder is replaced with an empty string. + * + * By default, this function uses a simple string replacement approach. However, when `templatingEngine` is set to `'handlebars'`, it uses Handlebars templating engine instead, compiling the template into a reusable function and evaluating it with the provided state object. + * + * @param {Object} params - The parameters for composing the context. + * @param {State} params.state - The state object containing values to replace the placeholders in the template. + * @param {string} params.template - The template string containing placeholders to be replaced with state values. + * @param {"handlebars" | undefined} [params.templatingEngine] - The templating engine to use for compiling and evaluating the template (optional, default: `undefined`). + * @returns {string} The composed context string with placeholders replaced by corresponding state values. + * + * @example + * // Given a state object and a template + * const state = { userName: "Alice", userAge: 30 }; + * const template = "Hello, {{userName}}! You are {{userAge}} years old"; + * + * // Composing the context with simple string replacement will result in: + * // "Hello, Alice! You are 30 years old." 
+ * const contextSimple = composeContext({ state, template }); + */ +export const composeContext = ({ + state, + template, + templatingEngine, +}: { + state: State; + template: string; + templatingEngine?: "handlebars"; +}) => { + if (templatingEngine === "handlebars") { + const templateFunction = handlebars.compile(template); + return templateFunction(state); + } + + // @ts-expect-error match isn't working as expected + const out = template.replace(/{{\w+}}/g, (match) => { + const key = match.replace(/{{|}}/g, ""); + return state[key] ?? ""; + }); + return out; +}; + +/** + * Adds a header to a body of text. + * + * This function takes a header string and a body string and returns a new string with the header prepended to the body. + * If the body string is empty, the header is returned as is. + * + * @param {string} header - The header to add to the body. + * @param {string} body - The body to which to add the header. + * @returns {string} The body with the header prepended. + * + * @example + * // Given a header and a body + * const header = "Header"; + * const body = "Body"; + * + * // Adding the header to the body will result in: + * // "Header\nBody" + * const text = addHeader(header, body); + */ +export const addHeader = (header: string, body: string) => { + return body.length > 0 ? `${header ? header + "\n" : header}${body}\n` : ""; +}; + +/** + * Generates a string with random user names populated in a template. + * + * This function generates a specified number of random user names and populates placeholders + * in the provided template with these names. Placeholders in the template should follow the format `{{userX}}` + * where `X` is the position of the user (e.g., `{{user1}}`, `{{user2}}`). + * + * @param {string} params.template - The template string containing placeholders for random user names. + * @param {number} params.length - The number of random user names to generate. + * @returns {string} The template string with placeholders replaced by random user names. + * + * @example + * // Given a template and a length + * const template = "Hello, {{user1}}! Meet {{user2}} and {{user3}}."; + * const length = 3; + * + * // Composing the random user string will result in: + * // "Hello, John! Meet Alice and Bob." 
+ * const result = composeRandomUser({ template, length }); + */ +export const composeRandomUser = (template: string, length: number) => { + const exampleNames = Array.from({ length }, () => + uniqueNamesGenerator({ dictionaries: [names] }) + ); + let result = template; + for (let i = 0; i < exampleNames.length; i++) { + result = result.replaceAll(`{{user${i + 1}}}`, exampleNames[i]); + } + + return result; +}; diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/elizaos-core.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/elizaos-core.ts deleted file mode 100644 index aac71e9b8..000000000 --- a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/elizaos-core.ts +++ /dev/null @@ -1,21 +0,0 @@ -export * from "./generation.ts"; - -// - -type State = {}; -type IAgentRuntime = {}; - -// - -export const elizaLogger = console; -export const settings = { - SOL_ADDRESS: 'So11111111111111111111111111111111111111112', - SLIPPAGE: '1', -}; - -export const Action = {}; -export const HandlerCallback = {}; -export const IAgentRuntime = {}; -export const Memory = {}; -export const Plugin = {}; -export const State = {}; \ No newline at end of file diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/generation.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/generation.ts new file mode 100644 index 000000000..64873a42c --- /dev/null +++ b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/generation.ts @@ -0,0 +1,1763 @@ +import { createAnthropic } from "@ai-sdk/anthropic"; +import { createGoogleGenerativeAI } from "@ai-sdk/google"; +import { createGroq } from "@ai-sdk/groq"; +import { createOpenAI } from "@ai-sdk/openai"; +import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; +import { + generateObject as aiGenerateObject, + generateText as aiGenerateText, + GenerateObjectResult, +} from "ai"; +import { Buffer } from "buffer"; +// import { createOllama } from "ollama-ai-provider"; +import OpenAI from "openai"; +import { encodingForModel, TiktokenModel } from "js-tiktoken"; +import Together from "together-ai"; +import { ZodSchema } from "zod"; +import { elizaLogger } from "./index.ts"; +import { getModel, models } from "./models.ts"; +import { + parseBooleanFromText, + parseJsonArrayFromText, + parseJSONObjectFromText, + parseShouldRespondFromText, + parseActionResponseFromText, +} from "./parsing.ts"; +import settings from "./settings.ts"; +import { + Content, + IAgentRuntime, + IImageDescriptionService, + ITextGenerationService, + ModelClass, + ModelProviderName, + ServiceType, + SearchResponse, + ActionResponse, +} from "./types.ts"; +import { fal } from "@fal-ai/client"; + +/** + * Send a message to the model for a text generateText - receive a string back and parse how you'd like + * @param opts - The options for the generateText request. + * @param opts.context The context of the message to be completed. + * @param opts.stop A list of strings to stop the generateText at. + * @param opts.model The model to use for generateText. + * @param opts.frequency_penalty The frequency penalty to apply to the generateText. + * @param opts.presence_penalty The presence penalty to apply to the generateText. + * @param opts.temperature The temperature to apply to the generateText. + * @param opts.max_context_length The maximum length of the context to apply to the generateText. + * @returns The completed message. 
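+ * @example
+ * // A hypothetical call; assumes a configured runtime and the ModelClass enum from "./types.ts".
+ * const reply = await generateText({ runtime, context, modelClass: ModelClass.SMALL });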
+ */ + +export async function generateText({ + runtime, + context, + modelClass, + stop, + customSystemPrompt, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; + stop?: string[]; + customSystemPrompt?: string; +}): Promise { + if (!context) { + console.error("generateText context is empty"); + return ""; + } + + elizaLogger.log("Generating text..."); + + elizaLogger.info("Generating text with options:", { + modelProvider: runtime.modelProvider, + model: modelClass, + }); + + const provider = runtime.modelProvider; + const endpoint = + runtime.character.modelEndpointOverride || models[provider].endpoint; + let model = models[provider].model[modelClass]; + + // allow character.json settings => secrets to override models + // FIXME: add MODEL_MEDIUM support + switch (provider) { + // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model + case ModelProviderName.LLAMACLOUD: + { + switch (modelClass) { + case ModelClass.LARGE: + { + model = + runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || + model; + } + break; + case ModelClass.SMALL: + { + model = + runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || + model; + } + break; + } + } + break; + case ModelProviderName.TOGETHER: + { + switch (modelClass) { + case ModelClass.LARGE: + { + model = + runtime.getSetting("TOGETHER_MODEL_LARGE") || + model; + } + break; + case ModelClass.SMALL: + { + model = + runtime.getSetting("TOGETHER_MODEL_SMALL") || + model; + } + break; + } + } + break; + case ModelProviderName.OPENROUTER: + { + switch (modelClass) { + case ModelClass.LARGE: + { + model = + runtime.getSetting("LARGE_OPENROUTER_MODEL") || + model; + } + break; + case ModelClass.SMALL: + { + model = + runtime.getSetting("SMALL_OPENROUTER_MODEL") || + model; + } + break; + } + } + break; + } + + elizaLogger.info("Selected model:", model); + + const modelConfiguration = runtime.character?.settings?.modelConfig; + const temperature = + modelConfiguration?.temperature || + models[provider].settings.temperature; + const frequency_penalty = + modelConfiguration?.frequency_penalty || + models[provider].settings.frequency_penalty; + const presence_penalty = + modelConfiguration?.presence_penalty || + models[provider].settings.presence_penalty; + const max_context_length = + modelConfiguration?.maxInputTokens || + models[provider].settings.maxInputTokens; + const max_response_length = + modelConfiguration?.max_response_length || + models[provider].settings.maxOutputTokens; + + const apiKey = runtime.token; + + try { + elizaLogger.debug( + `Trimming context to max length of ${max_context_length} tokens.` + ); + context = await trimTokens(context, max_context_length, "gpt-4o"); + + let response: string; + + const _stop = stop || models[provider].settings.stop; + elizaLogger.debug( + `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}` + ); + + switch (provider) { + // OPENAI & LLAMACLOUD shared same structure. 
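+            // All of these providers expose an OpenAI-compatible API, so they share one
+            // code path: an OpenAI client created against the provider-specific endpoint.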
+ case ModelProviderName.OPENAI: + case ModelProviderName.ETERNALAI: + case ModelProviderName.ALI_BAILIAN: + case ModelProviderName.VOLENGINE: + case ModelProviderName.LLAMACLOUD: + case ModelProviderName.NANOGPT: + case ModelProviderName.HYPERBOLIC: + case ModelProviderName.TOGETHER: + case ModelProviderName.AKASH_CHAT_API: { + elizaLogger.debug("Initializing OpenAI model."); + const openai = createOpenAI({ + apiKey, + baseURL: endpoint, + fetch: runtime.fetch, + }); + + const { text: openaiResponse } = await aiGenerateText({ + model: openai.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = openaiResponse; + elizaLogger.debug("Received response from OpenAI model."); + break; + } + + case ModelProviderName.GOOGLE: { + const google = createGoogleGenerativeAI({ + fetch: runtime.fetch, + }); + + const { text: googleResponse } = await aiGenerateText({ + model: google(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = googleResponse; + elizaLogger.debug("Received response from Google model."); + break; + } + + case ModelProviderName.ANTHROPIC: { + elizaLogger.debug("Initializing Anthropic model."); + + const anthropic = createAnthropic({ + apiKey, + fetch: runtime.fetch, + }); + + const { text: anthropicResponse } = await aiGenerateText({ + model: anthropic.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = anthropicResponse; + elizaLogger.debug("Received response from Anthropic model."); + break; + } + + case ModelProviderName.CLAUDE_VERTEX: { + elizaLogger.debug("Initializing Claude Vertex model."); + + const anthropic = createAnthropic({ + apiKey, + fetch: runtime.fetch, + }); + + const { text: anthropicResponse } = await aiGenerateText({ + model: anthropic.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = anthropicResponse; + elizaLogger.debug( + "Received response from Claude Vertex model." + ); + break; + } + + case ModelProviderName.GROK: { + elizaLogger.debug("Initializing Grok model."); + const grok = createOpenAI({ + apiKey, + baseURL: endpoint, + fetch: runtime.fetch, + }); + + const { text: grokResponse } = await aiGenerateText({ + model: grok.languageModel(model, { + parallelToolCalls: false, + }), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? 
+ undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = grokResponse; + elizaLogger.debug("Received response from Grok model."); + break; + } + + case ModelProviderName.GROQ: { + const groq = createGroq({ apiKey, fetch: runtime.fetch }); + + const { text: groqResponse } = await aiGenerateText({ + model: groq.languageModel(model), + prompt: context, + temperature: temperature, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = groqResponse; + break; + } + + case ModelProviderName.LLAMALOCAL: { + elizaLogger.debug( + "Using local Llama model for text completion." + ); + const textGenerationService = + runtime.getService( + ServiceType.TEXT_GENERATION + ); + + if (!textGenerationService) { + throw new Error("Text generation service not found"); + } + + response = await textGenerationService.queueTextCompletion( + context, + temperature, + _stop, + frequency_penalty, + presence_penalty, + max_response_length + ); + elizaLogger.debug("Received response from local Llama model."); + break; + } + + case ModelProviderName.REDPILL: { + elizaLogger.debug("Initializing RedPill model."); + const serverUrl = models[provider].endpoint; + const openai = createOpenAI({ + apiKey, + baseURL: serverUrl, + fetch: runtime.fetch, + }); + + const { text: redpillResponse } = await aiGenerateText({ + model: openai.languageModel(model), + prompt: context, + temperature: temperature, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = redpillResponse; + elizaLogger.debug("Received response from redpill model."); + break; + } + + case ModelProviderName.OPENROUTER: { + elizaLogger.debug("Initializing OpenRouter model."); + const serverUrl = models[provider].endpoint; + const openrouter = createOpenAI({ + apiKey, + baseURL: serverUrl, + fetch: runtime.fetch, + }); + + const { text: openrouterResponse } = await aiGenerateText({ + model: openrouter.languageModel(model), + prompt: context, + temperature: temperature, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? 
+ undefined, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = openrouterResponse; + elizaLogger.debug("Received response from OpenRouter model."); + break; + } + + /* case ModelProviderName.OLLAMA: + { + elizaLogger.debug("Initializing Ollama model."); + + const ollamaProvider = createOllama({ + baseURL: models[provider].endpoint + "/api", + fetch: runtime.fetch, + }); + const ollama = ollamaProvider(model); + + elizaLogger.debug("****** MODEL\n", model); + + const { text: ollamaResponse } = await aiGenerateText({ + model: ollama, + prompt: context, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = ollamaResponse; + } + elizaLogger.debug("Received response from Ollama model."); + break; */ + + case ModelProviderName.HEURIST: { + elizaLogger.debug("Initializing Heurist model."); + const heurist = createOpenAI({ + apiKey: apiKey, + baseURL: endpoint, + fetch: runtime.fetch, + }); + + const { text: heuristResponse } = await aiGenerateText({ + model: heurist.languageModel(model), + prompt: context, + system: + customSystemPrompt ?? + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = heuristResponse; + elizaLogger.debug("Received response from Heurist model."); + break; + } + case ModelProviderName.GAIANET: { + elizaLogger.debug("Initializing GAIANET model."); + + var baseURL = models[provider].endpoint; + if (!baseURL) { + switch (modelClass) { + case ModelClass.SMALL: + baseURL = + settings.SMALL_GAIANET_SERVER_URL || + "https://llama3b.gaia.domains/v1"; + break; + case ModelClass.MEDIUM: + baseURL = + settings.MEDIUM_GAIANET_SERVER_URL || + "https://llama8b.gaia.domains/v1"; + break; + case ModelClass.LARGE: + baseURL = + settings.LARGE_GAIANET_SERVER_URL || + "https://qwen72b.gaia.domains/v1"; + break; + } + } + + elizaLogger.debug("Using GAIANET model with baseURL:", baseURL); + + const openai = createOpenAI({ + apiKey, + baseURL: endpoint, + fetch: runtime.fetch, + }); + + const { text: openaiResponse } = await aiGenerateText({ + model: openai.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = openaiResponse; + elizaLogger.debug("Received response from GAIANET model."); + break; + } + + case ModelProviderName.GALADRIEL: { + elizaLogger.debug("Initializing Galadriel model."); + const galadriel = createOpenAI({ + apiKey: apiKey, + baseURL: endpoint, + fetch: runtime.fetch, + }); + + const { text: galadrielResponse } = await aiGenerateText({ + model: galadriel.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? 
+ undefined, + temperature: temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + }); + + response = galadrielResponse; + elizaLogger.debug("Received response from Galadriel model."); + break; + } + + case ModelProviderName.VENICE: { + elizaLogger.debug("Initializing Venice model."); + const venice = createOpenAI({ + apiKey: apiKey, + baseURL: endpoint, + }); + + const { text: veniceResponse } = await aiGenerateText({ + model: venice.languageModel(model), + prompt: context, + system: + runtime.character.system ?? + settings.SYSTEM_PROMPT ?? + undefined, + temperature: temperature, + maxTokens: max_response_length, + }); + + response = veniceResponse; + elizaLogger.debug("Received response from Venice model."); + break; + } + + default: { + const errorMessage = `Unsupported provider: ${provider}`; + elizaLogger.error(errorMessage); + throw new Error(errorMessage); + } + } + + return response; + } catch (error) { + elizaLogger.error("Error in generateText:", error); + throw error; + } +} + +/** + * Truncate the context to the maximum length allowed by the model. + * @param context The text to truncate + * @param maxTokens Maximum number of tokens to keep + * @param model The tokenizer model to use + * @returns The truncated text + */ +export function trimTokens( + context: string, + maxTokens: number, + model: TiktokenModel +): string { + if (!context) return ""; + if (maxTokens <= 0) throw new Error("maxTokens must be positive"); + + // Get the tokenizer for the model + const encoding = encodingForModel(model); + + try { + // Encode the text into tokens + const tokens = encoding.encode(context); + + // If already within limits, return unchanged + if (tokens.length <= maxTokens) { + return context; + } + + // Keep the most recent tokens by slicing from the end + const truncatedTokens = tokens.slice(-maxTokens); + + // Decode back to text - js-tiktoken decode() returns a string directly + return encoding.decode(truncatedTokens); + } catch (error) { + console.error("Error in trimTokens:", error); + // Return truncated string if tokenization fails + return context.slice(-maxTokens * 4); // Rough estimate of 4 chars per token + } +} + +/** + * Sends a message to the model to determine if it should respond to the given context. 
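+ * The call is retried with exponential backoff until the model returns a response that parses to one of the three options.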
+ * @param opts - The options for the generateText request + * @param opts.context The context to evaluate for response + * @param opts.stop A list of strings to stop the generateText at + * @param opts.model The model to use for generateText + * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) + * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) + * @param opts.temperature The temperature to control randomness (0.0 to 2.0) + * @param opts.serverUrl The URL of the API server + * @param opts.max_context_length Maximum allowed context length in tokens + * @param opts.max_response_length Maximum allowed response length in tokens + * @returns Promise resolving to "RESPOND", "IGNORE", "STOP" or null + */ +export async function generateShouldRespond({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise<"RESPOND" | "IGNORE" | "STOP" | null> { + let retryDelay = 1000; + while (true) { + try { + elizaLogger.debug( + "Attempting to generate text with context:", + context + ); + const response = await generateText({ + runtime, + context, + modelClass, + }); + + elizaLogger.debug("Received response from generateText:", response); + const parsedResponse = parseShouldRespondFromText(response.trim()); + if (parsedResponse) { + elizaLogger.debug("Parsed response:", parsedResponse); + return parsedResponse; + } else { + elizaLogger.debug("generateShouldRespond no response"); + } + } catch (error) { + elizaLogger.error("Error in generateShouldRespond:", error); + if ( + error instanceof TypeError && + error.message.includes("queueTextCompletion") + ) { + elizaLogger.error( + "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" + ); + } + } + + elizaLogger.log(`Retrying in ${retryDelay}ms...`); + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} + +/** + * Splits content into chunks of specified size with optional overlapping bleed sections + * @param content - The text content to split into chunks + * @param chunkSize - The maximum size of each chunk in tokens + * @param bleed - Number of characters to overlap between chunks (default: 100) + * @returns Promise resolving to array of text chunks with bleed sections + */ +export async function splitChunks( + content: string, + chunkSize: number = 512, + bleed: number = 20 +): Promise { + const textSplitter = new RecursiveCharacterTextSplitter({ + chunkSize: Number(chunkSize), + chunkOverlap: Number(bleed), + }); + + return textSplitter.splitText(content); +} + +/** + * Sends a message to the model and parses the response as a boolean value + * @param opts - The options for the generateText request + * @param opts.context The context to evaluate for the boolean response + * @param opts.stop A list of strings to stop the generateText at + * @param opts.model The model to use for generateText + * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) + * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) + * @param opts.temperature The temperature to control randomness (0.0 to 2.0) + * @param opts.serverUrl The URL of the API server + * @param opts.token The API token for authentication + * @param opts.max_context_length Maximum allowed context length in tokens + * @param opts.max_response_length Maximum allowed response length in tokens + * @returns Promise resolving to a boolean value parsed from the model's response + */ +export async 
function generateTrueOrFalse({ + runtime, + context = "", + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + let retryDelay = 1000; + + const stop = Array.from( + new Set([ + ...(models[runtime.modelProvider].settings.stop || []), + ["\n"], + ]) + ) as string[]; + + while (true) { + try { + const response = await generateText({ + stop, + runtime, + context, + modelClass, + }); + + const parsedResponse = parseBooleanFromText(response.trim()); + if (parsedResponse !== null) { + return parsedResponse; + } + } catch (error) { + elizaLogger.error("Error in generateTrueOrFalse:", error); + } + + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} + +/** + * Send a message to the model and parse the response as a string array + * @param opts - The options for the generateText request + * @param opts.context The context/prompt to send to the model + * @param opts.stop Array of strings that will stop the model's generation if encountered + * @param opts.model The language model to use + * @param opts.frequency_penalty The frequency penalty to apply (0.0 to 2.0) + * @param opts.presence_penalty The presence penalty to apply (0.0 to 2.0) + * @param opts.temperature The temperature to control randomness (0.0 to 2.0) + * @param opts.serverUrl The URL of the API server + * @param opts.token The API token for authentication + * @param opts.max_context_length Maximum allowed context length in tokens + * @param opts.max_response_length Maximum allowed response length in tokens + * @returns Promise resolving to an array of strings parsed from the model's response + */ +export async function generateTextArray({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + if (!context) { + elizaLogger.error("generateTextArray context is empty"); + return []; + } + let retryDelay = 1000; + + while (true) { + try { + const response = await generateText({ + runtime, + context, + modelClass, + }); + + const parsedResponse = parseJsonArrayFromText(response); + if (parsedResponse) { + return parsedResponse; + } + } catch (error) { + elizaLogger.error("Error in generateTextArray:", error); + } + + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} + +export async function generateObjectDeprecated({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + if (!context) { + elizaLogger.error("generateObjectDeprecated context is empty"); + return null; + } + let retryDelay = 1000; + + while (true) { + try { + // this is slightly different than generateObjectArray, in that we parse object, not object array + const response = await generateText({ + runtime, + context, + modelClass, + }); + const parsedResponse = parseJSONObjectFromText(response); + if (parsedResponse) { + return parsedResponse; + } + } catch (error) { + elizaLogger.error("Error in generateObject:", error); + } + + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} + +export async function generateObjectArray({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + if (!context) { + elizaLogger.error("generateObjectArray context is empty"); + return []; + } + let retryDelay = 1000; + + while (true) { + try { + const response = await generateText({ + runtime, + context, + modelClass, + 
}); + + const parsedResponse = parseJsonArrayFromText(response); + if (parsedResponse) { + return parsedResponse; + } + } catch (error) { + elizaLogger.error("Error in generateTextArray:", error); + } + + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} + +/** + * Send a message to the model for generateText. + * @param opts - The options for the generateText request. + * @param opts.context The context of the message to be completed. + * @param opts.stop A list of strings to stop the generateText at. + * @param opts.model The model to use for generateText. + * @param opts.frequency_penalty The frequency penalty to apply to the generateText. + * @param opts.presence_penalty The presence penalty to apply to the generateText. + * @param opts.temperature The temperature to apply to the generateText. + * @param opts.max_context_length The maximum length of the context to apply to the generateText. + * @returns The completed message. + */ +export async function generateMessageResponse({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + const max_context_length = + models[runtime.modelProvider].settings.maxInputTokens; + context = trimTokens(context, max_context_length, "gpt-4o"); + let retryLength = 1000; // exponential backoff + while (true) { + try { + elizaLogger.log("Generating message response.."); + + const response = await generateText({ + runtime, + context, + modelClass, + }); + + // try parsing the response as JSON, if null then try again + const parsedContent = parseJSONObjectFromText(response) as Content; + if (!parsedContent) { + elizaLogger.debug("parsedContent is null, retrying"); + continue; + } + + return parsedContent; + } catch (error) { + elizaLogger.error("ERROR:", error); + // wait for 2 seconds + retryLength *= 2; + await new Promise((resolve) => setTimeout(resolve, retryLength)); + elizaLogger.debug("Retrying..."); + } + } +} + +export const generateImage = async ( + data: { + prompt: string; + width: number; + height: number; + count?: number; + negativePrompt?: string; + numIterations?: number; + guidanceScale?: number; + seed?: number; + modelId?: string; + jobId?: string; + stylePreset?: string; + hideWatermark?: boolean; + }, + runtime: IAgentRuntime +): Promise<{ + success: boolean; + data?: string[]; + error?: any; +}> => { + const model = getModel(runtime.imageModelProvider, ModelClass.IMAGE); + const modelSettings = models[runtime.imageModelProvider].imageSettings; + + elizaLogger.info("Generating image with options:", { + imageModelProvider: model, + }); + + const apiKey = + runtime.imageModelProvider === runtime.modelProvider + ? runtime.token + : (() => { + // First try to match the specific provider + switch (runtime.imageModelProvider) { + case ModelProviderName.HEURIST: + return runtime.getSetting("HEURIST_API_KEY"); + case ModelProviderName.TOGETHER: + return runtime.getSetting("TOGETHER_API_KEY"); + case ModelProviderName.FAL: + return runtime.getSetting("FAL_API_KEY"); + case ModelProviderName.OPENAI: + return runtime.getSetting("OPENAI_API_KEY"); + case ModelProviderName.VENICE: + return runtime.getSetting("VENICE_API_KEY"); + case ModelProviderName.LIVEPEER: + return runtime.getSetting("LIVEPEER_GATEWAY_URL"); + default: + // If no specific match, try the fallback chain + return (runtime.getSetting("HEURIST_API_KEY") ?? + runtime.getSetting("TOGETHER_API_KEY") ?? + runtime.getSetting("FAL_API_KEY") ?? 
+ runtime.getSetting("OPENAI_API_KEY") ?? + runtime.getSetting("VENICE_API_KEY"))?? + runtime.getSetting("LIVEPEER_GATEWAY_URL"); + } + })(); + try { + if (runtime.imageModelProvider === ModelProviderName.HEURIST) { + const response = await fetch( + "http://sequencer.heurist.xyz/submit_job", + { + method: "POST", + headers: { + Authorization: `Bearer ${apiKey}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + job_id: data.jobId || crypto.randomUUID(), + model_input: { + SD: { + prompt: data.prompt, + neg_prompt: data.negativePrompt, + num_iterations: data.numIterations || 20, + width: data.width || 512, + height: data.height || 512, + guidance_scale: data.guidanceScale || 3, + seed: data.seed || -1, + }, + }, + model_id: data.modelId || "FLUX.1-dev", + deadline: 60, + priority: 1, + }), + } + ); + + if (!response.ok) { + throw new Error( + `Heurist image generation failed: ${response.statusText}` + ); + } + + const imageURL = await response.json(); + return { success: true, data: [imageURL] }; + } else if ( + runtime.imageModelProvider === ModelProviderName.TOGETHER || + // for backwards compat + runtime.imageModelProvider === ModelProviderName.LLAMACLOUD + ) { + const together = new Together({ apiKey: apiKey as string }); + const response = await together.images.create({ + model: "black-forest-labs/FLUX.1-schnell", + prompt: data.prompt, + width: data.width, + height: data.height, + steps: modelSettings?.steps ?? 4, + n: data.count, + }); + + // Add type assertion to handle the response properly + const togetherResponse = + response as unknown as TogetherAIImageResponse; + + if ( + !togetherResponse.data || + !Array.isArray(togetherResponse.data) + ) { + throw new Error("Invalid response format from Together AI"); + } + + // Rest of the code remains the same... + const base64s = await Promise.all( + togetherResponse.data.map(async (image) => { + if (!image.url) { + elizaLogger.error("Missing URL in image data:", image); + throw new Error("Missing URL in Together AI response"); + } + + // Fetch the image from the URL + const imageResponse = await fetch(image.url); + if (!imageResponse.ok) { + throw new Error( + `Failed to fetch image: ${imageResponse.statusText}` + ); + } + + // Convert to blob and then to base64 + const blob = await imageResponse.blob(); + const arrayBuffer = await blob.arrayBuffer(); + const base64 = Buffer.from(arrayBuffer).toString("base64"); + + // Return with proper MIME type + return `data:image/jpeg;base64,${base64}`; + }) + ); + + if (base64s.length === 0) { + throw new Error("No images generated by Together AI"); + } + + elizaLogger.debug(`Generated ${base64s.length} images`); + return { success: true, data: base64s }; + } else if (runtime.imageModelProvider === ModelProviderName.FAL) { + fal.config({ + credentials: apiKey as string, + }); + + // Prepare the input parameters according to their schema + const input = { + prompt: data.prompt, + image_size: "square" as const, + num_inference_steps: modelSettings?.steps ?? 50, + guidance_scale: data.guidanceScale || 3.5, + num_images: data.count, + enable_safety_checker: + runtime.getSetting("FAL_AI_ENABLE_SAFETY_CHECKER") === + "true", + safety_tolerance: Number( + runtime.getSetting("FAL_AI_SAFETY_TOLERANCE") || "2" + ), + output_format: "png" as const, + seed: data.seed ?? 6252023, + ...(runtime.getSetting("FAL_AI_LORA_PATH") + ? 
{ + loras: [ + { + path: runtime.getSetting("FAL_AI_LORA_PATH"), + scale: 1, + }, + ], + } + : {}), + }; + + // Subscribe to the model + const result = await fal.subscribe(model, { + input, + logs: true, + onQueueUpdate: (update) => { + if (update.status === "IN_PROGRESS") { + elizaLogger.info(update.logs.map((log) => log.message)); + } + }, + }); + + // Convert the returned image URLs to base64 to match existing functionality + const base64Promises = result.data.images.map(async (image) => { + const response = await fetch(image.url); + const blob = await response.blob(); + const buffer = await blob.arrayBuffer(); + const base64 = Buffer.from(buffer).toString("base64"); + return `data:${image.content_type};base64,${base64}`; + }); + + const base64s = await Promise.all(base64Promises); + return { success: true, data: base64s }; + } else if (runtime.imageModelProvider === ModelProviderName.VENICE) { + const response = await fetch( + "https://api.venice.ai/api/v1/image/generate", + { + method: "POST", + headers: { + Authorization: `Bearer ${apiKey}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + model: data.modelId || "fluently-xl", + prompt: data.prompt, + negative_prompt: data.negativePrompt, + width: data.width, + height: data.height, + steps: data.numIterations, + seed: data.seed, + style_preset: data.stylePreset, + hide_watermark: data.hideWatermark, + }), + } + ); + + const result = await response.json(); + + if (!result.images || !Array.isArray(result.images)) { + throw new Error("Invalid response format from Venice AI"); + } + + const base64s = result.images.map((base64String) => { + if (!base64String) { + throw new Error( + "Empty base64 string in Venice AI response" + ); + } + return `data:image/png;base64,${base64String}`; + }); + + return { success: true, data: base64s }; + + } else if (runtime.imageModelProvider === ModelProviderName.LIVEPEER) { + if (!apiKey) { + throw new Error("Livepeer Gateway is not defined"); + } + try { + const baseUrl = new URL(apiKey); + if (!baseUrl.protocol.startsWith('http')) { + throw new Error("Invalid Livepeer Gateway URL protocol"); + } + const response = await fetch(`${baseUrl.toString()}text-to-image`, { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify({ + model_id: data.modelId || "ByteDance/SDXL-Lightning", + prompt: data.prompt, + width: data.width || 1024, + height: data.height || 1024 + }) + }); + const result = await response.json(); + if (!result.images?.length) { + throw new Error("No images generated"); + } + const base64Images = await Promise.all( + result.images.map(async (image) => { + console.log("imageUrl console log", image.url); + let imageUrl; + if (image.url.includes("http")) { + imageUrl = image.url; + } else { + imageUrl = `${apiKey}${image.url}`; + } + const imageResponse = await fetch(imageUrl); + if (!imageResponse.ok) { + throw new Error( + `Failed to fetch image: ${imageResponse.statusText}` + ); + } + const blob = await imageResponse.blob(); + const arrayBuffer = await blob.arrayBuffer(); + const base64 = Buffer.from(arrayBuffer).toString("base64"); + return `data:image/jpeg;base64,${base64}`; + }) + ); + return { + success: true, + data: base64Images + }; + } catch (error) { + console.error(error); + return { success: false, error: error }; + } + + } else { + let targetSize = `${data.width}x${data.height}`; + if ( + targetSize !== "1024x1024" && + targetSize !== "1792x1024" && + targetSize !== "1024x1792" + ) { + targetSize = "1024x1024"; + } + 
const openaiApiKey = runtime.getSetting("OPENAI_API_KEY") as string; + if (!openaiApiKey) { + throw new Error("OPENAI_API_KEY is not set"); + } + const openai = new OpenAI({ + apiKey: openaiApiKey as string, + }); + const response = await openai.images.generate({ + model, + prompt: data.prompt, + size: targetSize as "1024x1024" | "1792x1024" | "1024x1792", + n: data.count, + response_format: "b64_json", + }); + const base64s = response.data.map( + (image) => `data:image/png;base64,${image.b64_json}` + ); + return { success: true, data: base64s }; + } + } catch (error) { + console.error(error); + return { success: false, error: error }; + } +}; + +export const generateCaption = async ( + data: { imageUrl: string }, + runtime: IAgentRuntime +): Promise<{ + title: string; + description: string; +}> => { + const { imageUrl } = data; + const imageDescriptionService = + runtime.getService( + ServiceType.IMAGE_DESCRIPTION + ); + + if (!imageDescriptionService) { + throw new Error("Image description service not found"); + } + + const resp = await imageDescriptionService.describeImage(imageUrl); + return { + title: resp.title.trim(), + description: resp.description.trim(), + }; +}; + +export const generateWebSearch = async ( + query: string, + runtime: IAgentRuntime +): Promise => { + const apiUrl = "https://api.tavily.com/search"; + const apiKey = runtime.getSetting("TAVILY_API_KEY"); + + try { + const response = await fetch(apiUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + api_key: apiKey, + query, + include_answer: true, + max_results: 3, // 5 (default) + topic: "general", // "general"(default) "news" + search_depth: "basic", // "basic"(default) "advanced" + include_images: false, // false (default) true + }), + }); + + if (!response.ok) { + throw new elizaLogger.error( + `HTTP error! status: ${response.status}` + ); + } + + const data: SearchResponse = await response.json(); + return data; + } catch (error) { + elizaLogger.error("Error:", error); + } +}; +/** + * Configuration options for generating objects with a model. + */ +export interface GenerationOptions { + runtime: IAgentRuntime; + context: string; + modelClass: ModelClass; + schema?: ZodSchema; + schemaName?: string; + schemaDescription?: string; + stop?: string[]; + mode?: "auto" | "json" | "tool"; + experimental_providerMetadata?: Record; +} + +/** + * Base settings for model generation. + */ +interface ModelSettings { + prompt: string; + temperature: number; + maxTokens: number; + frequencyPenalty: number; + presencePenalty: number; + stop?: string[]; +} + +/** + * Generates structured objects from a prompt using specified AI models and configuration options. + * + * @param {GenerationOptions} options - Configuration options for generating objects. + * @returns {Promise} - A promise that resolves to an array of generated objects. + * @throws {Error} - Throws an error if the provider is unsupported or if generation fails. 
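+ * @example
+ * // A hypothetical call; assumes `mySchema` is a ZodSchema and `runtime` is configured.
+ * const { object } = await generateObject({
+ *     runtime,
+ *     context,
+ *     modelClass: ModelClass.SMALL,
+ *     schema: mySchema,
+ *     schemaName: "MySchema",
+ *     schemaDescription: "A hypothetical schema",
+ * });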
+ */ +export const generateObject = async ({ + runtime, + context, + modelClass, + schema, + schemaName, + schemaDescription, + stop, + mode = "json", +}: GenerationOptions): Promise> => { + if (!context) { + const errorMessage = "generateObject context is empty"; + console.error(errorMessage); + throw new Error(errorMessage); + } + + const provider = runtime.modelProvider; + const model = models[provider].model[modelClass] as TiktokenModel; + if (!model) { + throw new Error(`Unsupported model class: ${modelClass}`); + } + const temperature = models[provider].settings.temperature; + const frequency_penalty = models[provider].settings.frequency_penalty; + const presence_penalty = models[provider].settings.presence_penalty; + const max_context_length = models[provider].settings.maxInputTokens; + const max_response_length = models[provider].settings.maxOutputTokens; + const apiKey = runtime.token; + + try { + context = trimTokens(context, max_context_length, model); + + const modelOptions: ModelSettings = { + prompt: context, + temperature, + maxTokens: max_response_length, + frequencyPenalty: frequency_penalty, + presencePenalty: presence_penalty, + stop: stop || models[provider].settings.stop, + }; + + const response = await handleProvider({ + provider, + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, + runtime, + context, + modelClass, + }); + + return response; + } catch (error) { + console.error("Error in generateObject:", error); + throw error; + } +}; + +/** + * Interface for provider-specific generation options. + */ +interface ProviderOptions { + runtime: IAgentRuntime; + provider: ModelProviderName; + model: any; + apiKey: string; + schema?: ZodSchema; + schemaName?: string; + schemaDescription?: string; + mode?: "auto" | "json" | "tool"; + experimental_providerMetadata?: Record; + modelOptions: ModelSettings; + modelClass: string; + context: string; +} + +/** + * Handles AI generation based on the specified provider. + * + * @param {ProviderOptions} options - Configuration options specific to the provider. + * @returns {Promise} - A promise that resolves to an array of generated objects. + */ +export async function handleProvider( + options: ProviderOptions +): Promise> { + const { provider, runtime, context, modelClass } = options; + switch (provider) { + case ModelProviderName.OPENAI: + case ModelProviderName.ETERNALAI: + case ModelProviderName.ALI_BAILIAN: + case ModelProviderName.VOLENGINE: + case ModelProviderName.LLAMACLOUD: + case ModelProviderName.TOGETHER: + case ModelProviderName.NANOGPT: + case ModelProviderName.AKASH_CHAT_API: + return await handleOpenAI(options); + case ModelProviderName.ANTHROPIC: + case ModelProviderName.CLAUDE_VERTEX: + return await handleAnthropic(options); + case ModelProviderName.GROK: + return await handleGrok(options); + case ModelProviderName.GROQ: + return await handleGroq(options); + case ModelProviderName.LLAMALOCAL: + return await generateObjectDeprecated({ + runtime, + context, + modelClass, + }); + case ModelProviderName.GOOGLE: + return await handleGoogle(options); + case ModelProviderName.REDPILL: + return await handleRedPill(options); + case ModelProviderName.OPENROUTER: + return await handleOpenRouter(options); + case ModelProviderName.OLLAMA: + return await handleOllama(options); + default: { + const errorMessage = `Unsupported provider: ${provider}`; + elizaLogger.error(errorMessage); + throw new Error(errorMessage); + } + } +} +/** + * Handles object generation for OpenAI. 
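+ * Also used by handleProvider for the other OpenAI-compatible providers (e.g. LlamaCloud, Together, NanoGPT).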
+ * + * @param {ProviderOptions} options - Options specific to OpenAI. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleOpenAI({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const baseURL = models.openai.endpoint || undefined; + const openai = createOpenAI({ apiKey, baseURL }); + return await aiGenerateObject({ + model: openai.languageModel(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Anthropic models. + * + * @param {ProviderOptions} options - Options specific to Anthropic. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleAnthropic({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const anthropic = createAnthropic({ apiKey }); + return await aiGenerateObject({ + model: anthropic.languageModel(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Grok models. + * + * @param {ProviderOptions} options - Options specific to Grok. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleGrok({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const grok = createOpenAI({ apiKey, baseURL: models.grok.endpoint }); + return await aiGenerateObject({ + model: grok.languageModel(model, { parallelToolCalls: false }), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Groq models. + * + * @param {ProviderOptions} options - Options specific to Groq. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleGroq({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const groq = createGroq({ apiKey }); + return await aiGenerateObject({ + model: groq.languageModel(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Google models. + * + * @param {ProviderOptions} options - Options specific to Google. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleGoogle({ + model, + apiKey: _apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const google = createGoogleGenerativeAI(); + return await aiGenerateObject({ + model: google(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Redpill models. + * + * @param {ProviderOptions} options - Options specific to Redpill. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleRedPill({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const redPill = createOpenAI({ apiKey, baseURL: models.redpill.endpoint }); + return await aiGenerateObject({ + model: redPill.languageModel(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for OpenRouter models. + * + * @param {ProviderOptions} options - Options specific to OpenRouter. 
+ * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleOpenRouter({ + model, + apiKey, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, +}: ProviderOptions): Promise> { + const openRouter = createOpenAI({ + apiKey, + baseURL: models.openrouter.endpoint, + }); + return await aiGenerateObject({ + model: openRouter.languageModel(model), + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +/** + * Handles object generation for Ollama models. + * + * @param {ProviderOptions} options - Options specific to Ollama. + * @returns {Promise>} - A promise that resolves to generated objects. + */ +async function handleOllama({ + model, + schema, + schemaName, + schemaDescription, + mode, + modelOptions, + provider, +}: ProviderOptions): Promise> { + const ollamaProvider = createOllama({ + baseURL: models[provider].endpoint + "/api", + }); + const ollama = ollamaProvider(model); + return await aiGenerateObject({ + model: ollama, + schema, + schemaName, + schemaDescription, + mode, + ...modelOptions, + }); +} + +// Add type definition for Together AI response +interface TogetherAIImageResponse { + data: Array<{ + url: string; + content_type?: string; + image_type?: string; + }>; +} + +export async function generateTweetActions({ + runtime, + context, + modelClass, +}: { + runtime: IAgentRuntime; + context: string; + modelClass: string; +}): Promise { + let retryDelay = 1000; + while (true) { + try { + const response = await generateText({ + runtime, + context, + modelClass, + }); + console.debug( + "Received response from generateText for tweet actions:", + response + ); + const { actions } = parseActionResponseFromText(response.trim()); + if (actions) { + console.debug("Parsed tweet actions:", actions); + return actions; + } else { + elizaLogger.debug("generateTweetActions no valid response"); + } + } catch (error) { + elizaLogger.error("Error in generateTweetActions:", error); + if ( + error instanceof TypeError && + error.message.includes("queueTextCompletion") + ) { + elizaLogger.error( + "TypeError: Cannot read properties of null (reading 'queueTextCompletion')" + ); + } + } + elizaLogger.log(`Retrying in ${retryDelay}ms...`); + await new Promise((resolve) => setTimeout(resolve, retryDelay)); + retryDelay *= 2; + } +} diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/memory.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/memory.ts new file mode 100644 index 000000000..00baad449 --- /dev/null +++ b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/memory.ts @@ -0,0 +1,243 @@ +// import { embed, getEmbeddingZeroVector } from "./embedding.ts"; +import elizaLogger from "./logger.ts"; +import { + IAgentRuntime, + IMemoryManager, + type Memory, + type UUID, +} from "./types.ts"; + +const defaultMatchThreshold = 0.1; +const defaultMatchCount = 10; + +/** + * Manage memories in the database. + */ +export class MemoryManager implements IMemoryManager { + /** + * The AgentRuntime instance associated with this manager. + */ + runtime: IAgentRuntime; + + /** + * The name of the database table this manager operates on. + */ + tableName: string; + + /** + * Constructs a new MemoryManager instance. + * @param opts Options for the manager. + * @param opts.tableName The name of the table this manager will operate on. + * @param opts.runtime The AgentRuntime instance associated with this manager. 
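+ * @example
+ * // A hypothetical instantiation; assumes an existing IAgentRuntime.
+ * const messageManager = new MemoryManager({ tableName: "messages", runtime });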
+ */ + constructor(opts: { tableName: string; runtime: IAgentRuntime }) { + this.runtime = opts.runtime; + this.tableName = opts.tableName; + } + + /** + * Adds an embedding vector to a memory object. If the memory already has an embedding, it is returned as is. + * @param memory The memory object to add an embedding to. + * @returns A Promise resolving to the memory object, potentially updated with an embedding vector. + */ + /** + * Adds an embedding vector to a memory object if one doesn't already exist. + * The embedding is generated from the memory's text content using the runtime's + * embedding model. If the memory has no text content, an error is thrown. + * + * @param memory The memory object to add an embedding to + * @returns The memory object with an embedding vector added + * @throws Error if the memory content is empty + */ + async addEmbeddingToMemory(memory: Memory): Promise { + // Return early if embedding already exists + if (memory.embedding) { + return memory; + } + + const memoryText = memory.content.text; + + // Validate memory has text content + if (!memoryText) { + throw new Error( + "Cannot generate embedding: Memory content is empty" + ); + } + + try { + // Generate embedding from text content + memory.embedding = await embed(this.runtime, memoryText); + } catch (error) { + elizaLogger.error("Failed to generate embedding:", error); + // Fallback to zero vector if embedding fails + memory.embedding = getEmbeddingZeroVector().slice(); + } + + return memory; + } + + /** + * Retrieves a list of memories by user IDs, with optional deduplication. + * @param opts Options including user IDs, count, and uniqueness. + * @param opts.roomId The room ID to retrieve memories for. + * @param opts.count The number of memories to retrieve. + * @param opts.unique Whether to retrieve unique memories only. + * @returns A Promise resolving to an array of Memory objects. + */ + async getMemories({ + roomId, + count = 10, + unique = true, + start, + end, + }: { + roomId: UUID; + count?: number; + unique?: boolean; + start?: number; + end?: number; + }): Promise { + return await this.runtime.databaseAdapter.getMemories({ + roomId, + count, + unique, + tableName: this.tableName, + agentId: this.runtime.agentId, + start, + end, + }); + } + + async getCachedEmbeddings(content: string): Promise< + { + embedding: number[]; + levenshtein_score: number; + }[] + > { + return await this.runtime.databaseAdapter.getCachedEmbeddings({ + query_table_name: this.tableName, + query_threshold: 2, + query_input: content, + query_field_name: "content", + query_field_sub_name: "text", + query_match_count: 10, + }); + } + + /** + * Searches for memories similar to a given embedding vector. + * @param embedding The embedding vector to search with. + * @param opts Options including match threshold, count, user IDs, and uniqueness. + * @param opts.match_threshold The similarity threshold for matching memories. + * @param opts.count The maximum number of memories to retrieve. + * @param opts.roomId The room ID to retrieve memories for. + * @param opts.unique Whether to retrieve unique memories only. + * @returns A Promise resolving to an array of Memory objects that match the embedding. 
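+ * @example
+ * // A hypothetical search; assumes `manager` is a MemoryManager and `embedding` came from the runtime's embedding model.
+ * const matches = await manager.searchMemoriesByEmbedding(embedding, {
+ *     roomId,
+ *     match_threshold: 0.1,
+ *     count: 10,
+ *     unique: true,
+ * });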
+ */ + async searchMemoriesByEmbedding( + embedding: number[], + opts: { + match_threshold?: number; + count?: number; + roomId: UUID; + unique?: boolean; + } + ): Promise { + const { + match_threshold = defaultMatchThreshold, + count = defaultMatchCount, + roomId, + unique, + } = opts; + + const result = await this.runtime.databaseAdapter.searchMemories({ + tableName: this.tableName, + roomId, + agentId: this.runtime.agentId, + embedding: embedding, + match_threshold: match_threshold, + match_count: count, + unique: !!unique, + }); + + return result; + } + + /** + * Creates a new memory in the database, with an option to check for similarity before insertion. + * @param memory The memory object to create. + * @param unique Whether to check for similarity before insertion. + * @returns A Promise that resolves when the operation completes. + */ + async createMemory(memory: Memory, unique = false): Promise { + // TODO: check memory.agentId == this.runtime.agentId + + const existingMessage = + await this.runtime.databaseAdapter.getMemoryById(memory.id); + + if (existingMessage) { + elizaLogger.debug("Memory already exists, skipping"); + return; + } + + elizaLogger.log("Creating Memory", memory.id, memory.content.text); + + await this.runtime.databaseAdapter.createMemory( + memory, + this.tableName, + unique + ); + } + + async getMemoriesByRoomIds(params: { roomIds: UUID[] }): Promise { + return await this.runtime.databaseAdapter.getMemoriesByRoomIds({ + tableName: this.tableName, + agentId: this.runtime.agentId, + roomIds: params.roomIds, + }); + } + + async getMemoryById(id: UUID): Promise { + const result = await this.runtime.databaseAdapter.getMemoryById(id); + if (result && result.agentId !== this.runtime.agentId) return null; + return result; + } + + /** + * Removes a memory from the database by its ID. + * @param memoryId The ID of the memory to remove. + * @returns A Promise that resolves when the operation completes. + */ + async removeMemory(memoryId: UUID): Promise { + await this.runtime.databaseAdapter.removeMemory( + memoryId, + this.tableName + ); + } + + /** + * Removes all memories associated with a set of user IDs. + * @param roomId The room ID to remove memories for. + * @returns A Promise that resolves when the operation completes. + */ + async removeAllMemories(roomId: UUID): Promise { + await this.runtime.databaseAdapter.removeAllMemories( + roomId, + this.tableName + ); + } + + /** + * Counts the number of memories associated with a set of user IDs, with an option for uniqueness. + * @param roomId The room ID to count memories for. + * @param unique Whether to count unique memories only. + * @returns A Promise resolving to the count of memories. 
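+     *
+     * @example
+     * // Illustrative sketch only; `messageManager` is assumed to be an instance of this class.
+     * // Counts only unique memories for the room unless `unique` is passed as false.
+     * const total = await messageManager.countMemories(roomId);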
+ */ + async countMemories(roomId: UUID, unique = true): Promise { + return await this.runtime.databaseAdapter.countMemories( + roomId, + unique, + this.tableName + ); + } +} diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/parsing.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/parsing.ts new file mode 100644 index 000000000..107ce8ea0 --- /dev/null +++ b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/parsing.ts @@ -0,0 +1,207 @@ +import { ActionResponse } from "./types.ts"; +const jsonBlockPattern = /```json\n([\s\S]*?)\n```/; + +export const messageCompletionFooter = `\nResponse format should be formatted in a JSON block like this: +\`\`\`json +{ "user": "{{agentName}}", "text": "string", "action": "string" } +\`\`\``; + +export const shouldRespondFooter = `The available options are [RESPOND], [IGNORE], or [STOP]. Choose the most appropriate option. +If {{agentName}} is talking too much, you can choose [IGNORE] + +Your response must include one of the options.`; + +export const parseShouldRespondFromText = ( + text: string +): "RESPOND" | "IGNORE" | "STOP" | null => { + const match = text + .split("\n")[0] + .trim() + .replace("[", "") + .toUpperCase() + .replace("]", "") + .match(/^(RESPOND|IGNORE|STOP)$/i); + return match + ? (match[0].toUpperCase() as "RESPOND" | "IGNORE" | "STOP") + : text.includes("RESPOND") + ? "RESPOND" + : text.includes("IGNORE") + ? "IGNORE" + : text.includes("STOP") + ? "STOP" + : null; +}; + +export const booleanFooter = `Respond with only a YES or a NO.`; + +/** + * Parses a string to determine its boolean equivalent. + * + * Recognized affirmative values: "YES", "Y", "TRUE", "T", "1", "ON", "ENABLE". + * Recognized negative values: "NO", "N", "FALSE", "F", "0", "OFF", "DISABLE". + * + * @param {string} text - The input text to parse. + * @returns {boolean|null} - Returns `true` for affirmative inputs, `false` for negative inputs, and `null` for unrecognized inputs or null/undefined. + */ +export const parseBooleanFromText = (text: string) => { + if (!text) return null; // Handle null or undefined input + + const affirmative = ["YES", "Y", "TRUE", "T", "1", "ON", "ENABLE"]; + const negative = ["NO", "N", "FALSE", "F", "0", "OFF", "DISABLE"]; + + const normalizedText = text.trim().toUpperCase(); + + if (affirmative.includes(normalizedText)) { + return true; + } else if (negative.includes(normalizedText)) { + return false; + } + + return null; // Return null for unrecognized inputs +}; + +export const stringArrayFooter = `Respond with a JSON array containing the values in a JSON block formatted for markdown with this structure: +\`\`\`json +[ + 'value', + 'value' +] +\`\`\` + +Your response must include the JSON block.`; + +/** + * Parses a JSON array from a given text. The function looks for a JSON block wrapped in triple backticks + * with `json` language identifier, and if not found, it searches for an array pattern within the text. + * It then attempts to parse the JSON string into a JavaScript object. If parsing is successful and the result + * is an array, it returns the array; otherwise, it returns null. + * + * @param text - The input text from which to extract and parse the JSON array. + * @returns An array parsed from the JSON string if successful; otherwise, null. 
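+ *
+ * @example
+ * // Illustrative sketch only: a single-quoted array embedded in prose is
+ * // normalized to double quotes and parsed.
+ * parseJsonArrayFromText("values: ['alpha']"); // => ["alpha"]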
+ */ +export function parseJsonArrayFromText(text: string) { + let jsonData = null; + + // First try to parse with the original JSON format + const jsonBlockMatch = text.match(jsonBlockPattern); + + if (jsonBlockMatch) { + try { + // Replace single quotes with double quotes before parsing + const normalizedJson = jsonBlockMatch[1].replace(/'/g, '"'); + jsonData = JSON.parse(normalizedJson); + } catch (e) { + console.error("Error parsing JSON:", e); + } + } + + // If that fails, try to find an array pattern + if (!jsonData) { + const arrayPattern = /\[\s*['"][^'"]*['"]\s*\]/; + const arrayMatch = text.match(arrayPattern); + + if (arrayMatch) { + try { + // Replace single quotes with double quotes before parsing + const normalizedJson = arrayMatch[0].replace(/'/g, '"'); + jsonData = JSON.parse(normalizedJson); + } catch (e) { + console.error("Error parsing JSON:", e); + } + } + } + + if (Array.isArray(jsonData)) { + return jsonData; + } + + return null; +} + +/** + * Parses a JSON object from a given text. The function looks for a JSON block wrapped in triple backticks + * with `json` language identifier, and if not found, it searches for an object pattern within the text. + * It then attempts to parse the JSON string into a JavaScript object. If parsing is successful and the result + * is an object (but not an array), it returns the object; otherwise, it tries to parse an array if the result + * is an array, or returns null if parsing is unsuccessful or the result is neither an object nor an array. + * + * @param text - The input text from which to extract and parse the JSON object. + * @returns An object parsed from the JSON string if successful; otherwise, null or the result of parsing an array. + */ +export function parseJSONObjectFromText( + text: string +): Record | null { + let jsonData = null; + + const jsonBlockMatch = text.match(jsonBlockPattern); + + if (jsonBlockMatch) { + try { + jsonData = JSON.parse(jsonBlockMatch[1]); + } catch (e) { + console.error("Error parsing JSON:", e); + return null; + } + } else { + const objectPattern = /{[\s\S]*?}/; + const objectMatch = text.match(objectPattern); + + if (objectMatch) { + try { + jsonData = JSON.parse(objectMatch[0]); + } catch (e) { + console.error("Error parsing JSON:", e); + return null; + } + } + } + + if ( + typeof jsonData === "object" && + jsonData !== null && + !Array.isArray(jsonData) + ) { + return jsonData; + } else if (typeof jsonData === "object" && Array.isArray(jsonData)) { + return parseJsonArrayFromText(text); + } else { + return null; + } +} + +export const postActionResponseFooter = `Choose any combination of [LIKE], [RETWEET], [QUOTE], and [REPLY] that are appropriate. Each action must be on its own line. 
Your response must only include the chosen actions.`; + +export const parseActionResponseFromText = ( + text: string +): { actions: ActionResponse } => { + const actions: ActionResponse = { + like: false, + retweet: false, + quote: false, + reply: false, + }; + + // Regex patterns + const likePattern = /\[LIKE\]/i; + const retweetPattern = /\[RETWEET\]/i; + const quotePattern = /\[QUOTE\]/i; + const replyPattern = /\[REPLY\]/i; + + // Check with regex + actions.like = likePattern.test(text); + actions.retweet = retweetPattern.test(text); + actions.quote = quotePattern.test(text); + actions.reply = replyPattern.test(text); + + // Also do line by line parsing as backup + const lines = text.split("\n"); + for (const line of lines) { + const trimmed = line.trim(); + if (trimmed === "[LIKE]") actions.like = true; + if (trimmed === "[RETWEET]") actions.retweet = true; + if (trimmed === "[QUOTE]") actions.quote = true; + if (trimmed === "[REPLY]") actions.reply = true; + } + + return { actions }; +}; diff --git a/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/types.ts b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/types.ts new file mode 100644 index 000000000..dfc19c2eb --- /dev/null +++ b/packages/usdk/packages/upstreet-agent/packages/elizaos-core-proxy/types.ts @@ -0,0 +1,1280 @@ +import { Readable } from "stream"; + +/** + * Represents a UUID string in the format "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + */ +export type UUID = `${string}-${string}-${string}-${string}-${string}`; + +/** + * Represents the content of a message or communication + */ +export interface Content { + /** The main text content */ + text: string; + + /** Optional action associated with the message */ + action?: string; + + /** Optional source/origin of the content */ + source?: string; + + /** URL of the original message/post (e.g. 
tweet URL, Discord message link) */ + url?: string; + + /** UUID of parent message if this is a reply/thread */ + inReplyTo?: UUID; + + /** Array of media attachments */ + attachments?: Media[]; + + /** Additional dynamic properties */ + [key: string]: unknown; +} + +/** + * Example content with associated user for demonstration purposes + */ +export interface ActionExample { + /** User associated with the example */ + user: string; + + /** Content of the example */ + content: Content; +} + +/** + * Example conversation content with user ID + */ +export interface ConversationExample { + /** UUID of user in conversation */ + userId: UUID; + + /** Content of the conversation */ + content: Content; +} + +/** + * Represents an actor/participant in a conversation + */ +export interface Actor { + /** Display name */ + name: string; + + /** Username/handle */ + username: string; + + /** Additional profile details */ + details: { + /** Short profile tagline */ + tagline: string; + + /** Longer profile summary */ + summary: string; + + /** Favorite quote */ + quote: string; + }; + + /** Unique identifier */ + id: UUID; +} + +/** + * Represents a single objective within a goal + */ +export interface Objective { + /** Optional unique identifier */ + id?: string; + + /** Description of what needs to be achieved */ + description: string; + + /** Whether objective is completed */ + completed: boolean; +} + +/** + * Status enum for goals + */ +export enum GoalStatus { + DONE = "DONE", + FAILED = "FAILED", + IN_PROGRESS = "IN_PROGRESS", +} + +/** + * Represents a high-level goal composed of objectives + */ +export interface Goal { + /** Optional unique identifier */ + id?: UUID; + + /** Room ID where goal exists */ + roomId: UUID; + + /** User ID of goal owner */ + userId: UUID; + + /** Name/title of the goal */ + name: string; + + /** Current status */ + status: GoalStatus; + + /** Component objectives */ + objectives: Objective[]; +} + +/** + * Model size/type classification + */ +export enum ModelClass { + SMALL = "small", + MEDIUM = "medium", + LARGE = "large", + EMBEDDING = "embedding", + IMAGE = "image", +} + +/** + * Configuration for an AI model + */ +export type Model = { + /** Optional API endpoint */ + endpoint?: string; + + /** Model settings */ + settings: { + /** Maximum input tokens */ + maxInputTokens: number; + + /** Maximum output tokens */ + maxOutputTokens: number; + + /** Optional frequency penalty */ + frequency_penalty?: number; + + /** Optional presence penalty */ + presence_penalty?: number; + + /** Optional repetition penalty */ + repetition_penalty?: number; + + /** Stop sequences */ + stop: string[]; + + /** Temperature setting */ + temperature: number; + }; + + /** Optional image generation settings */ + imageSettings?: { + steps?: number; + }; + + /** Model names by size class */ + model: { + [ModelClass.SMALL]: string; + [ModelClass.MEDIUM]: string; + [ModelClass.LARGE]: string; + [ModelClass.EMBEDDING]?: string; + [ModelClass.IMAGE]?: string; + }; +}; + +/** + * Model configurations by provider + */ +export type Models = { + [ModelProviderName.OPENAI]: Model; + [ModelProviderName.ETERNALAI]: Model; + [ModelProviderName.ANTHROPIC]: Model; + [ModelProviderName.GROK]: Model; + [ModelProviderName.GROQ]: Model; + [ModelProviderName.LLAMACLOUD]: Model; + [ModelProviderName.TOGETHER]: Model; + [ModelProviderName.LLAMALOCAL]: Model; + [ModelProviderName.GOOGLE]: Model; + [ModelProviderName.CLAUDE_VERTEX]: Model; + [ModelProviderName.REDPILL]: Model; + 
[ModelProviderName.OPENROUTER]: Model; + [ModelProviderName.OLLAMA]: Model; + [ModelProviderName.HEURIST]: Model; + [ModelProviderName.GALADRIEL]: Model; + [ModelProviderName.FAL]: Model; + [ModelProviderName.GAIANET]: Model; + [ModelProviderName.ALI_BAILIAN]: Model; + [ModelProviderName.VOLENGINE]: Model; + [ModelProviderName.NANOGPT]: Model; + [ModelProviderName.HYPERBOLIC]: Model; + [ModelProviderName.VENICE]: Model; + [ModelProviderName.AKASH_CHAT_API]: Model; + [ModelProviderName.LIVEPEER]: Model; +}; + +/** + * Available model providers + */ +export enum ModelProviderName { + OPENAI = "openai", + ETERNALAI = "eternalai", + ANTHROPIC = "anthropic", + GROK = "grok", + GROQ = "groq", + LLAMACLOUD = "llama_cloud", + TOGETHER = "together", + LLAMALOCAL = "llama_local", + GOOGLE = "google", + CLAUDE_VERTEX = "claude_vertex", + REDPILL = "redpill", + OPENROUTER = "openrouter", + OLLAMA = "ollama", + HEURIST = "heurist", + GALADRIEL = "galadriel", + FAL = "falai", + GAIANET = "gaianet", + ALI_BAILIAN = "ali_bailian", + VOLENGINE = "volengine", + NANOGPT = "nanogpt", + HYPERBOLIC = "hyperbolic", + VENICE = "venice", + AKASH_CHAT_API = "akash_chat_api", + LIVEPEER = "livepeer", +} + +/** + * Represents the current state/context of a conversation + */ +export interface State { + /** ID of user who sent current message */ + userId?: UUID; + + /** ID of agent in conversation */ + agentId?: UUID; + + /** Agent's biography */ + bio: string; + + /** Agent's background lore */ + lore: string; + + /** Message handling directions */ + messageDirections: string; + + /** Post handling directions */ + postDirections: string; + + /** Current room/conversation ID */ + roomId: UUID; + + /** Optional agent name */ + agentName?: string; + + /** Optional message sender name */ + senderName?: string; + + /** String representation of conversation actors */ + actors: string; + + /** Optional array of actor objects */ + actorsData?: Actor[]; + + /** Optional string representation of goals */ + goals?: string; + + /** Optional array of goal objects */ + goalsData?: Goal[]; + + /** Recent message history as string */ + recentMessages: string; + + /** Recent message objects */ + recentMessagesData: Memory[]; + + /** Optional valid action names */ + actionNames?: string; + + /** Optional action descriptions */ + actions?: string; + + /** Optional action objects */ + actionsData?: Action[]; + + /** Optional action examples */ + actionExamples?: string; + + /** Optional provider descriptions */ + providers?: string; + + /** Optional response content */ + responseData?: Content; + + /** Optional recent interaction objects */ + recentInteractionsData?: Memory[]; + + /** Optional recent interactions string */ + recentInteractions?: string; + + /** Optional formatted conversation */ + formattedConversation?: string; + + /** Optional formatted knowledge */ + knowledge?: string; + /** Optional knowledge data */ + knowledgeData?: KnowledgeItem[]; + + /** Additional dynamic properties */ + [key: string]: unknown; +} + +/** + * Represents a stored memory/message + */ +export interface Memory { + /** Optional unique identifier */ + id?: UUID; + + /** Associated user ID */ + userId: UUID; + + /** Associated agent ID */ + agentId: UUID; + + /** Optional creation timestamp */ + createdAt?: number; + + /** Memory content */ + content: Content; + + /** Optional embedding vector */ + embedding?: number[]; + + /** Associated room ID */ + roomId: UUID; + + /** Whether memory is unique */ + unique?: boolean; + + /** Embedding similarity 
score */ + similarity?: number; +} + +/** + * Example message for demonstration + */ +export interface MessageExample { + /** Associated user */ + user: string; + + /** Message content */ + content: Content; +} + +/** + * Handler function type for processing messages + */ +export type Handler = ( + runtime: IAgentRuntime, + message: Memory, + state?: State, + options?: { [key: string]: unknown }, + callback?: HandlerCallback +) => Promise; + +/** + * Callback function type for handlers + */ +export type HandlerCallback = ( + response: Content, + files?: any +) => Promise; + +/** + * Validator function type for actions/evaluators + */ +export type Validator = ( + runtime: IAgentRuntime, + message: Memory, + state?: State +) => Promise; + +/** + * Represents an action the agent can perform + */ +export interface Action { + /** Similar action descriptions */ + similes: string[]; + + /** Detailed description */ + description: string; + + /** Example usages */ + examples: ActionExample[][]; + + /** Handler function */ + handler: Handler; + + /** Action name */ + name: string; + + /** Validation function */ + validate: Validator; + + /** Whether to suppress the initial message when this action is used */ + suppressInitialMessage?: boolean; +} + +/** + * Example for evaluating agent behavior + */ +export interface EvaluationExample { + /** Evaluation context */ + context: string; + + /** Example messages */ + messages: Array; + + /** Expected outcome */ + outcome: string; +} + +/** + * Evaluator for assessing agent responses + */ +export interface Evaluator { + /** Whether to always run */ + alwaysRun?: boolean; + + /** Detailed description */ + description: string; + + /** Similar evaluator descriptions */ + similes: string[]; + + /** Example evaluations */ + examples: EvaluationExample[]; + + /** Handler function */ + handler: Handler; + + /** Evaluator name */ + name: string; + + /** Validation function */ + validate: Validator; +} + +/** + * Provider for external data/services + */ +export interface Provider { + /** Data retrieval function */ + get: ( + runtime: IAgentRuntime, + message: Memory, + state?: State + ) => Promise; +} + +/** + * Represents a relationship between users + */ +export interface Relationship { + /** Unique identifier */ + id: UUID; + + /** First user ID */ + userA: UUID; + + /** Second user ID */ + userB: UUID; + + /** Primary user ID */ + userId: UUID; + + /** Associated room ID */ + roomId: UUID; + + /** Relationship status */ + status: string; + + /** Optional creation timestamp */ + createdAt?: string; +} + +/** + * Represents a user account + */ +export interface Account { + /** Unique identifier */ + id: UUID; + + /** Display name */ + name: string; + + /** Username */ + username: string; + + /** Optional additional details */ + details?: { [key: string]: any }; + + /** Optional email */ + email?: string; + + /** Optional avatar URL */ + avatarUrl?: string; +} + +/** + * Room participant with account details + */ +export interface Participant { + /** Unique identifier */ + id: UUID; + + /** Associated account */ + account: Account; +} + +/** + * Represents a conversation room + */ +export interface Room { + /** Unique identifier */ + id: UUID; + + /** Room participants */ + participants: Participant[]; +} + +/** + * Represents a media attachment + */ +export type Media = { + /** Unique identifier */ + id: string; + + /** Media URL */ + url: string; + + /** Media title */ + title: string; + + /** Media source */ + source: string; + + /** Media description */ + 
description: string; + + /** Text content */ + text: string; + + /** Content type */ + contentType?: string; +}; + +/** + * Client interface for platform connections + */ +export type Client = { + /** Start client connection */ + start: (runtime: IAgentRuntime) => Promise; + + /** Stop client connection */ + stop: (runtime: IAgentRuntime) => Promise; +}; + +/** + * Plugin for extending agent functionality + */ +export type Plugin = { + /** Plugin name */ + name: string; + + /** Plugin description */ + description: string; + + /** Optional actions */ + actions?: Action[]; + + /** Optional providers */ + providers?: Provider[]; + + /** Optional evaluators */ + evaluators?: Evaluator[]; + + /** Optional services */ + services?: Service[]; + + /** Optional clients */ + clients?: Client[]; +}; + +/** + * Available client platforms + */ +export enum Clients { + DISCORD = "discord", + DIRECT = "direct", + TWITTER = "twitter", + TELEGRAM = "telegram", + FARCASTER = "farcaster", + LENS = "lens", + AUTO = "auto", + SLACK = "slack", +} + +export interface IAgentConfig { + [key: string]: string; +} + +export interface ModelConfiguration { + temperature?: number; + max_response_length?: number; + frequency_penalty?: number; + presence_penalty?: number; + maxInputTokens?: number; +} + +/** + * Configuration for an agent character + */ +export type Character = { + /** Optional unique identifier */ + id?: UUID; + + /** Character name */ + name: string; + + /** Optional username */ + username?: string; + + /** Optional system prompt */ + system?: string; + + /** Model provider to use */ + modelProvider: ModelProviderName; + + /** Image model provider to use, if different from modelProvider */ + imageModelProvider?: ModelProviderName; + + /** Optional model endpoint override */ + modelEndpointOverride?: string; + + /** Optional prompt templates */ + templates?: { + goalsTemplate?: string; + factsTemplate?: string; + messageHandlerTemplate?: string; + shouldRespondTemplate?: string; + continueMessageHandlerTemplate?: string; + evaluationTemplate?: string; + twitterSearchTemplate?: string; + twitterActionTemplate?: string; + twitterPostTemplate?: string; + twitterMessageHandlerTemplate?: string; + twitterShouldRespondTemplate?: string; + farcasterPostTemplate?: string; + lensPostTemplate?: string; + farcasterMessageHandlerTemplate?: string; + lensMessageHandlerTemplate?: string; + farcasterShouldRespondTemplate?: string; + lensShouldRespondTemplate?: string; + telegramMessageHandlerTemplate?: string; + telegramShouldRespondTemplate?: string; + discordVoiceHandlerTemplate?: string; + discordShouldRespondTemplate?: string; + discordMessageHandlerTemplate?: string; + slackMessageHandlerTemplate?: string; + slackShouldRespondTemplate?: string; + }; + + /** Character biography */ + bio: string | string[]; + + /** Character background lore */ + lore: string[]; + + /** Example messages */ + messageExamples: MessageExample[][]; + + /** Example posts */ + postExamples: string[]; + + /** Known topics */ + topics: string[]; + + /** Character traits */ + adjectives: string[]; + + /** Optional knowledge base */ + knowledge?: string[]; + + /** Supported client platforms */ + clients: Clients[]; + + /** Available plugins */ + plugins: Plugin[]; + + /** Optional configuration */ + settings?: { + secrets?: { [key: string]: string }; + intiface?: boolean; + imageSettings?: { + steps?: number; + width?: number; + height?: number; + negativePrompt?: string; + numIterations?: number; + guidanceScale?: number; + seed?: number; + 
modelId?: string; + jobId?: string; + count?: number; + stylePreset?: string; + hideWatermark?: boolean; + }; + voice?: { + model?: string; // For VITS + url?: string; // Legacy VITS support + elevenlabs?: { + // New structured ElevenLabs config + voiceId: string; + model?: string; + stability?: string; + similarityBoost?: string; + style?: string; + useSpeakerBoost?: string; + }; + }; + model?: string; + modelConfig?: ModelConfiguration; + embeddingModel?: string; + chains?: { + evm?: any[]; + solana?: any[]; + [key: string]: any[]; + }; + }; + + /** Optional client-specific config */ + clientConfig?: { + discord?: { + shouldIgnoreBotMessages?: boolean; + shouldIgnoreDirectMessages?: boolean; + shouldRespondOnlyToMentions?: boolean; + messageSimilarityThreshold?: number; + isPartOfTeam?: boolean; + teamAgentIds?: string[]; + teamLeaderId?: string; + teamMemberInterestKeywords?: string[]; + }; + telegram?: { + shouldIgnoreBotMessages?: boolean; + shouldIgnoreDirectMessages?: boolean; + shouldRespondOnlyToMentions?: boolean; + shouldOnlyJoinInAllowedGroups?: boolean; + allowedGroupIds?: string[]; + messageSimilarityThreshold?: number; + isPartOfTeam?: boolean; + teamAgentIds?: string[]; + teamLeaderId?: string; + teamMemberInterestKeywords?: string[]; + }; + slack?: { + shouldIgnoreBotMessages?: boolean; + shouldIgnoreDirectMessages?: boolean; + }; + gitbook?: { + keywords?: { + projectTerms?: string[]; + generalQueries?: string[]; + }; + documentTriggers?: string[]; + }; + }; + + /** Writing style guides */ + style: { + all: string[]; + chat: string[]; + post: string[]; + }; + + /** Optional Twitter profile */ + twitterProfile?: { + id: string; + username: string; + screenName: string; + bio: string; + nicknames?: string[]; + }; + /** Optional NFT prompt */ + nft?: { + prompt: string; + }; +}; + +/** + * Interface for database operations + */ +export interface IDatabaseAdapter { + /** Database instance */ + db: any; + + /** Optional initialization */ + init(): Promise; + + /** Close database connection */ + close(): Promise; + + /** Get account by ID */ + getAccountById(userId: UUID): Promise; + + /** Create new account */ + createAccount(account: Account): Promise; + + /** Get memories matching criteria */ + getMemories(params: { + roomId: UUID; + count?: number; + unique?: boolean; + tableName: string; + agentId: UUID; + start?: number; + end?: number; + }): Promise; + + getMemoryById(id: UUID): Promise; + + getMemoriesByRoomIds(params: { + tableName: string; + agentId: UUID; + roomIds: UUID[]; + }): Promise; + + getCachedEmbeddings(params: { + query_table_name: string; + query_threshold: number; + query_input: string; + query_field_name: string; + query_field_sub_name: string; + query_match_count: number; + }): Promise<{ embedding: number[]; levenshtein_score: number }[]>; + + log(params: { + body: { [key: string]: unknown }; + userId: UUID; + roomId: UUID; + type: string; + }): Promise; + + getActorDetails(params: { roomId: UUID }): Promise; + + searchMemories(params: { + tableName: string; + agentId: UUID; + roomId: UUID; + embedding: number[]; + match_threshold: number; + match_count: number; + unique: boolean; + }): Promise; + + updateGoalStatus(params: { + goalId: UUID; + status: GoalStatus; + }): Promise; + + searchMemoriesByEmbedding( + embedding: number[], + params: { + match_threshold?: number; + count?: number; + roomId?: UUID; + agentId?: UUID; + unique?: boolean; + tableName: string; + } + ): Promise; + + createMemory( + memory: Memory, + tableName: string, + unique?: 
boolean + ): Promise; + + removeMemory(memoryId: UUID, tableName: string): Promise; + + removeAllMemories(roomId: UUID, tableName: string): Promise; + + countMemories( + roomId: UUID, + unique?: boolean, + tableName?: string + ): Promise; + + getGoals(params: { + agentId: UUID; + roomId: UUID; + userId?: UUID | null; + onlyInProgress?: boolean; + count?: number; + }): Promise; + + updateGoal(goal: Goal): Promise; + + createGoal(goal: Goal): Promise; + + removeGoal(goalId: UUID): Promise; + + removeAllGoals(roomId: UUID): Promise; + + getRoom(roomId: UUID): Promise; + + createRoom(roomId?: UUID): Promise; + + removeRoom(roomId: UUID): Promise; + + getRoomsForParticipant(userId: UUID): Promise; + + getRoomsForParticipants(userIds: UUID[]): Promise; + + addParticipant(userId: UUID, roomId: UUID): Promise; + + removeParticipant(userId: UUID, roomId: UUID): Promise; + + getParticipantsForAccount(userId: UUID): Promise; + + getParticipantsForRoom(roomId: UUID): Promise; + + getParticipantUserState( + roomId: UUID, + userId: UUID + ): Promise<"FOLLOWED" | "MUTED" | null>; + + setParticipantUserState( + roomId: UUID, + userId: UUID, + state: "FOLLOWED" | "MUTED" | null + ): Promise; + + createRelationship(params: { userA: UUID; userB: UUID }): Promise; + + getRelationship(params: { + userA: UUID; + userB: UUID; + }): Promise; + + getRelationships(params: { userId: UUID }): Promise; +} + +export interface IDatabaseCacheAdapter { + getCache(params: { + agentId: UUID; + key: string; + }): Promise; + + setCache(params: { + agentId: UUID; + key: string; + value: string; + }): Promise; + + deleteCache(params: { agentId: UUID; key: string }): Promise; +} + +export interface IMemoryManager { + runtime: IAgentRuntime; + tableName: string; + constructor: Function; + + addEmbeddingToMemory(memory: Memory): Promise; + + getMemories(opts: { + roomId: UUID; + count?: number; + unique?: boolean; + start?: number; + end?: number; + }): Promise; + + getCachedEmbeddings( + content: string + ): Promise<{ embedding: number[]; levenshtein_score: number }[]>; + + getMemoryById(id: UUID): Promise; + getMemoriesByRoomIds(params: { roomIds: UUID[] }): Promise; + searchMemoriesByEmbedding( + embedding: number[], + opts: { + match_threshold?: number; + count?: number; + roomId: UUID; + unique?: boolean; + } + ): Promise; + + createMemory(memory: Memory, unique?: boolean): Promise; + + removeMemory(memoryId: UUID): Promise; + + removeAllMemories(roomId: UUID): Promise; + + countMemories(roomId: UUID, unique?: boolean): Promise; +} + +export type CacheOptions = { + expires?: number; +}; + +export enum CacheStore { + REDIS = "redis", + DATABASE = "database", + FILESYSTEM = "filesystem", +} + +export interface ICacheManager { + get(key: string): Promise; + set(key: string, value: T, options?: CacheOptions): Promise; + delete(key: string): Promise; +} + +export abstract class Service { + private static instance: Service | null = null; + + static get serviceType(): ServiceType { + throw new Error("Service must implement static serviceType getter"); + } + + public static getInstance(): T { + if (!Service.instance) { + Service.instance = new (this as any)(); + } + return Service.instance as T; + } + + get serviceType(): ServiceType { + return (this.constructor as typeof Service).serviceType; + } + + // Add abstract initialize method that must be implemented by derived classes + abstract initialize(runtime: IAgentRuntime): Promise; +} + +export interface IAgentRuntime { + // Properties + agentId: UUID; + serverUrl: string; + 
databaseAdapter: IDatabaseAdapter; + token: string | null; + modelProvider: ModelProviderName; + imageModelProvider: ModelProviderName; + character: Character; + providers: Provider[]; + actions: Action[]; + evaluators: Evaluator[]; + plugins: Plugin[]; + + fetch?: typeof fetch | null; + + messageManager: IMemoryManager; + descriptionManager: IMemoryManager; + documentsManager: IMemoryManager; + knowledgeManager: IMemoryManager; + loreManager: IMemoryManager; + + cacheManager: ICacheManager; + + services: Map; + // any could be EventEmitter + // but I think the real solution is forthcoming as a base client interface + clients: Record; + + initialize(): Promise; + + registerMemoryManager(manager: IMemoryManager): void; + + getMemoryManager(name: string): IMemoryManager | null; + + getService(service: ServiceType): T | null; + + registerService(service: Service): void; + + getSetting(key: string): string | null; + + // Methods + getConversationLength(): number; + + processActions( + message: Memory, + responses: Memory[], + state?: State, + callback?: HandlerCallback + ): Promise; + + evaluate( + message: Memory, + state?: State, + didRespond?: boolean, + callback?: HandlerCallback + ): Promise; + + ensureParticipantExists(userId: UUID, roomId: UUID): Promise; + + ensureUserExists( + userId: UUID, + userName: string | null, + name: string | null, + source: string | null + ): Promise; + + registerAction(action: Action): void; + + ensureConnection( + userId: UUID, + roomId: UUID, + userName?: string, + userScreenName?: string, + source?: string + ): Promise; + + ensureParticipantInRoom(userId: UUID, roomId: UUID): Promise; + + ensureRoomExists(roomId: UUID): Promise; + + composeState( + message: Memory, + additionalKeys?: { [key: string]: unknown } + ): Promise; + + updateRecentMessageState(state: State): Promise; +} + +export interface IImageDescriptionService extends Service { + describeImage( + imageUrl: string + ): Promise<{ title: string; description: string }>; +} + +export interface ITranscriptionService extends Service { + transcribeAttachment(audioBuffer: ArrayBuffer): Promise; + transcribeAttachmentLocally( + audioBuffer: ArrayBuffer + ): Promise; + transcribe(audioBuffer: ArrayBuffer): Promise; + transcribeLocally(audioBuffer: ArrayBuffer): Promise; +} + +export interface IVideoService extends Service { + isVideoUrl(url: string): boolean; + fetchVideoInfo(url: string): Promise; + downloadVideo(videoInfo: Media): Promise; + processVideo(url: string, runtime: IAgentRuntime): Promise; +} + +export interface ITextGenerationService extends Service { + initializeModel(): Promise; + queueMessageCompletion( + context: string, + temperature: number, + stop: string[], + frequency_penalty: number, + presence_penalty: number, + max_tokens: number + ): Promise; + queueTextCompletion( + context: string, + temperature: number, + stop: string[], + frequency_penalty: number, + presence_penalty: number, + max_tokens: number + ): Promise; + getEmbeddingResponse(input: string): Promise; +} + +export interface IBrowserService extends Service { + closeBrowser(): Promise; + getPageContent( + url: string, + runtime: IAgentRuntime + ): Promise<{ title: string; description: string; bodyContent: string }>; +} + +export interface ISpeechService extends Service { + getInstance(): ISpeechService; + generate(runtime: IAgentRuntime, text: string): Promise; +} + +export interface IPdfService extends Service { + getInstance(): IPdfService; + convertPdfToText(pdfBuffer: Buffer): Promise; +} + +export interface 
IAwsS3Service extends Service {
+    uploadFile(
+        imagePath: string,
+        subDirectory: string,
+        useSignedUrl: boolean,
+        expiresIn: number
+    ): Promise<{
+        success: boolean;
+        url?: string;
+        error?: string;
+    }>;
+    generateSignedUrl(fileName: string, expiresIn: number): Promise<string>;
+}
+
+export type SearchResult = {
+    title: string;
+    url: string;
+    content: string;
+    score: number;
+    raw_content: string | null;
+};
+
+export type SearchResponse = {
+    query: string;
+    follow_up_questions: string[] | null;
+    answer: string | null;
+    images: string[];
+    results: SearchResult[];
+    response_time: number;
+};
+
+export enum ServiceType {
+    IMAGE_DESCRIPTION = "image_description",
+    TRANSCRIPTION = "transcription",
+    VIDEO = "video",
+    TEXT_GENERATION = "text_generation",
+    BROWSER = "browser",
+    SPEECH_GENERATION = "speech_generation",
+    PDF = "pdf",
+    INTIFACE = "intiface",
+    AWS_S3 = "aws_s3",
+    BUTTPLUG = "buttplug",
+    SLACK = "slack",
+}
+
+export enum LoggingLevel {
+    DEBUG = "debug",
+    VERBOSE = "verbose",
+    NONE = "none",
+}
+
+export type KnowledgeItem = {
+    id: UUID;
+    content: Content;
+};
+
+export interface ActionResponse {
+    like: boolean;
+    retweet: boolean;
+    quote?: boolean;
+    reply?: boolean;
+}
+
+export interface ISlackService extends Service {
+    client: any;
+}
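+
+/**
+ * Usage sketch (illustrative only): a concrete service satisfies the abstract
+ * `Service` contract by exposing a static `serviceType` getter and implementing
+ * `initialize`. The class name and empty body below are hypothetical and not
+ * part of the ElizaOS API.
+ */
+export class ExampleSlackService extends Service implements ISlackService {
+    client: any = null;
+
+    static get serviceType(): ServiceType {
+        return ServiceType.SLACK;
+    }
+
+    async initialize(_runtime: IAgentRuntime): Promise<void> {
+        // A real implementation would construct and store the platform client here.
+    }
+}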