diff --git a/README.md b/README.md
index b8202ecd0f..ea1894c77e 100644
--- a/README.md
+++ b/README.md
@@ -96,6 +96,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
+- [Google Vertex](https://cloud.google.com/vertex-ai)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
**Embedder models:**
diff --git a/frontend/src/components/LLMSelection/VertexLLMOptions/index.jsx b/frontend/src/components/LLMSelection/VertexLLMOptions/index.jsx
new file mode 100644
index 0000000000..6478a4caff
--- /dev/null
+++ b/frontend/src/components/LLMSelection/VertexLLMOptions/index.jsx
@@ -0,0 +1,68 @@
+export default function VertexLLMOptions({ settings }) {
+  return (
+    <div className="w-full flex items-center gap-[36px] mt-1.5 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          GCP Project Name
+        </label>
+        <input
+          type="text"
+          name="VertexProjectName"
+          className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+          placeholder="my-gcp-project"
+          defaultValue={settings?.VertexProjectName}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          GCP Region
+        </label>
+        <input
+          type="text"
+          name="VertexRegion"
+          className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+          placeholder="us-central1"
+          defaultValue={settings?.VertexRegion}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <VertexLLMModelSelection settings={settings} />
+      )}
+    </div>
+  );
+
+  function VertexLLMModelSelection({ settings }) {
+    return (
+      <>
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            Chat Model Selection
+          </label>
+          <select
+            name="VertexLLMModelPref"
+            defaultValue={settings?.VertexLLMModelPref || "gemini-1.5-flash"}
+            required={true}
+            className="border-none bg-theme-settings-input-bg text-white text-sm rounded-lg block w-full p-2.5"
+          >
+            {[
+              "gemini-1.5-flash",
+              "gemini-1.5-pro",
+              "gemini-1.5-flash-001",
+              "gemini-1.5-pro-001",
+              "gemini-1.5-flash-002",
+              "gemini-1.5-pro-002",
+            ].map((model) => (
+              <option key={model} value={model}>
+                {model}
+              </option>
+            ))}
+          </select>
+        </div>
+      </>
+    );
+  }
+}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index 57cf650188..bcdc0e07d4 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -11,6 +11,14 @@ export const DISABLED_PROVIDERS = [
];
const PROVIDER_DEFAULT_MODELS = {
openai: [],
+ vertex: [
+ "gemini-1.5-flash",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash-001",
+ "gemini-1.5-pro-001",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro-002",
+ ],
gemini: [
"gemini-pro",
"gemini-1.0-pro",
diff --git a/frontend/src/media/llmprovider/vertex.png b/frontend/src/media/llmprovider/vertex.png
new file mode 100644
index 0000000000..0c97267630
Binary files /dev/null and b/frontend/src/media/llmprovider/vertex.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 252418d671..e749cf0d5c 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -10,6 +10,7 @@ import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
+import VertexLogo from "@/media/llmprovider/vertex.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
@@ -39,6 +40,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
+import VertexLLMOptions from "@/components/LLMSelection/VertexLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
@@ -94,6 +96,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Google's largest and most capable AI model",
requiredConfig: ["GeminiLLMApiKey"],
},
+ {
+ name: "Vertex",
+ value: "vertex",
+ logo: VertexLogo,
+ options: (settings) => <VertexLLMOptions settings={settings} />,
+ description: "Google's generative AI model platform",
+ requiredConfig: ["VertexProjectName", "VertexRegion"],
+ },
{
name: "HuggingFace",
value: "huggingface",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index ab83a5af24..7dfa096349 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -6,6 +6,7 @@ import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
+import VertexLogo from "@/media/llmprovider/vertex.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import FireworksAILogo from "@/media/llmprovider/fireworksai.jpeg";
@@ -76,6 +77,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: GeminiLogo,
},
+ vertex: {
+ name: "Vertex",
+ description: [
+ "Your chats will not be used for training",
+ "Your prompts and document text used in response creation are visible to Google",
+ ],
+ logo: VertexLogo,
+ },
lmstudio: {
name: "LMStudio",
description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 69704f19c7..ff10459dd8 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -5,6 +5,7 @@ import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
+import VertexLogo from "@/media/llmprovider/vertex.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
@@ -34,6 +35,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
+import VertexLLMOptions from "@/components/LLMSelection/VertexLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
@@ -91,6 +93,13 @@ const LLMS = [
options: (settings) => <GeminiLLMOptions settings={settings} />,
description: "Google's largest and most capable AI model",
},
+ {
+ name: "Vertex",
+ value: "vertex",
+ logo: VertexLogo,
+ options: (settings) => <VertexLLMOptions settings={settings} />,
+ description: "Google's generative AI model platform",
+ },
{
name: "HuggingFace",
value: "huggingface",
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index dd54b8e362..372090413b 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -433,6 +433,14 @@ const SystemSettings = {
GeminiSafetySetting:
process.env.GEMINI_SAFETY_SETTING || "BLOCK_MEDIUM_AND_ABOVE",
+ // Vertex
+ VertexLLMModelPref:
+ process.env.VERTEX_LLM_MODEL_PREF || "gemini-1.5-flash",
+ VertexSafetySetting:
+ process.env.VERTEX_SAFETY_SETTING || "BLOCK_MEDIUM_AND_ABOVE",
+ VertexProjectName: process.env.VERTEX_PROJECT_NAME,
+ VertexRegion: process.env.VERTEX_REGION,
+
// LMStudio Keys
LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
diff --git a/server/package.json b/server/package.json
index e090b431af..dd1efe7e2b 100644
--- a/server/package.json
+++ b/server/package.json
@@ -22,6 +22,7 @@
"@anthropic-ai/sdk": "^0.32.1",
"@azure/openai": "1.0.0-beta.10",
"@datastax/astra-db-ts": "^0.1.3",
+ "@google-cloud/vertexai": "^1.9.0",
"@google/generative-ai": "^0.7.1",
"@ladjs/graceful": "^3.2.2",
"@lancedb/lancedb": "0.5.2",
@@ -92,8 +93,8 @@
"flow-remove-types": "^2.217.1",
"globals": "^13.21.0",
"hermes-eslint": "^0.15.0",
- "nodemon": "^2.0.22",
"node-html-markdown": "^1.3.0",
+ "nodemon": "^2.0.22",
"prettier": "^3.0.3"
}
-}
\ No newline at end of file
+}
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index f9bf00672c..f3eacfc033 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -38,6 +38,14 @@ const MODEL_MAP = {
"gemini-exp-1121": 32_767,
"learnlm-1.5-pro-experimental": 32_767,
},
+ vertex: {
+ "gemini-1.5-flash": 1_048_576,
+ "gemini-1.5-pro": 2_097_152,
+ "gemini-1.5-flash-001": 1_048_576,
+ "gemini-1.5-pro-001": 2_097_152,
+ "gemini-1.5-flash-002": 1_048_576,
+ "gemini-1.5-pro-002": 2_097_152,
+ },
groq: {
"gemma2-9b-it": 8192,
"gemma-7b-it": 8192,
diff --git a/server/utils/AiProviders/vertex/index.js b/server/utils/AiProviders/vertex/index.js
new file mode 100644
index 0000000000..6906e64b20
--- /dev/null
+++ b/server/utils/AiProviders/vertex/index.js
@@ -0,0 +1,298 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+const { v4: uuidv4 } = require("uuid");
+const { MODEL_MAP } = require("../modelMap");
+
+class VertexLLM {
+ constructor(embedder = null, modelPreference = null) {
+ const { VertexAI } = require("@google-cloud/vertexai");
+ const vertexAI = new VertexAI({
+ project: process.env.VERTEX_PROJECT_NAME,
+ location: process.env.VERTEX_REGION,
+ });
+ this.model =
+ modelPreference ||
+ process.env.VERTEX_LLM_MODEL_PREF ||
+ "gemini-1.5-flash";
+ this.vertex = vertexAI.getGenerativeModel({
+ model: this.model,
+ });
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+ this.safetyThreshold = this.#fetchSafetyThreshold();
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ // BLOCK_NONE can be a special candidate for some fields
+ // https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-attributes#how_to_remove_automated_response_blocking_for_select_safety_attributes
+ // so if you are wondering why BLOCK_NONE still failed, the link above will explain why.
+ #fetchSafetyThreshold() {
+ const threshold =
+ process.env.VERTEX_SAFETY_SETTING ?? "BLOCK_MEDIUM_AND_ABOVE";
+ const safetyThresholds = [
+ "BLOCK_NONE",
+ "BLOCK_ONLY_HIGH",
+ "BLOCK_MEDIUM_AND_ABOVE",
+ "BLOCK_LOW_AND_ABOVE",
+ ];
+ return safetyThresholds.includes(threshold)
+ ? threshold
+ : "BLOCK_MEDIUM_AND_ABOVE";
+ }
+
+ #safetySettings() {
+ return [
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: this.safetyThreshold,
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: this.safetyThreshold,
+ },
+ { category: "HARM_CATEGORY_HARASSMENT", threshold: this.safetyThreshold },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: this.safetyThreshold,
+ },
+ ];
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ static promptWindowLimit(modelName) {
+ return MODEL_MAP.vertex[modelName] ?? 30_720;
+ }
+
+ promptWindowLimit() {
+ return MODEL_MAP.vertex[this.model] ?? 30_720;
+ }
+
+ isValidChatCompletionModel(modelName = "") {
+ const validModels = [
+ "gemini-1.5-flash",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash-001",
+ "gemini-1.5-pro-001",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro-002",
+ ];
+ return validModels.includes(modelName);
+ }
+
+ /**
+ * Generates appropriate content array for a message + attachments.
+ * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+ * @returns {string|object[]}
+ */
+ #generateContent({ userPrompt, attachments = [] }) {
+ if (!attachments.length) {
+ return userPrompt;
+ }
+
+ const content = [{ text: userPrompt }];
+ for (let attachment of attachments) {
+ content.push({
+ inlineData: {
+ data: attachment.contentString.split("base64,")[1],
+ mimeType: attachment.mime,
+ },
+ });
+ }
+ return content.flat();
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ attachments = [],
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [
+ prompt,
+ { role: "assistant", content: "Okay." },
+ ...chatHistory,
+ {
+ role: "USER_PROMPT",
+ content: this.#generateContent({ userPrompt, attachments }),
+ },
+ ];
+ }
+
+ // This will take an OpenAI-format message array and only pluck valid roles from it.
+ formatMessages(messages = []) {
+ // Gemini roles are either "user" or "model",
+ // and all "content" is relabeled to "parts".
+ const allMessages = messages
+ .map((message) => {
+ if (message.role === "system")
+ return { role: "user", parts: [{ text: message.content }] };
+ if (message.role === "user")
+ return { role: "user", parts: [{ text: message.content }] };
+ if (message.role === "assistant")
+ return { role: "model", parts: [{ text: message.content }] };
+ return null;
+ })
+ .filter((msg) => !!msg);
+
+ // Specifically, Google cannot have the last sent message be from a user with no assistant reply
+ // otherwise it will crash. So if the last item is from the user, it was not completed so pop it off
+ // the history.
+ if (
+ allMessages.length > 0 &&
+ allMessages[allMessages.length - 1].role === "user"
+ )
+ allMessages.pop();
+
+ // Validate that after every user message there is a model reply.
+ // When compressing messages to retain as much context as possible, the
+ // strict user/model alternation the model expects can break, leaving two
+ // user prompts adjacent in the array; insert a filler model reply between them.
+ for (let i = 0; i < allMessages.length; i++) {
+ if (
+ allMessages[i].role === "user" &&
+ i < allMessages.length - 1 &&
+ allMessages[i + 1].role !== "model"
+ ) {
+ allMessages.splice(i + 1, 0, {
+ role: "model",
+ parts: [{ text: "Okay." }],
+ });
+ }
+ }
+
+ return allMessages;
+ }
+
+ async getChatCompletion(messages = [], _opts = {}) {
+ if (!this.isValidChatCompletionModel(this.model))
+ throw new Error(
+ `Vertex chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const prompt = messages.find(
+ (chat) => chat.role === "USER_PROMPT"
+ )?.content;
+ const chatThread = this.vertex.startChat({
+ history: this.formatMessages(messages),
+ safetySettings: this.#safetySettings(),
+ });
+ const result = await chatThread.sendMessage(prompt);
+ const response = result.response;
+ const responseText = response.text();
+
+ if (!responseText) throw new Error("Vertex: No response could be parsed.");
+
+ return responseText;
+ }
+
+ async streamGetChatCompletion(messages = [], _opts = {}) {
+ if (!this.isValidChatCompletionModel(this.model))
+ throw new Error(
+ `Vertex chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const prompt = messages.find(
+ (chat) => chat.role === "USER_PROMPT"
+ )?.content;
+ const chatThread = this.vertex.startChat({
+ history: this.formatMessages(messages),
+ safetySettings: this.#safetySettings(),
+ });
+ const responseStream = await chatThread.sendMessageStream(prompt);
+ if (!responseStream.stream)
+ throw new Error("Could not stream response stream from Gemini.");
+
+ return responseStream.stream;
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+
+ async handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
+
+ return new Promise(async (resolve) => {
+ let fullText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => clientAbortedHandler(resolve, fullText);
+ response.on("close", handleAbort);
+
+ for await (const chunk of stream) {
+ try {
+ for (const part of chunk.candidates[0].content.parts) {
+ fullText += part.text;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: part.text,
+ close: false,
+ error: false,
+ });
+ }
+ } catch (e) {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "abort",
+ textResponse: null,
+ close: true,
+ error: e.message,
+ });
+ resolve(e.message);
+ return;
+ }
+ }
+
+ response.removeListener("close", handleAbort);
+ resolve(fullText);
+ });
+ }
+
+ // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+}
+
+module.exports = {
+ VertexLLM,
+};
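A hedged sketch of driving the new provider directly, outside the normal chat flow. The project and region values are placeholders, credentials are assumed to come from GCP Application Default Credentials (which the @google-cloud/vertexai SDK resolves on its own), and the message shape follows constructPrompt() above:

```js
// Sketch only: exercising VertexLLM by hand with placeholder settings.
process.env.VERTEX_PROJECT_NAME = "my-gcp-project"; // placeholder
process.env.VERTEX_REGION = "us-central1"; // placeholder

const { VertexLLM } = require("./server/utils/AiProviders/vertex"); // path from repo root

async function main() {
  const llm = new VertexLLM(null, "gemini-1.5-flash");

  // constructPrompt() emits the system / assistant / USER_PROMPT roles that
  // formatMessages() and getChatCompletion() expect to receive.
  const messages = llm.constructPrompt({
    systemPrompt: "You are a concise assistant.",
    contextTexts: [],
    chatHistory: [],
    userPrompt: "In one sentence, what is Vertex AI?",
  });

  const reply = await llm.getChatCompletion(messages);
  console.log(reply);
}

main().catch(console.error);
```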
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 6b1d42af29..7ce073aaa7 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -214,6 +214,8 @@ class AgentHandler {
return process.env.KOBOLD_CPP_MODEL_PREF ?? null;
case "gemini":
return process.env.GEMINI_MODEL_PREF ?? "gemini-pro";
+ case "vertex":
+ return process.env.VERTEX_LLM_MODEL_PREF ?? "gemini-1.5-pro";
case "localai":
return process.env.LOCAL_AI_MODEL_PREF ?? null;
case "openrouter":
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index cbf07fbd0e..a263c0ec80 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -108,6 +108,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return new GeminiLLM(embedder, model);
+ case "vertex":
+ const { VertexLLM } = require("../AiProviders/vertex");
+ return new VertexLLM(embedder, model);
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return new LMStudioLLM(embedder, model);
@@ -246,6 +249,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "gemini":
const { GeminiLLM } = require("../AiProviders/gemini");
return GeminiLLM;
+ case "vertex":
+ const { VertexLLM } = require("../AiProviders/vertex");
+ return VertexLLM;
case "lmstudio":
const { LMStudioLLM } = require("../AiProviders/lmStudio");
return LMStudioLLM;
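With the two `case "vertex"` branches above in place, the rest of the server obtains a Vertex-backed handle exactly like any other provider. A small usage sketch, assuming VERTEX_PROJECT_NAME and VERTEX_REGION are already set:

```js
// Sketch: resolving the Vertex provider through the shared helper.
const { getLLMProvider } = require("./server/utils/helpers"); // path from repo root

const llm = getLLMProvider({ provider: "vertex", model: "gemini-1.5-pro" });
console.log(llm.promptWindowLimit()); // 2097152, per MODEL_MAP.vertex
```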
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 2c43846210..1e1f03ead2 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -57,6 +57,23 @@ const KEY_MAPPING = {
checks: [validGeminiSafetySetting],
},
+ VertexLLMModelPref: {
+ envKey: "VERTEX_LLM_MODEL_PREF",
+ checks: [isNotEmpty, validVertexModel],
+ },
+ VertexProjectName: {
+ envKey: "VERTEX_PROJECT_NAME",
+ checks: [isNotEmpty],
+ },
+ VertexRegion: {
+ envKey: "VERTEX_REGION",
+ checks: [isNotEmpty],
+ },
+ VertexSafetySetting: {
+ envKey: "VERTEX_SAFETY_SETTING",
+ checks: [validGeminiSafetySetting],
+ },
+
// LMStudio Settings
LMStudioBasePath: {
envKey: "LMSTUDIO_BASE_PATH",
@@ -663,6 +680,7 @@ function supportedLLM(input = "") {
"azure",
"anthropic",
"gemini",
+ "vertex",
"lmstudio",
"localai",
"ollama",
@@ -714,6 +732,20 @@ function validGeminiModel(input = "") {
: `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
}
+function validVertexModel(input = "") {
+ const validModels = [
+ "gemini-1.5-flash",
+ "gemini-1.5-pro",
+ "gemini-1.5-flash-001",
+ "gemini-1.5-pro-001",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro-002",
+ ];
+ return validModels.includes(input)
+ ? null
+ : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
+}
+
function validGeminiSafetySetting(input = "") {
const validModes = [
"BLOCK_NONE",
diff --git a/server/yarn.lock b/server/yarn.lock
index 44bab1f03c..219c21c66d 100644
--- a/server/yarn.lock
+++ b/server/yarn.lock
@@ -872,6 +872,13 @@
resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d"
integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==
+"@google-cloud/vertexai@^1.9.0":
+ version "1.9.0"
+ resolved "https://registry.yarnpkg.com/@google-cloud/vertexai/-/vertexai-1.9.0.tgz#30941faa920e1218231604285c56aa4ae172b358"
+ integrity sha512-8brlcJwFXI4fPuBtsDNQqCdWZmz8gV9jeEKOU0vc5H2SjehCQpXK/NwuSEr916zbhlBHtg/sU37qQQdgvh5BRA==
+ dependencies:
+ google-auth-library "^9.1.0"
+
"@google/generative-ai@^0.7.1":
version "0.7.1"
resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.7.1.tgz#eb187c75080c0706245699dbc06816c830d8c6a7"
@@ -2586,7 +2593,7 @@ base-64@^0.1.0:
resolved "https://registry.yarnpkg.com/base-64/-/base-64-0.1.0.tgz#780a99c84e7d600260361511c4877613bf24f6bb"
integrity sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==
-base64-js@^1.3.1, base64-js@^1.5.1:
+base64-js@^1.3.0, base64-js@^1.3.1, base64-js@^1.5.1:
version "1.5.1"
resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a"
integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==
@@ -2604,6 +2611,11 @@ before-after-hook@^2.2.0:
resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.2.3.tgz#c51e809c81a4e354084422b9b26bad88249c517c"
integrity sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==
+bignumber.js@^9.0.0:
+ version "9.1.2"
+ resolved "https://registry.yarnpkg.com/bignumber.js/-/bignumber.js-9.1.2.tgz#b7c4242259c008903b13707983b5f4bbd31eda0c"
+ integrity sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==
+
binary-extensions@^2.0.0, binary-extensions@^2.2.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522"
@@ -3337,7 +3349,7 @@ eastasianwidth@^0.2.0:
resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb"
integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==
-ecdsa-sig-formatter@1.0.11:
+ecdsa-sig-formatter@1.0.11, ecdsa-sig-formatter@^1.0.11:
version "1.0.11"
resolved "https://registry.yarnpkg.com/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz#ae0f0fa2d85045ef14a817daa3ce9acd0489e5bf"
integrity sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==
@@ -3782,6 +3794,11 @@ express@^4.18.2:
utils-merge "1.0.1"
vary "~1.1.2"
+extend@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
+ integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
+
external-editor@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495"
@@ -4075,6 +4092,25 @@ gauge@^4.0.3:
strip-ansi "^6.0.1"
wide-align "^1.1.5"
+gaxios@^6.0.0, gaxios@^6.1.1:
+ version "6.7.1"
+ resolved "https://registry.yarnpkg.com/gaxios/-/gaxios-6.7.1.tgz#ebd9f7093ede3ba502685e73390248bb5b7f71fb"
+ integrity sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==
+ dependencies:
+ extend "^3.0.2"
+ https-proxy-agent "^7.0.1"
+ is-stream "^2.0.0"
+ node-fetch "^2.6.9"
+ uuid "^9.0.1"
+
+gcp-metadata@^6.1.0:
+ version "6.1.0"
+ resolved "https://registry.yarnpkg.com/gcp-metadata/-/gcp-metadata-6.1.0.tgz#9b0dd2b2445258e7597f2024332d20611cbd6b8c"
+ integrity sha512-Jh/AIwwgaxan+7ZUUmRLCjtchyDiqh4KjBJ5tW3plBZb5iL/BPcso8A5DlzeD9qlw0duCamnNdpFjxwaT0KyKg==
+ dependencies:
+ gaxios "^6.0.0"
+ json-bigint "^1.0.0"
+
generate-function@^2.3.1:
version "2.3.1"
resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.3.1.tgz#f069617690c10c868e73b8465746764f97c3479f"
@@ -4163,6 +4199,18 @@ globalthis@^1.0.3:
define-properties "^1.2.1"
gopd "^1.0.1"
+google-auth-library@^9.1.0:
+ version "9.14.2"
+ resolved "https://registry.yarnpkg.com/google-auth-library/-/google-auth-library-9.14.2.tgz#92a53ba32b3a9ff9ced8ed34129edb5a7fa7fb52"
+ integrity sha512-R+FRIfk1GBo3RdlRYWPdwk8nmtVUOn6+BkDomAC46KoU8kzXzE1HLmOasSCbWUByMMAGkknVF0G5kQ69Vj7dlA==
+ dependencies:
+ base64-js "^1.3.0"
+ ecdsa-sig-formatter "^1.0.11"
+ gaxios "^6.1.1"
+ gcp-metadata "^6.1.0"
+ gtoken "^7.0.0"
+ jws "^4.0.0"
+
gopd@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c"
@@ -4195,6 +4243,14 @@ graphql@^16.7.1:
resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.9.0.tgz#1c310e63f16a49ce1fbb230bd0a000e99f6f115f"
integrity sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw==
+gtoken@^7.0.0:
+ version "7.1.0"
+ resolved "https://registry.yarnpkg.com/gtoken/-/gtoken-7.1.0.tgz#d61b4ebd10132222817f7222b1e6064bd463fc26"
+ integrity sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==
+ dependencies:
+ gaxios "^6.0.0"
+ jws "^4.0.0"
+
guid-typescript@^1.0.9:
version "1.0.9"
resolved "https://registry.yarnpkg.com/guid-typescript/-/guid-typescript-1.0.9.tgz#e35f77003535b0297ea08548f5ace6adb1480ddc"
@@ -4324,6 +4380,14 @@ https-proxy-agent@^7.0.0:
agent-base "^7.0.2"
debug "4"
+https-proxy-agent@^7.0.1:
+ version "7.0.5"
+ resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz#9e8b5013873299e11fab6fd548405da2d6c602b2"
+ integrity sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==
+ dependencies:
+ agent-base "^7.0.2"
+ debug "4"
+
human-interval@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/human-interval/-/human-interval-2.0.1.tgz#655baf606c7067bb26042dcae14ec777b099af15"
@@ -4785,6 +4849,13 @@ jsbi@^4.3.0:
resolved "https://registry.yarnpkg.com/jsbi/-/jsbi-4.3.0.tgz#b54ee074fb6fcbc00619559305c8f7e912b04741"
integrity sha512-SnZNcinB4RIcnEyZqFPdGPVgrg2AcnykiBy0sHVJQKHYeaLUvi3Exj+iaPpLnFVkDPZIV4U0yvgC9/R4uEAZ9g==
+json-bigint@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/json-bigint/-/json-bigint-1.0.0.tgz#ae547823ac0cad8398667f8cd9ef4730f5b01ff1"
+ integrity sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==
+ dependencies:
+ bignumber.js "^9.0.0"
+
json-bignum@^0.0.3:
version "0.0.3"
resolved "https://registry.yarnpkg.com/json-bignum/-/json-bignum-0.0.3.tgz#41163b50436c773d82424dbc20ed70db7604b8d7"
@@ -5450,7 +5521,7 @@ node-domexception@1.0.0:
resolved "https://registry.yarnpkg.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5"
integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==
-node-fetch@2.7.0, node-fetch@^2.6.1, node-fetch@^2.6.12, node-fetch@^2.6.7:
+node-fetch@2.7.0, node-fetch@^2.6.1, node-fetch@^2.6.12, node-fetch@^2.6.7, node-fetch@^2.6.9:
version "2.7.0"
resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d"
integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==