From e28c98b7ab0d5279ee18710332bda2d501aba5ef Mon Sep 17 00:00:00 2001
From: JW
Date: Fri, 15 Nov 2024 22:49:27 -0500
Subject: [PATCH] change default configuration of Heurist

---
 .env.example                         |  6 +++++-
 docs/docs/advanced/fine-tuning.md    | 10 +++++-----
 docs/docs/api/type-aliases/Models.md |  4 ++++
 docs/docs/guides/configuration.md    | 17 ++++++++++-------
 docs/docs/quickstart.md              |  6 +++---
 packages/core/src/generation.ts      |  2 +-
 6 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/.env.example b/.env.example
index caf26ced..6407ffd8 100644
--- a/.env.example
+++ b/.env.example
@@ -55,8 +55,12 @@ LARGE_OLLAMA_MODEL= #default hermes3:70b
 # For asking Claude stuff
 ANTHROPIC_API_KEY=
 
-# Heurist API
+# Heurist API (Get API Key at https://heurist.ai/dev-access)
 HEURIST_API_KEY=
+SMALL_HEURIST_LANGUAGE_MODEL=
+MEDIUM_HEURIST_LANGUAGE_MODEL=
+LARGE_HEURIST_LANGUAGE_MODEL=
+HEURIST_IMAGE_MODEL=
 
 WALLET_PRIVATE_KEY=EXAMPLE_WALLET_PRIVATE_KEY
 WALLET_PUBLIC_KEY=EXAMPLE_WALLET_PUBLIC_KEY
diff --git a/docs/docs/advanced/fine-tuning.md b/docs/docs/advanced/fine-tuning.md
index f256483a..331b8cab 100644
--- a/docs/docs/advanced/fine-tuning.md
+++ b/docs/docs/advanced/fine-tuning.md
@@ -273,7 +273,7 @@ const llamaLocalSettings = {
 const heuristSettings = {
     settings: {
         stop: [],
-        maxInputTokens: 128000,
+        maxInputTokens: 32768,
         maxOutputTokens: 8192,
         repetition_penalty: 0.0,
         temperature: 0.7,
@@ -283,11 +283,11 @@
     },
     endpoint: "https://llm-gateway.heurist.xyz",
     model: {
-        [ModelClass.SMALL]: "meta-llama/llama-3-70b-instruct",
-        [ModelClass.MEDIUM]: "meta-llama/llama-3-70b-instruct",
-        [ModelClass.LARGE]: "meta-llama/llama-3.1-405b-instruct",
+        [ModelClass.SMALL]: "hermes-3-llama3.1-8b",
+        [ModelClass.MEDIUM]: "mistralai/mixtral-8x7b-instruct",
+        [ModelClass.LARGE]: "nvidia/llama-3.1-nemotron-70b-instruct",
         [ModelClass.EMBEDDING]: "", // Add later
-        [ModelClass.IMAGE]: "PepeXL",
+        [ModelClass.IMAGE]: "FLUX.1-dev",
     },
 };
 ```
diff --git a/docs/docs/api/type-aliases/Models.md b/docs/docs/api/type-aliases/Models.md
index 6b21703b..b118fa2c 100644
--- a/docs/docs/api/type-aliases/Models.md
+++ b/docs/docs/api/type-aliases/Models.md
@@ -48,6 +48,10 @@
 
 > **redpill**: [`Model`](Model.md)
 
+### heurist
+
+> **heurist**: [`Model`](Model.md)
+
 ## Defined in
 
 [packages/core/src/types.ts:105](https://github.com/ai16z/eliza/blob/7fcf54e7fb2ba027d110afcc319c0b01b3f181dc/packages/core/src/types.ts#L105)
diff --git a/docs/docs/guides/configuration.md b/docs/docs/guides/configuration.md
index 463d46d6..074337fd 100644
--- a/docs/docs/guides/configuration.md
+++ b/docs/docs/guides/configuration.md
@@ -70,6 +70,9 @@ ANTHROPIC_API_KEY=
 # Together.ai Settings
 TOGETHER_API_KEY=
 
+# Heurist Settings
+HEURIST_API_KEY=
+
 # Local Model Settings
 XAI_MODEL=meta-llama/Llama-3.1-7b-instruct
 ```
@@ -80,12 +83,12 @@ Configure image generation in your character file:
 ```json
 {
-    "modelProvider": "HEURIST",
+    "modelProvider": "heurist",
     "settings": {
         "imageSettings": {
             "steps": 20,
-            "width": 512,
-            "height": 512
+            "width": 1024,
+            "height": 1024
         }
     }
 }
 ```
@@ -96,13 +99,13 @@ Example usage:
 ```typescript
 const result = await generateImage(
     {
-        prompt: "pepe_frog, meme, web comic, cartoon, 3d render",
-        width: 512,
-        height: 512,
+        prompt: "A cute anime girl with big breasts and straight long black hair wearing orange T-shirt. The T-shirt has \"ai16z\" texts in the front. The girl is looking at the viewer",
+        width: 1024,
+        height: 1024,
         numIterations: 20, // optional
         guidanceScale: 3, // optional
         seed: -1, // optional
-        modelId: "PepeXL", // optional
+        modelId: "FLUX.1-dev", // optional
     },
     runtime,
 );
diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md
index d3d851ff..f678a716 100644
--- a/docs/docs/quickstart.md
+++ b/docs/docs/quickstart.md
@@ -67,9 +67,9 @@ Before getting started with Eliza, ensure you have:
 
 Eliza supports multiple AI models:
 
-- **Heurist**: Set `modelProvider: "HEURIST"` in your character file
-  - LLM: Uses Llama models (more available LLM models [here](https://heurist.mintlify.app/developer/supported-models))
-  - Image Generation: Uses PepeXL model (more info of available models [here](https://heurist.mintlify.app/developer/image-generation-api))
+- **Heurist**: Set `modelProvider: "heurist"` in your character file. Most models are uncensored.
+  - LLM: Select available LLMs [here](https://docs.heurist.ai/dev-guide/supported-models#large-language-models-llms) and configure `SMALL_HEURIST_LANGUAGE_MODEL`, `MEDIUM_HEURIST_LANGUAGE_MODEL`, `LARGE_HEURIST_LANGUAGE_MODEL`
+  - Image Generation: Select available Stable Diffusion or Flux models [here](https://docs.heurist.ai/dev-guide/supported-models#image-generation-models) and configure `HEURIST_IMAGE_MODEL` (default is FLUX.1-dev)
 - **Llama**: Set `XAI_MODEL=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
 - **Grok**: Set `XAI_MODEL=grok-beta`
 - **OpenAI**: Set `XAI_MODEL=gpt-4o-mini` or `gpt-4o`
diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts
index abbd7b15..f213a2de 100644
--- a/packages/core/src/generation.ts
+++ b/packages/core/src/generation.ts
@@ -771,7 +771,7 @@ export const generateImage = async (
                         seed: data.seed || -1,
                     },
                 },
-                model_id: data.modelId || "PepeXL", // Default to SD 1.5 if not specified
+                model_id: data.modelId || "FLUX.1-dev",
             }),
         }
    );
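For reference, a minimal sketch (not part of the patch) of how the `.env` variables introduced above might be populated. The model names are the same defaults this patch uses in `fine-tuning.md` and `generation.ts`; the API key value is a hypothetical placeholder, and any other model listed in the Heurist docs should work the same way.

```env
# Heurist API (Get API Key at https://heurist.ai/dev-access)
# The key below is a placeholder, not a real credential
HEURIST_API_KEY=your_heurist_api_key
SMALL_HEURIST_LANGUAGE_MODEL=hermes-3-llama3.1-8b
MEDIUM_HEURIST_LANGUAGE_MODEL=mistralai/mixtral-8x7b-instruct
LARGE_HEURIST_LANGUAGE_MODEL=nvidia/llama-3.1-nemotron-70b-instruct
HEURIST_IMAGE_MODEL=FLUX.1-dev
```

Leaving the model variables empty falls back to the defaults above, since `generateImage` now uses `data.modelId || "FLUX.1-dev"`.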