From 10f1248270be6e27519ff925f6f5bb93ca567f02 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 16 Dec 2024 14:51:52 +0100 Subject: [PATCH 01/12] draft work --- .../docs/agents/built-in/openai-agent.mdx | 186 +++++++++++++++--- .../agents/openai_agent.py | 170 ++++++++++++++++ typescript/src/agents/openAIAgent.ts | 68 ++++++- 3 files changed, 396 insertions(+), 28 deletions(-) create mode 100644 python/src/multi_agent_orchestrator/agents/openai_agent.py diff --git a/docs/src/content/docs/agents/built-in/openai-agent.mdx b/docs/src/content/docs/agents/built-in/openai-agent.mdx index 0d1ddbb2..97953fc2 100644 --- a/docs/src/content/docs/agents/built-in/openai-agent.mdx +++ b/docs/src/content/docs/agents/built-in/openai-agent.mdx @@ -20,12 +20,11 @@ To create a new `OpenAIAgent`, you need to provide an `OpenAIAgentOptions` objec import { Tabs, TabItem } from '@astrojs/starlight/components'; - ```typescript -import { OpenAIAgent } from './path-to-openai-agent'; +import { OpenAIAgent } from 'multi-agent-orchestrator'; const agent = new OpenAIAgent({ name: 'OpenAI Assistant', @@ -39,29 +38,101 @@ const agent = new OpenAIAgent({ topP: 0.9, stopSequences: ['Human:', 'AI:'] }, - systemPrompt: 'You are a helpful AI assistant specialized in answering questions about technology.' 
+ customSystemPrompt: { + template: 'You are a helpful AI assistant specialized in {{DOMAIN}}', + variables: { + DOMAIN: 'technology' + } + } }); ``` - + + +```python +from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions + +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='A versatile AI assistant powered by OpenAI models', + api_key='your-openai-api-key', + model='gpt-3.5-turbo', + streaming=True, + inference_config={ + 'maxTokens': 500, + 'temperature': 0.7, + 'topP': 0.9, + 'stopSequences': ['Human:', 'AI:'] + }, + custom_system_prompt={ + 'template': 'You are a helpful AI assistant specialized in {{DOMAIN}}', + 'variables': { + 'DOMAIN': 'technology' + } + } +)) +``` + - ### OpenAIAgentOptions -The `OpenAIAgentOptions` extends the base `AgentOptions` and includes the following properties: +The `OpenAIAgentOptions` extends the base `AgentOptions` to provide configuration for the OpenAI agent: -- `name` (required): A string representing the name of the agent. -- `description` (required): A string describing the agent's capabilities and expertise. -- `apiKey` (required): Your OpenAI API key. -- `model` (optional): The OpenAI model to use. Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`. -- `streaming` (optional): Whether to use streaming responses. Defaults to `false`. -- `inferenceConfig` (optional): An object to customize the inference behavior: - - `maxTokens` (optional): The maximum number of tokens to generate. Defaults to 1000. - - `temperature` (optional): Controls randomness in output generation. - - `topP` (optional): Controls diversity of output generation. - - `stopSequences` (optional): An array of sequences that, when generated, will stop the generation process. -- `systemPrompt` (optional): A string representing the initial system prompt for the agent. + + + +The TypeScript interface extends `AgentOptions` with the following fields: + +- `apiKey` (required): Your OpenAI API key for authentication. 
+- `model` (optional): The OpenAI model identifier to use (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`. +- `streaming` (optional): Boolean flag to enable/disable streaming responses. Defaults to `false`. +- `inferenceConfig` (optional): Configuration object for the model's inference behavior: +```typescript +{ + maxTokens?: number; // Maximum tokens to generate (default: 1000) + temperature?: number; // Controls randomness (0-1) + topP?: number; // Controls diversity via nucleus sampling + stopSequences?: string[]; // Sequences that stop generation +} +``` +- `customSystemPrompt` (optional): System prompt configuration: +```typescript +{ + template: string; // The prompt template string + variables?: { // Key-value pairs for template variables + [key: string]: string | string[]; + } +} +``` + + + + +The Python class extends `AgentOptions` with the following fields: + +- `api_key` (required): String containing your OpenAI API key for authentication. +- `model` (Optional[str]): The OpenAI model identifier to use (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`. +- `streaming` (Optional[bool]): Flag to enable/disable streaming responses. Defaults to `False`. 
+- `inference_config` (Optional[Dict[str, Any]]): Dictionary configuring the model's inference behavior: +```python +{ + 'maxTokens': int, # Maximum tokens to generate (default: 1000) + 'temperature': float, # Controls randomness (0-1) + 'topP': float, # Controls diversity via nucleus sampling + 'stopSequences': List[str] # Sequences that stop generation +} +``` +- `custom_system_prompt` (Optional[Dict[str, Any]]): Dictionary configuring the system prompt: +```python +{ + 'template': str, # The prompt template string + 'variables': dict # Key-value pairs for template variables +} +``` + + + ## Setting the System Prompt @@ -75,15 +146,26 @@ You can set or update the system prompt for the OpenAIAgent in two ways: ```typescript const agent = new OpenAIAgent({ // ... other options ... - systemPrompt: 'You are a helpful AI assistant specialized in answering questions about technology.' + customSystemPrompt: { + template: 'You are a helpful AI assistant specialized in answering questions about technology.' + } }); +``` + + + +```python +agent = OpenAIAgent(OpenAIAgentOptions( + # ... other options ... + custom_system_prompt={ + 'template': 'You are a helpful AI assistant specialized in answering questions about technology.' + } +)) ``` - - -2. Using the `setSystemPrompt` method after initialization: +2. Using the `setSystemPrompt/set_system_prompt` method after initialization: @@ -99,12 +181,26 @@ agent.setSystemPrompt( TONE: "friendly and educational" } ); +``` + + + +```python +agent.set_system_prompt( + template="""You are an AI assistant specialized in {{DOMAIN}}. + Your main goal is to {{GOAL}}. + Always maintain a {{TONE}} tone in your responses.""", + variables={ + "DOMAIN": "artificial intelligence", + "GOAL": "explain complex AI concepts in simple terms", + "TONE": "friendly and educational" + } +) ``` - -The `setSystemPrompt` method allows you to dynamically change the agent's behavior and focus without creating a new instance. 
You can use placeholders in the prompt template and provide values for them in the second argument. +The `setSystemPrompt/set_system_prompt` method allows you to dynamically change the agent's behavior and focus without creating a new instance. You can use placeholders in the prompt template and provide values for them in the second argument. ## Usage @@ -126,8 +222,22 @@ const response = await orchestrator.routeRequest( ); ``` - + + +```python +from multi_agent_orchestrator import MultiAgentOrchestrator + +orchestrator = MultiAgentOrchestrator() +orchestrator.add_agent(agent) +response = await orchestrator.route_request( + "What is the capital of France?", + "user123", + "session456" +) +``` + + ## Streaming Responses @@ -150,11 +260,23 @@ if (Symbol.asyncIterator in streamingResponse) { } ``` - + + +```python +streaming_response = await orchestrator.route_request( + "Tell me a long story about a brave knight", + "user123", + "session456" +) + +# Check if the response is streaming +if hasattr(streaming_response, '__aiter__'): + async for chunk in streaming_response: + print(chunk, end='', flush=True) # Process each chunk of the response +``` + - - ## Best Practices 1. **API Key Security**: Ensure your OpenAI API key is kept secure and not exposed in your codebase. @@ -164,4 +286,14 @@ if (Symbol.asyncIterator in streamingResponse) { 5. **Rate Limiting**: Be aware of OpenAI's rate limits and implement appropriate throttling if necessary. 6. **System Prompts**: Craft clear and specific system prompts to guide the model's behavior and improve response quality for your use case. 
+## Implementation Notes + +When implementing the OpenAIAgent in your application: + +- In Python, use snake_case naming conventions (e.g., `custom_system_prompt`, `set_system_prompt`) +- In TypeScript, use camelCase naming conventions (e.g., `customSystemPrompt`, `setSystemPrompt`) +- Both implementations support async/await patterns for handling responses +- Both versions include callback support for streaming responses +- Error handling patterns are consistent across both implementations + By leveraging the OpenAIAgent, you can create sophisticated, context-aware AI agents capable of handling a wide range of tasks and interactions, all powered by OpenAI's state-of-the-art language models. \ No newline at end of file diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py new file mode 100644 index 00000000..8a362bde --- /dev/null +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -0,0 +1,170 @@ +from typing import Dict, List, Union, AsyncIterable, Optional, Any +from dataclasses import dataclass +import openai +from multi_agent_orchestrator.agents import Agent, AgentOptions +from multi_agent_orchestrator.types import ( + ConversationMessage, + ParticipantRole, + OPENAI_MODEL_ID_GPT_O_MINI, + TemplateVariables +) +from multi_agent_orchestrator.utils import Logger + + +@dataclass +class OpenAIAgentOptions(AgentOptions): + api_key: str + model: Optional[str] = None + streaming: Optional[bool] = None + inference_config: Optional[Dict[str, Any]] = None + custom_system_prompt: Optional[Dict[str, Any]] = None + + +class OpenAIAgent(Agent): + def __init__(self, options: OpenAIAgentOptions): + super().__init__(options) + if not options.api_key: + raise ValueError("OpenAI API key is required") + + self.client = openai.OpenAI(api_key=options.api_key) + self.model = options.model or OPENAI_MODEL_ID_GPT_O_MINI + self.streaming = options.streaming or False + + # Default inference 
configuration + default_inference_config = { + 'maxTokens': 1000, + 'temperature': None, + 'topP': None, + 'stopSequences': None + } + + if options.inference_config: + self.inference_config = {**default_inference_config, **options.inference_config} + else: + self.inference_config = default_inference_config + + # Initialize system prompt + self.prompt_template = f"""You are a {self.name}. + {self.description} Provide helpful and accurate information based on your expertise. + You will engage in an open-ended conversation, providing helpful and accurate information based on your expertise. + The conversation will proceed as follows: + - The human may ask an initial question or provide a prompt on any topic. + - You will provide a relevant and informative response. + - The human may then follow up with additional questions or prompts related to your previous response, + allowing for a multi-turn dialogue on that topic. + - Or, the human may switch to a completely new and unrelated topic at any point. + - You will seamlessly shift your focus to the new topic, providing thoughtful and coherent responses + based on your broad knowledge base. + Throughout the conversation, you should aim to: + - Understand the context and intent behind each new question or prompt. + - Provide substantive and well-reasoned responses that directly address the query. + - Draw insights and connections from your extensive knowledge when appropriate. + - Ask for clarification if any part of the question or prompt is ambiguous. + - Maintain a consistent, respectful, and engaging tone tailored to the human's communication style. 
+ - Seamlessly transition between topics as the human introduces new subjects.""" + + self.system_prompt = "" + self.custom_variables: TemplateVariables = {} + + if options.custom_system_prompt: + self.set_system_prompt( + options.custom_system_prompt.get('template'), + options.custom_system_prompt.get('variables') + ) + + self.update_system_prompt() + + def is_streaming_enabled(self) -> bool: + return self.streaming is True + + async def process_request( + self, + input_text: str, + user_id: str, + session_id: str, + chat_history: List[ConversationMessage], + additional_params: Optional[Dict[str, str]] = None + ) -> Union[ConversationMessage, AsyncIterable[Any]]: + try: + messages = [ + {"role": "system", "content": self.system_prompt}, + *[{ + "role": msg.role.lower(), + "content": msg.content[0].get('text', '') if msg.content else '' + } for msg in chat_history], + {"role": "user", "content": input_text} + ] + + request_options = { + "model": self.model, + "messages": messages, + "max_tokens": self.inference_config.get('maxTokens'), + "temperature": self.inference_config.get('temperature'), + "top_p": self.inference_config.get('topP'), + "stop": self.inference_config.get('stopSequences'), + "stream": self.streaming + } + + if self.streaming: + return await self.handle_streaming_response(request_options) + else: + return await self.handle_single_response(request_options) + + except Exception as error: + Logger.error(f"Error in OpenAI API call: {str(error)}") + raise error + + async def handle_single_response(self, request_options: Dict[str, Any]) -> ConversationMessage: + try: + request_options['stream'] = False + chat_completion = await self.client.chat.completions.create(**request_options) + + if not chat_completion.choices: + raise ValueError('No choices returned from OpenAI API') + + assistant_message = chat_completion.choices[0].message.content + + if not isinstance(assistant_message, str): + raise ValueError('Unexpected response format from OpenAI API') + + 
return ConversationMessage( + role=ParticipantRole.ASSISTANT.value, + content=[{"text": assistant_message}] + ) + + except Exception as error: + Logger.error(f'Error in OpenAI API call: {str(error)}') + raise error + + async def handle_streaming_response(self, request_options: Dict[str, Any]) -> AsyncIterable[str]: + stream = await self.client.chat.completions.create(**request_options) + async for chunk in stream: + if content := chunk.choices[0].delta.content: + if self.callbacks: + self.callbacks.on_llm_new_token(content) + yield content + + def set_system_prompt(self, + template: Optional[str] = None, + variables: Optional[TemplateVariables] = None) -> None: + if template: + self.prompt_template = template + if variables: + self.custom_variables = variables + self.update_system_prompt() + + def update_system_prompt(self) -> None: + all_variables: TemplateVariables = {**self.custom_variables} + self.system_prompt = self.replace_placeholders(self.prompt_template, all_variables) + + @staticmethod + def replace_placeholders(template: str, variables: TemplateVariables) -> str: + import re + def replace(match): + key = match.group(1) + if key in variables: + value = variables[key] + return '\n'.join(value) if isinstance(value, list) else str(value) + return match.group(0) + + return re.sub(r'{{(\w+)}}', replace, template) \ No newline at end of file diff --git a/typescript/src/agents/openAIAgent.ts b/typescript/src/agents/openAIAgent.ts index 9465bfe4..86696702 100644 --- a/typescript/src/agents/openAIAgent.ts +++ b/typescript/src/agents/openAIAgent.ts @@ -1,5 +1,5 @@ import { Agent, AgentOptions } from './agent'; -import { ConversationMessage, OPENAI_MODEL_ID_GPT_O_MINI, ParticipantRole } from '../types'; +import { ConversationMessage, OPENAI_MODEL_ID_GPT_O_MINI, ParticipantRole, TemplateVariables } from '../types'; import OpenAI from 'openai'; import { Logger } from '../utils/logger'; @@ -13,6 +13,10 @@ export interface OpenAIAgentOptions extends AgentOptions { 
topP?: number; stopSequences?: string[]; }; + customSystemPrompt?: { + template: string; + variables?: TemplateVariables; + }; } const DEFAULT_MAX_TOKENS = 1000; @@ -27,6 +31,9 @@ export class OpenAIAgent extends Agent { topP?: number; stopSequences?: string[]; }; + private promptTemplate: string; + private systemPrompt: string; + private customVariables: TemplateVariables; constructor(options: OpenAIAgentOptions) { super(options); @@ -39,6 +46,35 @@ export class OpenAIAgent extends Agent { topP: options.inferenceConfig?.topP, stopSequences: options.inferenceConfig?.stopSequences, }; + + this.promptTemplate = `You are a ${this.name}. ${this.description} Provide helpful and accurate information based on your expertise. + You will engage in an open-ended conversation, providing helpful and accurate information based on your expertise. + The conversation will proceed as follows: + - The human may ask an initial question or provide a prompt on any topic. + - You will provide a relevant and informative response. + - The human may then follow up with additional questions or prompts related to your previous response, allowing for a multi-turn dialogue on that topic. + - Or, the human may switch to a completely new and unrelated topic at any point. + - You will seamlessly shift your focus to the new topic, providing thoughtful and coherent responses based on your broad knowledge base. + Throughout the conversation, you should aim to: + - Understand the context and intent behind each new question or prompt. + - Provide substantive and well-reasoned responses that directly address the query. + - Draw insights and connections from your extensive knowledge when appropriate. + - Ask for clarification if any part of the question or prompt is ambiguous. + - Maintain a consistent, respectful, and engaging tone tailored to the human's communication style. 
+ - Seamlessly transition between topics as the human introduces new subjects.` + + this.customVariables = {}; + this.systemPrompt = ''; + + if (options.customSystemPrompt) { + this.setSystemPrompt( + options.customSystemPrompt.template, + options.customSystemPrompt.variables + ); + } + + this.updateSystemPrompt(); + } /* eslint-disable @typescript-eslint/no-unused-vars */ @@ -52,6 +88,7 @@ export class OpenAIAgent extends Agent { const messages = [ + { role: 'system', content: this.systemPrompt }, ...chatHistory.map(msg => ({ role: msg.role.toLowerCase() as OpenAI.Chat.ChatCompletionMessageParam['role'], content: msg.content[0]?.text || '' @@ -59,6 +96,8 @@ export class OpenAIAgent extends Agent { { role: 'user' as const, content: inputText } ] as OpenAI.Chat.ChatCompletionMessageParam[]; + console.log("messages="+JSON.stringify(messages)) + const { maxTokens, temperature, topP, stopSequences } = this.inferenceConfig; const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = { @@ -80,6 +119,33 @@ export class OpenAIAgent extends Agent { } } + setSystemPrompt(template?: string, variables?: TemplateVariables): void { + if (template) { + this.promptTemplate = template; + } + if (variables) { + this.customVariables = variables; + } + this.updateSystemPrompt(); + } + + private updateSystemPrompt(): void { + const allVariables: TemplateVariables = { + ...this.customVariables + }; + this.systemPrompt = this.replaceplaceholders(this.promptTemplate, allVariables); + } + + private replaceplaceholders(template: string, variables: TemplateVariables): string { + return template.replace(/{{(\w+)}}/g, (match, key) => { + if (key in variables) { + const value = variables[key]; + return Array.isArray(value) ? 
value.join('\n') : String(value); + } + return match; + }); + } + private async handleSingleResponse(input: any): Promise { try { const nonStreamingOptions = { ...input, stream: false }; From 145b518343baaec40aec42361494c5fa6cd3c2ae Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 16 Dec 2024 18:39:29 +0100 Subject: [PATCH 02/12] fix streaming response --- .../agents/__init__.py | 17 ++++++ .../agents/openai_agent.py | 59 ++++++++++++++----- 2 files changed, 62 insertions(+), 14 deletions(-) diff --git a/python/src/multi_agent_orchestrator/agents/__init__.py b/python/src/multi_agent_orchestrator/agents/__init__.py index 0d198032..c02982dc 100644 --- a/python/src/multi_agent_orchestrator/agents/__init__.py +++ b/python/src/multi_agent_orchestrator/agents/__init__.py @@ -18,6 +18,16 @@ except ImportError: _ANTHROPIC_AVAILABLE = False +from .openai_agent import OpenAIAgent, OpenAIAgentOptions + + +try: + from .openai_agent import OpenAIAgent, OpenAIAgentOptions + _OPENAI_AVAILABLE = True +except ImportError: + print(ImportError) + _OPENAI_AVAILABLE = False + __all__ = [ 'Agent', @@ -50,3 +60,10 @@ 'AnthropicAgent', 'AnthropicAgentOptions' ]) + +print(_OPENAI_AVAILABLE) +#if _OPENAI_AVAILABLE: +__all__.extend([ + 'OpenAIAgent', + 'OpenAIAgentOptions' + ]) diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index 8a362bde..4cdb59c7 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -1,6 +1,6 @@ from typing import Dict, List, Union, AsyncIterable, Optional, Any from dataclasses import dataclass -import openai +from openai import OpenAI from multi_agent_orchestrator.agents import Agent, AgentOptions from multi_agent_orchestrator.types import ( ConversationMessage, @@ -9,15 +9,20 @@ TemplateVariables ) from multi_agent_orchestrator.utils import Logger +from multi_agent_orchestrator.retrievers 
import Retriever + @dataclass class OpenAIAgentOptions(AgentOptions): - api_key: str + api_key: str = None model: Optional[str] = None streaming: Optional[bool] = None inference_config: Optional[Dict[str, Any]] = None custom_system_prompt: Optional[Dict[str, Any]] = None + retriever: Optional[Retriever] = None + client: Optional[Any] = None + class OpenAIAgent(Agent): @@ -26,9 +31,16 @@ def __init__(self, options: OpenAIAgentOptions): if not options.api_key: raise ValueError("OpenAI API key is required") - self.client = openai.OpenAI(api_key=options.api_key) + if options.client: + self.client = options.client + else: + self.client = OpenAI(api_key=options.api_key) + + self.model = options.model or OPENAI_MODEL_ID_GPT_O_MINI self.streaming = options.streaming or False + self.retriever: Optional[Retriever] = options.retriever + # Default inference configuration default_inference_config = { @@ -72,7 +84,7 @@ def __init__(self, options: OpenAIAgentOptions): options.custom_system_prompt.get('variables') ) - self.update_system_prompt() + def is_streaming_enabled(self) -> bool: return self.streaming is True @@ -86,8 +98,19 @@ async def process_request( additional_params: Optional[Dict[str, str]] = None ) -> Union[ConversationMessage, AsyncIterable[Any]]: try: + + self.update_system_prompt() + + system_prompt = self.system_prompt + + if self.retriever: + response = await self.retriever.retrieve_and_combine_results(input_text) + context_prompt = "\nHere is the context to use to answer the user's question:\n" + response + system_prompt += context_prompt + + messages = [ - {"role": "system", "content": self.system_prompt}, + {"role": "system", "content": system_prompt}, *[{ "role": msg.role.lower(), "content": msg.content[0].get('text', '') if msg.content else '' @@ -95,6 +118,7 @@ async def process_request( {"role": "user", "content": input_text} ] + request_options = { "model": self.model, "messages": messages, @@ -106,7 +130,7 @@ async def process_request( } if 
self.streaming: - return await self.handle_streaming_response(request_options) + return self.handle_streaming_response(request_options) else: return await self.handle_single_response(request_options) @@ -117,7 +141,7 @@ async def process_request( async def handle_single_response(self, request_options: Dict[str, Any]) -> ConversationMessage: try: request_options['stream'] = False - chat_completion = await self.client.chat.completions.create(**request_options) + chat_completion = self.client.chat.completions.create(**request_options) if not chat_completion.choices: raise ValueError('No choices returned from OpenAI API') @@ -136,13 +160,20 @@ async def handle_single_response(self, request_options: Dict[str, Any]) -> Conve Logger.error(f'Error in OpenAI API call: {str(error)}') raise error - async def handle_streaming_response(self, request_options: Dict[str, Any]) -> AsyncIterable[str]: - stream = await self.client.chat.completions.create(**request_options) - async for chunk in stream: - if content := chunk.choices[0].delta.content: - if self.callbacks: - self.callbacks.on_llm_new_token(content) - yield content + async def handle_streaming_response(self, request_options: Dict[str, Any]) -> Any: + + try: + stream = self.client.chat.completions.create(**request_options) + + for chunk in stream: + if chunk.choices[0].delta.content: + if self.callbacks: + self.callbacks.on_llm_new_token(chunk.choices[0].delta.content) + yield chunk.choices[0].delta.content + + except Exception as error: + Logger.error(f"Error getting stream from OpenAI model: {str(error)}") + raise error def set_system_prompt(self, template: Optional[str] = None, From 61431a3484428c6764a7c6e6c2e95503046dcd73 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 16 Dec 2024 18:42:33 +0100 Subject: [PATCH 03/12] add retriever option to ts agent --- typescript/src/agents/openAIAgent.ts | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git 
a/typescript/src/agents/openAIAgent.ts b/typescript/src/agents/openAIAgent.ts index 86696702..6d1f4ab8 100644 --- a/typescript/src/agents/openAIAgent.ts +++ b/typescript/src/agents/openAIAgent.ts @@ -2,6 +2,8 @@ import { Agent, AgentOptions } from './agent'; import { ConversationMessage, OPENAI_MODEL_ID_GPT_O_MINI, ParticipantRole, TemplateVariables } from '../types'; import OpenAI from 'openai'; import { Logger } from '../utils/logger'; +import { Retriever } from "../retrievers/retriever"; + export interface OpenAIAgentOptions extends AgentOptions { apiKey: string; @@ -17,6 +19,8 @@ export interface OpenAIAgentOptions extends AgentOptions { template: string; variables?: TemplateVariables; }; + retriever?: Retriever; + } const DEFAULT_MAX_TOKENS = 1000; @@ -34,6 +38,8 @@ export class OpenAIAgent extends Agent { private promptTemplate: string; private systemPrompt: string; private customVariables: TemplateVariables; + protected retriever?: Retriever; + constructor(options: OpenAIAgentOptions) { super(options); @@ -47,6 +53,9 @@ export class OpenAIAgent extends Agent { stopSequences: options.inferenceConfig?.stopSequences, }; + this.retriever = options.retriever ?? null; + + this.promptTemplate = `You are a ${this.name}. ${this.description} Provide helpful and accurate information based on your expertise. You will engage in an open-ended conversation, providing helpful and accurate information based on your expertise. 
The conversation will proceed as follows: @@ -73,7 +82,6 @@ export class OpenAIAgent extends Agent { ); } - this.updateSystemPrompt(); } @@ -86,9 +94,22 @@ export class OpenAIAgent extends Agent { additionalParams?: Record ): Promise> { + this.updateSystemPrompt(); + + let systemPrompt = this.systemPrompt; + + if (this.retriever) { + // retrieve from Vector store + const response = await this.retriever.retrieveAndCombineResults(inputText); + const contextPrompt = + "\nHere is the context to use to answer the user's question:\n" + + response; + systemPrompt = systemPrompt + contextPrompt; + } + const messages = [ - { role: 'system', content: this.systemPrompt }, + { role: 'system', content: systemPrompt }, ...chatHistory.map(msg => ({ role: msg.role.toLowerCase() as OpenAI.Chat.ChatCompletionMessageParam['role'], content: msg.content[0]?.text || '' From d63fa690984b5c8a1cd4da84f4f852435dff6f79 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 16 Dec 2024 18:54:46 +0100 Subject: [PATCH 04/12] update doc --- .../docs/agents/built-in/openai-agent.mdx | 244 +++++++++++++++--- 1 file changed, 213 insertions(+), 31 deletions(-) diff --git a/docs/src/content/docs/agents/built-in/openai-agent.mdx b/docs/src/content/docs/agents/built-in/openai-agent.mdx index 97953fc2..7a348ba7 100644 --- a/docs/src/content/docs/agents/built-in/openai-agent.mdx +++ b/docs/src/content/docs/agents/built-in/openai-agent.mdx @@ -13,31 +13,71 @@ The `OpenAIAgent` is a powerful agent class in the Multi-Agent Orchestrator fram - Customizable inference configuration - Handles conversation history for context-aware responses - Customizable system prompts +- Support for retrievers to enhance responses with additional context ## Creating an OpenAIAgent -To create a new `OpenAIAgent`, you need to provide an `OpenAIAgentOptions` object. 
Here's an example of how to create an OpenAIAgent: +### Minimal Example (Required Fields Only) + +Here's the simplest way to create an OpenAIAgent with just the required fields: import { Tabs, TabItem } from '@astrojs/starlight/components'; - + + +```typescript +import { OpenAIAgent } from 'multi-agent-orchestrator'; + +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', // Required + description: 'A versatile AI assistant', // Required + apiKey: 'your-openai-api-key' // Required +}); +``` + + + +```python +from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions + +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', # Required + description='A versatile AI assistant', # Required + api_key='your-openai-api-key' # Required +)) +``` + + + +### Full Example with All Options + +Here's an example showing all available configuration options: + + + ```typescript import { OpenAIAgent } from 'multi-agent-orchestrator'; const agent = new OpenAIAgent({ + // Required fields name: 'OpenAI Assistant', description: 'A versatile AI assistant powered by OpenAI models', apiKey: 'your-openai-api-key', + + // Optional fields model: 'gpt-3.5-turbo', streaming: true, + retriever: customRetriever, // Custom retriever for additional context + inferenceConfig: { maxTokens: 500, temperature: 0.7, topP: 0.9, stopSequences: ['Human:', 'AI:'] }, + customSystemPrompt: { template: 'You are a helpful AI assistant specialized in {{DOMAIN}}', variables: { @@ -47,23 +87,30 @@ const agent = new OpenAIAgent({ }); ``` - + ```python from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions agent = OpenAIAgent(OpenAIAgentOptions( + # Required fields name='OpenAI Assistant', description='A versatile AI assistant powered by OpenAI models', api_key='your-openai-api-key', + + # Optional fields model='gpt-3.5-turbo', streaming=True, + client=custom_openai_client, # Custom OpenAI client instance + retriever=custom_retriever, # Custom retriever for additional 
context + inference_config={ 'maxTokens': 500, 'temperature': 0.7, 'topP': 0.9, 'stopSequences': ['Human:', 'AI:'] }, + custom_system_prompt={ 'template': 'You are a helpful AI assistant specialized in {{DOMAIN}}', 'variables': { @@ -77,17 +124,21 @@ agent = OpenAIAgent(OpenAIAgentOptions( ### OpenAIAgentOptions -The `OpenAIAgentOptions` extends the base `AgentOptions` to provide configuration for the OpenAI agent: +The `OpenAIAgentOptions` extends the base `AgentOptions` with the following fields: - - -The TypeScript interface extends `AgentOptions` with the following fields: - -- `apiKey` (required): Your OpenAI API key for authentication. -- `model` (optional): The OpenAI model identifier to use (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`. -- `streaming` (optional): Boolean flag to enable/disable streaming responses. Defaults to `false`. -- `inferenceConfig` (optional): Configuration object for the model's inference behavior: + + +Required fields: +- `name` (required): Name of the agent +- `description` (required): Description of the agent's capabilities +- `apiKey` (required): Your OpenAI API key for authentication + +Optional fields: +- `model` (optional): The OpenAI model identifier (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI` +- `streaming` (optional): Boolean flag for streaming responses. Defaults to `false` +- `retriever` (optional): Custom retriever instance for enhancing responses with additional context +- `inferenceConfig` (optional): Configuration object for model inference: ```typescript { maxTokens?: number; // Maximum tokens to generate (default: 1000) @@ -109,12 +160,17 @@ The TypeScript interface extends `AgentOptions` with the following fields: -The Python class extends `AgentOptions` with the following fields: - -- `api_key` (required): String containing your OpenAI API key for authentication. 
-- `model` (Optional[str]): The OpenAI model identifier to use (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`. -- `streaming` (Optional[bool]): Flag to enable/disable streaming responses. Defaults to `False`. -- `inference_config` (Optional[Dict[str, Any]]): Dictionary configuring the model's inference behavior: +Required fields: +- `name` (required): Name of the agent +- `description` (required): Description of the agent's capabilities +- `api_key` (required): String containing your OpenAI API key for authentication + +Optional fields: +- `model` (Optional[str]): The OpenAI model identifier. Defaults to `OPENAI_MODEL_ID_GPT_O_MINI` +- `streaming` (Optional[bool]): Flag for streaming responses. Defaults to `False` +- `client` (Optional[Any]): Custom OpenAI client instance +- `retriever` (Optional[Retriever]): Custom retriever instance for enhancing responses +- `inference_config` (Optional[Dict[str, Any]]): Dictionary configuring model inference: ```python { 'maxTokens': int, # Maximum tokens to generate (default: 1000) @@ -141,7 +197,7 @@ You can set or update the system prompt for the OpenAIAgent in two ways: 1. During initialization: - + ```typescript const agent = new OpenAIAgent({ @@ -152,7 +208,7 @@ const agent = new OpenAIAgent({ }); ``` - + ```python agent = OpenAIAgent(OpenAIAgentOptions( @@ -168,7 +224,7 @@ agent = OpenAIAgent(OpenAIAgentOptions( 2. Using the `setSystemPrompt/set_system_prompt` method after initialization: - + ```typescript agent.setSystemPrompt( @@ -183,7 +239,7 @@ agent.setSystemPrompt( ); ``` - + ```python agent.set_system_prompt( @@ -200,14 +256,82 @@ agent.set_system_prompt( -The `setSystemPrompt/set_system_prompt` method allows you to dynamically change the agent's behavior and focus without creating a new instance. You can use placeholders in the prompt template and provide values for them in the second argument. 
- ## Usage -Once you've created an OpenAIAgent, you can add it to the Multi-Agent Orchestrator and use it to process requests: +There are two ways to use the OpenAIAgent: directly or through the Multi-Agent Orchestrator. + +### Direct Agent Usage + +You can call the agent directly when you want to use a single agent without the orchestrator's routing capabilities: - + + +```typescript +import { OpenAIAgent, ClassifierResult } from 'multi-agent-orchestrator'; + +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', + description: 'A versatile AI assistant', + apiKey: 'your-openai-api-key' +}); + +// Create a classifier result for direct agent usage +const classifierResult = { + selectedAgent: agent, + confidence: 1.0 +}; + +// Call the agent directly +const response = await orchestrator.agentProcessRequest( + "What is the capital of France?", + "user123", + "session456", + classifierResult, + {} // additional parameters (optional) +); + +console.log(response.output); // Access the agent's response +console.log(response.metadata); // Access metadata about the request +console.log(response.streaming); // Check if response is streaming +``` + + + +```python +from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions, ClassifierResult + +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='A versatile AI assistant', + api_key='your-openai-api-key' +)) + +# Create a classifier result for direct agent usage +classifier_result = ClassifierResult(selected_agent=agent, confidence=1.0) + +# Call the agent directly +response = await orchestrator.agent_process_request( + "What is the capital of France?", + "user123", + "session456", + classifier_result, + {} # additional parameters (optional) +) + +print(response.output) # Access the agent's response +print(response.metadata) # Access metadata about the request +print(response.streaming) # Check if response is streaming +``` + + + +### Using through the Orchestrator + +When you want 
to use the agent as part of a multi-agent system, add it to the Multi-Agent Orchestrator: + + + ```typescript import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; @@ -215,14 +339,20 @@ import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; const orchestrator = new MultiAgentOrchestrator(); orchestrator.addAgent(agent); +// The orchestrator will automatically handle agent selection and routing const response = await orchestrator.routeRequest( "What is the capital of France?", "user123", - "session456" + "session456", + {} // additional parameters (optional) ); + +console.log(response.output); // Access the agent's response +console.log(response.metadata); // Access metadata about the request +console.log(response.streaming); // Check if response is streaming ``` - + ```python from multi_agent_orchestrator import MultiAgentOrchestrator @@ -230,21 +360,32 @@ from multi_agent_orchestrator import MultiAgentOrchestrator orchestrator = MultiAgentOrchestrator() orchestrator.add_agent(agent) +# The orchestrator will automatically handle agent selection and routing response = await orchestrator.route_request( "What is the capital of France?", "user123", - "session456" + "session456", + {} # additional parameters (optional) ) + +print(response.output) # Access the agent's response +print(response.metadata) # Access metadata about the request +print(response.streaming) # Check if response is streaming ``` +The key differences between these approaches are: +- Direct usage is simpler when you only need one agent +- Orchestrator usage provides automatic agent selection and handles multiple agents +- Both methods return an `AgentResponse` with the same structure + ## Streaming Responses If you've enabled streaming (`streaming: true` in the options), the agent will return an AsyncIterable that you can use to process the response in chunks: - + ```typescript const streamingResponse = await orchestrator.routeRequest( @@ -260,7 +401,7 @@ if 
(Symbol.asyncIterator in streamingResponse) { } ``` - + ```python streaming_response = await orchestrator.route_request( @@ -277,6 +418,47 @@ if hasattr(streaming_response, '__aiter__'): +## Using Retrievers + +The OpenAIAgent supports retrievers to enhance responses with additional context. Here's how to use a retriever: + + + + +```typescript +import { OpenAIAgent, CustomRetriever } from 'multi-agent-orchestrator'; + +const retriever = new CustomRetriever({ + // Retriever configuration +}); + +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', + description: 'Context-aware AI assistant', + apiKey: 'your-openai-api-key', + retriever: retriever +}); +``` + + + +```python +from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions, CustomRetriever + +retriever = CustomRetriever( + # Retriever configuration +) + +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='Context-aware AI assistant', + api_key='your-openai-api-key', + retriever=retriever +)) +``` + + + ## Best Practices 1. **API Key Security**: Ensure your OpenAI API key is kept secure and not exposed in your codebase. 
From c7308f4d11c1c2de521fe4ba8e39f008d900ba8e Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Fri, 20 Dec 2024 14:30:24 +0100 Subject: [PATCH 05/12] draft --- .../src/multi_agent_orchestrator/agents/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/src/multi_agent_orchestrator/agents/__init__.py b/python/src/multi_agent_orchestrator/agents/__init__.py index c02982dc..9cb4c999 100644 --- a/python/src/multi_agent_orchestrator/agents/__init__.py +++ b/python/src/multi_agent_orchestrator/agents/__init__.py @@ -61,9 +61,9 @@ 'AnthropicAgentOptions' ]) -print(_OPENAI_AVAILABLE) -#if _OPENAI_AVAILABLE: -__all__.extend([ - 'OpenAIAgent', - 'OpenAIAgentOptions' - ]) + +if _OPENAI_AVAILABLE: + __all__.extend([ + 'OpenAIAgent', + 'OpenAIAgentOptions' + ]) From 320061d4a925c013e5ba7f15e19c423f881d9b71 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 11:54:11 +0100 Subject: [PATCH 06/12] openai agent & aws, anthropic and openai optional packages --- python/setup.cfg | 6 +- .../agents/__init__.py | 72 ++++++++++--------- .../agents/openai_agent.py | 16 +++-- .../classifiers/__init__.py | 17 +++-- .../multi_agent_orchestrator/orchestrator.py | 22 ++++-- .../storage/__init__.py | 19 ++++- 6 files changed, 99 insertions(+), 53 deletions(-) diff --git a/python/setup.cfg b/python/setup.cfg index aa3e784b..340a56ca 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -19,10 +19,11 @@ package_dir = = src packages = find: python_requires = >=3.11 -install_requires = - boto3==1.35.0 + [options.extras_require] +aws = + boto3==0.40.0 anthropic = anthropic==0.40.0 openai = @@ -30,6 +31,7 @@ openai = all = anthropic==0.40.0 openai==1.55.3 + boto3==0.40.0 [options.packages.find] where = src diff --git a/python/src/multi_agent_orchestrator/agents/__init__.py b/python/src/multi_agent_orchestrator/agents/__init__.py index 9cb4c999..b9b70c04 100644 --- a/python/src/multi_agent_orchestrator/agents/__init__.py +++ 
b/python/src/multi_agent_orchestrator/agents/__init__.py @@ -2,58 +2,64 @@ Code for Agents. """ from .agent import Agent, AgentOptions, AgentCallbacks, AgentProcessingResult, AgentResponse -from .lambda_agent import LambdaAgent, LambdaAgentOptions -from .bedrock_llm_agent import BedrockLLMAgent, BedrockLLMAgentOptions -from .lex_bot_agent import LexBotAgent, LexBotAgentOptions -from .amazon_bedrock_agent import AmazonBedrockAgent, AmazonBedrockAgentOptions -from .comprehend_filter_agent import ComprehendFilterAgent, ComprehendFilterAgentOptions -from .chain_agent import ChainAgent, ChainAgentOptions -from .bedrock_translator_agent import BedrockTranslatorAgent, BedrockTranslatorAgentOptions -from .bedrock_inline_agent import BedrockInlineAgent, BedrockInlineAgentOptions -from .bedrock_flows_agent import BedrockFlowsAgent, BedrockFlowsAgentOptions +try: + from .lambda_agent import LambdaAgent, LambdaAgentOptions + from .bedrock_llm_agent import BedrockLLMAgent, BedrockLLMAgentOptions + from .lex_bot_agent import LexBotAgent, LexBotAgentOptions + from .amazon_bedrock_agent import AmazonBedrockAgent, AmazonBedrockAgentOptions + from .comprehend_filter_agent import ComprehendFilterAgent, ComprehendFilterAgentOptions + from .bedrock_translator_agent import BedrockTranslatorAgent, BedrockTranslatorAgentOptions + from .chain_agent import ChainAgent, ChainAgentOptions + from .bedrock_inline_agent import BedrockInlineAgent, BedrockInlineAgentOptions + from .bedrock_flows_agent import BedrockFlowsAgent, BedrockFlowsAgentOptions + _AWS_AVAILABLE = True +except ImportError: + _AWS_AVAILABLE = False try: from .anthropic_agent import AnthropicAgent, AnthropicAgentOptions _ANTHROPIC_AVAILABLE = True except ImportError: _ANTHROPIC_AVAILABLE = False -from .openai_agent import OpenAIAgent, OpenAIAgentOptions - try: from .openai_agent import OpenAIAgent, OpenAIAgentOptions _OPENAI_AVAILABLE = True except ImportError: - print(ImportError) _OPENAI_AVAILABLE = False - __all__ = [ 
'Agent', 'AgentOptions', 'AgentCallbacks', 'AgentProcessingResult', - 'AgentResponse', - 'LambdaAgent', - 'LambdaAgentOptions', - 'BedrockLLMAgent', - 'BedrockLLMAgentOptions', - 'LexBotAgent', - 'LexBotAgentOptions', - 'AmazonBedrockAgent', - 'AmazonBedrockAgentOptions', - 'ComprehendFilterAgent', - 'ComprehendFilterAgentOptions', - 'BedrockTranslatorAgent', - 'BedrockTranslatorAgentOptions', - 'ChainAgent', - 'ChainAgentOptions', - 'BedrockInlineAgent', - 'BedrockInlineAgentOptions', - 'BedrockFlowsAgent', - 'BedrockFlowsAgentOptions' -] + 'AgentResponse' + ] + + +if _AWS_AVAILABLE : + __all__.extend([ + 'LambdaAgent', + 'LambdaAgentOptions', + 'BedrockLLMAgent', + 'BedrockLLMAgentOptions', + 'LexBotAgent', + 'LexBotAgentOptions', + 'AmazonBedrockAgent', + 'AmazonBedrockAgentOptions', + 'ComprehendFilterAgent', + 'ComprehendFilterAgentOptions', + 'ChainAgent', + 'ChainAgentOptions', + 'BedrockTranslatorAgent', + 'BedrockTranslatorAgentOptions', + 'BedrockInlineAgent', + 'BedrockInlineAgentOptions', + 'BedrockFlowsAgent', + 'BedrockFlowsAgentOptions' + ]) + if _ANTHROPIC_AVAILABLE: __all__.extend([ diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index 4cdb59c7..0f3d892e 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -160,16 +160,24 @@ async def handle_single_response(self, request_options: Dict[str, Any]) -> Conve Logger.error(f'Error in OpenAI API call: {str(error)}') raise error - async def handle_streaming_response(self, request_options: Dict[str, Any]) -> Any: - + async def handle_streaming_response(self, request_options: Dict[str, Any]) -> AsyncIterable[Any]: try: stream = self.client.chat.completions.create(**request_options) + accumulated_message = [] for chunk in stream: if chunk.choices[0].delta.content: + chunk_content = chunk.choices[0].delta.content + 
accumulated_message.append(chunk_content) if self.callbacks: - self.callbacks.on_llm_new_token(chunk.choices[0].delta.content) - yield chunk.choices[0].delta.content + self.callbacks.on_llm_new_token(chunk_content) + yield chunk_content + + # Store the complete message in the instance for later access if needed + self._last_complete_message = ConversationMessage( + role=ParticipantRole.ASSISTANT.value, + content=[{"text": ''.join(accumulated_message)}] + ) except Exception as error: Logger.error(f"Error getting stream from OpenAI model: {str(error)}") diff --git a/python/src/multi_agent_orchestrator/classifiers/__init__.py b/python/src/multi_agent_orchestrator/classifiers/__init__.py index 123bbe8d..48c51865 100644 --- a/python/src/multi_agent_orchestrator/classifiers/__init__.py +++ b/python/src/multi_agent_orchestrator/classifiers/__init__.py @@ -2,7 +2,12 @@ Code for Classifier. """ from .classifier import Classifier, ClassifierResult -from .bedrock_classifier import BedrockClassifier, BedrockClassifierOptions + +try: + from .bedrock_classifier import BedrockClassifier, BedrockClassifierOptions + _AWS_AVAILABLE = True +except Exception as e: + _AWS_AVAILABLE = False try: from .anthropic_classifier import AnthropicClassifier, AnthropicClassifierOptions @@ -16,15 +21,17 @@ except Exception as e: _OPENAI_AVAILABLE = False - - __all__ = [ "Classifier", "ClassifierResult", - "BedrockClassifier", - "BedrockClassifierOptions" ] +if _AWS_AVAILABLE: + __all__.extend([ + "BedrockClassifier", + "BedrockClassifierOptions" + ]) + if _ANTHROPIC_AVAILABLE: __all__.extend([ "AnthropicClassifier", diff --git a/python/src/multi_agent_orchestrator/orchestrator.py b/python/src/multi_agent_orchestrator/orchestrator.py index 6e239b1b..49e577c4 100644 --- a/python/src/multi_agent_orchestrator/orchestrator.py +++ b/python/src/multi_agent_orchestrator/orchestrator.py @@ -3,14 +3,17 @@ import time from multi_agent_orchestrator.utils.logger import Logger from 
multi_agent_orchestrator.types import ConversationMessage, ParticipantRole, OrchestratorConfig -from multi_agent_orchestrator.classifiers import (Classifier, - ClassifierResult, - BedrockClassifier, - BedrockClassifierOptions) +from multi_agent_orchestrator.classifiers import Classifier,ClassifierResult from multi_agent_orchestrator.agents import (Agent, AgentResponse, AgentProcessingResult) -from multi_agent_orchestrator.storage import ChatStorage, InMemoryChatStorage +from multi_agent_orchestrator.storage import ChatStorage +from multi_agent_orchestrator.storage import InMemoryChatStorage +try: + from multi_agent_orchestrator.classifiers import BedrockClassifier, BedrockClassifierOptions + _BEDROCK_AVAILABLE = True +except ImportError: + _BEDROCK_AVAILABLE = False @dataclass class MultiAgentOrchestrator: @@ -41,7 +44,14 @@ def __init__(self, self.logger = Logger(self.config, logger) self.agents: Dict[str, Agent] = {} self.storage = storage or InMemoryChatStorage() - self.classifier: Classifier = classifier or BedrockClassifier(options=BedrockClassifierOptions()) + + if classifier: + self.classifier = classifier + elif _BEDROCK_AVAILABLE: + self.classifier = BedrockClassifier(options=BedrockClassifierOptions()) + else: + raise ValueError("No classifier provided and BedrockClassifier is not available. Please provide a classifier.") + self.execution_times: Dict[str, float] = {} self.default_agent: Agent = default_agent diff --git a/python/src/multi_agent_orchestrator/storage/__init__.py b/python/src/multi_agent_orchestrator/storage/__init__.py index 483a5bab..51d3279d 100644 --- a/python/src/multi_agent_orchestrator/storage/__init__.py +++ b/python/src/multi_agent_orchestrator/storage/__init__.py @@ -1,10 +1,23 @@ +""" +Storage implementations for chat history. 
+""" from .chat_storage import ChatStorage from .in_memory_chat_storage import InMemoryChatStorage -from .dynamodb_chat_storage import DynamoDbChatStorage +_AWS_AVAILABLE = False + +try: + from .dynamodb_chat_storage import DynamoDbChatStorage + _AWS_AVAILABLE = True +except ImportError: + _AWS_AVAILABLE = False __all__ = [ - 'ChatStorage', + 'ChatStorage', 'InMemoryChatStorage', - 'DynamoDbChatStorage' ] + +if _AWS_AVAILABLE: + __all__.extend([ + 'DynamoDbChatStorage' + ]) \ No newline at end of file From 0dccdf720f0de2785102e8ed0341b075a6a7efd0 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 11:59:02 +0100 Subject: [PATCH 07/12] update doc & readme --- README.md | 2 +- docs/src/content/docs/general/quickstart.mdx | 1 - python/README.md | 2 +- python/setup.cfg | 4 ++-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f40465a3..88ef8e0f 100644 --- a/README.md +++ b/README.md @@ -235,7 +235,7 @@ if (response.streaming == true) { # Optional: Set up a virtual environment python -m venv venv source venv/bin/activate # On Windows use `venv\Scripts\activate` -pip install multi-agent-orchestrator +pip install multi-agent-orchestrator[aws] ``` #### Default Usage diff --git a/docs/src/content/docs/general/quickstart.mdx b/docs/src/content/docs/general/quickstart.mdx index bfd0122f..2470d28d 100644 --- a/docs/src/content/docs/general/quickstart.mdx +++ b/docs/src/content/docs/general/quickstart.mdx @@ -91,7 +91,6 @@ Ensure you have [requested access](https://docs.aws.amazon.com/bedrock/latest/us ```bash - pip install multi-agent-orchestrator # for core dependencies pip install "multi-agent-orchestrator[anthropic]" # for Anthropic classifier and agent pip install "multi-agent-orchestrator[openai]" # for OpenAI classifier and agent pip install "multi-agent-orchestrator[all]" # for all packages including Anthropic and OpenAI diff --git a/python/README.md b/python/README.md index 937f9263..ce52471b 100644 --- 
a/python/README.md +++ b/python/README.md @@ -101,7 +101,7 @@ Check out our [documentation](https://awslabs.github.io/multi-agent-orchestrator # Optional: Set up a virtual environment python -m venv venv source venv/bin/activate # On Windows use `venv\Scripts\activate` -pip install multi-agent-orchestrator +pip install multi-agent-orchestrator[aws] ``` #### Default Usage diff --git a/python/setup.cfg b/python/setup.cfg index 340a56ca..05c7a424 100644 --- a/python/setup.cfg +++ b/python/setup.cfg @@ -23,7 +23,7 @@ python_requires = >=3.11 [options.extras_require] aws = - boto3==0.40.0 + boto3==1.35.0 anthropic = anthropic==0.40.0 openai = @@ -31,7 +31,7 @@ openai = all = anthropic==0.40.0 openai==1.55.3 - boto3==0.40.0 + boto3==1.35.0 [options.packages.find] where = src From 57824bc113ff63febe0a64c8f05df70cd3667cfa Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 15:35:26 +0100 Subject: [PATCH 08/12] add unit tests for openaiagent --- .../agents/openai_agent.py | 7 +- python/src/tests/agents/test_openai_agent.py | 164 ++++++++++++++++++ typescript/package-lock.json | 53 ++++++ typescript/src/agents/anthropicAgent.ts | 2 +- typescript/src/agents/openAIAgent.ts | 36 +++- typescript/tests/agents/OpenAi.test.ts | 144 +++++++++++++++ 6 files changed, 393 insertions(+), 13 deletions(-) create mode 100644 python/src/tests/agents/test_openai_agent.py create mode 100644 typescript/tests/agents/OpenAi.test.ts diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index 0f3d892e..74f58971 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -128,9 +128,10 @@ async def process_request( "stop": self.inference_config.get('stopSequences'), "stream": self.streaming } - + print("****") + print(self.streaming) if self.streaming: - return self.handle_streaming_response(request_options) + return await 
self.handle_streaming_response(request_options) else: return await self.handle_single_response(request_options) @@ -171,7 +172,7 @@ async def handle_streaming_response(self, request_options: Dict[str, Any]) -> As accumulated_message.append(chunk_content) if self.callbacks: self.callbacks.on_llm_new_token(chunk_content) - yield chunk_content + #yield chunk_content # Store the complete message in the instance for later access if needed self._last_complete_message = ConversationMessage( diff --git a/python/src/tests/agents/test_openai_agent.py b/python/src/tests/agents/test_openai_agent.py new file mode 100644 index 00000000..4b035a9b --- /dev/null +++ b/python/src/tests/agents/test_openai_agent.py @@ -0,0 +1,164 @@ +import pytest +from unittest.mock import Mock, AsyncMock, patch +from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole +from multi_agent_orchestrator.agents import OpenAIAgent, OpenAIAgentOptions + +@pytest.fixture +def mock_openai_client(): + mock_client = Mock() + # Set up nested structure to match OpenAI client + mock_client.chat = Mock() + mock_client.chat.completions = Mock() + mock_client.chat.completions.create = Mock() + return mock_client + + +@pytest.fixture +def openai_agent(mock_openai_client): + with patch('openai.OpenAI', return_value=mock_openai_client): + options = OpenAIAgentOptions( + name="TestAgent", + description="A test OpenAI agent", + api_key="test-api-key", + model="gpt-4", + streaming=False, + inference_config={ + 'maxTokens': 500, + 'temperature': 0.5, + 'topP': 0.8, + 'stopSequences': [] + } + ) + agent = OpenAIAgent(options) + agent.client = mock_openai_client # Explicitly set the mock client + return agent + + +def test_custom_system_prompt_with_variable(): + with patch('openai.OpenAI'): + options = OpenAIAgentOptions( + name="TestAgent", + description="A test agent", + api_key="test-api-key", + custom_system_prompt={ + 'template': "This is a prompt with {{variable}}", + 'variables': {'variable': 
'value'} + } + ) + agent = OpenAIAgent(options) + assert agent.system_prompt == "This is a prompt with value" + + +@pytest.mark.asyncio +async def test_process_request_success(openai_agent, mock_openai_client): + # Create a mock response object + mock_response = Mock() + mock_response.choices = [Mock()] + mock_response.choices[0].message = Mock() + mock_response.choices[0].message.content = "This is a test response" + mock_openai_client.chat.completions.create.return_value = mock_response + + result = await openai_agent.process_request( + "Test question", + "test_user", + "test_session", + [] + ) + + assert isinstance(result, ConversationMessage) + assert result.role == ParticipantRole.ASSISTANT.value + assert result.content[0]['text'] == 'This is a test response' + + +@pytest.mark.asyncio +async def test_process_request_streaming(openai_agent, mock_openai_client): + openai_agent.streaming = True + + # Create mock chunks + class MockChunk: + def __init__(self, content): + self.choices = [Mock()] + self.choices[0].delta = Mock() + self.choices[0].delta.content = content + + mock_stream = [ + MockChunk("This "), + MockChunk("is "), + MockChunk("a "), + MockChunk("test response") + ] + mock_openai_client.chat.completions.create.return_value = mock_stream + + result = await openai_agent.process_request( + "Test question", + "test_user", + "test_session", + [] + ) + + chunks = [] + async for chunk in result: + chunks.append(chunk) + + assert chunks == ["This ", "is ", "a ", "test response"] + + +@pytest.mark.asyncio +async def test_process_request_with_retriever(openai_agent, mock_openai_client): + # Set up mock retriever + mock_retriever = AsyncMock() + mock_retriever.retrieve_and_combine_results.return_value = "Context from retriever" + openai_agent.retriever = mock_retriever + + # Set up mock response + mock_response = Mock() + mock_response.choices = [Mock()] + mock_response.choices[0].message = Mock() + mock_response.choices[0].message.content = "Response with 
context" + mock_openai_client.chat.completions.create.return_value = mock_response + + result = await openai_agent.process_request( + "Test question", + "test_user", + "test_session", + [] + ) + + mock_retriever.retrieve_and_combine_results.assert_called_once_with("Test question") + assert isinstance(result, ConversationMessage) + assert result.content[0]['text'] == "Response with context" + + +@pytest.mark.asyncio +async def test_process_request_api_error(openai_agent, mock_openai_client): + mock_openai_client.chat.completions.create.side_effect = Exception("API Error") + + with pytest.raises(Exception) as exc_info: + await openai_agent.process_request( + "Test input", + "user123", + "session456", + [] + ) + assert "API Error" in str(exc_info.value) + + +@pytest.mark.asyncio +async def test_handle_single_response_no_choices(openai_agent, mock_openai_client): + # Create mock response with no choices + mock_response = Mock() + mock_response.choices = [] + mock_openai_client.chat.completions.create.return_value = mock_response + + with pytest.raises(ValueError, match='No choices returned from OpenAI API'): + await openai_agent.handle_single_response({ + "model": "gpt-4", + "messages": [{"role": "user", "content": "Hi"}], + "stream": False + }) + + +def test_is_streaming_enabled(openai_agent): + assert not openai_agent.is_streaming_enabled() + openai_agent.streaming = True + assert openai_agent.is_streaming_enabled() \ No newline at end of file diff --git a/typescript/package-lock.json b/typescript/package-lock.json index 9147e361..494c1b35 100644 --- a/typescript/package-lock.json +++ b/typescript/package-lock.json @@ -19,6 +19,7 @@ "@aws-sdk/lib-dynamodb": "^3.621.0", "@aws-sdk/util-dynamodb": "^3.621.0", "axios": "^1.7.2", + "chai": "^5.1.2", "eslint-config-prettier": "^9.1.0", "natural": "^7.0.7", "openai": "^4.52.7", @@ -4270,6 +4271,14 @@ "node": ">=8" } }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "engines": { + "node": ">=12" + } + }, "node_modules/async": { "version": "2.6.4", "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", @@ -4589,6 +4598,21 @@ } ] }, + "node_modules/chai": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz", + "integrity": "sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -4613,6 +4637,14 @@ "node": ">=10" } }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "engines": { + "node": ">= 16" + } + }, "node_modules/ci-info": { "version": "3.9.0", "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", @@ -4793,6 +4825,14 @@ } } }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "engines": { + "node": ">=6" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", @@ -6821,6 +6861,11 @@ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" }, + 
"node_modules/loupe": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz", + "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg==" + }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", @@ -7453,6 +7498,14 @@ "node": ">=8" } }, + "node_modules/pathval": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", + "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", + "engines": { + "node": ">= 14.16" + } + }, "node_modules/pg": { "version": "8.12.0", "resolved": "https://registry.npmjs.org/pg/-/pg-8.12.0.tgz", diff --git a/typescript/src/agents/anthropicAgent.ts b/typescript/src/agents/anthropicAgent.ts index 04abb5c8..5c90c771 100644 --- a/typescript/src/agents/anthropicAgent.ts +++ b/typescript/src/agents/anthropicAgent.ts @@ -190,7 +190,7 @@ export interface AnthropicAgentOptions extends AgentOptions { if (toolUseBlocks.length > 0) { // Append current response to the conversation - messages.push({role:'assistant', content:response.content}); + messages.push({role: ParticipantRole.ASSISTANT, content:response.content}); if (!this.toolConfig){ throw new Error("No tools available for tool use"); } diff --git a/typescript/src/agents/openAIAgent.ts b/typescript/src/agents/openAIAgent.ts index 6d1f4ab8..161136de 100644 --- a/typescript/src/agents/openAIAgent.ts +++ b/typescript/src/agents/openAIAgent.ts @@ -4,9 +4,17 @@ import OpenAI from 'openai'; import { Logger } from '../utils/logger'; import { Retriever } from "../retrievers/retriever"; +type WithApiKey = { + apiKey: string; + client?: never; +}; + +type WithClient = { + client: OpenAI; + apiKey?: never; +}; export interface OpenAIAgentOptions extends AgentOptions { - apiKey: string; model?: string; streaming?: boolean; inferenceConfig?: { @@ -23,10 
+31,12 @@ export interface OpenAIAgentOptions extends AgentOptions { } +export type OpenAIAgentOptionsWithAuth = OpenAIAgentOptions & (WithApiKey | WithClient); + const DEFAULT_MAX_TOKENS = 1000; export class OpenAIAgent extends Agent { - private openai: OpenAI; + private client: OpenAI; private model: string; private streaming: boolean; private inferenceConfig: { @@ -41,9 +51,20 @@ export class OpenAIAgent extends Agent { protected retriever?: Retriever; - constructor(options: OpenAIAgentOptions) { + constructor(options: OpenAIAgentOptionsWithAuth) { + super(options); - this.openai = new OpenAI({ apiKey: options.apiKey }); + + if (!options.apiKey && !options.client) { + throw new Error("OpenAI API key or OpenAI client is required"); + } + if (options.client) { + this.client = options.client; + } else { + if (!options.apiKey) throw new Error("OpenAI API key is required"); + this.client = new OpenAI({ apiKey: options.apiKey }); + } + this.model = options.model ?? OPENAI_MODEL_ID_GPT_O_MINI; this.streaming = options.streaming ?? 
false; this.inferenceConfig = { @@ -117,8 +138,6 @@ export class OpenAIAgent extends Agent { { role: 'user' as const, content: inputText } ] as OpenAI.Chat.ChatCompletionMessageParam[]; - console.log("messages="+JSON.stringify(messages)) - const { maxTokens, temperature, topP, stopSequences } = this.inferenceConfig; const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = { @@ -170,8 +189,7 @@ export class OpenAIAgent extends Agent { private async handleSingleResponse(input: any): Promise { try { const nonStreamingOptions = { ...input, stream: false }; - const chatCompletion = await this.openai.chat.completions.create(nonStreamingOptions); - + const chatCompletion = await this.client.chat.completions.create(nonStreamingOptions); if (!chatCompletion.choices || chatCompletion.choices.length === 0) { throw new Error('No choices returned from OpenAI API'); } @@ -193,7 +211,7 @@ export class OpenAIAgent extends Agent { } private async *handleStreamingResponse(options: OpenAI.Chat.ChatCompletionCreateParams): AsyncIterable { - const stream = await this.openai.chat.completions.create({ ...options, stream: true }); + const stream = await this.client.chat.completions.create({ ...options, stream: true }); for await (const chunk of stream) { const content = chunk.choices[0]?.delta?.content; if (content) { diff --git a/typescript/tests/agents/OpenAi.test.ts b/typescript/tests/agents/OpenAi.test.ts new file mode 100644 index 00000000..7d949218 --- /dev/null +++ b/typescript/tests/agents/OpenAi.test.ts @@ -0,0 +1,144 @@ +import { OpenAIAgent, OpenAIAgentOptions } from '../../src/agents/openAIAgent'; +import OpenAI from 'openai'; +import { ParticipantRole } from '../../src/types'; + +// Create a mock OpenAI client type that matches the structure we need +const createMockOpenAIClient = () => ({ + chat: { + completions: { + create: jest.fn(), + }, + }, +}); + +describe('OpenAIAgent', () => { + const mockUserId = 'user123'; + const mockSessionId = 'session456'; + let 
mockClient; + + beforeEach(() => { + // Create mocked OpenAI client + mockClient = createMockOpenAIClient(); + + // Set up default mock response + mockClient.chat.completions.create.mockResolvedValue({ + choices: [{ message: { content: 'Mock response' } }], + }); + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + describe('processRequest', () => { + it('should call OpenAI API with the correct parameters', async () => { + const options = { + name: 'Test Agent', + description: 'Test description', + client: mockClient as unknown as OpenAI, + customSystemPrompt: { + template: 'Custom prompt with {{variable}}', + variables: { variable: 'test-value' }, + }, + }; + + const openAIAgent = new OpenAIAgent(options); + + const inputText = 'What is AI?'; + const chatHistory = []; + + const response = await openAIAgent.processRequest( + inputText, + mockUserId, + mockSessionId, + chatHistory + ); + + // Verify API call + expect(mockClient.chat.completions.create).toHaveBeenCalledWith( + expect.objectContaining({ + model: expect.any(String), + messages: expect.arrayContaining([ + expect.objectContaining({ + role: 'system', + content: expect.stringContaining('Custom prompt with test-value'), + }), + expect.objectContaining({ + role: 'user', + content: inputText, + }), + ]), + }) + ); + // Verify response structure + expect(response).toEqual({ + role: ParticipantRole.ASSISTANT, + content: [{ text: 'Mock response' }], + }); + }); + + it('should handle streaming responses correctly when streaming is enabled', async () => { + const options: OpenAIAgentOptions & { client: OpenAI } = { + name: 'Test Agent', + description: 'Test description', + client: mockClient as unknown as OpenAI, + streaming: true, + }; + + const openAIAgent = new OpenAIAgent(options); + + // Mock streaming response + const mockStream = { + async *[Symbol.asyncIterator]() { + yield { choices: [{ delta: { content: 'Hello' } }] }; + yield { choices: [{ delta: { content: ' World' } }] }; + }, + }; + + 
mockClient.chat.completions.create.mockResolvedValueOnce(mockStream as any); + + const inputText = 'What is AI?'; + const chatHistory = []; + + const response = await openAIAgent.processRequest( + inputText, + mockUserId, + mockSessionId, + chatHistory + ); + + // Verify it returns an AsyncIterable + expect(response).toBeDefined(); + expect(typeof response[Symbol.asyncIterator]).toBe('function'); + + // Verify the streamed content + const chunks = []; + for await (const chunk of response as AsyncIterable) { + chunks.push(chunk); + } + expect(chunks).toEqual(['Hello', ' World']); + }); + + it('should throw error when API call fails', async () => { + const options: OpenAIAgentOptions & { client: OpenAI } = { + name: 'Test Agent', + description: 'Test description', + client: mockClient as unknown as OpenAI, + }; + + const openAIAgent = new OpenAIAgent(options); + + // Mock API error + mockClient.chat.completions.create.mockRejectedValueOnce( + new Error('API Error') + ); + + const inputText = 'What is AI?'; + const chatHistory = []; + + await expect( + openAIAgent.processRequest(inputText, mockUserId, mockSessionId, chatHistory) + ).rejects.toThrow('API Error'); + }); + }); +}); From 25b7f5d9fb5943a20b392fa62d8cc3f28830e046 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 15:41:15 +0100 Subject: [PATCH 09/12] add missing return --- python/src/multi_agent_orchestrator/agents/openai_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index 74f58971..b8eb06b3 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -175,7 +175,7 @@ async def handle_streaming_response(self, request_options: Dict[str, Any]) -> As #yield chunk_content # Store the complete message in the instance for later access if needed - 
self._last_complete_message = ConversationMessage( + return ConversationMessage( role=ParticipantRole.ASSISTANT.value, content=[{"text": ''.join(accumulated_message)}] ) From d2d24fbf50f6ed66dcc54226778b5c5f2022158e Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 15:43:01 +0100 Subject: [PATCH 10/12] add missing return --- python/src/multi_agent_orchestrator/agents/openai_agent.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index b8eb06b3..8f65949c 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -128,8 +128,6 @@ async def process_request( "stop": self.inference_config.get('stopSequences'), "stream": self.streaming } - print("****") - print(self.streaming) if self.streaming: return await self.handle_streaming_response(request_options) else: From 37066b41574301b5d8fc0446c3da47a135b03c8b Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 15:54:56 +0100 Subject: [PATCH 11/12] update doc for openai agent --- .../docs/agents/built-in/openai-agent.mdx | 638 +++++++++--------- .../agents/openai_agent.py | 2 +- 2 files changed, 331 insertions(+), 309 deletions(-) diff --git a/docs/src/content/docs/agents/built-in/openai-agent.mdx b/docs/src/content/docs/agents/built-in/openai-agent.mdx index 7a348ba7..b0034219 100644 --- a/docs/src/content/docs/agents/built-in/openai-agent.mdx +++ b/docs/src/content/docs/agents/built-in/openai-agent.mdx @@ -1,481 +1,503 @@ --- title: Open AI Agent -description: Documentation Open AI Agent +description: Documentation for the OpenAI Agent --- -The `OpenAIAgent` is a powerful agent class in the Multi-Agent Orchestrator framework that integrates with OpenAI's Chat Completion API. 
This agent allows you to leverage OpenAI's language models, such as GPT-3.5 and GPT-4, for various natural language processing tasks. +The `OpenAIAgent` is a powerful agent class in the Multi-Agent Orchestrator framework that integrates with OpenAI's Chat Completion API. This agent allows you to leverage OpenAI's language models for various natural language processing tasks. ## Key Features - Integration with OpenAI's Chat Completion API -- Support for multiple OpenAI models +- Support for multiple OpenAI models (e.g., GPT-4, GPT-3.5) - Streaming and non-streaming response options - Customizable inference configuration -- Handles conversation history for context-aware responses -- Customizable system prompts +- Conversation history handling for context-aware responses +- Customizable system prompts with variable support - Support for retrievers to enhance responses with additional context +- Flexible initialization with API key or custom client + +## Configuration Options + +The `OpenAIAgentOptions` extends the base `AgentOptions` with the following fields: + +### Required Fields +- `name`: Name of the agent +- `description`: Description of the agent's capabilities +- Authentication (one of the following is required): + - `apiKey`: Your OpenAI API key + - `client`: Custom OpenAI client instance + +### Optional Fields +- `model`: OpenAI model identifier (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI` +- `streaming`: Enable streaming responses. 
Defaults to `false` +- `retriever`: Custom retriever instance for enhancing responses with additional context +- `inferenceConfig`: Configuration for model inference: + - `maxTokens`: Maximum tokens to generate (default: 1000) + - `temperature`: Controls randomness (0-1) + - `topP`: Controls diversity via nucleus sampling + - `stopSequences`: Sequences that stop generation +- `customSystemPrompt`: System prompt configuration: + - `template`: Template string with optional variable placeholders + - `variables`: Key-value pairs for template variables ## Creating an OpenAIAgent -### Minimal Example (Required Fields Only) +Here are various examples showing different ways to create and configure an OpenAIAgent: + +### Basic Examples -Here's the simplest way to create an OpenAIAgent with just the required fields: +**1. Minimal Configuration** import { Tabs, TabItem } from '@astrojs/starlight/components'; + - ```typescript -import { OpenAIAgent } from 'multi-agent-orchestrator'; - const agent = new OpenAIAgent({ - name: 'OpenAI Assistant', // Required - description: 'A versatile AI assistant', // Required - apiKey: 'your-openai-api-key' // Required + name: 'OpenAI Assistant', + description: 'A versatile AI assistant', + apiKey: 'your-openai-api-key' }); + + + + +```python +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='A versatile AI assistant', + api_key='your-openai-api-key' +)) ``` - + + +
+ +**2. Using Custom Client** + + + +```typescript +import OpenAI from 'openai'; +const customClient = new OpenAI({ apiKey: 'your-openai-api-key' }); +const agent = new OpenAIAgent({ +name: 'OpenAI Assistant', +description: 'A versatile AI assistant', +client: customClient +}); + + ```python -from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions +from openai import OpenAI + +custom_client = OpenAI(api_key='your-openai-api-key') agent = OpenAIAgent(OpenAIAgentOptions( - name='OpenAI Assistant', # Required - description='A versatile AI assistant', # Required - api_key='your-openai-api-key' # Required + name='OpenAI Assistant', + description='A versatile AI assistant', + client=custom_client )) ``` - +
-### Full Example with All Options -Here's an example showing all available configuration options: +
+ +**3. Custom Model and Streaming** - ```typescript -import { OpenAIAgent } from 'multi-agent-orchestrator'; +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', + description: 'A streaming-enabled assistant', + apiKey: 'your-openai-api-key', + model: 'gpt-4', + streaming: true +}); +``` + + +```python +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='A streaming-enabled assistant', + api_key='your-openai-api-key', + model='gpt-4', + streaming=True +)) +``` + + + +
+ +**4. With Inference Configuration** + + + +```typescript const agent = new OpenAIAgent({ - // Required fields name: 'OpenAI Assistant', - description: 'A versatile AI assistant powered by OpenAI models', + description: 'An assistant with custom inference settings', apiKey: 'your-openai-api-key', - - // Optional fields - model: 'gpt-3.5-turbo', - streaming: true, - retriever: customRetriever, // Custom retriever for additional context - inferenceConfig: { maxTokens: 500, temperature: 0.7, topP: 0.9, stopSequences: ['Human:', 'AI:'] - }, - - customSystemPrompt: { - template: 'You are a helpful AI assistant specialized in {{DOMAIN}}', - variables: { - DOMAIN: 'technology' - } } }); ``` - - + ```python -from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions - agent = OpenAIAgent(OpenAIAgentOptions( - # Required fields name='OpenAI Assistant', - description='A versatile AI assistant powered by OpenAI models', + description='An assistant with custom inference settings', api_key='your-openai-api-key', - - # Optional fields - model='gpt-3.5-turbo', - streaming=True, - client=custom_openai_client, # Custom OpenAI client instance - retriever=custom_retriever, # Custom retriever for additional context - inference_config={ 'maxTokens': 500, 'temperature': 0.7, 'topP': 0.9, 'stopSequences': ['Human:', 'AI:'] - }, - - custom_system_prompt={ - 'template': 'You are a helpful AI assistant specialized in {{DOMAIN}}', - 'variables': { - 'DOMAIN': 'technology' - } } )) ``` -### OpenAIAgentOptions +
-The `OpenAIAgentOptions` extends the base `AgentOptions` with the following fields: +**5. With Simple System Prompt** - - -Required fields: -- `name` (required): Name of the agent -- `description` (required): Description of the agent's capabilities -- `apiKey` (required): Your OpenAI API key for authentication - -Optional fields: -- `model` (optional): The OpenAI model identifier (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI` -- `streaming` (optional): Boolean flag for streaming responses. Defaults to `false` -- `retriever` (optional): Custom retriever instance for enhancing responses with additional context -- `inferenceConfig` (optional): Configuration object for model inference: + ```typescript -{ - maxTokens?: number; // Maximum tokens to generate (default: 1000) - temperature?: number; // Controls randomness (0-1) - topP?: number; // Controls diversity via nucleus sampling - stopSequences?: string[]; // Sequences that stop generation -} +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', + description: 'An assistant with custom prompt', + apiKey: 'your-openai-api-key', + customSystemPrompt: { + template: 'You are a helpful AI assistant focused on technical support.' + } +}); ``` -- `customSystemPrompt` (optional): System prompt configuration: + + +```python +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='An assistant with custom prompt', + api_key='your-openai-api-key', + custom_system_prompt={ + 'template': 'You are a helpful AI assistant focused on technical support.' + } +)) +``` + + + +
+ +**6. With System Prompt Variables** + + + ```typescript -{ - template: string; // The prompt template string - variables?: { // Key-value pairs for template variables - [key: string]: string | string[]; +const agent = new OpenAIAgent({ + name: 'OpenAI Assistant', + description: 'An assistant with variable prompt', + apiKey: 'your-openai-api-key', + customSystemPrompt: { + template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + variables: { + DOMAIN: 'customer support', + TONE: 'friendly and helpful' + } } -} +}); ``` - - - - -Required fields: -- `name` (required): Name of the agent -- `description` (required): Description of the agent's capabilities -- `api_key` (required): String containing your OpenAI API key for authentication - -Optional fields: -- `model` (Optional[str]): The OpenAI model identifier. Defaults to `OPENAI_MODEL_ID_GPT_O_MINI` -- `streaming` (Optional[bool]): Flag for streaming responses. Defaults to `False` -- `client` (Optional[Any]): Custom OpenAI client instance -- `retriever` (Optional[Retriever]): Custom retriever instance for enhancing responses -- `inference_config` (Optional[Dict[str, Any]]): Dictionary configuring model inference: + + ```python -{ - 'maxTokens': int, # Maximum tokens to generate (default: 1000) - 'temperature': float, # Controls randomness (0-1) - 'topP': float, # Controls diversity via nucleus sampling - 'stopSequences': List[str] # Sequences that stop generation -} +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='An assistant with variable prompt', + api_key='your-openai-api-key', + custom_system_prompt={ + 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + 'variables': { + 'DOMAIN': 'customer support', + 'TONE': 'friendly and helpful' + } + } +)) ``` -- `custom_system_prompt` (Optional[Dict[str, Any]]): Dictionary configuring the system prompt: + + + +
+ +**7. With Custom Retriever** + + + +```typescript +const retriever = new CustomRetriever({ + // Retriever configuration +}); +const agent = new OpenAIAgent({ +name: 'OpenAI Assistant', +description: 'An assistant with retriever', +apiKey: 'your-openai-api-key', +retriever: retriever +}); + + + ```python -{ - 'template': str, # The prompt template string - 'variables': dict # Key-value pairs for template variables -} -``` +retriever = CustomRetriever( + # Retriever configuration +) +agent = OpenAIAgent(OpenAIAgentOptions( + name='OpenAI Assistant', + description='An assistant with retriever', + api_key='your-openai-api-key', + retriever=retriever +)) +``` -## Setting the System Prompt +
-You can set or update the system prompt for the OpenAIAgent in two ways: - -1. During initialization: +**8. Combining Multiple Options** - ```typescript const agent = new OpenAIAgent({ - // ... other options ... + name: 'OpenAI Assistant', + description: 'An assistant with multiple options', + apiKey: 'your-openai-api-key', + model: 'gpt-4', + streaming: true, + inferenceConfig: { + maxTokens: 500, + temperature: 0.7 + }, customSystemPrompt: { - template: 'You are a helpful AI assistant specialized in answering questions about technology.' + template: 'You are an AI assistant specialized in {{DOMAIN}}.', + variables: { + DOMAIN: 'technical support' + } } }); ``` - - + ```python agent = OpenAIAgent(OpenAIAgentOptions( - # ... other options ... + name='OpenAI Assistant', + description='An assistant with multiple options', + api_key='your-openai-api-key', + model='gpt-4', + streaming=True, + inference_config={ + 'maxTokens': 500, + 'temperature': 0.7 + }, custom_system_prompt={ - 'template': 'You are a helpful AI assistant specialized in answering questions about technology.' + 'template': 'You are an AI assistant specialized in {{DOMAIN}}.', + 'variables': { + 'DOMAIN': 'technical support' + } } )) ``` -2. Using the `setSystemPrompt/set_system_prompt` method after initialization: +
+ +**9. Complete Example with All Options** + +Here's a comprehensive example showing all available configuration options: - ```typescript -agent.setSystemPrompt( - `You are an AI assistant specialized in {{DOMAIN}}. - Your main goal is to {{GOAL}}. - Always maintain a {{TONE}} tone in your responses.`, - { - DOMAIN: "artificial intelligence", - GOAL: "explain complex AI concepts in simple terms", - TONE: "friendly and educational" +import { OpenAIAgent } from 'multi-agent-orchestrator'; + +const agent = new OpenAIAgent({ + // Required fields + name: 'Advanced OpenAI Assistant', + description: 'A fully configured AI assistant powered by OpenAI models', + apiKey: 'your-openai-api-key', + + // Optional fields + model: 'gpt-4', // Choose OpenAI model + streaming: true, // Enable streaming responses + retriever: customRetriever, // Custom retriever for additional context + + // Inference configuration + inferenceConfig: { + maxTokens: 500, // Maximum tokens to generate + temperature: 0.7, // Control randomness (0-1) + topP: 0.9, // Control diversity via nucleus sampling + stopSequences: ['Human:', 'AI:'] // Sequences that stop generation + }, + + // Custom system prompt with variables + customSystemPrompt: { + template: `You are an AI assistant specialized in {{DOMAIN}}. + Your core competencies: + {{SKILLS}} + + Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}`, + variables: { + DOMAIN: 'scientific research', + SKILLS: [ + '- Advanced data analysis', + '- Statistical methodology', + '- Research design', + '- Technical writing' + ], + TONE: 'professional and academic', + FOCUS: 'accuracy and clarity', + PRIORITY: 'evidence-based insights' + } } -); +}); ``` - - + ```python -agent.set_system_prompt( - template="""You are an AI assistant specialized in {{DOMAIN}}. - Your main goal is to {{GOAL}}. 
- Always maintain a {{TONE}} tone in your responses.""", - variables={ - "DOMAIN": "artificial intelligence", - "GOAL": "explain complex AI concepts in simple terms", - "TONE": "friendly and educational" +from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions + +agent = OpenAIAgent(OpenAIAgentOptions( + # Required fields + name='Advanced OpenAI Assistant', + description='A fully configured AI assistant powered by OpenAI models', + api_key='your-openai-api-key', + + # Optional fields + model='gpt-4', # Choose OpenAI model + streaming=True, # Enable streaming responses + retriever=custom_retriever, # Custom retriever for additional context + + # Inference configuration + inference_config={ + 'maxTokens': 500, # Maximum tokens to generate + 'temperature': 0.7, # Control randomness (0-1) + 'topP': 0.9, # Control diversity via nucleus sampling + 'stopSequences': ['Human:', 'AI:'] # Sequences that stop generation + }, + + # Custom system prompt with variables + custom_system_prompt={ + 'template': """You are an AI assistant specialized in {{DOMAIN}}. + Your core competencies: + {{SKILLS}} + + Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}""", + 'variables': { + 'DOMAIN': 'scientific research', + 'SKILLS': [ + '- Advanced data analysis', + '- Statistical methodology', + '- Research design', + '- Technical writing' + ], + 'TONE': 'professional and academic', + 'FOCUS': 'accuracy and clarity', + 'PRIORITY': 'evidence-based insights' + } } -) +)) ``` -## Usage +## Using the OpenAIAgent There are two ways to use the OpenAIAgent: directly or through the Multi-Agent Orchestrator. 
-### Direct Agent Usage +### Direct Usage -You can call the agent directly when you want to use a single agent without the orchestrator's routing capabilities: +Call the agent directly when you want to use a single agent without orchestrator routing: - ```typescript -import { OpenAIAgent, ClassifierResult } from 'multi-agent-orchestrator'; - -const agent = new OpenAIAgent({ - name: 'OpenAI Assistant', - description: 'A versatile AI assistant', - apiKey: 'your-openai-api-key' -}); - -// Create a classifier result for direct agent usage const classifierResult = { selectedAgent: agent, confidence: 1.0 }; -// Call the agent directly const response = await orchestrator.agentProcessRequest( "What is the capital of France?", "user123", "session456", - classifierResult, - {} // additional parameters (optional) + classifierResult ); - -console.log(response.output); // Access the agent's response -console.log(response.metadata); // Access metadata about the request -console.log(response.streaming); // Check if response is streaming ``` - - + ```python -from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions, ClassifierResult - -agent = OpenAIAgent(OpenAIAgentOptions( - name='OpenAI Assistant', - description='A versatile AI assistant', - api_key='your-openai-api-key' -)) - -# Create a classifier result for direct agent usage classifier_result = ClassifierResult(selected_agent=agent, confidence=1.0) -# Call the agent directly response = await orchestrator.agent_process_request( "What is the capital of France?", "user123", "session456", - classifier_result, - {} # additional parameters (optional) + classifier_result ) - -print(response.output) # Access the agent's response -print(response.metadata) # Access metadata about the request -print(response.streaming) # Check if response is streaming ``` -### Using through the Orchestrator +### Using with the Orchestrator -When you want to use the agent as part of a multi-agent system, add it to the Multi-Agent 
Orchestrator: +Add the agent to Multi-Agent Orchestrator for use in a multi-agent system: - ```typescript -import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; - const orchestrator = new MultiAgentOrchestrator(); orchestrator.addAgent(agent); -// The orchestrator will automatically handle agent selection and routing const response = await orchestrator.routeRequest( "What is the capital of France?", "user123", - "session456", - {} // additional parameters (optional) + "session456" ); - -console.log(response.output); // Access the agent's response -console.log(response.metadata); // Access metadata about the request -console.log(response.streaming); // Check if response is streaming ``` - - + ```python -from multi_agent_orchestrator import MultiAgentOrchestrator - orchestrator = MultiAgentOrchestrator() orchestrator.add_agent(agent) -# The orchestrator will automatically handle agent selection and routing response = await orchestrator.route_request( "What is the capital of France?", "user123", - "session456", - {} # additional parameters (optional) -) - -print(response.output) # Access the agent's response -print(response.metadata) # Access metadata about the request -print(response.streaming) # Check if response is streaming -``` - - - -The key differences between these approaches are: -- Direct usage is simpler when you only need one agent -- Orchestrator usage provides automatic agent selection and handles multiple agents -- Both methods return an `AgentResponse` with the same structure - -## Streaming Responses - -If you've enabled streaming (`streaming: true` in the options), the agent will return an AsyncIterable that you can use to process the response in chunks: - - - - -```typescript -const streamingResponse = await orchestrator.routeRequest( - "Tell me a long story about a brave knight", - "user123", - "session456" -); - -if (Symbol.asyncIterator in streamingResponse) { - for await (const chunk of streamingResponse) { - console.log(chunk); // 
Process each chunk of the response - } -} -``` - - - -```python -streaming_response = await orchestrator.route_request( - "Tell me a long story about a brave knight", - "user123", "session456" ) - -# Check if the response is streaming -if hasattr(streaming_response, '__aiter__'): - async for chunk in streaming_response: - print(chunk, end='', flush=True) # Process each chunk of the response ``` -## Using Retrievers - -The OpenAIAgent supports retrievers to enhance responses with additional context. Here's how to use a retriever: - - - - -```typescript -import { OpenAIAgent, CustomRetriever } from 'multi-agent-orchestrator'; - -const retriever = new CustomRetriever({ - // Retriever configuration -}); - -const agent = new OpenAIAgent({ - name: 'OpenAI Assistant', - description: 'Context-aware AI assistant', - apiKey: 'your-openai-api-key', - retriever: retriever -}); -``` - - - -```python -from multi_agent_orchestrator import OpenAIAgent, OpenAIAgentOptions, CustomRetriever - -retriever = CustomRetriever( - # Retriever configuration -) - -agent = OpenAIAgent(OpenAIAgentOptions( - name='OpenAI Assistant', - description='Context-aware AI assistant', - api_key='your-openai-api-key', - retriever=retriever -)) -``` - - - -## Best Practices - -1. **API Key Security**: Ensure your OpenAI API key is kept secure and not exposed in your codebase. -2. **Model Selection**: Choose an appropriate model based on your use case and performance requirements. -3. **Inference Configuration**: Experiment with different inference parameters to find the best balance between response quality and speed. -4. **Error Handling**: Implement additional error handling in your application to manage potential API failures gracefully. -5. **Rate Limiting**: Be aware of OpenAI's rate limits and implement appropriate throttling if necessary. -6. **System Prompts**: Craft clear and specific system prompts to guide the model's behavior and improve response quality for your use case. 
- -## Implementation Notes - -When implementing the OpenAIAgent in your application: - -- In Python, use snake_case naming conventions (e.g., `custom_system_prompt`, `set_system_prompt`) -- In TypeScript, use camelCase naming conventions (e.g., `customSystemPrompt`, `setSystemPrompt`) -- Both implementations support async/await patterns for handling responses -- Both versions include callback support for streaming responses -- Error handling patterns are consistent across both implementations - -By leveraging the OpenAIAgent, you can create sophisticated, context-aware AI agents capable of handling a wide range of tasks and interactions, all powered by OpenAI's state-of-the-art language models. \ No newline at end of file diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py index 8f65949c..8b205724 100644 --- a/python/src/multi_agent_orchestrator/agents/openai_agent.py +++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py @@ -159,7 +159,7 @@ async def handle_single_response(self, request_options: Dict[str, Any]) -> Conve Logger.error(f'Error in OpenAI API call: {str(error)}') raise error - async def handle_streaming_response(self, request_options: Dict[str, Any]) -> AsyncIterable[Any]: + async def handle_streaming_response(self, request_options: Dict[str, Any]) -> ConversationMessage: try: stream = self.client.chat.completions.create(**request_options) accumulated_message = [] From 1409110bb4af5c0d5e78e77477338456692bb8c6 Mon Sep 17 00:00:00 2001 From: Corneliu Croitoru Date: Mon, 23 Dec 2024 16:23:21 +0100 Subject: [PATCH 12/12] update doc with examples for bedrockagent, anthropic agent and bedrockllm agent --- .../agents/built-in/amazon-bedrock-agent.mdx | 194 ++++- .../docs/agents/built-in/anthropic-agent.mdx | 522 ++++++++++++-- .../agents/built-in/bedrock-llm-agent.mdx | 679 ++++++++++-------- 3 files changed, 1006 insertions(+), 389 deletions(-) diff --git 
a/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx b/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx index 2c496f01..736d8fa2 100644 --- a/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx +++ b/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx @@ -7,47 +7,189 @@ The `AmazonBedrockAgent` is a specialized agent class in the Multi-Agent Orchest ## Creating an AmazonBedrockAgent -To create a new `AmazonBedrockAgent` with only the required parameters, use the following code: +Here are various examples showing different ways to create and configure an AmazonBedrockAgent: + +### Basic Examples + +**1. Minimal Configuration** import { Tabs, TabItem } from '@astrojs/starlight/components'; - ```typescript - import { AmazonBedrockAgent } from 'multi-agent-orchestrator'; - - const agent = new AmazonBedrockAgent({ - name: 'My Bank Agent', - description: 'You are a helpful and friendly agent that answers questions about loan-related inquiries', - agentId: 'your-agent-id', - agentAliasId: 'your-agent-alias-id' - }); - ``` +```typescript +const agent = new AmazonBedrockAgent({ + name: 'My Bank Agent', + description: 'A helpful and friendly agent that answers questions about loan-related inquiries', + agentId: 'your-agent-id', + agentAliasId: 'your-agent-alias-id' +}); +``` + + +```python +agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( + name='My Bank Agent', + description='A helpful and friendly agent that answers questions about loan-related inquiries', + agent_id='your-agent-id', + agent_alias_id='your-agent-alias-id' +)) +``` + + +
+ +**2. Using Custom Client** + + + +```typescript +import { BedrockAgentRuntimeClient } from "@aws-sdk/client-bedrock-agent-runtime"; +const customClient = new BedrockAgentRuntimeClient({ region: 'us-east-1' }); +const agent = new AmazonBedrockAgent({ + name: 'My Bank Agent', + description: 'A helpful and friendly agent for banking inquiries', + agentId: 'your-agent-id', + agentAliasId: 'your-agent-alias-id', + client: customClient +}); +``` + - ```python - from multi_agent_orchestrator.agents import AmazonBedrockAgent, AmazonBedrockAgentOptions - - agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( - name='My Bank Agent', - description='You are a helpful and friendly agent that answers questions about loan-related inquiries', - agent_id='your-agent-id', - agent_alias_id='your-agent-alias-id' - )) - ``` +```python +import boto3 +custom_client = boto3.client('bedrock-agent-runtime', region_name='us-east-1') +agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( +name='My Bank Agent', +description='A helpful and friendly agent for banking inquiries', +agent_id='your-agent-id', +agent_alias_id='your-agent-alias-id', +client=custom_client +)) +``` + + + +
+ +**3. With Tracing Enabled** + + + +```typescript +const agent = new AmazonBedrockAgent({ + name: 'My Bank Agent', + description: 'A banking agent with tracing enabled', + agentId: 'your-agent-id', + agentAliasId: 'your-agent-alias-id', + enableTrace: true +}); +``` + + +```python +agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( + name='My Bank Agent', + description='A banking agent with tracing enabled', + agent_id='your-agent-id', + agent_alias_id='your-agent-alias-id', + enable_trace=True +)) +``` -In this basic example, we provide the four required parameters: `name`, `description`, `agent_id`, and `agent_alias_id`. +
+ +**4. With Streaming Enabled** + + + +```typescript +const agent = new AmazonBedrockAgent({ + name: 'My Bank Agent', + description: 'A streaming-enabled banking agent', + agentId: 'your-agent-id', + agentAliasId: 'your-agent-alias-id', + streaming: true +}); +``` + + +```python +agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( + name='My Bank Agent', + description='A streaming-enabled banking agent', + agent_id='your-agent-id', + agent_alias_id='your-agent-alias-id', + streaming=True +)) +``` + + + +
+ +**5. Complete Example with All Options** + + + +```typescript +import { AmazonBedrockAgent } from "multi-agent-orchestrator"; +import { BedrockAgentRuntimeClient } from "@aws-sdk/client-bedrock-agent-runtime"; +const agent = new AmazonBedrockAgent({ + // Required fields + name: "Advanced Bank Agent", + description: "A fully configured banking agent with all features enabled", + agentId: "your-agent-id", + agentAliasId: "your-agent-alias-id", + // Optional fields + region: "us-west-2", + streaming: true, + enableTrace: true, + client: new BedrockAgentRuntimeClient({ region: "us-west-2" }), +}); + +``` + + +```python +import boto3 +from multi_agent_orchestrator.agents import AmazonBedrockAgent, AmazonBedrockAgentOptions + +custom_client = boto3.client('bedrock-agent-runtime', region_name='us-west-2') + +agent = AmazonBedrockAgent(AmazonBedrockAgentOptions( + # Required fields + name='Advanced Bank Agent', + description='A fully configured banking agent with all features enabled', + agent_id='your-agent-id', + agent_alias_id='your-agent-alias-id', + + # Optional fields + region='us-west-2', + streaming=True, + enable_trace=True, + client=custom_client +)) +``` + + + ### Option Explanations - `name`: (Required) Identifies the agent within your system. - `description`: (Required) Describes the agent's purpose or capabilities. -- `agent_id`: (Required) The ID of the Amazon Bedrock agent you want to use. -- `agent_alias_id`: (Required) The alias ID of the Amazon Bedrock agent. -- `enableTrace`: If you set enableTrace to `true` in the request, you can trace the agent’s steps and reasoning process that led it to the response. -- `streaming`: Specifies whether to enable streaming for the final response. This is set to false by default `False` +- `agentId/agent_id`: (Required) The ID of the Amazon Bedrock agent you want to use. +- `agentAliasId/agent_alias_id`: (Required) The alias ID of the Amazon Bedrock agent. +- `region`: (Optional) AWS region for the Bedrock service. 
If not provided, uses the default AWS region. +- `client`: (Optional) Custom BedrockAgentRuntimeClient for specialized configurations. +- `enableTrace/enable_trace`: (Optional) When set to true, enables tracing of the agent's steps and reasoning process. +- `streaming`: (Optional) Enables streaming for the final response. Defaults to false. + + ## Adding the Agent to the Orchestrator diff --git a/docs/src/content/docs/agents/built-in/anthropic-agent.mdx b/docs/src/content/docs/agents/built-in/anthropic-agent.mdx index 89d598c3..80d49f82 100644 --- a/docs/src/content/docs/agents/built-in/anthropic-agent.mdx +++ b/docs/src/content/docs/agents/built-in/anthropic-agent.mdx @@ -20,103 +20,481 @@ This agent can handle a wide range of processing tasks, making it suitable for d ## Creating a AnthropicAgent -By default, the **AnthropicAgent** uses the `claude-3-5-sonnet-20240620 model`. +Here are various examples showing different ways to create and configure an AnthropicAgent: -### Basic Example +### Basic Examples -To create a new **Anthropic Agent** with only the required parameters, use the following code: +**1. 
Minimal Configuration** import { Tabs, TabItem } from '@astrojs/starlight/components'; + - ```typescript - import { AnthropicAgent } from 'multi-agent-orchestrator'; - - const agent = new AnthropicAgent({ - name: 'Tech Agent', - description: 'Specializes in technology areas including software development, \ - hardware, AI, cybersecurity, blockchain, cloud computing, \ - emerging tech innovations,and pricing/costs related to \ - technology products and services.', - apiKey: 'your-anthropic-api-key-here' - }); - ``` + +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'A versatile AI assistant', + apiKey: 'your-anthropic-api-key' +}); +``` - ```python +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='A versatile AI assistant', + api_key='your-anthropic-api-key' +)) +``` + + - ``` +
+ +**2. Using Custom Client** + + + +```typescript +import { Anthropic } from '@anthropic-ai/sdk'; +const customClient = new Anthropic({ apiKey: 'your-anthropic-api-key' }); +const agent = new AnthropicAgent({ +  name: 'Anthropic Assistant', +  description: 'A versatile AI assistant', +  client: customClient +}); +``` + + +```python +from anthropic import Anthropic + +custom_client = Anthropic(api_key='your-anthropic-api-key') + +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='A versatile AI assistant', + client=custom_client +)) +``` + + + + +
+ +**3. Custom Model and Streaming** + + + +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'A streaming-enabled assistant', + apiKey: 'your-anthropic-api-key', + modelId: 'claude-3-opus-20240229', + streaming: true +}); +``` + + +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='A streaming-enabled assistant', + api_key='your-anthropic-api-key', + model_id='claude-3-opus-20240229', + streaming=True +)) +``` -In this basic example, only the `name`, `description` and `apiKey` are provided, which are the only required parameters for creating a AnthropicAgent. +
-### Advanced Example +**4. With Inference Configuration** -For more complex use cases, you can create a **Anthropic Agent** with all available options. All parameters except `name`, `description` and `apiKey` or `client` are optional: + + +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'An assistant with custom inference settings', + apiKey: 'your-anthropic-api-key', + inferenceConfig: { + maxTokens: 500, + temperature: 0.7, + topP: 0.9, + stopSequences: ['Human:', 'AI:'] + } +}); +``` + + +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='An assistant with custom inference settings', + api_key='your-anthropic-api-key', + inference_config={ + 'maxTokens': 500, + 'temperature': 0.7, + 'topP': 0.9, + 'stopSequences': ['Human:', 'AI:'] + } +)) +``` + + + +
+ +**5. With Simple System Prompt** - ```typescript - import { AnthropicAgent, AnthropicAgentOptions, ParticipantRole } from 'multi-agent-orchestrator'; - import { Retriever } from '../retrievers/retriever'; - - const options: AnthropicAgentOptions = { - name: 'My Advanced Anthropic Agent', - description: 'A versatile agent for complex NLP tasks', - apiKey: 'your-anthropic-api-key-here', - modelId: 'claude-3-opus-20240229', - streaming: true, - inferenceConfig: { - maxTokens: 1000, - temperature: 0.7, - topP: 0.9, - stopSequences: ['Human:', 'AI:'] - }, - retriever: new Retriever(), // Assuming you have a Retriever class implemented - toolConfig: { - tool: [ - { - name: "Weather_Tool", - description: "Get the current weather for a given location, based on its WGS84 coordinates.", - input_schema: { - type: "object", - properties: { - latitude: { - type: "string", - description: "Geographical WGS84 latitude of the location.", - }, - longitude: { - type: "string", - description: "Geographical WGS84 longitude of the location.", - }, - }, - required: ["latitude", "longitude"], +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'An assistant with custom prompt', + apiKey: 'your-anthropic-api-key', + customSystemPrompt: { + template: 'You are a helpful AI assistant focused on technical support.' + } +}); +``` + + +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='An assistant with custom prompt', + api_key='your-anthropic-api-key', + custom_system_prompt={ + 'template': 'You are a helpful AI assistant focused on technical support.' + } +)) +``` + + +
+ +**6. With System Prompt Variables** + + + +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'An assistant with variable prompt', + apiKey: 'your-anthropic-api-key', + customSystemPrompt: { + template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + variables: { + DOMAIN: 'customer support', + TONE: 'friendly and helpful' + } + } +}); +``` + + +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='An assistant with variable prompt', + api_key='your-anthropic-api-key', + custom_system_prompt={ + 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + 'variables': { + 'DOMAIN': 'customer support', + 'TONE': 'friendly and helpful' + } + } +)) +``` + + + +
+ +**7. With Custom Retriever** + + + +```typescript +const retriever = new CustomRetriever({ + // Retriever configuration +}); +const agent = new AnthropicAgent({ +  name: 'Anthropic Assistant', +  description: 'An assistant with retriever', +  apiKey: 'your-anthropic-api-key', +  retriever: retriever +}); +``` + + + +```python +retriever = CustomRetriever( + # Retriever configuration +) + +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='An assistant with retriever', + api_key='your-anthropic-api-key', + retriever=retriever +)) +``` + + + +
+ +**8. With Tool Configuration** + + + +```typescript +const agent = new AnthropicAgent({ + name: 'Anthropic Assistant', + description: 'An assistant with tool support', + apiKey: 'your-anthropic-api-key', + toolConfig: { + tool: [ + { + name: "Weather_Tool", + description: "Get current weather data", + input_schema: { + type: "object", + properties: { + location: { + type: "string", + description: "City name", } - } - ], - useToolHandler: (response, conversation) => { - // Process tool response - // Return processed response - return {role: ParticipantRole.USER, content: { - "type": "tool_result", - "tool_use_id": "tool_user_id", - "content": "Response from the tool" - }} + }, + required: ["location"] } } - }; - - const agent = new AnthropicAgent(options); - ``` + ], + useToolHandler: (response, conversation) => { + return { + role: ParticipantRole.USER, + content: { + "type": "tool_result", + "tool_use_id": "weather_tool", + "content": "Current weather data for the location" + } + } + } + } +}); +``` - ```python - - ``` +```python +agent = AnthropicAgent(AnthropicAgentOptions( + name='Anthropic Assistant', + description='An assistant with tool support', + api_key='your-anthropic-api-key', + tool_config={ + 'tool': [{ + 'name': 'Weather_Tool', + 'description': 'Get current weather data', + 'input_schema': { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'City name' + } + }, + 'required': ['location'] + } + }], + 'useToolHandler': lambda response, conversation: { + 'role': ParticipantRole.USER.value, + 'content': { + 'type': 'tool_result', + 'tool_use_id': 'weather_tool', + 'content': 'Current weather data for the location' + } + } + } +)) +``` +
+ +**9. Complete Example with All Options** + + + +```typescript +import { AnthropicAgent } from 'multi-agent-orchestrator'; + +const agent = new AnthropicAgent({ + // Required fields + name: 'Advanced Anthropic Assistant', + description: 'A fully configured AI assistant powered by Anthropic models', + apiKey: 'your-anthropic-api-key', + + // Optional fields + modelId: 'claude-3-opus-20240229', // Choose Anthropic model + streaming: true, // Enable streaming responses + retriever: customRetriever, // Custom retriever for additional context + + // Inference configuration + inferenceConfig: { + maxTokens: 500, // Maximum tokens to generate + temperature: 0.7, // Control randomness (0-1) + topP: 0.9, // Control diversity via nucleus sampling + stopSequences: ['Human:', 'AI:'] // Sequences that stop generation + }, + + // Tool configuration + toolConfig: { + tool: [{ + name: "Weather_Tool", + description: "Get the current weather for a given location", + input_schema: { + type: "object", + properties: { + latitude: { + type: "string", + description: "Geographical WGS84 latitude" + }, + longitude: { + type: "string", + description: "Geographical WGS84 longitude" + } + }, + required: ["latitude", "longitude"] + } + }], + useToolHandler: (response, conversation) => ({ + role: ParticipantRole.USER, + content: { + type: "tool_result", + tool_use_id: "tool_user_id", + content: "Response from the tool" + } + }) + }, + + // Custom system prompt with variables + customSystemPrompt: { + template: `You are an AI assistant specialized in {{DOMAIN}}. 
+ Your core competencies: + {{SKILLS}} + + Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}`, + variables: { + DOMAIN: 'scientific research', + SKILLS: [ + '- Advanced data analysis', + '- Statistical methodology', + '- Research design', + '- Technical writing' + ], + TONE: 'professional and academic', + FOCUS: 'accuracy and clarity', + PRIORITY: 'evidence-based insights' + } + } +}); +``` + + + + +```python + +from multi_agent_orchestrator import AnthropicAgent, AnthropicAgentOptions +from multi_agent_orchestrator.types import ParticipantRole +agent = AnthropicAgent(AnthropicAgentOptions( +# Required fields +name='Advanced Anthropic Assistant', +description='A fully configured AI assistant powered by Anthropic models', +api_key='your-anthropic-api-key', +# Optional fields +model_id='claude-3-opus-20240229', # Choose Anthropic model +streaming=True, # Enable streaming responses +retriever=custom_retriever, # Custom retriever for additional context + +# Inference configuration +inference_config={ + 'maxTokens': 500, # Maximum tokens to generate + 'temperature': 0.7, # Control randomness (0-1) + 'topP': 0.9, # Control diversity via nucleus sampling + 'stopSequences': ['Human:', 'AI:'] # Sequences that stop generation +}, + +# Tool configuration +tool_config={ + 'tool': [{ + 'name': 'Weather_Tool', + 'description': 'Get the current weather for a given location', + 'input_schema': { + 'type': 'object', + 'properties': { + 'latitude': { + 'type': 'string', + 'description': 'Geographical WGS84 latitude' + }, + 'longitude': { + 'type': 'string', + 'description': 'Geographical WGS84 longitude' + } + }, + 'required': ['latitude', 'longitude'] + } + }], + 'useToolHandler': lambda response, conversation: { + 'role': ParticipantRole.USER.value, + 'content': { + 'type': 'tool_result', + 'tool_use_id': 'tool_user_id', + 'content': 'Response from the tool' + } + } +}, + +# Custom system prompt with variables 
+custom_system_prompt={ + 'template': """You are an AI assistant specialized in {{DOMAIN}}. + Your core competencies: + {{SKILLS}} + + Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}""", + 'variables': { + 'DOMAIN': 'scientific research', + 'SKILLS': [ + '- Advanced data analysis', + '- Statistical methodology', + '- Research design', + '- Technical writing' + ], + 'TONE': 'professional and academic', + 'FOCUS': 'accuracy and clarity', + 'PRIORITY': 'evidence-based insights' + } +} +)) +``` + + ### Option Explanations diff --git a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx index 622226cb..e2bd6da3 100644 --- a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx +++ b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx @@ -23,340 +23,421 @@ This agent can handle a wide range of processing tasks, making it suitable for d By default, the **Bedrock LLM Agent** uses the `anthropic.claude-3-haiku-20240307-v1:0` model. -### Basic Example -To create a new **Bedrock LLM Agent** with only the required parameters, use the following code: +**1. Minimal Configuration** import { Tabs, TabItem } from '@astrojs/starlight/components'; - ```typescript - import { BedrockLLMAgent } from 'multi-agent-orchestrator'; - - const agent = new BedrockLLMAgent({ - name: 'Tech Agent', - description: 'Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.' - }); - ``` - +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'A versatile AI assistant' +}); +``` +
- ```python - from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions - - agent = BedrockLLMAgent(BedrockLLMAgentOptions( - name='Tech Agent', - description='Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.' - )) - ``` +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='A versatile AI assistant' +)) +```
-### Advanced Example +
-For more complex use cases, you can create a **Bedrock LLM Agent** with all available options. All parameters except `name` and `description` are optional: +**2. Using Custom Client** - ```typescript - import { BedrockLLMAgent, BedrockLLMAgentOptions } from 'multi-agent-orchestrator'; - import { Retriever } from '../retrievers/retriever'; - - const options: BedrockLLMAgentOptions = { - name: 'My Advanced Bedrock Agent', - description: 'A versatile agent for complex NLP tasks', - modelId: 'anthropic.claude-3-sonnet-20240229-v1:0', - region: 'us-west-2', - streaming: true, - inferenceConfig: { - maxTokens: 1000, - temperature: 0.7, - topP: 0.9, - stopSequences: ['Human:', 'AI:'] - }, - guardrailConfig: { - guardrailIdentifier: 'my-guardrail', - guardrailVersion: '1.0' - }, - retriever: new Retriever(), // Assuming you have a Retriever class implemented - toolConfig: { - tool: [ - { - type: 'function', - function: { - name: 'get_current_weather', - description: 'Get the current weather in a given location', - parameters: { - type: 'object', - properties: { - location: { - type: 'string', - description: 'The city and state, e.g. San Francisco, CA' - }, - unit: { type: 'string', enum: ['celsius', 'fahrenheit'] } - }, - required: ['location'] - } - } - } - ] - }, - customSystemPrompt: { - template: `You are a specialized AI assistant with multiple capabilities. 
- -Your expertise areas: -{{AREAS}} - -When responding: -{{GUIDELINES}} - -Always maintain: {{TONE}}`, - variables: { - AREAS: [ - '- Weather information analysis', - '- Data interpretation', - '- Natural language processing' - ], - GUIDELINES: [ - '- Provide detailed explanations', - '- Use relevant examples', - '- Consider user context' - ], - TONE: 'professional and helpful demeanor' - } - } - }; - - const agent = new BedrockLLMAgent(options); - ``` - +```typescript +import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime"; +const customClient = new BedrockRuntimeClient({ region: 'us-east-1' }); +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'A versatile AI assistant', + client: customClient +}); +``` +
- ```python - from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions - from multi_agent_orchestrator.retrievers import Retriever - - options = BedrockLLMAgentOptions( - name='My Advanced Bedrock Agent', - description='A versatile agent for complex NLP tasks', - model_id='anthropic.claude-3-sonnet-20240229-v1:0', - region='us-west-2', - streaming=True, - inference_config={ - 'maxTokens': 1000, - 'temperature': 0.7, - 'topP': 0.9, - 'stopSequences': ['Human:', 'AI:'] - }, - guardrail_config={ - 'guardrailIdentifier': 'my-guardrail', - 'guardrailVersion': '1.0' - }, - retriever=Retriever(), # Assuming you have a Retriever class implemented - tool_config={ - 'tool': [ - { - 'toolSpec': { - 'name': 'get_current_weather', - 'description': 'Get the current weather in a given location', - 'inputSchema': { - 'json': { - 'type': 'object', - 'properties': { - 'location': { - 'type': 'string', - 'description': 'The city and state, e.g. San Francisco, CA' - }, - 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']} - }, - 'required': ['location'] - } - } - } - } - ], - 'useToolHandler': lambda response, conversation: (False, response) # Process tool response - }, - custom_system_prompt={ - "template": """You are a specialized AI assistant with multiple capabilities. 
- -Your expertise areas: -{{AREAS}} - -When responding: -{{GUIDELINES}} - -Always maintain: {{TONE}}""", - "variables": { - "AREAS": [ - "- Weather information analysis", - "- Data interpretation", - "- Natural language processing" - ], - "GUIDELINES": [ - "- Provide detailed explanations", - "- Use relevant examples", - "- Consider user context" - ], - "TONE": "professional and helpful demeanor" - } - } - ) - - agent = BedrockLLMAgent(options) - ``` - +```python +import boto3 +custom_client = boto3.client('bedrock-runtime', region_name='us-east-1') +agent = BedrockLLMAgent(BedrockLLMAgentOptions( +name='Bedrock Assistant', +description='A versatile AI assistant', +client=custom_client +)) +``` +
-### Option Explanations +
-- `name` and `description`: Required fields to identify and describe the agent's purpose -- `modelId`/`model_id`: Specifies the LLM model to use (e.g., Claude 3 Sonnet) -- `region`: AWS region for the Bedrock service -- `streaming`: Enables streaming responses for real-time output -- `inferenceConfig`/`inference_config`: Fine-tunes the model's output characteristics -- `guardrailConfig`/`guardrail_config`: Applies predefined guardrails to the model's responses -- `retriever`: Integrates a retrieval system for enhanced context -- `toolConfig`/`tool_config`: Defines tools the agent can use and how to handle their responses -- `customSystemPrompt`/`custom_system_prompt`: Defines the agent's system prompt and behavior, with optional variables for dynamic content -- `client`: Optional custom Bedrock client for specialized configurations +**3. Custom Model and Streaming** -## Setting Custom Prompts + + +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'A streaming-enabled assistant', + modelId: 'anthropic.claude-3-sonnet-20240229-v1:0', + streaming: true +}); +``` + + +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='A streaming-enabled assistant', + model_id='anthropic.claude-3-sonnet-20240229-v1:0', + streaming=True +)) +``` + + -The BedrockLLMAgent provides multiple ways to set custom prompts. You can set them either during initialization or after the agent is created, and you can use prompts with or without variables. +
-### 1. Setting Custom Prompt During Initialization (Without Variables) +**4. With Inference Configuration** - ```typescript - import { BedrockLLMAgent, BedrockLLMAgentOptions } from 'multi-agent-orchestrator'; +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'An assistant with custom inference settings', + inferenceConfig: { + maxTokens: 500, + temperature: 0.7, + topP: 0.9, + stopSequences: ['Human:', 'AI:'] + } +}); +``` + + +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='An assistant with custom inference settings', + inference_config={ + 'maxTokens': 500, + 'temperature': 0.7, + 'topP': 0.9, + 'stopSequences': ['Human:', 'AI:'] + } +)) +``` + + - const agent = new BedrockLLMAgent({ - name: 'Tech Expert', - description: 'Technology and software development expert', - modelId: 'anthropic.claude-3-sonnet-20240229-v1:0', - customSystemPrompt: { - template: `You are a technology expert specializing in software development. - -Core Competencies: -1. Programming Languages -2. Software Architecture -3. Best Practices -4. Performance Optimization +
-When providing technical advice: -- Start with fundamentals -- Include code examples when relevant -- Explain complex concepts simply -- Consider security implications` - } - }); - ``` +**5. With Simple System Prompt** + + + +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'An assistant with custom prompt', + customSystemPrompt: { + template: 'You are a helpful AI assistant focused on technical support.' + } +}); +``` - ```python - from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='An assistant with custom prompt', + custom_system_prompt={ + 'template': 'You are a helpful AI assistant focused on technical support.' + } +)) +``` + + - agent = BedrockLLMAgent(BedrockLLMAgentOptions( - name='Tech Expert', - description='Technology and software development expert', - model_id='anthropic.claude-3-sonnet-20240229-v1:0', - custom_system_prompt={ - "template": """You are a technology expert specializing in software development. - -Core Competencies: -1. Programming Languages -2. Software Architecture -3. Best Practices -4. Performance Optimization +
-When providing technical advice: -- Start with fundamentals -- Include code examples when relevant -- Explain complex concepts simply -- Consider security implications""" +**6. With System Prompt Variables** + + + +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'An assistant with variable prompt', + customSystemPrompt: { + template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + variables: { + DOMAIN: 'technical support', + TONE: 'friendly and helpful' + } + } +}); +``` + + +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='An assistant with variable prompt', + custom_system_prompt={ + 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.', + 'variables': { + 'DOMAIN': 'technical support', + 'TONE': 'friendly and helpful' } - )) - ``` + } +)) +``` -### 2. Setting Custom Prompt During Initialization (With Variables) +
+ +**7. With Custom Retriever** + + + +```typescript +const retriever = new CustomRetriever({ + // Retriever configuration +}); +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'An assistant with retriever', + retriever: retriever +}); +``` + + +```python +retriever = CustomRetriever( + # Retriever configuration +) +agent = BedrockLLMAgent(BedrockLLMAgentOptions( +    name='Bedrock Assistant', +    description='An assistant with retriever', +    retriever=retriever +)) +``` + + + +
+ +**8. With Tool Configuration** + + + +```typescript +const agent = new BedrockLLMAgent({ + name: 'Bedrock Assistant', + description: 'An assistant with tool support', + toolConfig: { + tool: [ + { + name: "Weather_Tool", + description: "Get current weather data", + input_schema: { + type: "object", + properties: { + location: { + type: "string", + description: "City name", + } + }, + required: ["location"] } } - }); - ``` + ] + } +}); +``` + + +```python +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Bedrock Assistant', + description='An assistant with tool support', + tool_config={ + 'tool': [{ + 'name': 'Weather_Tool', + 'description': 'Get current weather data', + 'input_schema': { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'City name' + } + }, + 'required': ['location'] + } + }] + } +)) +``` + + +
+ +**9. Complete Example with All Options** + + + +```typescript +import { BedrockLLMAgent } from "multi-agent-orchestrator"; +const agent = new BedrockLLMAgent({ + // Required fields + name: "Advanced Bedrock Assistant", + description: "A fully configured AI assistant powered by Bedrock models", + // Optional fields + modelId: "anthropic.claude-3-sonnet-20240229-v1:0", + region: "us-west-2", + streaming: true, + retriever: customRetriever, // Custom retriever for additional context + inferenceConfig: { + maxTokens: 500, + temperature: 0.7, + topP: 0.9, + stopSequences: ["Human:", "AI:"], + }, + guardrailConfig: { + guardrailIdentifier: "my-guardrail", + guardrailVersion: "1.0", + }, + toolConfig: { + tool: [ + { + name: "Weather_Tool", + description: "Get current weather data", + input_schema: { + type: "object", + properties: { + location: { + type: "string", + description: "City name", + }, + }, + required: ["location"], + }, + }, + ], + }, + customSystemPrompt: { + template: `You are an AI assistant specialized in {{DOMAIN}}. +Your core competencies: +{{SKILLS}} +Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}`, + variables: { + DOMAIN: "scientific research", + SKILLS: [ + "- Advanced data analysis", + "- Statistical methodology", + "- Research design", + "- Technical writing", + ], + TONE: "professional and academic", + FOCUS: "accuracy and clarity", + PRIORITY: "evidence-based insights", + }, + }, +}); + +``` + - ```python - agent = BedrockLLMAgent(BedrockLLMAgentOptions( - name='Creative Writer', - description='Creative writing and storytelling expert', - model_id='anthropic.claude-3-sonnet-20240229-v1:0', - custom_system_prompt={ - "template": """You are a {{ROLE}} specializing in {{SPECIALTY}}. 
- -Key strengths: -{{STRENGTHS}} - -When crafting content: -{{GUIDELINES}} - -Style: {{STYLE}}""", - "variables": { - "ROLE": "creative writing expert", - "SPECIALTY": "narrative development", - "STRENGTHS": [ - "- Character development", - "- Plot structuring", - "- Dialogue creation" - ], - "GUIDELINES": [ - "- Start with a hook", - "- Show, don't tell", - "- Create vivid scenes" - ], - "STYLE": "engaging and imaginative" +```python +from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions + +agent = BedrockLLMAgent(BedrockLLMAgentOptions( + # Required fields + name='Advanced Bedrock Assistant', + description='A fully configured AI assistant powered by Bedrock models', + + # Optional fields + model_id='anthropic.claude-3-sonnet-20240229-v1:0', + region='us-west-2', + streaming=True, + retriever=custom_retriever, # Custom retriever for additional context + + inference_config={ + 'maxTokens': 500, + 'temperature': 0.7, + 'topP': 0.9, + 'stopSequences': ['Human:', 'AI:'] + }, + + guardrail_config={ + 'guardrailIdentifier': 'my-guardrail', + 'guardrailVersion': '1.0' + }, + + tool_config={ + 'tool': [{ + 'name': 'Weather_Tool', + 'description': 'Get current weather data', + 'input_schema': { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'City name' + } + }, + 'required': ['location'] } + }] + }, + + custom_system_prompt={ + 'template': """You are an AI assistant specialized in {{DOMAIN}}. + Your core competencies: + {{SKILLS}} + + Communication style: + - Maintain a {{TONE}} tone + - Focus on {{FOCUS}} + - Prioritize {{PRIORITY}}""", + 'variables': { + 'DOMAIN': 'scientific research', + 'SKILLS': [ + '- Advanced data analysis', + '- Statistical methodology', + '- Research design', + '- Technical writing' + ], + 'TONE': 'professional and academic', + 'FOCUS': 'accuracy and clarity', + 'PRIORITY': 'evidence-based insights' } - )) - ``` - + } +)) +``` + -### 3. 
Setting Custom Prompt After Initialization (Without Variables) +
+ +The `BedrockLLMAgent` provides multiple ways to set custom prompts. You can set them either during initialization or after the agent is created, and you can use prompts with or without variables. + +**10. Setting Custom Prompt After Initialization (Without Variables)** @@ -405,7 +486,9 @@ When providing business advice: -### 4. Setting Custom Prompt After Initialization (With Variables) +
+ +**11. Setting Custom Prompt After Initialization (With Variables)** @@ -509,4 +592,18 @@ Choose the approach that best fits your needs: - Use initialization when the prompt is part of the agent's core configuration - Use post-initialization when prompts need to be changed dynamically - Use variables when parts of the prompt need to be modified frequently -- Use direct templates when the prompt is static \ No newline at end of file +- Use direct templates when the prompt is static + +### Option Explanations + +- `name` and `description`: Required fields to identify and describe the agent's purpose +- `modelId`/`model_id`: Specifies the LLM model to use (e.g., Claude 3 Sonnet) +- `region`: AWS region for the Bedrock service +- `streaming`: Enables streaming responses for real-time output +- `inferenceConfig`/`inference_config`: Fine-tunes the model's output characteristics +- `guardrailConfig`/`guardrail_config`: Applies predefined guardrails to the model's responses +- `retriever`: Integrates a retrieval system for enhanced context +- `toolConfig`/`tool_config`: Defines tools the agent can use and how to handle their responses +- `customSystemPrompt`/`custom_system_prompt`: Defines the agent's system prompt and behavior, with optional variables for dynamic content +- `client`: Optional custom Bedrock client for specialized configurations +