diff --git a/README.md b/README.md
index 1e79d54b..7dbd56b4 100644
--- a/README.md
+++ b/README.md
@@ -247,7 +247,7 @@ if (response.streaming == true) {
# Optional: Set up a virtual environment
python -m venv venv
source venv/bin/activate # On Windows use `venv\Scripts\activate`
-pip install multi-agent-orchestrator
+pip install "multi-agent-orchestrator[aws]"
```
#### Default Usage
diff --git a/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx b/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx
index 2c496f01..736d8fa2 100644
--- a/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx
+++ b/docs/src/content/docs/agents/built-in/amazon-bedrock-agent.mdx
@@ -7,47 +7,189 @@ The `AmazonBedrockAgent` is a specialized agent class in the Multi-Agent Orchest
## Creating an AmazonBedrockAgent
-To create a new `AmazonBedrockAgent` with only the required parameters, use the following code:
+Here are various examples showing different ways to create and configure an AmazonBedrockAgent:
+
+### Basic Examples
+
+**1. Minimal Configuration**
import { Tabs, TabItem } from '@astrojs/starlight/components';
- ```typescript
- import { AmazonBedrockAgent } from 'multi-agent-orchestrator';
-
- const agent = new AmazonBedrockAgent({
- name: 'My Bank Agent',
- description: 'You are a helpful and friendly agent that answers questions about loan-related inquiries',
- agentId: 'your-agent-id',
- agentAliasId: 'your-agent-alias-id'
- });
- ```
+```typescript
+const agent = new AmazonBedrockAgent({
+ name: 'My Bank Agent',
+ description: 'A helpful and friendly agent that answers questions about loan-related inquiries',
+ agentId: 'your-agent-id',
+ agentAliasId: 'your-agent-alias-id'
+});
+```
+
+
+```python
+agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
+ name='My Bank Agent',
+ description='A helpful and friendly agent that answers questions about loan-related inquiries',
+ agent_id='your-agent-id',
+ agent_alias_id='your-agent-alias-id'
+))
+```
+
+
+
+
+**2. Using Custom Client**
+
+
+
+```typescript
+import { BedrockAgentRuntimeClient } from "@aws-sdk/client-bedrock-agent-runtime";
+const customClient = new BedrockAgentRuntimeClient({ region: 'us-east-1' });
+const agent = new AmazonBedrockAgent({
+ name: 'My Bank Agent',
+ description: 'A helpful and friendly agent for banking inquiries',
+ agentId: 'your-agent-id',
+ agentAliasId: 'your-agent-alias-id',
+ client: customClient
+});
+```
+
- ```python
- from multi_agent_orchestrator.agents import AmazonBedrockAgent, AmazonBedrockAgentOptions
-
- agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
- name='My Bank Agent',
- description='You are a helpful and friendly agent that answers questions about loan-related inquiries',
- agent_id='your-agent-id',
- agent_alias_id='your-agent-alias-id'
- ))
- ```
+```python
+import boto3
+custom_client = boto3.client('bedrock-agent-runtime', region_name='us-east-1')
+agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
+    name='My Bank Agent',
+    description='A helpful and friendly agent for banking inquiries',
+    agent_id='your-agent-id',
+    agent_alias_id='your-agent-alias-id',
+    client=custom_client
+))
+```
+
+
+
+
+
+**3. With Tracing Enabled**
+
+
+
+```typescript
+const agent = new AmazonBedrockAgent({
+ name: 'My Bank Agent',
+ description: 'A banking agent with tracing enabled',
+ agentId: 'your-agent-id',
+ agentAliasId: 'your-agent-alias-id',
+ enableTrace: true
+});
+```
+
+
+```python
+agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
+ name='My Bank Agent',
+ description='A banking agent with tracing enabled',
+ agent_id='your-agent-id',
+ agent_alias_id='your-agent-alias-id',
+ enable_trace=True
+))
+```
-In this basic example, we provide the four required parameters: `name`, `description`, `agent_id`, and `agent_alias_id`.
+
+
+**4. With Streaming Enabled**
+
+
+
+```typescript
+const agent = new AmazonBedrockAgent({
+ name: 'My Bank Agent',
+ description: 'A streaming-enabled banking agent',
+ agentId: 'your-agent-id',
+ agentAliasId: 'your-agent-alias-id',
+ streaming: true
+});
+```
+
+
+```python
+agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
+ name='My Bank Agent',
+ description='A streaming-enabled banking agent',
+ agent_id='your-agent-id',
+ agent_alias_id='your-agent-alias-id',
+ streaming=True
+))
+```
+
+
+
+
+
+**5. Complete Example with All Options**
+
+
+
+```typescript
+import { AmazonBedrockAgent } from "multi-agent-orchestrator";
+import { BedrockAgentRuntimeClient } from "@aws-sdk/client-bedrock-agent-runtime";
+const agent = new AmazonBedrockAgent({
+ // Required fields
+ name: "Advanced Bank Agent",
+ description: "A fully configured banking agent with all features enabled",
+ agentId: "your-agent-id",
+ agentAliasId: "your-agent-alias-id",
+ // Optional fields
+ region: "us-west-2",
+ streaming: true,
+ enableTrace: true,
+ client: new BedrockAgentRuntimeClient({ region: "us-west-2" }),
+});
+
+```
+
+
+```python
+import boto3
+from multi_agent_orchestrator.agents import AmazonBedrockAgent, AmazonBedrockAgentOptions
+
+custom_client = boto3.client('bedrock-agent-runtime', region_name='us-west-2')
+
+agent = AmazonBedrockAgent(AmazonBedrockAgentOptions(
+ # Required fields
+ name='Advanced Bank Agent',
+ description='A fully configured banking agent with all features enabled',
+ agent_id='your-agent-id',
+ agent_alias_id='your-agent-alias-id',
+
+ # Optional fields
+ region='us-west-2',
+ streaming=True,
+ enable_trace=True,
+ client=custom_client
+))
+```
+
+
+
### Option Explanations
- `name`: (Required) Identifies the agent within your system.
- `description`: (Required) Describes the agent's purpose or capabilities.
-- `agent_id`: (Required) The ID of the Amazon Bedrock agent you want to use.
-- `agent_alias_id`: (Required) The alias ID of the Amazon Bedrock agent.
-- `enableTrace`: If you set enableTrace to `true` in the request, you can trace the agent’s steps and reasoning process that led it to the response.
-- `streaming`: Specifies whether to enable streaming for the final response. This is set to false by default `False`
+- `agentId/agent_id`: (Required) The ID of the Amazon Bedrock agent you want to use.
+- `agentAliasId/agent_alias_id`: (Required) The alias ID of the Amazon Bedrock agent.
+- `region`: (Optional) AWS region for the Bedrock service. If not provided, uses the default AWS region.
+- `client`: (Optional) Custom BedrockAgentRuntimeClient for specialized configurations.
+- `enableTrace/enable_trace`: (Optional) When set to true, enables tracing of the agent's steps and reasoning process.
+- `streaming`: (Optional) Enables streaming for the final response. Defaults to false.
+
+
## Adding the Agent to the Orchestrator
diff --git a/docs/src/content/docs/agents/built-in/anthropic-agent.mdx b/docs/src/content/docs/agents/built-in/anthropic-agent.mdx
index 89d598c3..80d49f82 100644
--- a/docs/src/content/docs/agents/built-in/anthropic-agent.mdx
+++ b/docs/src/content/docs/agents/built-in/anthropic-agent.mdx
@@ -20,103 +20,481 @@ This agent can handle a wide range of processing tasks, making it suitable for d
## Creating a AnthropicAgent
-By default, the **AnthropicAgent** uses the `claude-3-5-sonnet-20240620 model`.
+Here are various examples showing different ways to create and configure an AnthropicAgent:
-### Basic Example
+### Basic Examples
-To create a new **Anthropic Agent** with only the required parameters, use the following code:
+**1. Minimal Configuration**
import { Tabs, TabItem } from '@astrojs/starlight/components';
+
- ```typescript
- import { AnthropicAgent } from 'multi-agent-orchestrator';
-
- const agent = new AnthropicAgent({
- name: 'Tech Agent',
- description: 'Specializes in technology areas including software development, \
- hardware, AI, cybersecurity, blockchain, cloud computing, \
- emerging tech innovations,and pricing/costs related to \
- technology products and services.',
- apiKey: 'your-anthropic-api-key-here'
- });
- ```
+
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'A versatile AI assistant',
+ apiKey: 'your-anthropic-api-key'
+});
+```
- ```python
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='A versatile AI assistant',
+ api_key='your-anthropic-api-key'
+))
+```
+
+
- ```
+
+
+**2. Using Custom Client**
+
+
+
+```typescript
+import { Anthropic } from '@anthropic-ai/sdk';
+const customClient = new Anthropic({ apiKey: 'your-anthropic-api-key' });
+const agent = new AnthropicAgent({
+  name: 'Anthropic Assistant',
+  description: 'A versatile AI assistant',
+  client: customClient
+});
+```
+
+
+```python
+from anthropic import Anthropic
+
+custom_client = Anthropic(api_key='your-anthropic-api-key')
+
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='A versatile AI assistant',
+ client=custom_client
+))
+```
+
+
+
+
+
+
+**3. Custom Model and Streaming**
+
+
+
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'A streaming-enabled assistant',
+ apiKey: 'your-anthropic-api-key',
+ modelId: 'claude-3-opus-20240229',
+ streaming: true
+});
+```
+
+
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='A streaming-enabled assistant',
+ api_key='your-anthropic-api-key',
+ model_id='claude-3-opus-20240229',
+ streaming=True
+))
+```
-In this basic example, only the `name`, `description` and `apiKey` are provided, which are the only required parameters for creating a AnthropicAgent.
+
-### Advanced Example
+**4. With Inference Configuration**
-For more complex use cases, you can create a **Anthropic Agent** with all available options. All parameters except `name`, `description` and `apiKey` or `client` are optional:
+
+
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'An assistant with custom inference settings',
+ apiKey: 'your-anthropic-api-key',
+ inferenceConfig: {
+ maxTokens: 500,
+ temperature: 0.7,
+ topP: 0.9,
+ stopSequences: ['Human:', 'AI:']
+ }
+});
+```
+
+
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='An assistant with custom inference settings',
+ api_key='your-anthropic-api-key',
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.7,
+ 'topP': 0.9,
+ 'stopSequences': ['Human:', 'AI:']
+ }
+))
+```
+
+
+
+
+
+**5. With Simple System Prompt**
- ```typescript
- import { AnthropicAgent, AnthropicAgentOptions, ParticipantRole } from 'multi-agent-orchestrator';
- import { Retriever } from '../retrievers/retriever';
-
- const options: AnthropicAgentOptions = {
- name: 'My Advanced Anthropic Agent',
- description: 'A versatile agent for complex NLP tasks',
- apiKey: 'your-anthropic-api-key-here',
- modelId: 'claude-3-opus-20240229',
- streaming: true,
- inferenceConfig: {
- maxTokens: 1000,
- temperature: 0.7,
- topP: 0.9,
- stopSequences: ['Human:', 'AI:']
- },
- retriever: new Retriever(), // Assuming you have a Retriever class implemented
- toolConfig: {
- tool: [
- {
- name: "Weather_Tool",
- description: "Get the current weather for a given location, based on its WGS84 coordinates.",
- input_schema: {
- type: "object",
- properties: {
- latitude: {
- type: "string",
- description: "Geographical WGS84 latitude of the location.",
- },
- longitude: {
- type: "string",
- description: "Geographical WGS84 longitude of the location.",
- },
- },
- required: ["latitude", "longitude"],
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'An assistant with custom prompt',
+ apiKey: 'your-anthropic-api-key',
+ customSystemPrompt: {
+ template: 'You are a helpful AI assistant focused on technical support.'
+ }
+});
+```
+
+
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='An assistant with custom prompt',
+ api_key='your-anthropic-api-key',
+ custom_system_prompt={
+ 'template': 'You are a helpful AI assistant focused on technical support.'
+ }
+))
+```
+
+
+
+
+**6. With System Prompt Variables**
+
+
+
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'An assistant with variable prompt',
+ apiKey: 'your-anthropic-api-key',
+ customSystemPrompt: {
+ template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ variables: {
+ DOMAIN: 'customer support',
+ TONE: 'friendly and helpful'
+ }
+ }
+});
+```
+
+
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='An assistant with variable prompt',
+ api_key='your-anthropic-api-key',
+ custom_system_prompt={
+ 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ 'variables': {
+ 'DOMAIN': 'customer support',
+ 'TONE': 'friendly and helpful'
+ }
+ }
+))
+```
+
+
+
+
+
+**7. With Custom Retriever**
+
+
+
+```typescript
+const retriever = new CustomRetriever({
+ // Retriever configuration
+});
+const agent = new AnthropicAgent({
+  name: 'Anthropic Assistant',
+  description: 'An assistant with retriever',
+  apiKey: 'your-anthropic-api-key',
+  retriever: retriever
+});
+```
+
+
+
+```python
+retriever = CustomRetriever(
+ # Retriever configuration
+)
+
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='An assistant with retriever',
+ api_key='your-anthropic-api-key',
+ retriever=retriever
+))
+```
+
+
+
+
+
+**8. With Tool Configuration**
+
+
+
+```typescript
+const agent = new AnthropicAgent({
+ name: 'Anthropic Assistant',
+ description: 'An assistant with tool support',
+ apiKey: 'your-anthropic-api-key',
+ toolConfig: {
+ tool: [
+ {
+ name: "Weather_Tool",
+ description: "Get current weather data",
+ input_schema: {
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "City name",
}
- }
- ],
- useToolHandler: (response, conversation) => {
- // Process tool response
- // Return processed response
- return {role: ParticipantRole.USER, content: {
- "type": "tool_result",
- "tool_use_id": "tool_user_id",
- "content": "Response from the tool"
- }}
+ },
+ required: ["location"]
}
}
- };
-
- const agent = new AnthropicAgent(options);
- ```
+ ],
+ useToolHandler: (response, conversation) => {
+ return {
+ role: ParticipantRole.USER,
+ content: {
+ "type": "tool_result",
+ "tool_use_id": "weather_tool",
+ "content": "Current weather data for the location"
+ }
+ }
+ }
+ }
+});
+```
- ```python
-
- ```
+```python
+agent = AnthropicAgent(AnthropicAgentOptions(
+ name='Anthropic Assistant',
+ description='An assistant with tool support',
+ api_key='your-anthropic-api-key',
+ tool_config={
+ 'tool': [{
+ 'name': 'Weather_Tool',
+ 'description': 'Get current weather data',
+ 'input_schema': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'description': 'City name'
+ }
+ },
+ 'required': ['location']
+ }
+ }],
+ 'useToolHandler': lambda response, conversation: {
+ 'role': ParticipantRole.USER.value,
+ 'content': {
+ 'type': 'tool_result',
+ 'tool_use_id': 'weather_tool',
+ 'content': 'Current weather data for the location'
+ }
+ }
+ }
+))
+```
+
+
+**9. Complete Example with All Options**
+
+
+
+```typescript
+import { AnthropicAgent } from 'multi-agent-orchestrator';
+
+const agent = new AnthropicAgent({
+ // Required fields
+ name: 'Advanced Anthropic Assistant',
+ description: 'A fully configured AI assistant powered by Anthropic models',
+ apiKey: 'your-anthropic-api-key',
+
+ // Optional fields
+ modelId: 'claude-3-opus-20240229', // Choose Anthropic model
+ streaming: true, // Enable streaming responses
+ retriever: customRetriever, // Custom retriever for additional context
+
+ // Inference configuration
+ inferenceConfig: {
+ maxTokens: 500, // Maximum tokens to generate
+ temperature: 0.7, // Control randomness (0-1)
+ topP: 0.9, // Control diversity via nucleus sampling
+ stopSequences: ['Human:', 'AI:'] // Sequences that stop generation
+ },
+
+ // Tool configuration
+ toolConfig: {
+ tool: [{
+ name: "Weather_Tool",
+ description: "Get the current weather for a given location",
+ input_schema: {
+ type: "object",
+ properties: {
+ latitude: {
+ type: "string",
+ description: "Geographical WGS84 latitude"
+ },
+ longitude: {
+ type: "string",
+ description: "Geographical WGS84 longitude"
+ }
+ },
+ required: ["latitude", "longitude"]
+ }
+ }],
+ useToolHandler: (response, conversation) => ({
+ role: ParticipantRole.USER,
+ content: {
+ type: "tool_result",
+ tool_use_id: "tool_user_id",
+ content: "Response from the tool"
+ }
+ })
+ },
+
+ // Custom system prompt with variables
+ customSystemPrompt: {
+ template: `You are an AI assistant specialized in {{DOMAIN}}.
+ Your core competencies:
+ {{SKILLS}}
+
+ Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}`,
+ variables: {
+ DOMAIN: 'scientific research',
+ SKILLS: [
+ '- Advanced data analysis',
+ '- Statistical methodology',
+ '- Research design',
+ '- Technical writing'
+ ],
+ TONE: 'professional and academic',
+ FOCUS: 'accuracy and clarity',
+ PRIORITY: 'evidence-based insights'
+ }
+ }
+});
+```
+
+
+
+
+```python
+
+from multi_agent_orchestrator.agents import AnthropicAgent, AnthropicAgentOptions
+from multi_agent_orchestrator.types import ParticipantRole
+agent = AnthropicAgent(AnthropicAgentOptions(
+# Required fields
+name='Advanced Anthropic Assistant',
+description='A fully configured AI assistant powered by Anthropic models',
+api_key='your-anthropic-api-key',
+# Optional fields
+model_id='claude-3-opus-20240229', # Choose Anthropic model
+streaming=True, # Enable streaming responses
+retriever=custom_retriever, # Custom retriever for additional context
+
+# Inference configuration
+inference_config={
+ 'maxTokens': 500, # Maximum tokens to generate
+ 'temperature': 0.7, # Control randomness (0-1)
+ 'topP': 0.9, # Control diversity via nucleus sampling
+ 'stopSequences': ['Human:', 'AI:'] # Sequences that stop generation
+},
+
+# Tool configuration
+tool_config={
+ 'tool': [{
+ 'name': 'Weather_Tool',
+ 'description': 'Get the current weather for a given location',
+ 'input_schema': {
+ 'type': 'object',
+ 'properties': {
+ 'latitude': {
+ 'type': 'string',
+ 'description': 'Geographical WGS84 latitude'
+ },
+ 'longitude': {
+ 'type': 'string',
+ 'description': 'Geographical WGS84 longitude'
+ }
+ },
+ 'required': ['latitude', 'longitude']
+ }
+ }],
+ 'useToolHandler': lambda response, conversation: {
+ 'role': ParticipantRole.USER.value,
+ 'content': {
+ 'type': 'tool_result',
+ 'tool_use_id': 'tool_user_id',
+ 'content': 'Response from the tool'
+ }
+ }
+},
+
+# Custom system prompt with variables
+custom_system_prompt={
+ 'template': """You are an AI assistant specialized in {{DOMAIN}}.
+ Your core competencies:
+ {{SKILLS}}
+
+ Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}""",
+ 'variables': {
+ 'DOMAIN': 'scientific research',
+ 'SKILLS': [
+ '- Advanced data analysis',
+ '- Statistical methodology',
+ '- Research design',
+ '- Technical writing'
+ ],
+ 'TONE': 'professional and academic',
+ 'FOCUS': 'accuracy and clarity',
+ 'PRIORITY': 'evidence-based insights'
+ }
+}
+))
+```
+
+
### Option Explanations
diff --git a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx
index 622226cb..e2bd6da3 100644
--- a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx
+++ b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx
@@ -23,340 +23,421 @@ This agent can handle a wide range of processing tasks, making it suitable for d
By default, the **Bedrock LLM Agent** uses the `anthropic.claude-3-haiku-20240307-v1:0` model.
-### Basic Example
-To create a new **Bedrock LLM Agent** with only the required parameters, use the following code:
+**1. Minimal Configuration**
import { Tabs, TabItem } from '@astrojs/starlight/components';
- ```typescript
- import { BedrockLLMAgent } from 'multi-agent-orchestrator';
-
- const agent = new BedrockLLMAgent({
- name: 'Tech Agent',
- description: 'Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.'
- });
- ```
-
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'A versatile AI assistant'
+});
+```
+
- ```python
- from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions
-
- agent = BedrockLLMAgent(BedrockLLMAgentOptions(
- name='Tech Agent',
- description='Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.'
- ))
- ```
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='A versatile AI assistant'
+))
+```
-### Advanced Example
+
-For more complex use cases, you can create a **Bedrock LLM Agent** with all available options. All parameters except `name` and `description` are optional:
+**2. Using Custom Client**
- ```typescript
- import { BedrockLLMAgent, BedrockLLMAgentOptions } from 'multi-agent-orchestrator';
- import { Retriever } from '../retrievers/retriever';
-
- const options: BedrockLLMAgentOptions = {
- name: 'My Advanced Bedrock Agent',
- description: 'A versatile agent for complex NLP tasks',
- modelId: 'anthropic.claude-3-sonnet-20240229-v1:0',
- region: 'us-west-2',
- streaming: true,
- inferenceConfig: {
- maxTokens: 1000,
- temperature: 0.7,
- topP: 0.9,
- stopSequences: ['Human:', 'AI:']
- },
- guardrailConfig: {
- guardrailIdentifier: 'my-guardrail',
- guardrailVersion: '1.0'
- },
- retriever: new Retriever(), // Assuming you have a Retriever class implemented
- toolConfig: {
- tool: [
- {
- type: 'function',
- function: {
- name: 'get_current_weather',
- description: 'Get the current weather in a given location',
- parameters: {
- type: 'object',
- properties: {
- location: {
- type: 'string',
- description: 'The city and state, e.g. San Francisco, CA'
- },
- unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }
- },
- required: ['location']
- }
- }
- }
- ]
- },
- customSystemPrompt: {
- template: `You are a specialized AI assistant with multiple capabilities.
-
-Your expertise areas:
-{{AREAS}}
-
-When responding:
-{{GUIDELINES}}
-
-Always maintain: {{TONE}}`,
- variables: {
- AREAS: [
- '- Weather information analysis',
- '- Data interpretation',
- '- Natural language processing'
- ],
- GUIDELINES: [
- '- Provide detailed explanations',
- '- Use relevant examples',
- '- Consider user context'
- ],
- TONE: 'professional and helpful demeanor'
- }
- }
- };
-
- const agent = new BedrockLLMAgent(options);
- ```
-
+```typescript
+import { BedrockRuntimeClient } from "@aws-sdk/client-bedrock-runtime";
+const customClient = new BedrockRuntimeClient({ region: 'us-east-1' });
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'A versatile AI assistant',
+ client: customClient
+});
+```
+
- ```python
- from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions
- from multi_agent_orchestrator.retrievers import Retriever
-
- options = BedrockLLMAgentOptions(
- name='My Advanced Bedrock Agent',
- description='A versatile agent for complex NLP tasks',
- model_id='anthropic.claude-3-sonnet-20240229-v1:0',
- region='us-west-2',
- streaming=True,
- inference_config={
- 'maxTokens': 1000,
- 'temperature': 0.7,
- 'topP': 0.9,
- 'stopSequences': ['Human:', 'AI:']
- },
- guardrail_config={
- 'guardrailIdentifier': 'my-guardrail',
- 'guardrailVersion': '1.0'
- },
- retriever=Retriever(), # Assuming you have a Retriever class implemented
- tool_config={
- 'tool': [
- {
- 'toolSpec': {
- 'name': 'get_current_weather',
- 'description': 'Get the current weather in a given location',
- 'inputSchema': {
- 'json': {
- 'type': 'object',
- 'properties': {
- 'location': {
- 'type': 'string',
- 'description': 'The city and state, e.g. San Francisco, CA'
- },
- 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}
- },
- 'required': ['location']
- }
- }
- }
- }
- ],
- 'useToolHandler': lambda response, conversation: (False, response) # Process tool response
- },
- custom_system_prompt={
- "template": """You are a specialized AI assistant with multiple capabilities.
-
-Your expertise areas:
-{{AREAS}}
-
-When responding:
-{{GUIDELINES}}
-
-Always maintain: {{TONE}}""",
- "variables": {
- "AREAS": [
- "- Weather information analysis",
- "- Data interpretation",
- "- Natural language processing"
- ],
- "GUIDELINES": [
- "- Provide detailed explanations",
- "- Use relevant examples",
- "- Consider user context"
- ],
- "TONE": "professional and helpful demeanor"
- }
- }
- )
-
- agent = BedrockLLMAgent(options)
- ```
-
+```python
+import boto3
+custom_client = boto3.client('bedrock-runtime', region_name='us-east-1')
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+    name='Bedrock Assistant',
+    description='A versatile AI assistant',
+    client=custom_client
+))
+```
+
-### Option Explanations
+
-- `name` and `description`: Required fields to identify and describe the agent's purpose
-- `modelId`/`model_id`: Specifies the LLM model to use (e.g., Claude 3 Sonnet)
-- `region`: AWS region for the Bedrock service
-- `streaming`: Enables streaming responses for real-time output
-- `inferenceConfig`/`inference_config`: Fine-tunes the model's output characteristics
-- `guardrailConfig`/`guardrail_config`: Applies predefined guardrails to the model's responses
-- `retriever`: Integrates a retrieval system for enhanced context
-- `toolConfig`/`tool_config`: Defines tools the agent can use and how to handle their responses
-- `customSystemPrompt`/`custom_system_prompt`: Defines the agent's system prompt and behavior, with optional variables for dynamic content
-- `client`: Optional custom Bedrock client for specialized configurations
+**3. Custom Model and Streaming**
-## Setting Custom Prompts
+
+
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'A streaming-enabled assistant',
+ modelId: 'anthropic.claude-3-sonnet-20240229-v1:0',
+ streaming: true
+});
+```
+
+
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='A streaming-enabled assistant',
+ model_id='anthropic.claude-3-sonnet-20240229-v1:0',
+ streaming=True
+))
+```
+
+
-The BedrockLLMAgent provides multiple ways to set custom prompts. You can set them either during initialization or after the agent is created, and you can use prompts with or without variables.
+
-### 1. Setting Custom Prompt During Initialization (Without Variables)
+**4. With Inference Configuration**
- ```typescript
- import { BedrockLLMAgent, BedrockLLMAgentOptions } from 'multi-agent-orchestrator';
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'An assistant with custom inference settings',
+ inferenceConfig: {
+ maxTokens: 500,
+ temperature: 0.7,
+ topP: 0.9,
+ stopSequences: ['Human:', 'AI:']
+ }
+});
+```
+
+
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='An assistant with custom inference settings',
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.7,
+ 'topP': 0.9,
+ 'stopSequences': ['Human:', 'AI:']
+ }
+))
+```
+
+
- const agent = new BedrockLLMAgent({
- name: 'Tech Expert',
- description: 'Technology and software development expert',
- modelId: 'anthropic.claude-3-sonnet-20240229-v1:0',
- customSystemPrompt: {
- template: `You are a technology expert specializing in software development.
-
-Core Competencies:
-1. Programming Languages
-2. Software Architecture
-3. Best Practices
-4. Performance Optimization
+
-When providing technical advice:
-- Start with fundamentals
-- Include code examples when relevant
-- Explain complex concepts simply
-- Consider security implications`
- }
- });
- ```
+**5. With Simple System Prompt**
+
+
+
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'An assistant with custom prompt',
+ customSystemPrompt: {
+ template: 'You are a helpful AI assistant focused on technical support.'
+ }
+});
+```
- ```python
- from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='An assistant with custom prompt',
+ custom_system_prompt={
+ 'template': 'You are a helpful AI assistant focused on technical support.'
+ }
+))
+```
+
+
- agent = BedrockLLMAgent(BedrockLLMAgentOptions(
- name='Tech Expert',
- description='Technology and software development expert',
- model_id='anthropic.claude-3-sonnet-20240229-v1:0',
- custom_system_prompt={
- "template": """You are a technology expert specializing in software development.
-
-Core Competencies:
-1. Programming Languages
-2. Software Architecture
-3. Best Practices
-4. Performance Optimization
+
-When providing technical advice:
-- Start with fundamentals
-- Include code examples when relevant
-- Explain complex concepts simply
-- Consider security implications"""
+**6. With System Prompt Variables**
+
+
+
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'An assistant with variable prompt',
+ customSystemPrompt: {
+ template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ variables: {
+ DOMAIN: 'technical support',
+ TONE: 'friendly and helpful'
+ }
+ }
+});
+```
+
+
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='An assistant with variable prompt',
+ custom_system_prompt={
+ 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ 'variables': {
+ 'DOMAIN': 'technical support',
+ 'TONE': 'friendly and helpful'
}
- ))
- ```
+ }
+))
+```
-### 2. Setting Custom Prompt During Initialization (With Variables)
+
+
+**7. With Custom Retriever**
- ```typescript
- const agent = new BedrockLLMAgent({
- name: 'Creative Writer',
- description: 'Creative writing and storytelling expert',
- modelId: 'anthropic.claude-3-sonnet-20240229-v1:0',
- customSystemPrompt: {
- template: `You are a {{ROLE}} specializing in {{SPECIALTY}}.
-
-Key strengths:
-{{STRENGTHS}}
-
-When crafting content:
-{{GUIDELINES}}
-
-Style: {{STYLE}}`,
- variables: {
- ROLE: 'creative writing expert',
- SPECIALTY: 'narrative development',
- STRENGTHS: [
- '- Character development',
- '- Plot structuring',
- '- Dialogue creation'
- ],
- GUIDELINES: [
- '- Start with a hook',
- '- Show, don\'t tell',
- '- Create vivid scenes'
- ],
- STYLE: 'engaging and imaginative'
+```typescript
+const retriever = new CustomRetriever({
+ // Retriever configuration
+});
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'An assistant with retriever',
+ retriever: retriever
+});
+```
+
+
+```python
+retriever = CustomRetriever(
+ # Retriever configuration
+)
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+    name='Bedrock Assistant',
+    description='An assistant with retriever',
+    retriever=retriever
+))
+```
+
+
+
+
+
+**8. With Tool Configuration**
+
+
+
+```typescript
+const agent = new BedrockLLMAgent({
+ name: 'Bedrock Assistant',
+ description: 'An assistant with tool support',
+ toolConfig: {
+ tool: [
+ {
+ name: "Weather_Tool",
+ description: "Get current weather data",
+ input_schema: {
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "City name",
+ }
+ },
+ required: ["location"]
}
}
- });
- ```
+ ]
+ }
+});
+```
+
+
+```python
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name='Bedrock Assistant',
+ description='An assistant with tool support',
+ tool_config={
+ 'tool': [{
+ 'name': 'Weather_Tool',
+ 'description': 'Get current weather data',
+ 'input_schema': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'description': 'City name'
+ }
+ },
+ 'required': ['location']
+ }
+ }]
+ }
+))
+```
+
+
+
+
+**9. Complete Example with All Options**
+
+
+
+```typescript
+import { BedrockLLMAgent } from "multi-agent-orchestrator";
+const agent = new BedrockLLMAgent({
+ // Required fields
+ name: "Advanced Bedrock Assistant",
+ description: "A fully configured AI assistant powered by Bedrock models",
+ // Optional fields
+ modelId: "anthropic.claude-3-sonnet-20240229-v1:0",
+ region: "us-west-2",
+ streaming: true,
+ retriever: customRetriever, // Custom retriever for additional context
+ inferenceConfig: {
+ maxTokens: 500,
+ temperature: 0.7,
+ topP: 0.9,
+ stopSequences: ["Human:", "AI:"],
+ },
+ guardrailConfig: {
+ guardrailIdentifier: "my-guardrail",
+ guardrailVersion: "1.0",
+ },
+ toolConfig: {
+ tool: [
+ {
+ name: "Weather_Tool",
+ description: "Get current weather data",
+ input_schema: {
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "City name",
+ },
+ },
+ required: ["location"],
+ },
+ },
+ ],
+ },
+ customSystemPrompt: {
+ template: `You are an AI assistant specialized in {{DOMAIN}}.
+Your core competencies:
+{{SKILLS}}
+Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}`,
+ variables: {
+ DOMAIN: "scientific research",
+ SKILLS: [
+ "- Advanced data analysis",
+ "- Statistical methodology",
+ "- Research design",
+ "- Technical writing",
+ ],
+ TONE: "professional and academic",
+ FOCUS: "accuracy and clarity",
+ PRIORITY: "evidence-based insights",
+ },
+ },
+});
+
+```
+
- ```python
- agent = BedrockLLMAgent(BedrockLLMAgentOptions(
- name='Creative Writer',
- description='Creative writing and storytelling expert',
- model_id='anthropic.claude-3-sonnet-20240229-v1:0',
- custom_system_prompt={
- "template": """You are a {{ROLE}} specializing in {{SPECIALTY}}.
-
-Key strengths:
-{{STRENGTHS}}
-
-When crafting content:
-{{GUIDELINES}}
-
-Style: {{STYLE}}""",
- "variables": {
- "ROLE": "creative writing expert",
- "SPECIALTY": "narrative development",
- "STRENGTHS": [
- "- Character development",
- "- Plot structuring",
- "- Dialogue creation"
- ],
- "GUIDELINES": [
- "- Start with a hook",
- "- Show, don't tell",
- "- Create vivid scenes"
- ],
- "STYLE": "engaging and imaginative"
+```python
+from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions
+
+agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ # Required fields
+ name='Advanced Bedrock Assistant',
+ description='A fully configured AI assistant powered by Bedrock models',
+
+ # Optional fields
+ model_id='anthropic.claude-3-sonnet-20240229-v1:0',
+ region='us-west-2',
+ streaming=True,
+ retriever=custom_retriever, # Custom retriever for additional context
+
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.7,
+ 'topP': 0.9,
+ 'stopSequences': ['Human:', 'AI:']
+ },
+
+ guardrail_config={
+ 'guardrailIdentifier': 'my-guardrail',
+ 'guardrailVersion': '1.0'
+ },
+
+ tool_config={
+ 'tool': [{
+ 'name': 'Weather_Tool',
+ 'description': 'Get current weather data',
+ 'input_schema': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'description': 'City name'
+ }
+ },
+ 'required': ['location']
}
+ }]
+ },
+
+ custom_system_prompt={
+ 'template': """You are an AI assistant specialized in {{DOMAIN}}.
+ Your core competencies:
+ {{SKILLS}}
+
+ Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}""",
+ 'variables': {
+ 'DOMAIN': 'scientific research',
+ 'SKILLS': [
+ '- Advanced data analysis',
+ '- Statistical methodology',
+ '- Research design',
+ '- Technical writing'
+ ],
+ 'TONE': 'professional and academic',
+ 'FOCUS': 'accuracy and clarity',
+ 'PRIORITY': 'evidence-based insights'
}
- ))
- ```
-
+ }
+))
+```
+
-### 3. Setting Custom Prompt After Initialization (Without Variables)
+
+
+The `BedrockLLMAgent` provides multiple ways to set custom prompts. You can set them either during initialization or after the agent is created, and you can use prompts with or without variables.
+
+**10. Setting Custom Prompt After Initialization (Without Variables)**
@@ -405,7 +486,9 @@ When providing business advice:
-### 4. Setting Custom Prompt After Initialization (With Variables)
+
+
+**11. Setting Custom Prompt After Initialization (With Variables)**
@@ -509,4 +592,18 @@ Choose the approach that best fits your needs:
- Use initialization when the prompt is part of the agent's core configuration
- Use post-initialization when prompts need to be changed dynamically
- Use variables when parts of the prompt need to be modified frequently
-- Use direct templates when the prompt is static
\ No newline at end of file
+- Use direct templates when the prompt is static
+
+### Option Explanations
+
+- `name` and `description`: Required fields to identify and describe the agent's purpose
+- `modelId`/`model_id`: Specifies the LLM model to use (e.g., Claude 3 Sonnet)
+- `region`: AWS region for the Bedrock service
+- `streaming`: Enables streaming responses for real-time output
+- `inferenceConfig`/`inference_config`: Fine-tunes the model's output characteristics
+- `guardrailConfig`/`guardrail_config`: Applies predefined guardrails to the model's responses
+- `retriever`: Integrates a retrieval system for enhanced context
+- `toolConfig`/`tool_config`: Defines tools the agent can use and how to handle their responses
+- `customSystemPrompt`/`custom_system_prompt`: Defines the agent's system prompt and behavior, with optional variables for dynamic content
+- `client`: Optional custom Bedrock client for specialized configurations
+
diff --git a/docs/src/content/docs/agents/built-in/openai-agent.mdx b/docs/src/content/docs/agents/built-in/openai-agent.mdx
index 0d1ddbb2..b0034219 100644
--- a/docs/src/content/docs/agents/built-in/openai-agent.mdx
+++ b/docs/src/content/docs/agents/built-in/openai-agent.mdx
@@ -1,167 +1,503 @@
---
title: Open AI Agent
-description: Documentation Open AI Agent
+description: Documentation for the OpenAI Agent
---
-The `OpenAIAgent` is a powerful agent class in the Multi-Agent Orchestrator framework that integrates with OpenAI's Chat Completion API. This agent allows you to leverage OpenAI's language models, such as GPT-3.5 and GPT-4, for various natural language processing tasks.
+The `OpenAIAgent` is a powerful agent class in the Multi-Agent Orchestrator framework that integrates with OpenAI's Chat Completion API. This agent allows you to leverage OpenAI's language models for various natural language processing tasks.
## Key Features
- Integration with OpenAI's Chat Completion API
-- Support for multiple OpenAI models
+- Support for multiple OpenAI models (e.g., GPT-4, GPT-3.5)
- Streaming and non-streaming response options
- Customizable inference configuration
-- Handles conversation history for context-aware responses
-- Customizable system prompts
+- Conversation history handling for context-aware responses
+- Customizable system prompts with variable support
+- Support for retrievers to enhance responses with additional context
+- Flexible initialization with API key or custom client
+
+## Configuration Options
+
+The `OpenAIAgentOptions` extends the base `AgentOptions` with the following fields:
+
+### Required Fields
+- `name`: Name of the agent
+- `description`: Description of the agent's capabilities
+- Authentication (one of the following is required):
+ - `apiKey`: Your OpenAI API key
+ - `client`: Custom OpenAI client instance
+
+### Optional Fields
+- `model`: OpenAI model identifier (e.g., 'gpt-4', 'gpt-3.5-turbo'). Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`
+- `streaming`: Enable streaming responses. Defaults to `false`
+- `retriever`: Custom retriever instance for enhancing responses with additional context
+- `inferenceConfig`: Configuration for model inference:
+ - `maxTokens`: Maximum tokens to generate (default: 1000)
+ - `temperature`: Controls randomness (0-1)
+ - `topP`: Controls diversity via nucleus sampling
+ - `stopSequences`: Sequences that stop generation
+- `customSystemPrompt`: System prompt configuration:
+ - `template`: Template string with optional variable placeholders
+ - `variables`: Key-value pairs for template variables
## Creating an OpenAIAgent
-To create a new `OpenAIAgent`, you need to provide an `OpenAIAgentOptions` object. Here's an example of how to create an OpenAIAgent:
+Here are various examples showing different ways to create and configure an OpenAIAgent:
+
+### Basic Examples
+
+**1. Minimal Configuration**
import { Tabs, TabItem } from '@astrojs/starlight/components';
-
+
+```typescript
+const agent = new OpenAIAgent({
+ name: 'OpenAI Assistant',
+ description: 'A versatile AI assistant',
+ apiKey: 'your-openai-api-key'
+});
+```
+
+
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='A versatile AI assistant',
+ api_key='your-openai-api-key'
+))
+```
+
+
+
+
+**2. Using Custom Client**
+
+
+
```typescript
-import { OpenAIAgent } from './path-to-openai-agent';
+import OpenAI from 'openai';
+const customClient = new OpenAI({ apiKey: 'your-openai-api-key' });
+const agent = new OpenAIAgent({
+  name: 'OpenAI Assistant',
+  description: 'A versatile AI assistant',
+  client: customClient
+});
+```
+
+```python
+from openai import OpenAI
+
+custom_client = OpenAI(api_key='your-openai-api-key')
+
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='A versatile AI assistant',
+ client=custom_client
+))
+```
+
+
+
+
+
+
+**3. Custom Model and Streaming**
+
+
+
+```typescript
const agent = new OpenAIAgent({
name: 'OpenAI Assistant',
- description: 'A versatile AI assistant powered by OpenAI models',
+ description: 'A streaming-enabled assistant',
+ apiKey: 'your-openai-api-key',
+ model: 'gpt-4',
+ streaming: true
+});
+```
+
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='A streaming-enabled assistant',
+ api_key='your-openai-api-key',
+ model='gpt-4',
+ streaming=True
+))
+```
+
+
+
+
+
+
+**4. With Inference Configuration**
+
+
+
+```typescript
+const agent = new OpenAIAgent({
+ name: 'OpenAI Assistant',
+ description: 'An assistant with custom inference settings',
apiKey: 'your-openai-api-key',
- model: 'gpt-3.5-turbo',
- streaming: true,
inferenceConfig: {
maxTokens: 500,
temperature: 0.7,
topP: 0.9,
stopSequences: ['Human:', 'AI:']
- },
- systemPrompt: 'You are a helpful AI assistant specialized in answering questions about technology.'
+ }
});
```
-
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='An assistant with custom inference settings',
+ api_key='your-openai-api-key',
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.7,
+ 'topP': 0.9,
+ 'stopSequences': ['Human:', 'AI:']
+ }
+))
+```
+
+
-### OpenAIAgentOptions
+**5. With Simple System Prompt**
-The `OpenAIAgentOptions` extends the base `AgentOptions` and includes the following properties:
+
+
+```typescript
+const agent = new OpenAIAgent({
+ name: 'OpenAI Assistant',
+ description: 'An assistant with custom prompt',
+ apiKey: 'your-openai-api-key',
+ customSystemPrompt: {
+ template: 'You are a helpful AI assistant focused on technical support.'
+ }
+});
+```
+
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='An assistant with custom prompt',
+ api_key='your-openai-api-key',
+ custom_system_prompt={
+ 'template': 'You are a helpful AI assistant focused on technical support.'
+ }
+))
+```
+
+
-- `name` (required): A string representing the name of the agent.
-- `description` (required): A string describing the agent's capabilities and expertise.
-- `apiKey` (required): Your OpenAI API key.
-- `model` (optional): The OpenAI model to use. Defaults to `OPENAI_MODEL_ID_GPT_O_MINI`.
-- `streaming` (optional): Whether to use streaming responses. Defaults to `false`.
-- `inferenceConfig` (optional): An object to customize the inference behavior:
- - `maxTokens` (optional): The maximum number of tokens to generate. Defaults to 1000.
- - `temperature` (optional): Controls randomness in output generation.
- - `topP` (optional): Controls diversity of output generation.
- - `stopSequences` (optional): An array of sequences that, when generated, will stop the generation process.
-- `systemPrompt` (optional): A string representing the initial system prompt for the agent.
+
-## Setting the System Prompt
+**6. With System Prompt Variables**
-You can set or update the system prompt for the OpenAIAgent in two ways:
+
+
+```typescript
+const agent = new OpenAIAgent({
+ name: 'OpenAI Assistant',
+ description: 'An assistant with variable prompt',
+ apiKey: 'your-openai-api-key',
+ customSystemPrompt: {
+ template: 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ variables: {
+ DOMAIN: 'customer support',
+ TONE: 'friendly and helpful'
+ }
+ }
+});
+```
+
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='An assistant with variable prompt',
+ api_key='your-openai-api-key',
+ custom_system_prompt={
+ 'template': 'You are an AI assistant specialized in {{DOMAIN}}. Always use a {{TONE}} tone.',
+ 'variables': {
+ 'DOMAIN': 'customer support',
+ 'TONE': 'friendly and helpful'
+ }
+ }
+))
+```
+
+
-1. During initialization:
+
+
+**7. With Custom Retriever**
-
+
+```typescript
+const retriever = new CustomRetriever({
+ // Retriever configuration
+});
+const agent = new OpenAIAgent({
+  name: 'OpenAI Assistant',
+  description: 'An assistant with retriever',
+  apiKey: 'your-openai-api-key',
+  retriever: retriever
+});
+```
+
+
+```python
+retriever = CustomRetriever(
+ # Retriever configuration
+)
+
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='An assistant with retriever',
+ api_key='your-openai-api-key',
+ retriever=retriever
+))
+```
+
+
+
+
+**8. Combining Multiple Options**
+
+
+
```typescript
const agent = new OpenAIAgent({
- // ... other options ...
- systemPrompt: 'You are a helpful AI assistant specialized in answering questions about technology.'
+ name: 'OpenAI Assistant',
+ description: 'An assistant with multiple options',
+ apiKey: 'your-openai-api-key',
+ model: 'gpt-4',
+ streaming: true,
+ inferenceConfig: {
+ maxTokens: 500,
+ temperature: 0.7
+ },
+ customSystemPrompt: {
+ template: 'You are an AI assistant specialized in {{DOMAIN}}.',
+ variables: {
+ DOMAIN: 'technical support'
+ }
+ }
});
+```
+
+
+```python
+agent = OpenAIAgent(OpenAIAgentOptions(
+ name='OpenAI Assistant',
+ description='An assistant with multiple options',
+ api_key='your-openai-api-key',
+ model='gpt-4',
+ streaming=True,
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.7
+ },
+ custom_system_prompt={
+ 'template': 'You are an AI assistant specialized in {{DOMAIN}}.',
+ 'variables': {
+ 'DOMAIN': 'technical support'
+ }
+ }
+))
```
+
+**9. Complete Example with All Options**
-2. Using the `setSystemPrompt` method after initialization:
+Here's a comprehensive example showing all available configuration options:
-
-
+
```typescript
-agent.setSystemPrompt(
- `You are an AI assistant specialized in {{DOMAIN}}.
- Your main goal is to {{GOAL}}.
- Always maintain a {{TONE}} tone in your responses.`,
- {
- DOMAIN: "artificial intelligence",
- GOAL: "explain complex AI concepts in simple terms",
- TONE: "friendly and educational"
+import { OpenAIAgent } from 'multi-agent-orchestrator';
+
+const agent = new OpenAIAgent({
+ // Required fields
+ name: 'Advanced OpenAI Assistant',
+ description: 'A fully configured AI assistant powered by OpenAI models',
+ apiKey: 'your-openai-api-key',
+
+ // Optional fields
+ model: 'gpt-4', // Choose OpenAI model
+ streaming: true, // Enable streaming responses
+ retriever: customRetriever, // Custom retriever for additional context
+
+ // Inference configuration
+ inferenceConfig: {
+ maxTokens: 500, // Maximum tokens to generate
+ temperature: 0.7, // Control randomness (0-1)
+ topP: 0.9, // Control diversity via nucleus sampling
+ stopSequences: ['Human:', 'AI:'] // Sequences that stop generation
+ },
+
+ // Custom system prompt with variables
+ customSystemPrompt: {
+ template: `You are an AI assistant specialized in {{DOMAIN}}.
+ Your core competencies:
+ {{SKILLS}}
+
+ Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}`,
+ variables: {
+ DOMAIN: 'scientific research',
+ SKILLS: [
+ '- Advanced data analysis',
+ '- Statistical methodology',
+ '- Research design',
+ '- Technical writing'
+ ],
+ TONE: 'professional and academic',
+ FOCUS: 'accuracy and clarity',
+ PRIORITY: 'evidence-based insights'
+ }
}
-);
+});
+```
+
+
+```python
+from multi_agent_orchestrator.agents import OpenAIAgent, OpenAIAgentOptions
+
+agent = OpenAIAgent(OpenAIAgentOptions(
+ # Required fields
+ name='Advanced OpenAI Assistant',
+ description='A fully configured AI assistant powered by OpenAI models',
+ api_key='your-openai-api-key',
+
+ # Optional fields
+ model='gpt-4', # Choose OpenAI model
+ streaming=True, # Enable streaming responses
+ retriever=custom_retriever, # Custom retriever for additional context
+
+ # Inference configuration
+ inference_config={
+ 'maxTokens': 500, # Maximum tokens to generate
+ 'temperature': 0.7, # Control randomness (0-1)
+ 'topP': 0.9, # Control diversity via nucleus sampling
+ 'stopSequences': ['Human:', 'AI:'] # Sequences that stop generation
+ },
+
+ # Custom system prompt with variables
+ custom_system_prompt={
+ 'template': """You are an AI assistant specialized in {{DOMAIN}}.
+ Your core competencies:
+ {{SKILLS}}
+
+ Communication style:
+ - Maintain a {{TONE}} tone
+ - Focus on {{FOCUS}}
+ - Prioritize {{PRIORITY}}""",
+ 'variables': {
+ 'DOMAIN': 'scientific research',
+ 'SKILLS': [
+ '- Advanced data analysis',
+ '- Statistical methodology',
+ '- Research design',
+ '- Technical writing'
+ ],
+ 'TONE': 'professional and academic',
+ 'FOCUS': 'accuracy and clarity',
+ 'PRIORITY': 'evidence-based insights'
+ }
+ }
+))
```
+## Using the OpenAIAgent
-The `setSystemPrompt` method allows you to dynamically change the agent's behavior and focus without creating a new instance. You can use placeholders in the prompt template and provide values for them in the second argument.
+There are two ways to use the OpenAIAgent: directly or through the Multi-Agent Orchestrator.
-## Usage
+### Direct Usage
-Once you've created an OpenAIAgent, you can add it to the Multi-Agent Orchestrator and use it to process requests:
+Call the agent directly when you want to use a single agent without orchestrator routing:
-
-
+
```typescript
-import { MultiAgentOrchestrator } from "multi-agent-orchestrator";
+const classifierResult = {
+ selectedAgent: agent,
+ confidence: 1.0
+};
-const orchestrator = new MultiAgentOrchestrator();
-orchestrator.addAgent(agent);
-
-const response = await orchestrator.routeRequest(
+const response = await orchestrator.agentProcessRequest(
"What is the capital of France?",
"user123",
- "session456"
+ "session456",
+ classifierResult
);
+```
+
+
+```python
+classifier_result = ClassifierResult(selected_agent=agent, confidence=1.0)
+
+response = await orchestrator.agent_process_request(
+ "What is the capital of France?",
+ "user123",
+ "session456",
+ classifier_result
+)
```
+### Using with the Orchestrator
-## Streaming Responses
-
-If you've enabled streaming (`streaming: true` in the options), the agent will return an AsyncIterable that you can use to process the response in chunks:
+Add the agent to Multi-Agent Orchestrator for use in a multi-agent system:
-
-
+
```typescript
-const streamingResponse = await orchestrator.routeRequest(
- "Tell me a long story about a brave knight",
+const orchestrator = new MultiAgentOrchestrator();
+orchestrator.addAgent(agent);
+
+const response = await orchestrator.routeRequest(
+ "What is the capital of France?",
"user123",
"session456"
);
-
-if (Symbol.asyncIterator in streamingResponse) {
- for await (const chunk of streamingResponse) {
- console.log(chunk); // Process each chunk of the response
- }
-}
```
-
+
+```python
+orchestrator = MultiAgentOrchestrator()
+orchestrator.add_agent(agent)
+
+response = await orchestrator.route_request(
+ "What is the capital of France?",
+ "user123",
+ "session456"
+)
+```
+
-
-
-## Best Practices
-
-1. **API Key Security**: Ensure your OpenAI API key is kept secure and not exposed in your codebase.
-2. **Model Selection**: Choose an appropriate model based on your use case and performance requirements.
-3. **Inference Configuration**: Experiment with different inference parameters to find the best balance between response quality and speed.
-4. **Error Handling**: Implement additional error handling in your application to manage potential API failures gracefully.
-5. **Rate Limiting**: Be aware of OpenAI's rate limits and implement appropriate throttling if necessary.
-6. **System Prompts**: Craft clear and specific system prompts to guide the model's behavior and improve response quality for your use case.
-
-By leveraging the OpenAIAgent, you can create sophisticated, context-aware AI agents capable of handling a wide range of tasks and interactions, all powered by OpenAI's state-of-the-art language models.
\ No newline at end of file
diff --git a/docs/src/content/docs/general/quickstart.mdx b/docs/src/content/docs/general/quickstart.mdx
index bfd0122f..2470d28d 100644
--- a/docs/src/content/docs/general/quickstart.mdx
+++ b/docs/src/content/docs/general/quickstart.mdx
@@ -91,7 +91,6 @@ Ensure you have [requested access](https://docs.aws.amazon.com/bedrock/latest/us
```bash
- pip install multi-agent-orchestrator # for core dependencies
pip install "multi-agent-orchestrator[anthropic]" # for Anthropic classifier and agent
pip install "multi-agent-orchestrator[openai]" # for OpenAI classifier and agent
pip install "multi-agent-orchestrator[all]" # for all packages including Anthropic and OpenAI
diff --git a/python/README.md b/python/README.md
index 937f9263..ce52471b 100644
--- a/python/README.md
+++ b/python/README.md
@@ -101,7 +101,7 @@ Check out our [documentation](https://awslabs.github.io/multi-agent-orchestrator
# Optional: Set up a virtual environment
python -m venv venv
source venv/bin/activate # On Windows use `venv\Scripts\activate`
-pip install multi-agent-orchestrator
+pip install multi-agent-orchestrator[aws]
```
#### Default Usage
diff --git a/python/setup.cfg b/python/setup.cfg
index e9e49add..96c0e839 100644
--- a/python/setup.cfg
+++ b/python/setup.cfg
@@ -19,17 +19,18 @@ package_dir =
= src
packages = find:
python_requires = >=3.11
-install_requires =
- boto3>=1.35.0
[options.extras_require]
+aws =
+    boto3>=1.35.0
anthropic =
anthropic>=0.40.0
openai =
openai>=1.55.3
all =
- anthropic>=0.40.0
- openai>=1.55.3
+    anthropic>=0.40.0
+    openai>=1.55.3
+    boto3>=1.35.0
[options.packages.find]
where = src
diff --git a/python/src/multi_agent_orchestrator/agents/__init__.py b/python/src/multi_agent_orchestrator/agents/__init__.py
index 0d198032..b9b70c04 100644
--- a/python/src/multi_agent_orchestrator/agents/__init__.py
+++ b/python/src/multi_agent_orchestrator/agents/__init__.py
@@ -2,16 +2,20 @@
Code for Agents.
"""
from .agent import Agent, AgentOptions, AgentCallbacks, AgentProcessingResult, AgentResponse
-from .lambda_agent import LambdaAgent, LambdaAgentOptions
-from .bedrock_llm_agent import BedrockLLMAgent, BedrockLLMAgentOptions
-from .lex_bot_agent import LexBotAgent, LexBotAgentOptions
-from .amazon_bedrock_agent import AmazonBedrockAgent, AmazonBedrockAgentOptions
-from .comprehend_filter_agent import ComprehendFilterAgent, ComprehendFilterAgentOptions
-from .chain_agent import ChainAgent, ChainAgentOptions
-from .bedrock_translator_agent import BedrockTranslatorAgent, BedrockTranslatorAgentOptions
-from .bedrock_inline_agent import BedrockInlineAgent, BedrockInlineAgentOptions
-from .bedrock_flows_agent import BedrockFlowsAgent, BedrockFlowsAgentOptions
+try:
+ from .lambda_agent import LambdaAgent, LambdaAgentOptions
+ from .bedrock_llm_agent import BedrockLLMAgent, BedrockLLMAgentOptions
+ from .lex_bot_agent import LexBotAgent, LexBotAgentOptions
+ from .amazon_bedrock_agent import AmazonBedrockAgent, AmazonBedrockAgentOptions
+ from .comprehend_filter_agent import ComprehendFilterAgent, ComprehendFilterAgentOptions
+ from .bedrock_translator_agent import BedrockTranslatorAgent, BedrockTranslatorAgentOptions
+ from .chain_agent import ChainAgent, ChainAgentOptions
+ from .bedrock_inline_agent import BedrockInlineAgent, BedrockInlineAgentOptions
+ from .bedrock_flows_agent import BedrockFlowsAgent, BedrockFlowsAgentOptions
+ _AWS_AVAILABLE = True
+except ImportError:
+ _AWS_AVAILABLE = False
try:
from .anthropic_agent import AnthropicAgent, AnthropicAgentOptions
_ANTHROPIC_AVAILABLE = True
@@ -19,34 +23,53 @@
_ANTHROPIC_AVAILABLE = False
+try:
+ from .openai_agent import OpenAIAgent, OpenAIAgentOptions
+ _OPENAI_AVAILABLE = True
+except ImportError:
+ _OPENAI_AVAILABLE = False
+
__all__ = [
'Agent',
'AgentOptions',
'AgentCallbacks',
'AgentProcessingResult',
- 'AgentResponse',
- 'LambdaAgent',
- 'LambdaAgentOptions',
- 'BedrockLLMAgent',
- 'BedrockLLMAgentOptions',
- 'LexBotAgent',
- 'LexBotAgentOptions',
- 'AmazonBedrockAgent',
- 'AmazonBedrockAgentOptions',
- 'ComprehendFilterAgent',
- 'ComprehendFilterAgentOptions',
- 'BedrockTranslatorAgent',
- 'BedrockTranslatorAgentOptions',
- 'ChainAgent',
- 'ChainAgentOptions',
- 'BedrockInlineAgent',
- 'BedrockInlineAgentOptions',
- 'BedrockFlowsAgent',
- 'BedrockFlowsAgentOptions'
-]
+ 'AgentResponse'
+ ]
+
+
+if _AWS_AVAILABLE:
+ __all__.extend([
+ 'LambdaAgent',
+ 'LambdaAgentOptions',
+ 'BedrockLLMAgent',
+ 'BedrockLLMAgentOptions',
+ 'LexBotAgent',
+ 'LexBotAgentOptions',
+ 'AmazonBedrockAgent',
+ 'AmazonBedrockAgentOptions',
+ 'ComprehendFilterAgent',
+ 'ComprehendFilterAgentOptions',
+ 'ChainAgent',
+ 'ChainAgentOptions',
+ 'BedrockTranslatorAgent',
+ 'BedrockTranslatorAgentOptions',
+ 'BedrockInlineAgent',
+ 'BedrockInlineAgentOptions',
+ 'BedrockFlowsAgent',
+ 'BedrockFlowsAgentOptions'
+ ])
+
if _ANTHROPIC_AVAILABLE:
__all__.extend([
'AnthropicAgent',
'AnthropicAgentOptions'
])
+
+
+if _OPENAI_AVAILABLE:
+ __all__.extend([
+ 'OpenAIAgent',
+ 'OpenAIAgentOptions'
+ ])
diff --git a/python/src/multi_agent_orchestrator/agents/openai_agent.py b/python/src/multi_agent_orchestrator/agents/openai_agent.py
new file mode 100644
index 00000000..8b205724
--- /dev/null
+++ b/python/src/multi_agent_orchestrator/agents/openai_agent.py
@@ -0,0 +1,208 @@
+from typing import Dict, List, Union, AsyncIterable, Optional, Any
+from dataclasses import dataclass
+from openai import OpenAI
+from multi_agent_orchestrator.agents import Agent, AgentOptions
+from multi_agent_orchestrator.types import (
+ ConversationMessage,
+ ParticipantRole,
+ OPENAI_MODEL_ID_GPT_O_MINI,
+ TemplateVariables
+)
+from multi_agent_orchestrator.utils import Logger
+from multi_agent_orchestrator.retrievers import Retriever
+
+
+
+@dataclass
+class OpenAIAgentOptions(AgentOptions):
+    api_key: Optional[str] = None
+ model: Optional[str] = None
+ streaming: Optional[bool] = None
+ inference_config: Optional[Dict[str, Any]] = None
+ custom_system_prompt: Optional[Dict[str, Any]] = None
+ retriever: Optional[Retriever] = None
+ client: Optional[Any] = None
+
+
+
+class OpenAIAgent(Agent):
+ def __init__(self, options: OpenAIAgentOptions):
+ super().__init__(options)
+        if not options.api_key and not options.client:
+            raise ValueError("OpenAI API key or client is required")
+
+ if options.client:
+ self.client = options.client
+ else:
+ self.client = OpenAI(api_key=options.api_key)
+
+
+ self.model = options.model or OPENAI_MODEL_ID_GPT_O_MINI
+ self.streaming = options.streaming or False
+ self.retriever: Optional[Retriever] = options.retriever
+
+
+ # Default inference configuration
+ default_inference_config = {
+ 'maxTokens': 1000,
+ 'temperature': None,
+ 'topP': None,
+ 'stopSequences': None
+ }
+
+ if options.inference_config:
+ self.inference_config = {**default_inference_config, **options.inference_config}
+ else:
+ self.inference_config = default_inference_config
+
+ # Initialize system prompt
+ self.prompt_template = f"""You are a {self.name}.
+ {self.description} Provide helpful and accurate information based on your expertise.
+ You will engage in an open-ended conversation, providing helpful and accurate information based on your expertise.
+ The conversation will proceed as follows:
+ - The human may ask an initial question or provide a prompt on any topic.
+ - You will provide a relevant and informative response.
+ - The human may then follow up with additional questions or prompts related to your previous response,
+ allowing for a multi-turn dialogue on that topic.
+ - Or, the human may switch to a completely new and unrelated topic at any point.
+ - You will seamlessly shift your focus to the new topic, providing thoughtful and coherent responses
+ based on your broad knowledge base.
+ Throughout the conversation, you should aim to:
+ - Understand the context and intent behind each new question or prompt.
+ - Provide substantive and well-reasoned responses that directly address the query.
+ - Draw insights and connections from your extensive knowledge when appropriate.
+ - Ask for clarification if any part of the question or prompt is ambiguous.
+ - Maintain a consistent, respectful, and engaging tone tailored to the human's communication style.
+ - Seamlessly transition between topics as the human introduces new subjects."""
+
+ self.system_prompt = ""
+ self.custom_variables: TemplateVariables = {}
+
+ if options.custom_system_prompt:
+ self.set_system_prompt(
+ options.custom_system_prompt.get('template'),
+ options.custom_system_prompt.get('variables')
+ )
+
+
+
+ def is_streaming_enabled(self) -> bool:
+ return self.streaming is True
+
+ async def process_request(
+ self,
+ input_text: str,
+ user_id: str,
+ session_id: str,
+ chat_history: List[ConversationMessage],
+ additional_params: Optional[Dict[str, str]] = None
+ ) -> Union[ConversationMessage, AsyncIterable[Any]]:
+ try:
+
+ self.update_system_prompt()
+
+ system_prompt = self.system_prompt
+
+ if self.retriever:
+ response = await self.retriever.retrieve_and_combine_results(input_text)
+ context_prompt = "\nHere is the context to use to answer the user's question:\n" + response
+ system_prompt += context_prompt
+
+
+ messages = [
+ {"role": "system", "content": system_prompt},
+ *[{
+ "role": msg.role.lower(),
+ "content": msg.content[0].get('text', '') if msg.content else ''
+ } for msg in chat_history],
+ {"role": "user", "content": input_text}
+ ]
+
+
+ request_options = {
+ "model": self.model,
+ "messages": messages,
+ "max_tokens": self.inference_config.get('maxTokens'),
+ "temperature": self.inference_config.get('temperature'),
+ "top_p": self.inference_config.get('topP'),
+ "stop": self.inference_config.get('stopSequences'),
+ "stream": self.streaming
+ }
+ if self.streaming:
+ return await self.handle_streaming_response(request_options)
+ else:
+ return await self.handle_single_response(request_options)
+
+ except Exception as error:
+ Logger.error(f"Error in OpenAI API call: {str(error)}")
+ raise error
+
+ async def handle_single_response(self, request_options: Dict[str, Any]) -> ConversationMessage:
+ try:
+ request_options['stream'] = False
+ chat_completion = self.client.chat.completions.create(**request_options)
+
+ if not chat_completion.choices:
+ raise ValueError('No choices returned from OpenAI API')
+
+ assistant_message = chat_completion.choices[0].message.content
+
+ if not isinstance(assistant_message, str):
+ raise ValueError('Unexpected response format from OpenAI API')
+
+ return ConversationMessage(
+ role=ParticipantRole.ASSISTANT.value,
+ content=[{"text": assistant_message}]
+ )
+
+ except Exception as error:
+ Logger.error(f'Error in OpenAI API call: {str(error)}')
+ raise error
+
+ async def handle_streaming_response(self, request_options: Dict[str, Any]) -> ConversationMessage:
+ try:
+ stream = self.client.chat.completions.create(**request_options)
+ accumulated_message = []
+
+ for chunk in stream:
+ if chunk.choices[0].delta.content:
+ chunk_content = chunk.choices[0].delta.content
+ accumulated_message.append(chunk_content)
+ if self.callbacks:
+ self.callbacks.on_llm_new_token(chunk_content)
+ #yield chunk_content
+
+ # Store the complete message in the instance for later access if needed
+ return ConversationMessage(
+ role=ParticipantRole.ASSISTANT.value,
+ content=[{"text": ''.join(accumulated_message)}]
+ )
+
+ except Exception as error:
+ Logger.error(f"Error getting stream from OpenAI model: {str(error)}")
+ raise error
+
+ def set_system_prompt(self,
+ template: Optional[str] = None,
+ variables: Optional[TemplateVariables] = None) -> None:
+ if template:
+ self.prompt_template = template
+ if variables:
+ self.custom_variables = variables
+ self.update_system_prompt()
+
+ def update_system_prompt(self) -> None:
+ all_variables: TemplateVariables = {**self.custom_variables}
+ self.system_prompt = self.replace_placeholders(self.prompt_template, all_variables)
+
+ @staticmethod
+ def replace_placeholders(template: str, variables: TemplateVariables) -> str:
+ import re
+ def replace(match):
+ key = match.group(1)
+ if key in variables:
+ value = variables[key]
+ return '\n'.join(value) if isinstance(value, list) else str(value)
+ return match.group(0)
+
+ return re.sub(r'{{(\w+)}}', replace, template)
\ No newline at end of file
diff --git a/python/src/multi_agent_orchestrator/classifiers/__init__.py b/python/src/multi_agent_orchestrator/classifiers/__init__.py
index 123bbe8d..48c51865 100644
--- a/python/src/multi_agent_orchestrator/classifiers/__init__.py
+++ b/python/src/multi_agent_orchestrator/classifiers/__init__.py
@@ -2,7 +2,12 @@
Code for Classifier.
"""
from .classifier import Classifier, ClassifierResult
-from .bedrock_classifier import BedrockClassifier, BedrockClassifierOptions
+
+try:
+ from .bedrock_classifier import BedrockClassifier, BedrockClassifierOptions
+ _AWS_AVAILABLE = True
+except ImportError:
+ _AWS_AVAILABLE = False
try:
from .anthropic_classifier import AnthropicClassifier, AnthropicClassifierOptions
@@ -16,15 +21,17 @@
except Exception as e:
_OPENAI_AVAILABLE = False
-
-
__all__ = [
"Classifier",
"ClassifierResult",
- "BedrockClassifier",
- "BedrockClassifierOptions"
]
+if _AWS_AVAILABLE:
+ __all__.extend([
+ "BedrockClassifier",
+ "BedrockClassifierOptions"
+ ])
+
if _ANTHROPIC_AVAILABLE:
__all__.extend([
"AnthropicClassifier",
diff --git a/python/src/multi_agent_orchestrator/orchestrator.py b/python/src/multi_agent_orchestrator/orchestrator.py
index 6e239b1b..49e577c4 100644
--- a/python/src/multi_agent_orchestrator/orchestrator.py
+++ b/python/src/multi_agent_orchestrator/orchestrator.py
@@ -3,14 +3,17 @@
import time
from multi_agent_orchestrator.utils.logger import Logger
from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole, OrchestratorConfig
-from multi_agent_orchestrator.classifiers import (Classifier,
- ClassifierResult,
- BedrockClassifier,
- BedrockClassifierOptions)
+from multi_agent_orchestrator.classifiers import Classifier, ClassifierResult
from multi_agent_orchestrator.agents import (Agent,
AgentResponse,
AgentProcessingResult)
-from multi_agent_orchestrator.storage import ChatStorage, InMemoryChatStorage
+from multi_agent_orchestrator.storage import ChatStorage
+from multi_agent_orchestrator.storage import InMemoryChatStorage
+try:
+ from multi_agent_orchestrator.classifiers import BedrockClassifier, BedrockClassifierOptions
+ _BEDROCK_AVAILABLE = True
+except ImportError:
+ _BEDROCK_AVAILABLE = False
@dataclass
class MultiAgentOrchestrator:
@@ -41,7 +44,14 @@ def __init__(self,
self.logger = Logger(self.config, logger)
self.agents: Dict[str, Agent] = {}
self.storage = storage or InMemoryChatStorage()
- self.classifier: Classifier = classifier or BedrockClassifier(options=BedrockClassifierOptions())
+
+ if classifier:
+ self.classifier = classifier
+ elif _BEDROCK_AVAILABLE:
+ self.classifier = BedrockClassifier(options=BedrockClassifierOptions())
+ else:
+ raise ValueError("No classifier provided and BedrockClassifier is not available. Please provide a classifier.")
+
self.execution_times: Dict[str, float] = {}
self.default_agent: Agent = default_agent
diff --git a/python/src/multi_agent_orchestrator/storage/__init__.py b/python/src/multi_agent_orchestrator/storage/__init__.py
index 483a5bab..51d3279d 100644
--- a/python/src/multi_agent_orchestrator/storage/__init__.py
+++ b/python/src/multi_agent_orchestrator/storage/__init__.py
@@ -1,10 +1,23 @@
+"""
+Storage implementations for chat history.
+"""
from .chat_storage import ChatStorage
from .in_memory_chat_storage import InMemoryChatStorage
-from .dynamodb_chat_storage import DynamoDbChatStorage
+_AWS_AVAILABLE = False
+
+try:
+ from .dynamodb_chat_storage import DynamoDbChatStorage
+ _AWS_AVAILABLE = True
+except ImportError:
+ _AWS_AVAILABLE = False
__all__ = [
- 'ChatStorage',
+ 'ChatStorage',
'InMemoryChatStorage',
- 'DynamoDbChatStorage'
]
+
+if _AWS_AVAILABLE:
+ __all__.extend([
+ 'DynamoDbChatStorage'
+ ])
\ No newline at end of file
diff --git a/python/src/tests/agents/test_openai_agent.py b/python/src/tests/agents/test_openai_agent.py
new file mode 100644
index 00000000..4b035a9b
--- /dev/null
+++ b/python/src/tests/agents/test_openai_agent.py
@@ -0,0 +1,164 @@
+import pytest
+from unittest.mock import Mock, AsyncMock, patch
+from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole
+from multi_agent_orchestrator.agents import OpenAIAgent, OpenAIAgentOptions
+
+@pytest.fixture
+def mock_openai_client():
+ mock_client = Mock()
+ # Set up nested structure to match OpenAI client
+ mock_client.chat = Mock()
+ mock_client.chat.completions = Mock()
+ mock_client.chat.completions.create = Mock()
+ return mock_client
+
+
+@pytest.fixture
+def openai_agent(mock_openai_client):
+ with patch('openai.OpenAI', return_value=mock_openai_client):
+ options = OpenAIAgentOptions(
+ name="TestAgent",
+ description="A test OpenAI agent",
+ api_key="test-api-key",
+ model="gpt-4",
+ streaming=False,
+ inference_config={
+ 'maxTokens': 500,
+ 'temperature': 0.5,
+ 'topP': 0.8,
+ 'stopSequences': []
+ }
+ )
+ agent = OpenAIAgent(options)
+ agent.client = mock_openai_client # Explicitly set the mock client
+ return agent
+
+
+def test_custom_system_prompt_with_variable():
+ with patch('openai.OpenAI'):
+ options = OpenAIAgentOptions(
+ name="TestAgent",
+ description="A test agent",
+ api_key="test-api-key",
+ custom_system_prompt={
+ 'template': "This is a prompt with {{variable}}",
+ 'variables': {'variable': 'value'}
+ }
+ )
+ agent = OpenAIAgent(options)
+ assert agent.system_prompt == "This is a prompt with value"
+
+
+@pytest.mark.asyncio
+async def test_process_request_success(openai_agent, mock_openai_client):
+ # Create a mock response object
+ mock_response = Mock()
+ mock_response.choices = [Mock()]
+ mock_response.choices[0].message = Mock()
+ mock_response.choices[0].message.content = "This is a test response"
+ mock_openai_client.chat.completions.create.return_value = mock_response
+
+ result = await openai_agent.process_request(
+ "Test question",
+ "test_user",
+ "test_session",
+ []
+ )
+
+ assert isinstance(result, ConversationMessage)
+ assert result.role == ParticipantRole.ASSISTANT.value
+ assert result.content[0]['text'] == 'This is a test response'
+
+
+@pytest.mark.asyncio
+async def test_process_request_streaming(openai_agent, mock_openai_client):
+ openai_agent.streaming = True
+
+ # Create mock chunks
+ class MockChunk:
+ def __init__(self, content):
+ self.choices = [Mock()]
+ self.choices[0].delta = Mock()
+ self.choices[0].delta.content = content
+
+ mock_stream = [
+ MockChunk("This "),
+ MockChunk("is "),
+ MockChunk("a "),
+ MockChunk("test response")
+ ]
+ mock_openai_client.chat.completions.create.return_value = mock_stream
+
+ result = await openai_agent.process_request(
+ "Test question",
+ "test_user",
+ "test_session",
+ []
+ )
+
+ chunks = []
+ async for chunk in result:
+ chunks.append(chunk)
+
+ assert chunks == ["This ", "is ", "a ", "test response"]
+
+
+@pytest.mark.asyncio
+async def test_process_request_with_retriever(openai_agent, mock_openai_client):
+ # Set up mock retriever
+ mock_retriever = AsyncMock()
+ mock_retriever.retrieve_and_combine_results.return_value = "Context from retriever"
+ openai_agent.retriever = mock_retriever
+
+ # Set up mock response
+ mock_response = Mock()
+ mock_response.choices = [Mock()]
+ mock_response.choices[0].message = Mock()
+ mock_response.choices[0].message.content = "Response with context"
+ mock_openai_client.chat.completions.create.return_value = mock_response
+
+ result = await openai_agent.process_request(
+ "Test question",
+ "test_user",
+ "test_session",
+ []
+ )
+
+ mock_retriever.retrieve_and_combine_results.assert_called_once_with("Test question")
+ assert isinstance(result, ConversationMessage)
+ assert result.content[0]['text'] == "Response with context"
+
+
+@pytest.mark.asyncio
+async def test_process_request_api_error(openai_agent, mock_openai_client):
+ mock_openai_client.chat.completions.create.side_effect = Exception("API Error")
+
+ with pytest.raises(Exception) as exc_info:
+ await openai_agent.process_request(
+ "Test input",
+ "user123",
+ "session456",
+ []
+ )
+ assert "API Error" in str(exc_info.value)
+
+
+@pytest.mark.asyncio
+async def test_handle_single_response_no_choices(openai_agent, mock_openai_client):
+ # Create mock response with no choices
+ mock_response = Mock()
+ mock_response.choices = []
+ mock_openai_client.chat.completions.create.return_value = mock_response
+
+ with pytest.raises(ValueError, match='No choices returned from OpenAI API'):
+ await openai_agent.handle_single_response({
+ "model": "gpt-4",
+ "messages": [{"role": "user", "content": "Hi"}],
+ "stream": False
+ })
+
+
+def test_is_streaming_enabled(openai_agent):
+ assert not openai_agent.is_streaming_enabled()
+ openai_agent.streaming = True
+ assert openai_agent.is_streaming_enabled()
\ No newline at end of file
diff --git a/typescript/package-lock.json b/typescript/package-lock.json
index 9147e361..494c1b35 100644
--- a/typescript/package-lock.json
+++ b/typescript/package-lock.json
@@ -19,6 +19,7 @@
"@aws-sdk/lib-dynamodb": "^3.621.0",
"@aws-sdk/util-dynamodb": "^3.621.0",
"axios": "^1.7.2",
+ "chai": "^5.1.2",
"eslint-config-prettier": "^9.1.0",
"natural": "^7.0.7",
"openai": "^4.52.7",
@@ -4270,6 +4271,14 @@
"node": ">=8"
}
},
+ "node_modules/assertion-error": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz",
+ "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
"node_modules/async": {
"version": "2.6.4",
"resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
@@ -4589,6 +4598,21 @@
}
]
},
+ "node_modules/chai": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-5.1.2.tgz",
+ "integrity": "sha512-aGtmf24DW6MLHHG5gCx4zaI3uBq3KRtxeVs0DjFH6Z0rDNbsvTxFASFvdj79pxjxZ8/5u3PIiN3IwEIQkiiuPw==",
+ "dependencies": {
+ "assertion-error": "^2.0.1",
+ "check-error": "^2.1.1",
+ "deep-eql": "^5.0.1",
+ "loupe": "^3.1.0",
+ "pathval": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
@@ -4613,6 +4637,14 @@
"node": ">=10"
}
},
+ "node_modules/check-error": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz",
+ "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==",
+ "engines": {
+ "node": ">= 16"
+ }
+ },
"node_modules/ci-info": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
@@ -4793,6 +4825,14 @@
}
}
},
+ "node_modules/deep-eql": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz",
+ "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
@@ -6821,6 +6861,11 @@
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
"integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
},
+ "node_modules/loupe": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.2.tgz",
+ "integrity": "sha512-23I4pFZHmAemUnz8WZXbYRSKYj801VDaNv9ETuMh7IrMc7VuVVSo+Z9iLE3ni30+U48iDWfi30d3twAXBYmnCg=="
+ },
"node_modules/lru-cache": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
@@ -7453,6 +7498,14 @@
"node": ">=8"
}
},
+ "node_modules/pathval": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz",
+ "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==",
+ "engines": {
+ "node": ">= 14.16"
+ }
+ },
"node_modules/pg": {
"version": "8.12.0",
"resolved": "https://registry.npmjs.org/pg/-/pg-8.12.0.tgz",
diff --git a/typescript/src/agents/anthropicAgent.ts b/typescript/src/agents/anthropicAgent.ts
index 04abb5c8..5c90c771 100644
--- a/typescript/src/agents/anthropicAgent.ts
+++ b/typescript/src/agents/anthropicAgent.ts
@@ -190,7 +190,7 @@ export interface AnthropicAgentOptions extends AgentOptions {
if (toolUseBlocks.length > 0) {
// Append current response to the conversation
- messages.push({role:'assistant', content:response.content});
+ messages.push({role: ParticipantRole.ASSISTANT, content:response.content});
if (!this.toolConfig){
throw new Error("No tools available for tool use");
}
diff --git a/typescript/src/agents/openAIAgent.ts b/typescript/src/agents/openAIAgent.ts
index 9465bfe4..161136de 100644
--- a/typescript/src/agents/openAIAgent.ts
+++ b/typescript/src/agents/openAIAgent.ts
@@ -1,10 +1,20 @@
import { Agent, AgentOptions } from './agent';
-import { ConversationMessage, OPENAI_MODEL_ID_GPT_O_MINI, ParticipantRole } from '../types';
+import { ConversationMessage, OPENAI_MODEL_ID_GPT_O_MINI, ParticipantRole, TemplateVariables } from '../types';
import OpenAI from 'openai';
import { Logger } from '../utils/logger';
+import { Retriever } from "../retrievers/retriever";
-export interface OpenAIAgentOptions extends AgentOptions {
+type WithApiKey = {
apiKey: string;
+ client?: never;
+};
+
+type WithClient = {
+ client: OpenAI;
+ apiKey?: never;
+};
+
+export interface OpenAIAgentOptions extends AgentOptions {
model?: string;
streaming?: boolean;
inferenceConfig?: {
@@ -13,12 +23,20 @@ export interface OpenAIAgentOptions extends AgentOptions {
topP?: number;
stopSequences?: string[];
};
+ customSystemPrompt?: {
+ template: string;
+ variables?: TemplateVariables;
+ };
+ retriever?: Retriever;
+
}
+export type OpenAIAgentOptionsWithAuth = OpenAIAgentOptions & (WithApiKey | WithClient);
+
const DEFAULT_MAX_TOKENS = 1000;
export class OpenAIAgent extends Agent {
- private openai: OpenAI;
+ private client: OpenAI;
private model: string;
private streaming: boolean;
private inferenceConfig: {
@@ -27,10 +45,26 @@ export class OpenAIAgent extends Agent {
topP?: number;
stopSequences?: string[];
};
+ private promptTemplate: string;
+ private systemPrompt: string;
+ private customVariables: TemplateVariables;
+ protected retriever?: Retriever;
+
+
+ constructor(options: OpenAIAgentOptionsWithAuth) {
- constructor(options: OpenAIAgentOptions) {
super(options);
- this.openai = new OpenAI({ apiKey: options.apiKey });
+
+ if (!options.apiKey && !options.client) {
+ throw new Error("OpenAI API key or OpenAI client is required");
+ }
+ if (options.client) {
+ this.client = options.client;
+ } else {
+ if (!options.apiKey) throw new Error("OpenAI API key is required");
+ this.client = new OpenAI({ apiKey: options.apiKey });
+ }
+
this.model = options.model ?? OPENAI_MODEL_ID_GPT_O_MINI;
this.streaming = options.streaming ?? false;
this.inferenceConfig = {
@@ -39,6 +73,37 @@ export class OpenAIAgent extends Agent {
topP: options.inferenceConfig?.topP,
stopSequences: options.inferenceConfig?.stopSequences,
};
+
+ this.retriever = options.retriever;
+
+
+ this.promptTemplate = `You are a ${this.name}. ${this.description} Provide helpful and accurate information based on your expertise.
+ You will engage in an open-ended conversation, providing helpful and accurate information based on your expertise.
+ The conversation will proceed as follows:
+ - The human may ask an initial question or provide a prompt on any topic.
+ - You will provide a relevant and informative response.
+ - The human may then follow up with additional questions or prompts related to your previous response, allowing for a multi-turn dialogue on that topic.
+ - Or, the human may switch to a completely new and unrelated topic at any point.
+ - You will seamlessly shift your focus to the new topic, providing thoughtful and coherent responses based on your broad knowledge base.
+ Throughout the conversation, you should aim to:
+ - Understand the context and intent behind each new question or prompt.
+ - Provide substantive and well-reasoned responses that directly address the query.
+ - Draw insights and connections from your extensive knowledge when appropriate.
+ - Ask for clarification if any part of the question or prompt is ambiguous.
+ - Maintain a consistent, respectful, and engaging tone tailored to the human's communication style.
+ - Seamlessly transition between topics as the human introduces new subjects.`
+
+ this.customVariables = {};
+ this.systemPrompt = '';
+
+ if (options.customSystemPrompt) {
+ this.setSystemPrompt(
+ options.customSystemPrompt.template,
+ options.customSystemPrompt.variables
+ );
+ }
+
+
}
/* eslint-disable @typescript-eslint/no-unused-vars */
@@ -50,8 +115,22 @@ export class OpenAIAgent extends Agent {
additionalParams?: Record<string, string>
): Promise<ConversationMessage | AsyncIterable<any>> {
+ this.updateSystemPrompt();
+
+ let systemPrompt = this.systemPrompt;
+
+ if (this.retriever) {
+ // retrieve from Vector store
+ const response = await this.retriever.retrieveAndCombineResults(inputText);
+ const contextPrompt =
+ "\nHere is the context to use to answer the user's question:\n" +
+ response;
+ systemPrompt = systemPrompt + contextPrompt;
+ }
+
const messages = [
+ { role: 'system', content: systemPrompt },
...chatHistory.map(msg => ({
role: msg.role.toLowerCase() as OpenAI.Chat.ChatCompletionMessageParam['role'],
content: msg.content[0]?.text || ''
@@ -80,11 +159,37 @@ export class OpenAIAgent extends Agent {
}
}
+ setSystemPrompt(template?: string, variables?: TemplateVariables): void {
+ if (template) {
+ this.promptTemplate = template;
+ }
+ if (variables) {
+ this.customVariables = variables;
+ }
+ this.updateSystemPrompt();
+ }
+
+ private updateSystemPrompt(): void {
+ const allVariables: TemplateVariables = {
+ ...this.customVariables
+ };
+ this.systemPrompt = this.replacePlaceholders(this.promptTemplate, allVariables);
+ }
+
+ private replacePlaceholders(template: string, variables: TemplateVariables): string {
+ return template.replace(/{{(\w+)}}/g, (match, key) => {
+ if (key in variables) {
+ const value = variables[key];
+ return Array.isArray(value) ? value.join('\n') : String(value);
+ }
+ return match;
+ });
+ }
+
private async handleSingleResponse(input: any): Promise<any> {
try {
const nonStreamingOptions = { ...input, stream: false };
- const chatCompletion = await this.openai.chat.completions.create(nonStreamingOptions);
-
+ const chatCompletion = await this.client.chat.completions.create(nonStreamingOptions);
if (!chatCompletion.choices || chatCompletion.choices.length === 0) {
throw new Error('No choices returned from OpenAI API');
}
@@ -106,7 +211,7 @@ export class OpenAIAgent extends Agent {
}
private async *handleStreamingResponse(options: OpenAI.Chat.ChatCompletionCreateParams): AsyncIterable<string> {
- const stream = await this.openai.chat.completions.create({ ...options, stream: true });
+ const stream = await this.client.chat.completions.create({ ...options, stream: true });
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
diff --git a/typescript/tests/agents/OpenAi.test.ts b/typescript/tests/agents/OpenAi.test.ts
new file mode 100644
index 00000000..7d949218
--- /dev/null
+++ b/typescript/tests/agents/OpenAi.test.ts
@@ -0,0 +1,144 @@
+import { OpenAIAgent, OpenAIAgentOptions } from '../../src/agents/openAIAgent';
+import OpenAI from 'openai';
+import { ParticipantRole } from '../../src/types';
+
+// Create a mock OpenAI client type that matches the structure we need
+const createMockOpenAIClient = () => ({
+ chat: {
+ completions: {
+ create: jest.fn(),
+ },
+ },
+});
+
+describe('OpenAIAgent', () => {
+ const mockUserId = 'user123';
+ const mockSessionId = 'session456';
+ let mockClient;
+
+ beforeEach(() => {
+ // Create mocked OpenAI client
+ mockClient = createMockOpenAIClient();
+
+ // Set up default mock response
+ mockClient.chat.completions.create.mockResolvedValue({
+ choices: [{ message: { content: 'Mock response' } }],
+ });
+ });
+
+ afterEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe('processRequest', () => {
+ it('should call OpenAI API with the correct parameters', async () => {
+ const options = {
+ name: 'Test Agent',
+ description: 'Test description',
+ client: mockClient as unknown as OpenAI,
+ customSystemPrompt: {
+ template: 'Custom prompt with {{variable}}',
+ variables: { variable: 'test-value' },
+ },
+ };
+
+ const openAIAgent = new OpenAIAgent(options);
+
+ const inputText = 'What is AI?';
+ const chatHistory = [];
+
+ const response = await openAIAgent.processRequest(
+ inputText,
+ mockUserId,
+ mockSessionId,
+ chatHistory
+ );
+
+ // Verify API call
+ expect(mockClient.chat.completions.create).toHaveBeenCalledWith(
+ expect.objectContaining({
+ model: expect.any(String),
+ messages: expect.arrayContaining([
+ expect.objectContaining({
+ role: 'system',
+ content: expect.stringContaining('Custom prompt with test-value'),
+ }),
+ expect.objectContaining({
+ role: 'user',
+ content: inputText,
+ }),
+ ]),
+ })
+ );
+ // Verify response structure
+ expect(response).toEqual({
+ role: ParticipantRole.ASSISTANT,
+ content: [{ text: 'Mock response' }],
+ });
+ });
+
+ it('should handle streaming responses correctly when streaming is enabled', async () => {
+ const options: OpenAIAgentOptions & { client: OpenAI } = {
+ name: 'Test Agent',
+ description: 'Test description',
+ client: mockClient as unknown as OpenAI,
+ streaming: true,
+ };
+
+ const openAIAgent = new OpenAIAgent(options);
+
+ // Mock streaming response
+ const mockStream = {
+ async *[Symbol.asyncIterator]() {
+ yield { choices: [{ delta: { content: 'Hello' } }] };
+ yield { choices: [{ delta: { content: ' World' } }] };
+ },
+ };
+
+ mockClient.chat.completions.create.mockResolvedValueOnce(mockStream as any);
+
+ const inputText = 'What is AI?';
+ const chatHistory = [];
+
+ const response = await openAIAgent.processRequest(
+ inputText,
+ mockUserId,
+ mockSessionId,
+ chatHistory
+ );
+
+ // Verify it returns an AsyncIterable
+ expect(response).toBeDefined();
+ expect(typeof response[Symbol.asyncIterator]).toBe('function');
+
+ // Verify the streamed content
+ const chunks = [];
+ for await (const chunk of response as AsyncIterable<string>) {
+ chunks.push(chunk);
+ }
+ expect(chunks).toEqual(['Hello', ' World']);
+ });
+
+ it('should throw error when API call fails', async () => {
+ const options: OpenAIAgentOptions & { client: OpenAI } = {
+ name: 'Test Agent',
+ description: 'Test description',
+ client: mockClient as unknown as OpenAI,
+ };
+
+ const openAIAgent = new OpenAIAgent(options);
+
+ // Mock API error
+ mockClient.chat.completions.create.mockRejectedValueOnce(
+ new Error('API Error')
+ );
+
+ const inputText = 'What is AI?';
+ const chatHistory = [];
+
+ await expect(
+ openAIAgent.processRequest(inputText, mockUserId, mockSessionId, chatHistory)
+ ).rejects.toThrow('API Error');
+ });
+ });
+});