From b769e90b7b1f5875e16ce5994f7238b3cb33e08b Mon Sep 17 00:00:00 2001
From: TransformerOptimus
Date: Fri, 14 Jul 2023 19:33:11 +0530
Subject: [PATCH 1/5] cleaning up the initial prompt

---
 superagi/helper/feed_parser.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/superagi/helper/feed_parser.py b/superagi/helper/feed_parser.py
index 5ed13d2fb..c1e868b38 100644
--- a/superagi/helper/feed_parser.py
+++ b/superagi/helper/feed_parser.py
@@ -41,7 +41,12 @@ def parse_feed(feed):
                     "time_difference": feed.time_difference}
         except Exception:
             return feed
+
     if feed.role == "system":
-        return feed
+        final_output = feed.feed
+        if "json-schema.org" in feed.feed:
+            final_output = feed.feed.split("TOOLS")[0]
+        return {"role": "system", "feed": final_output, "updated_at": feed.updated_at,
+                "time_difference": feed.time_difference}
     return feed

From 4d5c292611cc0ed9cf5d77cee86cd6ac828f7e15 Mon Sep 17 00:00:00 2001
From: TransformerOptimus
Date: Fri, 14 Jul 2023 21:30:16 +0530
Subject: [PATCH 2/5] fixing the config exception issue

---
 superagi/jobs/agent_executor.py  | 2 +-
 superagi/models/configuration.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/superagi/jobs/agent_executor.py b/superagi/jobs/agent_executor.py
index 096f7477d..02d3cec73 100644
--- a/superagi/jobs/agent_executor.py
+++ b/superagi/jobs/agent_executor.py
@@ -127,7 +127,7 @@ def get_model_api_key_from_execution(cls, model, agent_execution, session):
 
     @classmethod
     def get_llm_source(cls, agent_execution, session):
-        return Configuration.fetch_value_by_agent_id(session, agent_execution.agent_id, "model_source")
+        return Configuration.fetch_value_by_agent_id(session, agent_execution.agent_id, "model_source") or "OpenAi"
 
     @classmethod
     def get_embedding(cls, model_source, model_api_key):
diff --git a/superagi/models/configuration.py b/superagi/models/configuration.py
index 72374fc37..c889214f1 100644
--- a/superagi/models/configuration.py
+++ b/superagi/models/configuration.py
@@ -85,5 +85,5 @@ def fetch_value_by_agent_id(cls, session, agent_id: int, key: str):
         config = session.query(Configuration).filter(Configuration.organisation_id == organisation.id,
                                                      Configuration.key == key).first()
         if not config:
-            raise HTTPException(status_code=404, detail="Configuration not found")
+            return None
         return config.value if config else None

From dc35026a81ffc853c89301c6d4494fb8759c4473 Mon Sep 17 00:00:00 2001
From: TransformerOptimus
Date: Fri, 14 Jul 2023 21:48:43 +0530
Subject: [PATCH 3/5] Handling exception on agent run

---
 superagi/jobs/agent_executor.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/superagi/jobs/agent_executor.py b/superagi/jobs/agent_executor.py
index 02d3cec73..68c893cc0 100644
--- a/superagi/jobs/agent_executor.py
+++ b/superagi/jobs/agent_executor.py
@@ -247,7 +247,14 @@ def execute_next_action(self, agent_execution_id):
         agent_workflow_step = session.query(AgentWorkflowStep).filter(
             AgentWorkflowStep.id == agent_execution.current_step_id).first()
 
-        response = spawned_agent.execute(agent_workflow_step)
+        try:
+            response = spawned_agent.execute(agent_workflow_step)
+        except RuntimeError as e:
+            # If our execution encounters an error we return and attempt to retry
+            logger.error("Error executing the agent:", e)
+            superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=15)
+            session.close()
+            return
         if "retry" in response and response["retry"]:
             superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()),
                                                       countdown=10)

From 90d940d54c477d89ad7e0327a1eda8db87b1365e Mon Sep 17 00:00:00 2001
From: Boundless Asura
Date: Tue, 18 Jul 2023 16:56:37 +0530
Subject: [PATCH 4/5] fixed

---
 gui/pages/Dashboard/Settings/Settings.js  | 14 +++++++++++---
 gui/pages/api/DashboardService.js         |  3 +++
 main.py                                   | 18 ++++++++++++++++++
 superagi/llms/base_llm.py                 |  6 +++++-
 superagi/llms/google_palm.py              | 14 ++++++++++++++
 superagi/llms/openai.py                   | 14 ++++++++++++++
 .../types/validate_llm_api_key_request.py |  6 ++++++
 7 files changed, 71 insertions(+), 4 deletions(-)
 create mode 100644 superagi/models/types/validate_llm_api_key_request.py

diff --git a/gui/pages/Dashboard/Settings/Settings.js b/gui/pages/Dashboard/Settings/Settings.js
index 768baf54d..2b3460b9d 100644
--- a/gui/pages/Dashboard/Settings/Settings.js
+++ b/gui/pages/Dashboard/Settings/Settings.js
@@ -2,7 +2,7 @@ import React, {useState, useEffect, useRef} from 'react';
 import {ToastContainer, toast} from 'react-toastify';
 import 'react-toastify/dist/ReactToastify.css';
 import agentStyles from "@/pages/Content/Agents/Agents.module.css";
-import {getOrganisationConfig, updateOrganisationConfig} from "@/pages/api/DashboardService";
+import {getOrganisationConfig, updateOrganisationConfig,validateLLMApiKey} from "@/pages/api/DashboardService";
 import {EventBus} from "@/utils/eventBus";
 import {removeTab, setLocalStorageValue} from "@/utils/utils";
 import Image from "next/image";
@@ -83,8 +83,16 @@ export default function Settings({organisationId}) {
       return
     }
 
-    updateKey("model_api_key", modelApiKey);
-    updateKey("model_source", source);
+    validateLLMApiKey(source, modelApiKey)
+      .then((response) => {
+        console.log("CHANGES",response.data)
+        if (response.data.status==="success") {
+          updateKey("model_api_key", modelApiKey);
+          updateKey("model_source", source);
+        } else {
+          toast.error("Invalid API key", {autoClose: 1800});
+        }
+      })
   };
 
   const handleTemperatureChange = (event) => {
diff --git a/gui/pages/api/DashboardService.js b/gui/pages/api/DashboardService.js
index 2e1978f91..cf976ab64 100644
--- a/gui/pages/api/DashboardService.js
+++ b/gui/pages/api/DashboardService.js
@@ -76,6 +76,9 @@ export const validateAccessToken = () => {
   return api.get(`/validate-access-token`);
 }
 
+export const validateLLMApiKey = (model_source, model_api_key) => {
+  return api.post(`/validate-llm-api-key`,{model_source, model_api_key});
+}
 export const checkEnvironment = () => {
   return api.get(`/configs/get/env`);
 }
diff --git a/main.py b/main.py
index ff5966c2d..7de018229 100644
--- a/main.py
+++ b/main.py
@@ -44,6 +44,7 @@
 from superagi.controllers.analytics import router as analytics_router
 from superagi.helper.tool_helper import register_toolkits
 from superagi.lib.logger import logger
+from superagi.llms.google_palm import GooglePalm
 from superagi.llms.openai import OpenAi
 from superagi.helper.auth import get_current_user
 from superagi.models.agent_workflow import AgentWorkflow
@@ -53,6 +54,7 @@
 from superagi.models.toolkit import Toolkit
 from superagi.models.oauth_tokens import OauthTokens
 from superagi.models.types.login_request import LoginRequest
+from superagi.models.types.validate_llm_api_key_request import ValidateAPIKeyRequest
 from superagi.models.user import User
 
 app = FastAPI()
@@ -426,6 +428,22 @@ async def root(Authorize: AuthJWT = Depends()):
         raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token")
 
 
+@app.post("/validate-llm-api-key")
+async def validate_llm_api_key(request: ValidateAPIKeyRequest, Authorize: AuthJWT = Depends()):
+    """API to validate LLM API Key"""
+    source = request.model_source
+    api_key = request.model_api_key
+    valid_api_key = False
+    if source == "OpenAi":
+        valid_api_key = OpenAi(api_key=api_key).verify_access_key()
+    elif source == "Google Palm":
+        valid_api_key = GooglePalm(api_key=api_key).verify_access_key()
+    if valid_api_key:
+        return {"message": "Valid API Key", "status": "success"}
+    else:
+        return {"message": "Invalid API Key", "status": "failed"}
+
+
 @app.get("/validate-open-ai-key/{open_ai_key}")
 async def root(open_ai_key: str, Authorize: AuthJWT = Depends()):
     """API to validate Open AI Key"""
diff --git a/superagi/llms/base_llm.py b/superagi/llms/base_llm.py
index 4408fcf9a..12b9eb452 100644
--- a/superagi/llms/base_llm.py
+++ b/superagi/llms/base_llm.py
@@ -16,4 +16,8 @@ def get_api_key(self):
 
     @abstractmethod
     def get_model(self):
-        pass
\ No newline at end of file
+        pass
+
+    @abstractmethod
+    def verify_access_key(self):
+        pass
diff --git a/superagi/llms/google_palm.py b/superagi/llms/google_palm.py
index 0f1de1e47..b19b595b3 100644
--- a/superagi/llms/google_palm.py
+++ b/superagi/llms/google_palm.py
@@ -76,3 +76,17 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT
         except Exception as exception:
             logger.info("Google palm Exception:", exception)
             return {"error": exception}
+
+    def verify_access_key(self):
+        """
+        Verify the access key is valid.
+
+        Returns:
+            bool: True if the access key is valid, False otherwise.
+        """
+        try:
+            models = palm.list_models()
+            return True
+        except Exception as exception:
+            logger.info("Google palm Exception:", exception)
+            return False
diff --git a/superagi/llms/openai.py b/superagi/llms/openai.py
index 4f58b4200..1d44cc6dc 100644
--- a/superagi/llms/openai.py
+++ b/superagi/llms/openai.py
@@ -76,3 +76,17 @@ def chat_completion(self, messages, max_tokens=get_config("MAX_MODEL_TOKEN_LIMIT
         except Exception as exception:
             logger.info("OpenAi Exception:", exception)
             return {"error": exception}
+
+    def verify_access_key(self):
+        """
+        Verify the access key is valid.
+
+        Returns:
+            bool: True if the access key is valid, False otherwise.
+        """
+        try:
+            models = openai.Model.list()
+            return True
+        except Exception as exception:
+            logger.info("OpenAi Exception:", exception)
+            return False
diff --git a/superagi/models/types/validate_llm_api_key_request.py b/superagi/models/types/validate_llm_api_key_request.py
new file mode 100644
index 000000000..4a8153de8
--- /dev/null
+++ b/superagi/models/types/validate_llm_api_key_request.py
@@ -0,0 +1,6 @@
+from pydantic import BaseModel
+
+
+class ValidateAPIKeyRequest(BaseModel):
+    model_source: str
+    model_api_key: str

From 157417dadacc1a199c24767ed2b1fc454ca785d1 Mon Sep 17 00:00:00 2001
From: Boundless Asura
Date: Tue, 18 Jul 2023 17:13:15 +0530
Subject: [PATCH 5/5] added tests

---
 tests/unit_tests/llms/test_google_palm.py | 8 ++++++++
 tests/unit_tests/llms/test_open_ai.py     | 9 +++++++++
 2 files changed, 17 insertions(+)

diff --git a/tests/unit_tests/llms/test_google_palm.py b/tests/unit_tests/llms/test_google_palm.py
index 6c0a9d964..e9848ac88 100644
--- a/tests/unit_tests/llms/test_google_palm.py
+++ b/tests/unit_tests/llms/test_google_palm.py
@@ -28,3 +28,11 @@ def test_chat_completion(mock_palm):
         top_p=palm_instance.top_p,
         max_output_tokens=int(max_tokens)
     )
+
+
+def test_verify_access_key():
+    model = 'models/text-bison-001'
+    api_key = 'test_key'
+    palm_instance = GooglePalm(api_key, model=model)
+    result = palm_instance.verify_access_key()
+    assert result is False
diff --git a/tests/unit_tests/llms/test_open_ai.py b/tests/unit_tests/llms/test_open_ai.py
index 955581799..9882092f4 100644
--- a/tests/unit_tests/llms/test_open_ai.py
+++ b/tests/unit_tests/llms/test_open_ai.py
@@ -2,6 +2,7 @@
 from unittest.mock import MagicMock, patch
 from superagi.llms.openai import OpenAi
 
+
 @patch('superagi.llms.openai.openai')
 def test_chat_completion(mock_openai):
     # Arrange
@@ -30,3 +31,11 @@ def test_chat_completion(mock_openai):
         frequency_penalty=openai_instance.frequency_penalty,
         presence_penalty=openai_instance.presence_penalty
     )
+
+
+def test_verify_access_key():
+    model = 'gpt-4'
+    api_key = 'test_key'
+    openai_instance = OpenAi(api_key, model=model)
+    result = openai_instance.verify_access_key()
+    assert result is False