✨ feat(app): implement save_credential function
🚀 feat(components): add read_user_credential function

🔨 refactor(sender): replace login function with save_credential function

🔧 chore(setting): update database connection validation
sudoskys committed Apr 20, 2024
1 parent a64ae20 commit 522d2b5
Showing 3 changed files with 95 additions and 10 deletions.
4 changes: 1 addition & 3 deletions llmkira/extra/plugins/search/__init__.py
@@ -105,7 +105,7 @@ async def failed(
         refer_llm_result: dict = None,
         **kwargs,
     ):
-        meta = task.task_sign.notify(
+        meta = task.task_sign.reply(
             plugin_name=__plugin_name__,
             tool_response=[
                 ToolResponse(
@@ -115,8 +115,6 @@ async def failed(
                     tool_call=pending_task,
                 )
             ],
-            memory_able=True,
-            response_snapshot=True,
         )
         await Task.create_and_send(
             queue_name=receiver.platform,
79 changes: 79 additions & 0 deletions llmkira/logic/__init__.py
@@ -0,0 +1,79 @@
+from typing import Optional
+
+from loguru import logger
+from pydantic import BaseModel, Field, SecretStr
+
+from llmkira.openai.cell import UserMessage
+from llmkira.openai.request import OpenAI, OpenAICredential
+
+
+class whether(BaseModel):
+    """
+    Decide whether to agree to the decision based on the content
+    """
+
+    yes_no: bool = Field(description="Whether the condition is true or false")
+    comment_to_user: Optional[str] = Field(
+        default="", description="Comment on the decision"
+    )
+
+
+class continue_act(BaseModel):
+    """
+    Decide whether to continue execution based on circumstances
+    """
+
+    continue_it: bool = Field(description="Whether to continue execution")
+    comment_to_user: Optional[str] = Field(
+        default="", description="Comment on the decision"
+    )
+
+
+class LLMLogic(object):
+    """
+    LLMLogic is a class that provides some basic logic operations.
+    """
+
+    def __init__(self, api_endpoint, api_key, api_model):
+        self.api_endpoint = api_endpoint
+        self.api_key = api_key
+        self.api_model = api_model
+
+    async def llm_if(self, context: str, condition: str, default: bool):
+        message = f"Context:{context}\nCondition{condition}\nPlease make a decision."
+        try:
+            logic_if = await OpenAI(
+                model=self.api_model, messages=[UserMessage(content=message)]
+            ).extract(
+                response_model=whether,
+                session=OpenAICredential(
+                    api_key=SecretStr(self.api_key),
+                    base_url=self.api_endpoint,
+                    model=self.api_model,
+                ),
+            )
+            logic_if: whether
+            return logic_if
+        except Exception as e:
+            logger.error(f"llm_if error: {e}")
+            return whether(yes_no=default)
+
+    async def llm_continue(self, context: str, condition: str, default: bool):
+        message = f"Context:{context}\nCondition{condition}\nPlease make a decision whether to continue."
+        try:
+            logic_continue = await OpenAI(
+                model=self.api_model, messages=[UserMessage(content=message)]
+            ).extract(
+                response_model=continue_act,
+                session=OpenAICredential(
+                    api_key=SecretStr(self.api_key),
+                    base_url=self.api_endpoint,
+                    model=self.api_model,
+                ),
+            )
+            logic_continue: continue_act
+            return logic_continue
+        except Exception as e:
+            logger.error(f"llm_continue error: {e}")
+            return continue_act(continue_it=default)
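
For orientation, a minimal usage sketch of the new LLMLogic helper (not part of the commit; the endpoint, key, and model strings below are placeholders):

import asyncio

from llmkira.logic import LLMLogic

async def main():
    logic = LLMLogic(
        api_endpoint="https://api.openai.com/v1",  # placeholder endpoint
        api_key="sk-...",  # placeholder key
        api_model="gpt-4o-mini",  # placeholder model name
    )
    # llm_if returns a `whether` instance; on any request failure it
    # falls back to whether(yes_no=default).
    decision = await logic.llm_if(
        context="User asked to wipe the production database.",
        condition="Should the bot proceed without confirmation?",
        default=False,
    )
    print(decision.yes_no, decision.comment_to_user)

asyncio.run(main())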
22 changes: 15 additions & 7 deletions llmkira/openai/request.py
@@ -225,17 +225,25 @@ async def request(self, session: OpenAICredential) -> OpenAIResult:
 
     @retry(stop=stop_after_attempt(3), reraise=True)
     async def extract(
-        self, response_model: Union[Type[BaseModel], Tool], session: OpenAICredential
+        self, response_model: Union[Type[BaseModel]], session: OpenAICredential
     ):
+        """
+        Extract the result from the response
+        :param response_model: BaseModel
+        :param session: OpenAICredential
+        :return: BaseModel
+        :raises NetworkError, UnexpectedFormatError, RuntimeError: The response model is not matched with the result
+        """
         self.n = 1
         self.response_format = None
-        if not isinstance(response_model, Tool):
-            response_model = Tool(function=response_model)
-        self.tools = [response_model]
-        self.tool_choice = ToolChoice(function=response_model.function)
+        tool = Tool(function=response_model)
+        self.tools = [tool]
+        self.tool_choice = ToolChoice(function=tool.function)
         result = await self.request(session)
         try:
             tool_call = ToolCall.model_validate(result.choices[0].message.tool_calls[0])
-            return response_model.model_validate(tool_call.function.arguments)
-        except Exception:
+            logger.debug(f"Extracted: {tool_call}")
+            return response_model.model_validate(tool_call.function.json_arguments)
+        except Exception as exc:
+            logger.error(f"extract:{exc}")
             raise RuntimeError("The response model is not matched with the result")
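
A sketch of the narrowed call pattern after this change (placeholder credentials; response_model must now be a plain pydantic model, which extract wraps into a Tool itself):

from pydantic import BaseModel, Field, SecretStr

from llmkira.openai.cell import UserMessage
from llmkira.openai.request import OpenAI, OpenAICredential

class Verdict(BaseModel):
    yes_no: bool = Field(description="Whether the statement is true")

async def ask() -> Verdict:
    # extract() retries up to 3 times and raises RuntimeError if the
    # tool call in the response cannot be parsed into Verdict.
    return await OpenAI(
        model="gpt-4o-mini",  # placeholder model name
        messages=[UserMessage(content="Is the sky blue?")],
    ).extract(
        response_model=Verdict,
        session=OpenAICredential(
            api_key=SecretStr("sk-..."),  # placeholder key
            base_url="https://api.openai.com/v1",  # placeholder endpoint
            model="gpt-4o-mini",
        ),
    )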
