Skip to content

Commit

Permalink
Process response from crewai
Browse files Browse the repository at this point in the history
  • Loading branch information
Luisotee committed Dec 14, 2024
1 parent 2c16eb5 commit 3bc40e5
Show file tree
Hide file tree
Showing 4 changed files with 80 additions and 10 deletions.
4 changes: 4 additions & 0 deletions apps/ai_api/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -111,3 +111,7 @@ ENV/
# mypy
.mypy_cache/
.idea/

# CSV Reports
*.csv
report_*.csv
53 changes: 43 additions & 10 deletions apps/ai_api/eda_ai_api/api/routes/classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,13 @@
from eda_ai_api.models.classifier import ClassifierResponse
from eda_ai_api.utils.audio_utils import process_audio_file
from eda_ai_api.utils.memory import ZepConversationManager
from eda_ai_api.utils.prompts import (INSUFFICIENT_TEMPLATES,
PROPOSAL_TEMPLATE, ROUTER_TEMPLATE,
TOPIC_TEMPLATE)
from eda_ai_api.utils.prompts import (
INSUFFICIENT_TEMPLATES,
PROPOSAL_TEMPLATE,
ROUTER_TEMPLATE,
TOPIC_TEMPLATE,
RESPONSE_PROCESSOR_TEMPLATE,
)

router = APIRouter()

Expand Down Expand Up @@ -47,6 +51,13 @@ async def extract_proposal_details(message: str, history: list) -> tuple[str, st
return community_project, grant_call


async def process_llm_response(message: str, response: str) -> str:
    """Post-process a raw response through the LLM.

    Formats the response-processor prompt with the user's original message
    and the raw response, then returns the LLM's rewritten text (intended to
    match the user's language and WhatsApp formatting conventions).
    """
    prompt = RESPONSE_PROCESSOR_TEMPLATE.format(
        original_message=message,
        response=response,
    )
    # NOTE(review): llm.complete appears to be a blocking call inside an
    # async def — presumably acceptable here, but confirm it doesn't stall
    # the event loop for long completions.
    completion = llm.complete(prompt)
    return completion.text


async def process_decision(
decision: str, message: str, history: list
) -> Dict[str, Any]:
Expand All @@ -64,7 +75,15 @@ async def process_decision(
context=context, message=message
)
)
return {"response": response.text}
processed_response = await process_llm_response(message, response.text)
return {"response": processed_response}

# Add return for successful discovery
crew_result = (
OpportunityFinderCrew().crew().kickoff(inputs={"topics": ", ".join(topics)})
)
processed_response = await process_llm_response(message, str(crew_result))
return {"response": processed_response}

elif decision == "proposal":
community_project, grant_call = await extract_proposal_details(message, history)
Expand All @@ -77,13 +96,20 @@ async def process_decision(
context=context, message=message
)
)
return {"response": response.text}
processed_response = await process_llm_response(message, response.text)
return {"response": processed_response}

elif decision == "heartbeat":
return {"is_alive": True}
processed_response = await process_llm_response(
message, str({"is_alive": True})
)
return {"response": processed_response}

elif decision == "onboarding":
return OnboardingCrew().crew().kickoff()
result = OnboardingCrew().crew().kickoff()
processed_response = await process_llm_response(message, str(result))
logger.info(f"Processed onboarding response: {processed_response}")
return {"response": processed_response}

else:
return {"error": f"Unknown decision type: {decision}"}
Expand Down Expand Up @@ -137,10 +163,17 @@ async def classifier_route(

# Process decision and store conversation
result = await process_decision(decision, combined_message, history)
await zep.add_conversation(session_id, combined_message, str(result))

return ClassifierResponse(result=str(result), session_id=session_id)
# Process final result if it's not already processed
if isinstance(result.get("response"), str):
final_result = await process_llm_response(combined_message, str(result))
else:
final_result = str(result)

await zep.add_conversation(session_id, combined_message, final_result)
return ClassifierResponse(result=final_result, session_id=session_id)

except Exception as e:
logger.error(f"Error in classifier route: {str(e)}")
return ClassifierResponse(result=f"Error: {str(e)}")
error_msg = await process_llm_response(combined_message, f"Error: {str(e)}")
return ClassifierResponse(result=error_msg)
27 changes: 27 additions & 0 deletions apps/ai_api/eda_ai_api/utils/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,3 +70,30 @@
Response:"""
),
}

RESPONSE_PROCESSOR_TEMPLATE = PromptTemplate(
"""IMPORTANT: You must respond in exactly the same language as the user's original message:
{original_message}
Process this response to:
1. MATCH THE EXACT LANGUAGE of the input message (this is crucial!)
2. Be clear and conversational in that language
3. Not exceed 2000 characters (summarize if longer)
4. Use WhatsApp formatting:
- *bold* for important terms
- _italic_ for emphasis
- ```code``` for technical terms
- ~strikethrough~ for corrections
- Lists with emoji bullets
- For URLs: Write "Link: " followed by the plain URL (no markdown)
Example: "Link: https://example.com"
5. If response looks like JSON, convert to natural language in the user's language:
- For heartbeat: "*Yes, I'm here! 🟢*\n_Ready to help you!_" (translate to match user's language)
- For errors: "⚠️ *Error*: _[error message]_" (translate to match user's language)
- For other JSON: Convert to organized message with formatting (in user's language)
Original response:
{response}
Respond in the SAME LANGUAGE as the original message with WhatsApp formatting:"""
)
6 changes: 6 additions & 0 deletions apps/whatsapp/src/message/message.ts
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,9 @@ export async function handleMessage(message: WAMessage) {
}

// Make API request
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 600000); // 10 minute timeout (10 * 60 * 1000 ms)

const response = await fetch(
"http://127.0.0.1:8083/api/classifier/classify",
{
Expand All @@ -76,9 +79,12 @@ export async function handleMessage(message: WAMessage) {
accept: "application/json",
},
body: formData,
signal: controller.signal,
},
);

clearTimeout(timeoutId); // Clear timeout if request completes

if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
Expand Down

0 comments on commit 3bc40e5

Please sign in to comment.