Commit

one file commit
peteryangms committed Jul 10, 2024
1 parent 44e5610 commit 6c278e9
Showing 1 changed file with 115 additions and 4 deletions.
119 changes: 115 additions & 4 deletions rdagent/scenarios/qlib/task_generator/feedback.py
@@ -1,7 +1,118 @@
# TODO:
# Implement the feedback generation.
import json
from rdagent.oai.llm_utils import APIBackend
from rdagent.core.proposal import HypothesisExperiment2Feedback, Trace, Hypothesis, HypothesisFeedback
from rdagent.core.experiment import Experiment
from typing import Dict, List, Tuple
from rdagent.scenarios.qlib.experiment.model_experiment import QlibModelExperiment, QlibModelScenario

class QlibFactorHypothesisExperiment2Feedback(HypothesisExperiment2Feedback):
    """This is the first version, where we only use things once."""

    ...


class QlibFactorExperiment2Feedback(HypothesisExperiment2Feedback):
    """
    Generates feedback on the hypothesis from **executed** implementations of the different tasks
    and their comparison with previous performance.
    """

    def generateFeedback(self, exp: Experiment, hypothesis: Hypothesis, trace: Trace) -> HypothesisFeedback:
        """
        The task implementation (`ti`) should already be executed and its results included, as well as the
        comparison with previous results (done by the LLM). For example, Qlib's `mlflow` output will be included.
        """


        # Define the system prompt for hypothesis feedback
        sys_prompt_hypothesis = (
            "You are a professional result analysis assistant. You will receive a result and a hypothesis. "
            "Your task is to provide feedback on how well the result supports or refutes the hypothesis. "
            "Please provide detailed and constructive feedback. "
            "Example JSON Structure for Result Analysis: "
            '{"Observations": "Your overall observations here", "Feedback for Hypothesis": "Observations related to the hypothesis", '
            '"New Hypothesis": "Put your new hypothesis here.", "Reasoning": "Provide reasoning for the hypothesis here.", '
            '"Attitude": "yes or no"}'
        )
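
        # For reference (not part of the original commit), a response that follows the requested
        # structure and would parse cleanly in the code below might look like this; the values are
        # illustrative only:
        # {
        #     "Observations": "The backtest metrics improved slightly over the last round.",
        #     "Feedback for Hypothesis": "The result is consistent with the hypothesis.",
        #     "New Hypothesis": "A larger, more diverse dataset will improve performance further.",
        #     "Reasoning": "The gains so far came mainly from additional training data.",
        #     "Attitude": "yes"
        # }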

        # Define the user prompt for hypothesis feedback
        context = trace.scen.get_scenario_all_desc()
        usr_prompt_hypothesis = f'''
We are running an experiment loop in which we propose hypotheses and validate or reject them, so that in the end we obtain a powerful model.
Here is the context: {context}.
Last round's result: {trace.get_last_experiment_info}
Now, for this round, you will receive the result and evaluate whether the performance has increased or decreased.
Given the following hypothesis and result, provide feedback on how well the result supports or refutes the hypothesis.
Hypothesis: {hypothesis.hypothesis}
Relevant Reasoning: {hypothesis.reason}
Result: {exp.result}
Please provide detailed and constructive feedback.
'''

        try:
            # Call the APIBackend to generate the response for hypothesis feedback
            response_hypothesis = APIBackend().build_messages_and_create_chat_completion(
                user_prompt=usr_prompt_hypothesis,
                system_prompt=sys_prompt_hypothesis,
                json_mode=True,
            )

            # Log the raw response for debugging
            print("Raw Response for Hypothesis Feedback:\n", response_hypothesis)

            # Parse the JSON response to extract the feedback
            response_json_hypothesis = json.loads(response_hypothesis)
            hypothesis_feedback = HypothesisFeedback(
                observations=response_json_hypothesis.get("Observations", "No observations provided"),
                feedback_for_hypothesis=response_json_hypothesis.get("Feedback for Hypothesis", "No feedback provided"),
                new_hypothesis=response_json_hypothesis.get("New Hypothesis", "No new hypothesis provided"),
                reasoning=response_json_hypothesis.get("Reasoning", "No reasoning provided"),
                attitude=response_json_hypothesis.get("Attitude", "no"),  # Default to "no" if not provided
            )

            print("Generated Hypothesis Feedback:\n", hypothesis_feedback)

            return hypothesis_feedback

        except json.JSONDecodeError as e:
            print("Error parsing JSON response from LLM for hypothesis feedback:", e)
        except Exception as e:
            print("An unexpected error occurred while generating hypothesis feedback:", e)

        return HypothesisFeedback(
            observations="",
            feedback_for_hypothesis="",
            new_hypothesis="",
            reasoning="",
            attitude="no",
        )


if __name__ == "__main__":
    # Test the implementation

    # Create a mock scenario
    scenario = QlibModelScenario()

    # Create a mock hypothesis
    hypothesis = Hypothesis()
    hypothesis.hypothesis = "Increasing the dataset size improves model performance."
    hypothesis.reason = "Previous experiments have shown better accuracy with more data."

    # Create a mock experiment
    experiment = QlibModelExperiment(sub_tasks=["task1", "task2"], result="The model accuracy improved by 5%.")

    # Create a mock trace and add a history entry
    trace = Trace(scen=scenario)
    trace.hist.append((hypothesis, experiment, HypothesisFeedback(
        observations="Initial observation.",
        feedback_for_hypothesis="The hypothesis seems valid.",
        new_hypothesis="Try adding more diverse data.",
        reasoning="More data has been shown to improve performance.",
        attitude="yes",
    )))

    # Create the QlibFactorExperiment2Feedback object
    feedback_generator = QlibFactorExperiment2Feedback()

    # Generate feedback for the mock experiment
    feedback = feedback_generator.generateFeedback(experiment, hypothesis, trace)

    # Print the generated feedback
    print("Generated Feedback:", feedback)
