Commit 25e89bf (parent: 44e5610). Showing 18 changed files with 1,132 additions and 31 deletions.
@@ -0,0 +1,78 @@
import json

from rdagent.oai.llm_utils import APIBackend


def generate_feedback(result, code, hypothesis):
    # Define the system prompt; since json_mode=True is used below and the
    # reply is parsed for a "feedback" field, the prompt asks for that shape.
    sys_prompt = (
        "You are a professional code review assistant. You will receive some code, a result, and a hypothesis. "
        "Your task is to provide feedback on how well the code and result support or refute the hypothesis. "
        "Please provide detailed and constructive feedback. "
        'Respond in JSON with a single "feedback" field.'
    )

    # Define the user prompt
    usr_prompt = f"""Given the following hypothesis, result, and code, provide feedback on how well the code and result support or refute the hypothesis.
Hypothesis: {hypothesis}
Result: {result}
Code:
```python
{code}
```
Please provide detailed and constructive feedback."""

    try:
        # Call the APIBackend to generate the response
        response = APIBackend().build_messages_and_create_chat_completion(
            user_prompt=usr_prompt,
            system_prompt=sys_prompt,
            json_mode=True,
        )

        # Log the raw response for debugging
        print("Raw Response:\n", response)

        # Parse the JSON response to extract the feedback
        response_json = json.loads(response)
        feedback = response_json.get("feedback", "No feedback provided")

        print("Generated Feedback:\n", feedback)

        return feedback

    except json.JSONDecodeError as e:
        print("Error parsing JSON response from LLM:", e)
    except Exception as e:
        print("An unexpected error occurred:", e)
    return None  # explicit sentinel when the call or parsing fails


def test_generate_feedback():
    result = "The model achieved an accuracy of 85% on the validation set."
    code = '''
import torch
import torch.nn as nn


class Net(nn.Module):
    def __init__(self, input_dim, output_dim=1, act="LeakyReLU"):
        super(Net, self).__init__()
        self.drop_input = nn.Dropout(0.05)
        self.fc = nn.Linear(input_dim, output_dim)
        if act == "LeakyReLU":
            self.activation = nn.LeakyReLU(negative_slope=0.1, inplace=False)
        elif act == "SiLU":
            self.activation = nn.SiLU()
        else:
            raise NotImplementedError(f"Activation function {act} is not supported")
        self.bn = nn.BatchNorm1d(output_dim)

    def forward(self, x):
        x = self.drop_input(x)
        x = self.fc(x)
        x = self.bn(x)
        x = self.activation(x)
        return x
'''
    hypothesis = "The data shows time-series quality."

    feedback = generate_feedback(result, code, hypothesis)
    print("Final Feedback:\n", feedback)


if __name__ == "__main__":
    test_generate_feedback()
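The parsing path above can be exercised without a live LLM by stubbing the backend method. A minimal sketch, assuming only that build_messages_and_create_chat_completion returns the raw completion string; the canned reply and the mock.patch.object stubbing are illustrative, not part of this commit:

import json
from unittest import mock

# Canned reply mimicking the {"feedback": ...} shape generate_feedback expects.
canned = json.dumps({"feedback": "The result is consistent with the hypothesis."})

# Patch the backend so no API key or network access is needed.
with mock.patch.object(
    APIBackend, "build_messages_and_create_chat_completion", return_value=canned
):
    fb = generate_feedback("acc=0.85", "print('model')", "time-series signal")
    assert fb == "The result is consistent with the hypothesis."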
@@ -0,0 +1,97 @@
from abc import abstractmethod
from pathlib import Path
from typing import Tuple

from jinja2 import Environment, StrictUndefined

from rdagent.components.coder.model_coder.model import ModelExperiment
from rdagent.core.prompts import Prompts
from rdagent.core.proposal import (
    Hypothesis,
    Hypothesis2Experiment,
    HypothesisGen,
    HypothesisSet,
    Scenario,
    Trace,
)
from rdagent.oai.llm_utils import APIBackend

prompt_dict = Prompts(file_path=Path(__file__).parent / "prompts.yaml")

ModelHypothesis = Hypothesis


class ModelHypothesisGen(HypothesisGen):
    def __init__(self, scen: Scenario):
        super().__init__(scen)

    # The following methods are scenario-specific, so they must be implemented by subclasses.
    @abstractmethod
    def prepare_context(self, trace: Trace) -> Tuple[dict, bool]:
        ...

    @abstractmethod
    def convert_response(self, response: str) -> ModelHypothesis:
        ...

    def gen(self, trace: Trace) -> ModelHypothesis:
        context_dict, json_flag = self.prepare_context(trace)

        system_prompt = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["model_hypothesis_gen"]["system_prompt"])
            .render(
                scenario=self.scen.get_scenario_all_desc(),
                hypothesis_output_format=context_dict["hypothesis_output_format"],
            )
        )
        user_prompt = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["model_hypothesis_gen"]["user_prompt"])
            .render(
                hypothesis_and_feedback=context_dict["hypothesis_and_feedback"],
                RAG=context_dict["RAG"],
            )
        )

        resp = APIBackend().build_messages_and_create_chat_completion(user_prompt, system_prompt, json_mode=json_flag)

        hypothesis = self.convert_response(resp)

        return hypothesis


class ModelHypothesis2Experiment(Hypothesis2Experiment[ModelExperiment]):
    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    def prepare_context(self, hs: HypothesisSet) -> Tuple[dict, bool]:
        ...

    @abstractmethod
    def convert_response(self, response: str) -> ModelExperiment:
        ...

    def convert(self, hs: HypothesisSet) -> ModelExperiment:
        context, json_flag = self.prepare_context(hs)
        system_prompt = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["model_hypothesis2experiment"]["system_prompt"])
            .render(
                scenario=hs.trace.scen.get_scenario_all_desc(),
                experiment_output_format=context["experiment_output_format"],
            )
        )
        user_prompt = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["model_hypothesis2experiment"]["user_prompt"])
            .render(
                hypothesis_and_feedback=context["hypothesis_and_feedback"],
                model_list=context["model_list"],
                RAG=context["RAG"],
            )
        )

        resp = APIBackend().build_messages_and_create_chat_completion(user_prompt, system_prompt, json_mode=json_flag)

        return self.convert_response(resp)
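ModelHypothesisGen is a template method: gen() handles prompt rendering and the LLM call, while subclasses supply the scenario-specific context and response parsing. A minimal sketch of a concrete subclass; the class name and placeholder context values are illustrative, not from this commit:

import json

class DemoModelHypothesisGen(ModelHypothesisGen):  # hypothetical subclass
    def prepare_context(self, trace: Trace) -> Tuple[dict, bool]:
        # Supply the pieces the prompt templates expect; a real scenario
        # would render these from the trace rather than hard-code them.
        context = {
            "hypothesis_and_feedback": "previous hypotheses and their feedback",
            "RAG": "retrieved domain knowledge, if any",
            "hypothesis_output_format": '{"hypothesis": "...", "reason": "..."}',
        }
        return context, True  # True -> request a JSON-mode completion

    def convert_response(self, response: str) -> ModelHypothesis:
        # Mirror the JSON contract declared in hypothesis_output_format.
        d = json.loads(response)
        return ModelHypothesis(hypothesis=d["hypothesis"], reason=d["reason"])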
@@ -0,0 +1,78 @@
import json
from pathlib import Path
from typing import List, Tuple

from jinja2 import Environment, StrictUndefined

from rdagent.components.coder.factor_coder.factor import FactorExperiment, FactorTask
from rdagent.components.coder.factor_coder.utils import get_data_folder_intro
from rdagent.components.proposal.factor_proposal import (
    FactorHypothesis,
    FactorHypothesis2Experiment,
    FactorHypothesisGen,
)
from rdagent.core.prompts import Prompts
from rdagent.core.proposal import HypothesisSet, Scenario, Trace

prompt_dict = Prompts(file_path=Path(__file__).parent / "prompts.yaml")

QlibFactorHypothesis = FactorHypothesis


class QlibFactorHypothesisGen(FactorHypothesisGen):
    def __init__(self, scen: Scenario) -> None:
        super().__init__(scen)

    def prepare_context(self, trace: Trace) -> Tuple[dict, bool]:
        hypothesis_feedback = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["hypothesis_and_feedback"])
            .render(trace=trace)
        )
        context_dict = {
            "hypothesis_and_feedback": hypothesis_feedback,
            "RAG": ...,
            "hypothesis_output_format": prompt_dict["hypothesis_output_format"],
        }
        return context_dict, True

    def convert_response(self, response: str) -> FactorHypothesis:
        response_dict = json.loads(response)
        hypothesis = QlibFactorHypothesis(hypothesis=response_dict["hypothesis"], reason=response_dict["reason"])
        return hypothesis


class QlibFactorHypothesis2Experiment(FactorHypothesis2Experiment):
    def prepare_context(self, hs: HypothesisSet) -> Tuple[dict, bool]:
        scenario = hs.trace.scen.get_scenario_all_desc()
        experiment_output_format = prompt_dict["experiment_output_format"]

        hypothesis_and_feedback = (
            Environment(undefined=StrictUndefined)
            .from_string(prompt_dict["hypothesis_and_feedback"])
            .render(trace=hs.trace)
        )

        experiment_list: List[FactorExperiment] = [t[1] for t in hs.trace.hist]

        factor_list = []
        for experiment in experiment_list:
            factor_list.extend(experiment.sub_tasks)

        return {
            "scenario": scenario,
            "hypothesis_and_feedback": hypothesis_and_feedback,
            "experiment_output_format": experiment_output_format,
            "factor_list": factor_list,
            "RAG": ...,
        }, True

    def convert_response(self, response: str) -> FactorExperiment:
        response_dict = json.loads(response)
        tasks = []
        for factor_name in response_dict:
            description = response_dict[factor_name]["description"]
            formulation = response_dict[factor_name]["formulation"]
            variables = response_dict[factor_name]["variables"]
            tasks.append(FactorTask(factor_name, description, formulation, variables))
        return FactorExperiment(tasks)
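convert_response implies a JSON contract in which each top-level key is a factor name mapping to its description, formulation, and variables. An illustrative response, with the factor and its field values invented for the example:

example_response = """
{
    "Momentum20": {
        "description": "20-day price momentum",
        "formulation": "close_t / close_{t-20} - 1",
        "variables": {"close": "daily adjusted close price"}
    }
}
"""
# Feeding this to convert_response yields a FactorExperiment containing one
# FactorTask("Momentum20", description, formulation, variables).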
@@ -1 +1,38 @@
# TODO define QlibModelExperiment here which should be subclass of Experiment
from pathlib import Path

from rdagent.components.coder.model_coder.model import ModelExperiment
from rdagent.core.prompts import Prompts
from rdagent.core.scenario import Scenario

prompt_dict = Prompts(file_path=Path(__file__).parent / "prompts.yaml")

QlibModelExperiment = ModelExperiment


class QlibModelScenario(Scenario):
    @property
    def background(self) -> str:
        return prompt_dict["qlib_model_background"]

    @property
    def output_format(self) -> str:
        return prompt_dict["qlib_model_output_format"]

    @property
    def interface(self) -> str:
        return prompt_dict["qlib_model_interface"]

    @property
    def simulator(self) -> str:
        return prompt_dict["qlib_model_simulator"]

    def get_scenario_all_desc(self) -> str:
        # NOTE: `source_data` is not defined in this diff; it is presumably
        # provided elsewhere (e.g. on the base Scenario or a later revision).
        return f"""Background of the scenario:
{self.background}
The source data you can use:
{self.source_data}
The interface you should follow to write the runnable code:
{self.interface}
The output of your code should be in the format:
{self.output_format}
The simulator user can use to test your model:
{self.simulator}
"""