from pathlib import Path

import json
import tempfile

import yaml

from patchwork.step import Step
from patchwork.steps import (
    CallOpenAI,
    CommitChanges,
    CreateIssueComment,
    CreatePR,
    ExtractModelResponse,
    GenerateCodeRepositoryEmbeddings,
    ModifyCode,
    PreparePrompt,
    QueryEmbeddings,
    ReadIssues,
)

# Default configuration shipped next to this module.
_DEFAULT_INPUT_FILE = Path(__file__).parent / "defaults.yml"
# Default LLM prompt template used when the caller does not supply one.
_DEFAULT_PROMPT_JSON = Path(__file__).parent / "prompt.json"


class ResolveIssue(Step):
    """Patchflow that proposes files relevant to a reported issue and,
    when ``fix_issue`` is enabled, asks an LLM for a fix and opens a PR.
    """

    def __init__(self, inputs: dict):
        """Merge caller inputs over the YAML defaults and derive PR settings.

        Args:
            inputs: user-supplied configuration; keys here override
                the values loaded from ``defaults.yml``.
        """
        final_inputs = yaml.safe_load(_DEFAULT_INPUT_FILE.read_text())
        # safe_load returns None for an empty / all-comment YAML file.
        if final_inputs is None:
            final_inputs = {}
        final_inputs.update(inputs)

        if "prompt_template_file" not in final_inputs:
            final_inputs["prompt_template_file"] = _DEFAULT_PROMPT_JSON

        # PR metadata is derived from the class name so subclasses get
        # sensible titles/branches for free.
        final_inputs["pr_title"] = f"PatchWork {self.__class__.__name__}"
        final_inputs["branch_prefix"] = f"{self.__class__.__name__.lower()}-"

        # Opt-in flag: only attempt an automated fix when explicitly enabled.
        self.fix_issue = bool(final_inputs.get("fix_issue", False))
        self.inputs = final_inputs

    def run(self) -> dict:
        """Execute the patchflow and return the accumulated step outputs."""
        # NOTE(review): the diff hunk for run() only starts at the
        # CreateIssueComment step; the earlier steps (issue reading,
        # embedding generation/query) sit outside the visible context —
        # confirm against the full file before relying on this section.
        outputs = CreateIssueComment(self.inputs).run()
        self.inputs.update(outputs)

        if self.fix_issue:
            extracted_code_contexts = []
            # Build one prompt context per file the embedding query flagged
            # as relevant to the issue.
            for result in self.inputs["embedding_results"]:
                path = result["path"]
                # Explicit encoding: the default is platform-dependent and
                # breaks on non-ASCII source files.
                with open(path, "r", encoding="utf-8", errors="replace") as file:
                    file_content = file.read()
                extracted_code_contexts.append(
                    {
                        "uri": path,
                        "startLine": 0,
                        "endLine": len(file_content.splitlines()),
                        "affectedCode": file_content,
                        "messageText": "\n".join(self.inputs["texts"]),
                    }
                )

            self.inputs["prompt_values"] = extracted_code_contexts

            # Persist the contexts for the downstream prompt step.
            # tempfile.mktemp() is deprecated and racy (the returned name can
            # be claimed by another process before we open it);
            # NamedTemporaryFile(delete=False) creates the file atomically.
            with tempfile.NamedTemporaryFile(
                mode="w", suffix=".json", delete=False, encoding="utf-8"
            ) as f:
                json.dump(extracted_code_contexts, f, indent=2, ensure_ascii=False)
                output_file = Path(f.name)

            self.inputs["code_file"] = output_file
            self.inputs["prompt_id"] = "resolve_issue"
            self.inputs["response_partitions"] = {"patch": []}

            # LLM round-trip and publication: render the prompt, call the
            # model, extract the patch, apply it, then commit and open a PR.
            # Each step reads from and appends to the shared inputs dict.
            for step_cls in (
                PreparePrompt,
                CallOpenAI,
                ExtractModelResponse,
                ModifyCode,
                CommitChanges,
                CreatePR,
            ):
                self.inputs.update(step_cls(self.inputs).run())

        return self.inputs
openai_api_key: required-for-openai +# google_api_key: required-for-google +# client_base_url: https://api.openai.com/v1 +# model: gpt-3.5-turbo + +# CommitChanges Inputs +disable_branch: false + +# CreatePR Inputs +disable_pr: false +force_pr_creation: true \ No newline at end of file diff --git a/patchwork/patchflows/ResolveIssue/prompt.json b/patchwork/patchflows/ResolveIssue/prompt.json new file mode 100644 index 00000000..621af553 --- /dev/null +++ b/patchwork/patchflows/ResolveIssue/prompt.json @@ -0,0 +1,12 @@ +[ + { + "id": "resolve_issue", + "prompts": [ + { + "role": "system", + "content": "You are a senior software engineer who is best in the world at resolving bugs. Users will give you a code snippet and you will generate a fix based on the provided bug message. Minimize the amount of changes needed for the fix. If no changes are necessary return the original code as is. Only respond with the new code, do not add any comments or change the indentation. Make sure you respond with the full code and not only the parts that are changed.\n\nResolve the bug described below by making necessary updates to the code.\n\n{{messageText}}." + }, + {"role": "user", "content": "{{affectedCode}}"} + ] + } +] \ No newline at end of file