Add gemini support through requests #56

Merged
5 commits merged on May 3, 2024
Changes from 3 commits
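What this PR adds, at a glance: the existing patchflows keep their step pipeline but now call a renamed CallLLM step, and a new CallGemini client inside that step talks to Google's Generative Language REST API directly through requests rather than an SDK. Below is a minimal standalone sketch of that HTTP call, mirroring the CallGemini.call implementation added later in this diff; the model name, prompt, generationConfig values, and the GEMINI_API_KEY environment variable are illustrative assumptions, not part of the PR.

import os

import requests

BASE_URL = "https://generativelanguage.googleapis.com/v1"
MODEL = "gemini-pro"  # placeholder model name

response = requests.post(
    f"{BASE_URL}/models/{MODEL}:generateContent",
    params={"key": os.environ["GEMINI_API_KEY"]},  # assumed environment variable
    json={
        "generationConfig": {"temperature": 0.2},  # optional model args
        "contents": [{"parts": [{"text": "Explain this diff in one sentence."}]}],
    },
)
response.raise_for_status()

# The response carries a list of candidates; the step below reads the first one.
candidate = response.json().get("candidates", [{}])[0]
print(candidate.get("content", {}).get("parts", [{}])[0].get("text", ""))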
4 changes: 2 additions & 2 deletions patchwork/patchflows/AutoFix/AutoFix.py
@@ -7,7 +7,7 @@
 from patchwork.logger import logger
 from patchwork.step import Step
 from patchwork.steps import (
-    CallOpenAI,
+    CallLLM,
     CommitChanges,
     CreatePR,
     ExtractCode,
@@ -71,7 +71,7 @@ def run(self) -> dict:
         for i in range(self.n):
             outputs = PreparePrompt(self.inputs).run()
             self.inputs.update(outputs)
-            outputs = CallOpenAI(self.inputs).run()
+            outputs = CallLLM(self.inputs).run()
             self.inputs.update(outputs)
             outputs = ExtractModelResponse(self.inputs).run()
             self.inputs.update(outputs)
8 changes: 4 additions & 4 deletions patchwork/patchflows/DependencyUpgrade/DependencyUpgrade.py
@@ -7,7 +7,7 @@
 from patchwork.step import Step
 from patchwork.steps import (
     AnalyzeImpact,
-    CallOpenAI,
+    CallLLM,
     CommitChanges,
     CreatePR,
     ExtractDiff,
@@ -76,7 +76,7 @@ def run(self) -> dict:
         analyze_inputs["response_partitions"] = {"impacted_methods": ["A. Impacted methods:", ""]}
         outputs = PreparePrompt(analyze_inputs).run()
         analyze_inputs.update(outputs)
-        outputs = CallOpenAI(analyze_inputs).run()
+        outputs = CallLLM(analyze_inputs).run()
         analyze_inputs.update(outputs)
         outputs = ExtractModelResponse(analyze_inputs).run()
         analyze_inputs.update(outputs)
@@ -88,7 +88,7 @@ def run(self) -> dict:
         analyze_inputs["response_partitions"] = {"patch": []}
         outputs = PreparePrompt(analyze_inputs).run()
         analyze_inputs.update(outputs)
-        outputs = CallOpenAI(analyze_inputs).run()
+        outputs = CallLLM(analyze_inputs).run()
         analyze_inputs.update(outputs)
         outputs = ExtractModelResponse(analyze_inputs).run()
         analyze_inputs.update(outputs)
@@ -99,7 +99,7 @@ def run(self) -> dict:
         self.inputs["response_partitions"] = {"patch": []}
         outputs = PreparePrompt(self.inputs).run()
         self.inputs.update(outputs)
-        outputs = CallOpenAI(self.inputs).run()
+        outputs = CallLLM(self.inputs).run()
         self.inputs.update(outputs)
         outputs = ExtractModelResponse(self.inputs).run()
         self.inputs.update(outputs)
4 changes: 2 additions & 2 deletions patchwork/patchflows/GenerateREADME/GenerateREADME.py
@@ -5,7 +5,7 @@
 from patchwork.step import Step
 from patchwork.steps import (
     CallCode2Prompt,
-    CallOpenAI,
+    CallLLM,
     CommitChanges,
     CreatePR,
     ExtractModelResponse,
@@ -47,7 +47,7 @@ def run(self) -> dict:
         self.inputs["response_partitions"] = {"patch": []}
         outputs = PreparePrompt(self.inputs).run()
         self.inputs.update(outputs)
-        outputs = CallOpenAI(self.inputs).run()
+        outputs = CallLLM(self.inputs).run()
         self.inputs.update(outputs)
         outputs = ExtractModelResponse(self.inputs).run()
         self.inputs.update(outputs)
6 changes: 3 additions & 3 deletions patchwork/patchflows/PRReview/PRReview.py
@@ -4,7 +4,7 @@

 from patchwork.step import Step
 from patchwork.steps import (
-    CallOpenAI,
+    CallLLM,
     CreatePRComment,
     ExtractModelResponse,
     PreparePR,
@@ -63,7 +63,7 @@ def run(self) -> dict:
         }
         outputs = PreparePrompt(self.inputs).run()
         self.inputs.update(outputs)
-        outputs = CallOpenAI(self.inputs).run()
+        outputs = CallLLM(self.inputs).run()
         self.inputs.update(outputs)
         outputs = ExtractModelResponse(self.inputs).run()
         self.inputs.update(outputs)
@@ -87,7 +87,7 @@ def run(self) -> dict:

         outputs = PreparePrompt(self.inputs).run()
         self.inputs.update(outputs)
-        outputs = CallOpenAI(self.inputs).run()
+        outputs = CallLLM(self.inputs).run()
         self.inputs.update(outputs)
         header = self.inputs["openai_responses"][0]

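Across all four patchflows the change is mechanical: CallOpenAI is swapped for CallLLM with the same inputs and the same run() contract, so PreparePrompt and ExtractModelResponse are untouched and the raw texts still come back under the existing openai_responses key. A rough sketch of that shared calling pattern follows; the llm_round helper is hypothetical and not part of this PR, and PreparePrompt is assumed to add the prompt_file key that CallLLM requires.

from patchwork.steps import CallLLM, ExtractModelResponse, PreparePrompt


def llm_round(inputs: dict) -> dict:
    # Hypothetical helper showing the step chain the patchflows above repeat.
    # `inputs` is whatever the patchflow has accumulated so far; each step's
    # outputs are merged back in before the next step runs.
    for step in (PreparePrompt, CallLLM, ExtractModelResponse):
        inputs.update(step(inputs).run())
    return inputs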
185 changes: 185 additions & 0 deletions patchwork/steps/CallLLM/CallLLM.py
@@ -0,0 +1,185 @@
import json
import os
import tempfile
from pathlib import Path
from pprint import pformat
from textwrap import indent
from typing import Any, Protocol

import requests
from openai import OpenAI

from patchwork.logger import logger
from patchwork.step import Step

_TOKEN_URL = "https://app.patched.codes/signin"
_DEFAULT_PATCH_URL = "https://patchwork.patched.codes/v1"


class LLMModel(Protocol):
    def call(self, prompts) -> list[str]:
        pass


class CallGemini(LLMModel):
    def __init__(
        self, model: str, model_args: dict[str, Any], client_args: dict[str, Any], key: str, allow_truncated: bool
    ):
        client_values = client_args.copy()

        self.model = model
        self.base_url = client_values.pop("base_url", "https://generativelanguage.googleapis.com/v1")
        self.model_args = model_args
        self.api_key = key
        self.allow_truncated = allow_truncated

    def call(self, prompts):
        contents = []
        for prompt in prompts:
            try:
                response = requests.post(
                    f"{self.base_url}/models/{self.model}:generateContent",
                    params=dict(key=self.api_key),
                    json=dict(generationConfig=self.model_args, contents=[dict(parts=[dict(text=prompt)])]),
                )
                response.raise_for_status()
                response_dict = response.json()
            except Exception as e:
                logger.error(e)
                continue

            candidate = response_dict.get("candidates", [{}])[0]
            text_response = candidate.get("content", {}).get("parts", [{}])[0].get("text", "")
            if text_response == "":
                logger.error(f"No response choice given")
                content = ""
            elif candidate.get("finishReason", "").upper() == "MAX_TOKENS":
                if self.allow_truncated:
                    content = text_response
                else:
                    logger.error(
                        f"Response truncated because of finish reason = length."
                        f" Use --allow_truncated option to process truncated responses."
                    )
                    content = ""
            else:
                content = text_response
                logger.debug(f"Response received: \n{indent(content, ' ')}")

            contents.append(content)

        return contents


class CallOpenAI(LLMModel):
    def __init__(
        self, model: str, model_args: dict[str, Any], client_args: dict[str, Any], key: str, allow_truncated: bool
    ):
        self.model = model
        self.model_args = model_args
        self.allow_truncated = allow_truncated
        self.client = OpenAI(api_key=key, **client_args)

    def call(self, prompts) -> list[str]:
        contents = []
        for prompt in prompts:
            logger.debug(f"Message sent: \n{indent(pformat(prompt), ' ')}")
            completion = self.client.chat.completions.create(model=self.model, messages=prompt, **self.model_args)

            if len(completion.choices) < 1:
                logger.error(f"No response choice given")
                content = ""
            elif completion.choices[0].finish_reason == "length":
                if self.allow_truncated:
                    content = completion.choices[0].message.content
                else:
                    logger.error(
                        f"Response truncated because of finish reason = length."
                        f" Use --allow_truncated option to process truncated responses."
                    )
                    content = ""
            else:
                content = completion.choices[0].message.content
                logger.debug(f"Response received: \n{indent(content, ' ')}")

            contents.append(content)

        return contents


class CallLLM(Step):
    required_keys = {"prompt_file"}

    def __init__(self, inputs: dict):
        logger.info(f"Run started {self.__class__.__name__}")

        # Set 'openai_key' from inputs or environment if not already set
        inputs.setdefault("openai_api_key", os.environ.get("OPENAI_API_KEY"))

        if not all(key in inputs.keys() for key in self.required_keys):
            raise ValueError(f'Missing required data: "{self.required_keys}"')

        self.prompt_file = Path(inputs["prompt_file"])
        if not self.prompt_file.is_file():
            raise ValueError(f'Unable to find Prompt file: "{self.prompt_file}"')
        try:
            with open(self.prompt_file, "r") as fp:
                json.load(fp)
        except json.JSONDecodeError as e:
            raise ValueError(f'Invalid Json Prompt file "{self.prompt_file}": {e}')

        self.model_args = {key[len("model_") :]: value for key, value in inputs.items() if key.startswith("model_")}
        self.client_args = {key[len("client_") :]: value for key, value in inputs.items() if key.startswith("client_")}

        openai_key = inputs.get("openai_api_key") or os.environ.get("OPENAI_API_KEY")
        if openai_key is not None:
            self.openai_api_key = openai_key

        patched_key = inputs.get("patched_api_key")
        if patched_key is not None:
            self.openai_api_key = patched_key
            self.client_args["base_url"] = _DEFAULT_PATCH_URL

        if getattr(self, "openai_api_key", None) is not None:  # attribute is unset when only a Google key is given
            self.llm = CallOpenAI(
                model=inputs["model"],
                model_args=self.model_args,
                client_args=self.client_args,
                key=self.openai_api_key,
                allow_truncated=inputs.get("allow_truncated", False),
            )
            return

        google_key = inputs.get("google_api_key")
        if google_key is not None:
            self.llm = CallGemini(
                model=inputs["model"],
                model_args=self.model_args,
                client_args=self.client_args,
                key=google_key,
                allow_truncated=inputs.get("allow_truncated", False),
            )
            return

        raise ValueError(
            f"Model API key not found.\n"
            f'Please login at: "{_TOKEN_URL}",\n'
            "Please go to the Integration's tab and generate an API key.\n"
            "Please copy the access token that is generated, "
            "and add `--patched_api_key=<token>` to the command line.\n"
            "\n"
            "If you are using a OpenAI API Key, please set `--openai_api_key=<token>`.\n"
        )

    def run(self) -> dict:
        with open(self.prompt_file, "r") as fp:
            prompts = json.load(fp)

        contents = self.llm.call(prompts)

        response_file = Path(tempfile.mktemp(".json"))
        with open(response_file, "w") as outfile:
            json.dump(contents, outfile, indent=2)

        logger.info(f"Run completed {self.__class__.__name__}")
        return dict(new_code=response_file, openai_responses=contents)
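For reference, a minimal standalone use of the new step; every value below is a placeholder rather than something taken from this PR, and it assumes OPENAI_API_KEY is not set in the environment so the Google branch is taken.

import json
import tempfile

from patchwork.steps import CallLLM

# Hypothetical prompt file: a JSON list of prompts. The Gemini client sends each
# prompt verbatim as the text part, so plain strings work here; the OpenAI path
# expects chat-message lists instead.
prompt_file = tempfile.mktemp(".json")
with open(prompt_file, "w") as fp:
    json.dump(["Summarize what this patchflow does in one sentence."], fp)

outputs = CallLLM(
    {
        "prompt_file": prompt_file,
        "model": "gemini-pro",          # placeholder model name
        "google_api_key": "<your-key>",
        "allow_truncated": False,
    }
).run()

# Output keys are unchanged from the old CallOpenAI step: "openai_responses"
# holds the raw texts, and "new_code" points at a JSON file of the same list.
print(outputs["openai_responses"])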
File renamed without changes.
File renamed without changes.
101 changes: 0 additions & 101 deletions patchwork/steps/CallOpenAI/CallOpenAI.py

This file was deleted.
