
Commit

Merge remote-tracking branch 'origin/main' into o1-support-with-structured-output
CTY-git committed Jan 23, 2025
2 parents 5d684db + 031849e commit 4389017
Showing 82 changed files with 4,861 additions and 2,047 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -73,7 +73,7 @@ jobs:
path: dist/

- name: Sign the dists with Sigstore
uses: sigstore/gh-action-sigstore-python@v2.1.1
uses: sigstore/gh-action-sigstore-python@v3.0.0
with:
inputs: >-
./dist/*.tar.gz
67 changes: 55 additions & 12 deletions .github/workflows/test.yml
@@ -1,6 +1,10 @@
name: Tests

on:
pull_request:
types:
- ready_for_review
- review_requested
push:
branches-ignore:
- main
@@ -9,8 +13,19 @@ on:
- generatereadme-*
- generatedocstring-*
- generateunittests-*
- generatecodeusageexample-*
- resolveissue-*

- demo*

# Credits to https://blog.maximeheckel.com/posts/building-perfect-github-action-frontend-teams/#you-are-terminated
concurrency:
# Here the group is defined by the head_ref of the PR
group: ${{ github.head_ref || github.ref_name }}
# Here we specify that we'll cancel any "in progress" workflow of the same group. Thus if we push, amend a commit and push
# again, the previous workflow will be cancelled, saving us GitHub Actions build minutes and avoiding conflicts
cancel-in-progress: true
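# Illustrative example (not part of the original file): a push to PR branch
# "feature-x" gives group "feature-x" via github.head_ref, so a second push
# cancels the first run; a direct push to "demo1" has no head_ref, so
# github.ref_name ("demo1") scopes the group instead.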

jobs:
all-test:
runs-on: ubuntu-latest
@@ -70,7 +85,7 @@ jobs:
id: setup-python
uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.9'

- name: Install Poetry
uses: snok/install-poetry@v1
@@ -92,15 +107,17 @@ jobs:

- name: AutoFix Patchwork
run: |
poetry run patchwork AutoFix --log debug \
source .venv/bin/activate
patchwork AutoFix --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--force_pr_creation \
--disable_telemetry
- name: Dependency Upgrade
run: |
poetry run patchwork DependencyUpgrade --log debug \
source .venv/bin/activate
patchwork DependencyUpgrade --log debug \
--libraries_api_key=${{ secrets.LIBRARIES_KEY }} \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
@@ -109,6 +126,8 @@ jobs:
--disable_telemetry
rag-test:
# disabled because this currently takes too long
if: false
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -123,7 +142,7 @@ jobs:
id: setup-python
uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.9'

- name: Install Poetry
uses: snok/install-poetry@v1
@@ -143,13 +162,15 @@ jobs:
if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
run: poetry install --no-interaction --only main --extras rag

- name: Propose relevant file to issues
- name: Resolve issue
run: |
poetry run patchwork ResolveIssue --log debug \
source .venv/bin/activate
patchwork ResolveIssue --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--issue_url=https://github.com/patched-codes/patchwork/issues/20 \
--issue_url=https://github.com/patched-codes/patchwork/issues/1129 \
--disable_telemetry
--max_llm_calls=10
main-test:
runs-on: ubuntu-latest
@@ -166,7 +187,7 @@ jobs:
id: setup-python
uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.9'

- name: Install Poetry
uses: snok/install-poetry@v1
@@ -188,30 +209,52 @@ jobs:

- name: PR Review
run: |
poetry run patchwork PRReview --log debug \
source .venv/bin/activate
patchwork PRReview --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--pr_url=https://github.com/patched-codes/patchwork/pull/${{ steps.findPr.outputs.number }} \
--disable_telemetry
- name: Generate Docstring
run: |
poetry run patchwork GenerateDocstring --log debug \
source .venv/bin/activate
patchwork GenerateDocstring --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--base_path=tests/cicd/generate_docstring \
--disable_telemetry
- name: Generate Diagram
run: |
source .venv/bin/activate
patchwork GenerateDiagram --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--folder_path=patchwork/steps \
--disable_telemetry
- name: Generate UnitTests
run: |
poetry run patchwork GenerateUnitTests --log debug \
source .venv/bin/activate
patchwork GenerateUnitTests --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--folder_path=tests/cicd/generate_docstring \
--disable_telemetry
- name: Generate Code Usage Example
run: |
source .venv/bin/activate
patchwork GenerateCodeUsageExample --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--folder_path=tests/cicd/generate_docstring \
--disable_telemetry
- name: Generate README
run: |
source .venv/bin/activate
# Specify the parent folder you want to check
PARENT_FOLDER="./patchwork/steps"
# Command to run if README.md is not found
@@ -224,7 +267,7 @@ jobs:
# Convert to a Git-friendly branch name: replace spaces with underscores, remove slashes, etc.
branch_name=$(echo "$base_name" | sed -e 's/[^a-zA-Z0-9]/_/g' -e 's/__*/_/g' -e 's/^_//g' -e 's/_$//g')
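# e.g. (illustrative) base_name "steps/Call API" becomes branch_name "steps_Call_API"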
poetry run patchwork GenerateREADME --log debug \
patchwork GenerateREADME --log debug \
--patched_api_key=${{ secrets.PATCHED_API_KEY }} \
--github_api_key=${{ secrets.SCM_GITHUB_KEY }} \
--filter=*.py \
5 changes: 3 additions & 2 deletions patchwork/app.py
@@ -238,9 +238,10 @@ def cli(
if not disable_telemetry:
patched.send_public_telemetry(patchflow_name, inputs)

with patched.patched_telemetry(patchflow_name, {}):
with patched.patched_telemetry(patchflow_name, {}) as output_dict:
patchflow_instance = patchflow_class(inputs)
patchflow_instance.run()
patchflow_output = patchflow_instance.run()
output_dict.update(patchflow_output)
except Exception as e:
logger.debug(traceback.format_exc())
logger.error(f"Error running patchflow {patchflow}: {e}")
79 changes: 77 additions & 2 deletions patchwork/common/client/llm/aio.py
@@ -1,13 +1,21 @@
from __future__ import annotations

import os

from openai.types.chat import (
ChatCompletion,
ChatCompletionMessageParam,
ChatCompletionToolChoiceOptionParam,
ChatCompletionToolParam,
completion_create_params,
)
from typing_extensions import Dict, Iterable, List, Optional, Union

from patchwork.common.client.llm.anthropic import AnthropicLlmClient
from patchwork.common.client.llm.google import GoogleLlmClient
from patchwork.common.client.llm.openai_ import OpenAiLlmClient
from patchwork.common.client.llm.protocol import NOT_GIVEN, LlmClient, NotGiven
from patchwork.common.constants import DEFAULT_PATCH_URL
from patchwork.logger import logger


@@ -29,10 +37,43 @@ def get_models(self) -> set[str]:
def is_model_supported(self, model: str) -> bool:
return any(client.is_model_supported(model) for client in self.__clients)

def is_prompt_supported(self, messages: Iterable[ChatCompletionMessageParam], model: str) -> int:
def is_prompt_supported(
self,
messages: Iterable[ChatCompletionMessageParam],
model: str,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
) -> int:
for client in self.__clients:
if client.is_model_supported(model):
return client.is_prompt_supported(messages, model)
return client.is_prompt_supported(
messages=messages,
model=model,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
response_format=response_format,
stop=stop,
temperature=temperature,
tools=tools,
tool_choice=tool_choice,
top_logprobs=top_logprobs,
top_p=top_p,
)
return -1
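# Hedged usage sketch of the delegation above (the client instances are
# hypothetical): the aggregate forwards every sampling parameter to the
# first sub-client that supports the model and returns that client's
# result, while -1 means no configured client handles the model.
#
#     aio = AioLlmClient(openai_client, google_client)
#     result = aio.is_prompt_supported(
#         messages=[{"role": "user", "content": "hello"}],
#         model="gpt-4o",
#         max_tokens=256,
#     )
#     if result < 0:
#         ...  # no client supports the model, or the prompt is rejected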

def truncate_messages(
@@ -56,6 +97,8 @@ def chat_completion(
response_format: dict | completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
) -> ChatCompletion:
@@ -74,6 +117,8 @@ def chat_completion(
response_format,
stop,
temperature,
tools,
tool_choice,
top_logprobs,
top_p,
)
@@ -82,3 +127,33 @@ def chat_completion(
f"Model {model} is not supported by {client_names} clients. "
f"Please ensure that the respective API keys are correct."
)

@staticmethod
def create_aio_client(inputs) -> "AioLlmClient" | None:
clients = []

patched_key = inputs.get("patched_api_key")
if patched_key is not None:
client = OpenAiLlmClient(patched_key, DEFAULT_PATCH_URL)
clients.append(client)

openai_key = inputs.get("openai_api_key") or os.environ.get("OPENAI_API_KEY")
if openai_key is not None:
client_args = {key[len("client_") :]: value for key, value in inputs.items() if key.startswith("client_")}
client = OpenAiLlmClient(openai_key, **client_args)
clients.append(client)

google_key = inputs.get("google_api_key")
if google_key is not None:
client = GoogleLlmClient(google_key)
clients.append(client)

anthropic_key = inputs.get("anthropic_api_key")
if anthropic_key is not None:
client = AnthropicLlmClient(anthropic_key)
clients.append(client)

if len(clients) == 0:
return None

return AioLlmClient(*clients)
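A brief usage sketch of the factory above (the key names follow the code; the values and the client_base_url passthrough are illustrative assumptions):

inputs = {
    "openai_api_key": "sk-example",                # or set OPENAI_API_KEY in the env
    "anthropic_api_key": "sk-ant-example",         # placeholder value
    "client_base_url": "https://example.test/v1",  # "client_" prefix is stripped; assumed
                                                   # to reach OpenAiLlmClient as base_url
}
client = AioLlmClient.create_aio_client(inputs)
if client is None:
    # none of the recognised API keys were provided
    raise RuntimeError("configure at least one LLM API key")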
