From a723720c5a8d41a5d5c194f02253feafd790307d Mon Sep 17 00:00:00 2001
From: George Adams
Date: Thu, 25 May 2023 19:19:40 -0400
Subject: [PATCH 1/2] Improving debugger.py

Use typing to make the function signatures clearer.
Use constants for hard-coded values.
Create a helper function for checking file extensions.
Improve the error handling in the walk_directory function.
Use f-strings for better readability and performance.
Add function and module-level docstrings for better understanding of the code.
---
 debugger.py | 85 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 36 deletions(-)

diff --git a/debugger.py b/debugger.py
index b187b3501..83b1bfc1f 100644
--- a/debugger.py
+++ b/debugger.py
@@ -1,49 +1,60 @@
-import modal
 import os
-
-stub = modal.Stub("smol-debugger-v1")
-generatedDir = "generated"
-openai_image = modal.Image.debian_slim().pip_install("openai")
-
-
-
-def read_file(filename):
+import modal
+from typing import Dict
+
+# Constants
+STUB = modal.Stub("smol-debugger-v1")
+GENERATED_DIR = "generated"
+IMAGE_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.svg', '.ico', '.tif', '.tiff']
+OPENAI_IMAGE = modal.Image.debian_slim().pip_install("openai")
+
+def read_file(filename: str) -> str:
+    """
+    Reads a file and returns its content.
+    """
     with open(filename, 'r') as file:
         return file.read()
 
-def walk_directory(directory):
-    image_extensions = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.svg', '.ico', '.tif', '.tiff']
+def is_image(file: str) -> bool:
+    """
+    Checks if a file is an image by its extension.
+    """
+    return any(file.endswith(ext) for ext in IMAGE_EXTENSIONS)
+
+def walk_directory(directory: str) -> Dict[str, str]:
+    """
+    Walks through a directory and returns a dictionary with the relative file path
+    and its content. Only non-image files are included.
+    """
     code_contents = {}
     for root, dirs, files in os.walk(directory):
         for file in files:
-            if not any(file.endswith(ext) for ext in image_extensions):
+            if not is_image(file):
+                relative_filepath = os.path.relpath(os.path.join(root, file), directory)
                 try:
-                    relative_filepath = os.path.relpath(os.path.join(root, file), directory)
                     code_contents[relative_filepath] = read_file(os.path.join(root, file))
                 except Exception as e:
-                    code_contents[relative_filepath] = f"Error reading file {file}: {str(e)}"
+                    code_contents[relative_filepath] = f"Error reading file {file}: {e}"
     return code_contents
 
+@STUB.local_entrypoint()
+def main(prompt: str, directory=GENERATED_DIR, model="gpt-3.5-turbo"):
+    """
+    Main function to debug a program for a user based on their file system.
+    """
+    code_contents = walk_directory(directory)
+    context = "\n".join(f"{path}:\n{contents}" for path, contents in code_contents.items())
+    system_prompt = "You are an AI debugger who is trying to debug a program for a user based on their file system. The user has provided you with the following files and their contents, finally followed by the error message or issue they are facing."
+    user_prompt = f"My files are as follows: {context}\n\nMy issue is as follows: {prompt}\n\nGive me ideas for what could be wrong and what fixes to do in which files."
-@stub.local_entrypoint()
-def main(prompt, directory=generatedDir, model="gpt-3.5-turbo"):
-    code_contents = walk_directory(directory)
-
-    # Now, `code_contents` is a dictionary that contains the content of all your non-image files
-    # You can send this to OpenAI's text-davinci-003 for help
+    res = generate_response.call(system_prompt, user_prompt, model)
 
-    context = "\n".join(f"{path}:\n{contents}" for path, contents in code_contents.items())
-    system = "You are an AI debugger who is trying to debug a program for a user based on their file system. The user has provided you with the following files and their contents, finally folllowed by the error message or issue they are facing."
-    prompt = "My files are as follows: " + context + "\n\n" + "My issue is as follows: " + prompt
-    prompt += "\n\nGive me ideas for what could be wrong and what fixes to do in which files."
-    res = generate_response.call(system, prompt, model)
-    # print res in teal
-    print("\033[96m" + res + "\033[0m")
+    # Print response in teal
+    print(f"\033[96m{res}\033[0m")
 
-
-@stub.function(
-    image=openai_image,
+@STUB.function(
+    image=OPENAI_IMAGE,
     secret=modal.Secret.from_dotenv(),
     retries=modal.Retries(
         max_retries=3,
@@ -53,16 +64,19 @@ def main(prompt, directory=generatedDir, model="gpt-3.5-turbo"):
     concurrency_limit=5,
     timeout=120,
 )
-def generate_response(system_prompt, user_prompt, model="gpt-3.5-turbo", *args):
+def generate_response(system_prompt: str, user_prompt: str, model="gpt-3.5-turbo", *args) -> str:
+    """
+    Generates a response from OpenAI's API based on the system and user prompts.
+    """
     import openai
 
     # Set up your OpenAI API credentials
-    openai.api_key = os.environ["OPENAI_API_KEY"]
+    openai.api_key = os.getenv("OPENAI_API_KEY")
 
-    messages = []
+    messages = []  # conversation history passed to the chat completion call
     messages.append({"role": "system", "content": system_prompt})
     messages.append({"role": "user", "content": user_prompt})
-    # loop thru each arg and add it to messages alternating role between "assistant" and "user"
+
     role = "assistant"
     for value in args:
         messages.append({"role": role, "content": value})
@@ -70,7 +84,6 @@ def generate_response(system_prompt, user_prompt, model="gpt-3.5-turbo", *args):
 
     params = {
         'model': model,
-        # "model": "gpt-4",
         "messages": messages,
         "max_tokens": 1500,
         "temperature": 0,
@@ -81,4 +94,4 @@ def generate_response(system_prompt, user_prompt, model="gpt-3.5-turbo", *args):
 
     # Get the reply from the API response
     reply = response.choices[0]["message"]["content"]
-    return reply
\ No newline at end of file
+    return reply

From bf511ca8041a8d04456919faa5f3814f1f219895 Mon Sep 17 00:00:00 2001
From: "swyx.io"
Date: Mon, 29 May 2023 19:50:34 -0400
Subject: [PATCH 2/2] Update debugger.py

---
 debugger.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/debugger.py b/debugger.py
index bf7174123..41652b785 100644
--- a/debugger.py
+++ b/debugger.py
@@ -3,7 +3,7 @@
 from typing import Dict
 
 # Constants
-STUB = modal.Stub("smol-debugger-v1")
+MODAL = modal.Stub("smol-debugger-v1")
 GENERATED_DIR = "generated"
 BLOB_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.svg', '.ico', '.tif', '.tiff']
 OPENAI_IMAGE = modal.Image.debian_slim().pip_install("openai")
@@ -31,7 +31,7 @@ def walk_directory(directory: str) -> Dict[str, str]:
                 code_contents[relative_filepath] = f"Error reading file {filename}: {e}"
     return code_contents
 
-@STUB.local_entrypoint()
+@MODAL.local_entrypoint()
 def main(prompt: str, directory=GENERATED_DIR, model="gpt-3.5-turbo"):
     """
     Main function to debug a program for a user based on their file system.
@@ -47,7 +47,7 @@ def main(prompt: str, directory=GENERATED_DIR, model="gpt-3.5-turbo"):
     # Print response in teal
     print(f"\033[96m{res}\033[0m")
 
-@STUB.function(
+@MODAL.function(
     image=OPENAI_IMAGE,
     secret=modal.Secret.from_dotenv(),
     retries=modal.Retries(
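
Note (not part of either patch): a minimal sketch of how the refactored helpers introduced by this series could be exercised locally. It assumes the patched debugger.py is importable from the current directory and that the modal package is installed; the temporary files, their contents, and the final modal run invocation are illustrative assumptions rather than anything specified in the patches.

# local_sketch.py -- illustrative only, assumes the patched debugger.py is on the import path
import os
import tempfile

# Importing debugger only requires the `modal` package; openai is imported lazily
# inside generate_response, so it is not needed for this local check.
from debugger import is_image, walk_directory

with tempfile.TemporaryDirectory() as tmp:
    # One source file that should be collected and one image that should be skipped.
    with open(os.path.join(tmp, "app.py"), "w") as f:
        f.write("print('hello')\n")
    with open(os.path.join(tmp, "logo.png"), "wb") as f:
        f.write(b"\x89PNG\r\n")

    assert is_image("logo.png")
    assert not is_image("app.py")

    contents = walk_directory(tmp)
    # Only the non-image file is returned, keyed by its path relative to tmp.
    assert list(contents) == ["app.py"]
    print(contents["app.py"])

# The Modal entrypoint itself would then be invoked along these lines (illustrative):
#   modal run debugger.py --prompt "TypeError in main.py" --directory generated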