Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improving debugger.py #41

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 42 additions & 35 deletions debugger.py
Original file line number Diff line number Diff line change
@@ -1,49 +1,54 @@
import os
from typing import Dict

import modal

# Constants
MODAL = modal.Stub("smol-debugger-v1")
GENERATED_DIR = "generated"
# Extensions treated as binary blobs; files matching these are skipped
# when walking the tree (their bytes are useless as LLM context).
BLOB_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.svg', '.ico', '.tif', '.tiff']
# Container image for the remote function: Debian slim + the openai client.
OPENAI_IMAGE = modal.Image.debian_slim().pip_install("openai")

def read_file(filename: str) -> str:
    """Return the entire text content of *filename*."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents

def walk_directory(directory: str) -> Dict[str, str]:
    """
    Walk *directory* recursively and return a mapping of relative file path
    to file contents. Files whose names end in a BLOB_EXTENSIONS suffix
    (images and similar binaries) are skipped.

    A file that cannot be read does not abort the walk: its entry is kept
    and its value records the error message instead of the contents.
    """
    code_contents = {}
    for dirpath, _, filenames in os.walk(directory):
        for filename in filenames:
            # Guard clause: skip binary blobs entirely.
            if any(filename.endswith(ext) for ext in BLOB_EXTENSIONS):
                continue
            full_path = os.path.join(dirpath, filename)
            # Compute the relative path before the try so it is available
            # in the except handler as well.
            relative_filepath = os.path.relpath(full_path, directory)
            try:
                code_contents[relative_filepath] = read_file(full_path)
            except Exception as e:
                # Best-effort: record the failure in place of the contents.
                code_contents[relative_filepath] = f"Error reading file (unknown): {e}"
    return code_contents

@MODAL.local_entrypoint()
def main(prompt: str, directory: str = GENERATED_DIR, model: str = "gpt-3.5-turbo"):
    """
    Debug a program for a user based on their file system.

    Collects every non-blob file under *directory*, joins them into one
    context string, and asks *model* (via the remote generate_response
    function) for likely causes of and fixes for *prompt* — the error
    message or issue the user is facing. Prints the model's answer.
    """
    code_contents = walk_directory(directory)

    # One "path:\ncontents" section per file, newline-separated.
    context = "\n".join(f"{path}:\n{contents}" for path, contents in code_contents.items())
    system_prompt = "You are an AI debugger who is trying to debug a program for a user based on their file system. The user has provided you with the following files and their contents, finally followed by the error message or issue they are facing."
    user_prompt = f"My files are as follows: {context}\n\nMy issue is as follows: {prompt}\n\nGive me ideas for what could be wrong and what fixes to do in which files."

    # .call() runs the function remotely in the Modal container.
    res = generate_response.call(system_prompt, user_prompt, model)

    # Print response in teal (ANSI bright cyan) so it stands out.
    print(f"\033[96m{res}\033[0m")


@MODAL.function(
    image=OPENAI_IMAGE,
    secret=modal.Secret.from_dotenv(),
    retries=modal.Retries(
        max_retries=3,
        # NOTE(review): the backoff arguments were in a collapsed diff hunk;
        # values reconstructed — confirm against the base branch.
        backoff_coefficient=2.0,
        initial_delay=1.0,
    ),
    concurrency_limit=5,
    timeout=120,
)
def generate_response(system_prompt: str, user_prompt: str, model: str = "gpt-3.5-turbo", *args) -> str:
    """
    Generate a chat completion from OpenAI's API.

    Args:
        system_prompt: Content of the initial "system" message.
        user_prompt: Content of the first "user" message.
        model: OpenAI chat model name.
        *args: Optional extra conversation turns, appended after the user
            prompt with roles alternating assistant, user, assistant, ...

    Returns:
        The text content of the first completion choice.
    """
    import openai

    # Fail fast with a clear KeyError if the key is missing, rather than
    # setting api_key=None and getting an opaque auth error from the API.
    openai.api_key = os.environ["OPENAI_API_KEY"]

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    role = "assistant"
    for value in args:
        messages.append({"role": role, "content": value})
        role = "user" if role == "assistant" else "assistant"

    params = {
        "model": model,
        "messages": messages,
        "max_tokens": 1500,
        # temperature 0 for deterministic, focused debugging answers.
        "temperature": 0,
    }
    # NOTE(review): this call sat in a collapsed diff hunk; reconstructed from
    # the `response` usage below — confirm against the base branch.
    response = openai.ChatCompletion.create(**params)

    # Extract the assistant's reply text from the first choice.
    reply = response.choices[0]["message"]["content"]
    return reply