diff --git a/frontend/src/components/waitlist-modal.tsx b/frontend/src/components/waitlist-modal.tsx
index 8d4c8e76bf33..e9207f75e5b0 100644
--- a/frontend/src/components/waitlist-modal.tsx
+++ b/frontend/src/components/waitlist-modal.tsx
@@ -16,7 +16,7 @@ export function WaitlistModal({ ghToken, githubAuthUrl }: WaitlistModalProps) {
- {ghToken ? "Just a little longer!" : "Sign in with GitHub"}
+ {ghToken ? "GitHub Token Expired" : "Sign in with GitHub"}
{!ghToken && (
@@ -34,9 +34,7 @@ export function WaitlistModal({ ghToken, githubAuthUrl }: WaitlistModalProps) {
)}
{ghToken && (
- Thanks for your patience! We're accepting new members
- progressively. If you haven't joined the waitlist yet,
- now's the time!
+ Your GitHub token has expired. Please click below to reconnect your GitHub account.
)}
@@ -54,14 +52,16 @@ export function WaitlistModal({ ghToken, githubAuthUrl }: WaitlistModalProps) {
/>
)}
{ghToken && (
-
- Join Waitlist
-
+ }
+ className="bg-[#791B80] w-full"
+ onClick={() => {
+ if (githubAuthUrl) {
+ window.location.href = githubAuthUrl;
+ }
+ }}
+ />
)}
diff --git a/frontend/src/routes/_oh.tsx b/frontend/src/routes/_oh.tsx
index 875b54e920e8..7c76d31773d6 100644
--- a/frontend/src/routes/_oh.tsx
+++ b/frontend/src/routes/_oh.tsx
@@ -196,9 +196,10 @@ export default function MainApp() {
}, []);
React.useEffect(() => {
- // If the github token is invalid, open the account settings modal again
+ // If the github token is invalid, clear it and show the reconnect screen
if (isGitHubErrorReponse(user)) {
- setAccountSettingsModalOpen(true);
+ localStorage.removeItem("ghToken");
+ setAccountSettingsModalOpen(false);
}
}, [user]);
@@ -213,10 +214,6 @@ export default function MainApp() {
};
const handleAccountSettingsModalClose = () => {
- // If the user closes the modal without connecting to GitHub,
- // we need to log them out to clear the invalid token from the
- // local storage
- if (isGitHubErrorReponse(user)) handleUserLogout();
setAccountSettingsModalOpen(false);
};
diff --git a/openhands/events/serialization/action.py b/openhands/events/serialization/action.py
index defac3b5dda6..f34b4b0ec0cf 100644
--- a/openhands/events/serialization/action.py
+++ b/openhands/events/serialization/action.py
@@ -69,7 +69,7 @@ def action_from_dict(action: dict) -> Action:
# images_urls has been renamed to image_urls
if 'images_urls' in args:
args['image_urls'] = args.pop('images_urls')
-
+
try:
decoded_action = action_class(**args)
if 'timeout' in action:
diff --git a/openhands/resolver/patching/__init__.py b/openhands/resolver/patching/__init__.py
index 5c31f160a0a0..165a623af537 100644
--- a/openhands/resolver/patching/__init__.py
+++ b/openhands/resolver/patching/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-from .patch import parse_patch
from .apply import apply_diff
+from .patch import parse_patch
-__all__ = ["parse_patch", "apply_diff"]
+__all__ = ['parse_patch', 'apply_diff']
diff --git a/openhands/resolver/patching/apply.py b/openhands/resolver/patching/apply.py
index f13e814292cb..24f2266f56cf 100644
--- a/openhands/resolver/patching/apply.py
+++ b/openhands/resolver/patching/apply.py
@@ -10,33 +10,33 @@
def _apply_diff_with_subprocess(diff, lines, reverse=False):
# call out to patch program
- patchexec = which("patch")
+ patchexec = which('patch')
if not patchexec:
- raise SubprocessException("cannot find patch program", code=-1)
+ raise SubprocessException('cannot find patch program', code=-1)
tempdir = tempfile.gettempdir()
- filepath = os.path.join(tempdir, "wtp-" + str(hash(diff.header)))
- oldfilepath = filepath + ".old"
- newfilepath = filepath + ".new"
- rejfilepath = filepath + ".rej"
- patchfilepath = filepath + ".patch"
- with open(oldfilepath, "w") as f:
- f.write("\n".join(lines) + "\n")
+ filepath = os.path.join(tempdir, 'wtp-' + str(hash(diff.header)))
+ oldfilepath = filepath + '.old'
+ newfilepath = filepath + '.new'
+ rejfilepath = filepath + '.rej'
+ patchfilepath = filepath + '.patch'
+ with open(oldfilepath, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
- with open(patchfilepath, "w") as f:
+ with open(patchfilepath, 'w') as f:
f.write(diff.text)
args = [
patchexec,
- "--reverse" if reverse else "--forward",
- "--quiet",
- "--no-backup-if-mismatch",
- "-o",
+ '--reverse' if reverse else '--forward',
+ '--quiet',
+ '--no-backup-if-mismatch',
+ '-o',
newfilepath,
- "-i",
+ '-i',
patchfilepath,
- "-r",
+ '-r',
rejfilepath,
oldfilepath,
]
@@ -58,7 +58,7 @@ def _apply_diff_with_subprocess(diff, lines, reverse=False):
# do this last to ensure files get cleaned up
if ret != 0:
- raise SubprocessException("patch program failed", code=ret)
+ raise SubprocessException('patch program failed', code=ret)
return lines, rejlines
diff --git a/openhands/resolver/patching/exceptions.py b/openhands/resolver/patching/exceptions.py
index 594b079e8365..30653c56da18 100644
--- a/openhands/resolver/patching/exceptions.py
+++ b/openhands/resolver/patching/exceptions.py
@@ -7,7 +7,7 @@ def __init__(self, msg, hunk=None):
self.hunk = hunk
if hunk is not None:
super(HunkException, self).__init__(
- "{msg}, in hunk #{n}".format(msg=msg, n=hunk)
+ '{msg}, in hunk #{n}'.format(msg=msg, n=hunk)
)
else:
super(HunkException, self).__init__(msg)
diff --git a/openhands/resolver/patching/patch.py b/openhands/resolver/patching/patch.py
index c0304e06543b..7e3b98ed0883 100644
--- a/openhands/resolver/patching/patch.py
+++ b/openhands/resolver/patching/patch.py
@@ -8,67 +8,67 @@
from .snippets import findall_regex, split_by_regex
header = namedtuple(
- "header",
- "index_path old_path old_version new_path new_version",
+ 'header',
+ 'index_path old_path old_version new_path new_version',
)
-diffobj = namedtuple("diffobj", "header changes text")
-Change = namedtuple("Change", "old new line hunk")
+diffobj = namedtuple('diffobj', 'header changes text')
+Change = namedtuple('Change', 'old new line hunk')
-file_timestamp_str = "(.+?)(?:\t|:| +)(.*)"
+file_timestamp_str = '(.+?)(?:\t|:| +)(.*)'
# .+? was previously [^:\t\n\r\f\v]+
# general diff regex
-diffcmd_header = re.compile("^diff.* (.+) (.+)$")
-unified_header_index = re.compile("^Index: (.+)$")
-unified_header_old_line = re.compile(r"^--- " + file_timestamp_str + "$")
-unified_header_new_line = re.compile(r"^\+\+\+ " + file_timestamp_str + "$")
-unified_hunk_start = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$")
-unified_change = re.compile("^([-+ ])(.*)$")
-
-context_header_old_line = re.compile(r"^\*\*\* " + file_timestamp_str + "$")
-context_header_new_line = re.compile("^--- " + file_timestamp_str + "$")
-context_hunk_start = re.compile(r"^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$")
-context_hunk_old = re.compile(r"^\*\*\* (\d+),?(\d*) \*\*\*\*$")
-context_hunk_new = re.compile(r"^--- (\d+),?(\d*) ----$")
-context_change = re.compile("^([-+ !]) (.*)$")
-
-ed_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])$")
-ed_hunk_end = re.compile("^.$")
+diffcmd_header = re.compile('^diff.* (.+) (.+)$')
+unified_header_index = re.compile('^Index: (.+)$')
+unified_header_old_line = re.compile(r'^--- ' + file_timestamp_str + '$')
+unified_header_new_line = re.compile(r'^\+\+\+ ' + file_timestamp_str + '$')
+unified_hunk_start = re.compile(r'^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$')
+unified_change = re.compile('^([-+ ])(.*)$')
+
+context_header_old_line = re.compile(r'^\*\*\* ' + file_timestamp_str + '$')
+context_header_new_line = re.compile('^--- ' + file_timestamp_str + '$')
+context_hunk_start = re.compile(r'^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$')
+context_hunk_old = re.compile(r'^\*\*\* (\d+),?(\d*) \*\*\*\*$')
+context_hunk_new = re.compile(r'^--- (\d+),?(\d*) ----$')
+context_change = re.compile('^([-+ !]) (.*)$')
+
+ed_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])$')
+ed_hunk_end = re.compile('^.$')
# much like forward ed, but no 'c' type
-rcs_ed_hunk_start = re.compile(r"^([ad])(\d+) ?(\d*)$")
+rcs_ed_hunk_start = re.compile(r'^([ad])(\d+) ?(\d*)$')
-default_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])(\d+),?(\d*)$")
-default_hunk_mid = re.compile("^---$")
-default_change = re.compile("^([><]) (.*)$")
+default_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])(\d+),?(\d*)$')
+default_hunk_mid = re.compile('^---$')
+default_change = re.compile('^([><]) (.*)$')
# Headers
# git has a special index header and no end part
-git_diffcmd_header = re.compile("^diff --git a/(.+) b/(.+)$")
-git_header_index = re.compile(r"^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$")
-git_header_old_line = re.compile("^--- (.+)$")
-git_header_new_line = re.compile(r"^\+\+\+ (.+)$")
-git_header_file_mode = re.compile(r"^(new|deleted) file mode \d{6}$")
-git_header_binary_file = re.compile("^Binary files (.+) and (.+) differ")
-git_binary_patch_start = re.compile(r"^GIT binary patch$")
-git_binary_literal_start = re.compile(r"^literal (\d+)$")
-git_binary_delta_start = re.compile(r"^delta (\d+)$")
-base85string = re.compile(r"^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$")
-
-bzr_header_index = re.compile("=== (.+)")
+git_diffcmd_header = re.compile('^diff --git a/(.+) b/(.+)$')
+git_header_index = re.compile(r'^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$')
+git_header_old_line = re.compile('^--- (.+)$')
+git_header_new_line = re.compile(r'^\+\+\+ (.+)$')
+git_header_file_mode = re.compile(r'^(new|deleted) file mode \d{6}$')
+git_header_binary_file = re.compile('^Binary files (.+) and (.+) differ')
+git_binary_patch_start = re.compile(r'^GIT binary patch$')
+git_binary_literal_start = re.compile(r'^literal (\d+)$')
+git_binary_delta_start = re.compile(r'^delta (\d+)$')
+base85string = re.compile(r'^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$')
+
+bzr_header_index = re.compile('=== (.+)')
bzr_header_old_line = unified_header_old_line
bzr_header_new_line = unified_header_new_line
svn_header_index = unified_header_index
-svn_header_timestamp_version = re.compile(r"\((?:working copy|revision (\d+))\)")
-svn_header_timestamp = re.compile(r".*(\(.*\))$")
+svn_header_timestamp_version = re.compile(r'\((?:working copy|revision (\d+))\)')
+svn_header_timestamp = re.compile(r'.*(\(.*\))$')
cvs_header_index = unified_header_index
-cvs_header_rcs = re.compile(r"^RCS file: (.+)(?:,\w{1}$|$)")
-cvs_header_timestamp = re.compile(r"(.+)\t([\d.]+)")
-cvs_header_timestamp_colon = re.compile(r":([\d.]+)\t(.+)")
-old_cvs_diffcmd_header = re.compile("^diff.* (.+):(.*) (.+):(.*)$")
+cvs_header_rcs = re.compile(r'^RCS file: (.+)(?:,\w{1}$|$)')
+cvs_header_timestamp = re.compile(r'(.+)\t([\d.]+)')
+cvs_header_timestamp_colon = re.compile(r':([\d.]+)\t(.+)')
+old_cvs_diffcmd_header = re.compile('^diff.* (.+):(.*) (.+):(.*)$')
def parse_patch(text):
@@ -97,7 +97,7 @@ def parse_patch(text):
break
for diff in diffs:
- difftext = "\n".join(diff) + "\n"
+ difftext = '\n'.join(diff) + '\n'
h = parse_header(diff)
d = parse_diff(diff)
if h or d:
@@ -133,10 +133,10 @@ def parse_scm_header(text):
if res:
old_path = res.old_path
new_path = res.new_path
- if old_path.startswith("a/"):
+ if old_path.startswith('a/'):
old_path = old_path[2:]
- if new_path.startswith("b/"):
+ if new_path.startswith('b/'):
new_path = new_path[2:]
return header(
@@ -240,10 +240,10 @@ def parse_git_header(text):
new_path = binary.group(2)
if old_path and new_path:
- if old_path.startswith("a/"):
+ if old_path.startswith('a/'):
old_path = old_path[2:]
- if new_path.startswith("b/"):
+ if new_path.startswith('b/'):
new_path = new_path[2:]
return header(
index_path=None,
@@ -256,19 +256,19 @@ def parse_git_header(text):
# if we go through all of the text without finding our normal info,
# use the cmd if available
if cmd_old_path and cmd_new_path and old_version and new_version:
- if cmd_old_path.startswith("a/"):
+ if cmd_old_path.startswith('a/'):
cmd_old_path = cmd_old_path[2:]
- if cmd_new_path.startswith("b/"):
+ if cmd_new_path.startswith('b/'):
cmd_new_path = cmd_new_path[2:]
return header(
index_path=None,
# wow, I kind of hate this:
# assume /dev/null if the versions are zeroed out
- old_path="/dev/null" if old_version == "0000000" else cmd_old_path,
+ old_path='/dev/null' if old_version == '0000000' else cmd_old_path,
old_version=old_version,
- new_path="/dev/null" if new_version == "0000000" else cmd_new_path,
+ new_path='/dev/null' if new_version == '0000000' else cmd_new_path,
new_version=new_version,
)
@@ -569,10 +569,10 @@ def parse_default_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "<" and (r != old_len or r == 0):
+ if kind == '<' and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
- elif kind == ">" and (i != new_len or i == 0):
+ elif kind == '>' and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
@@ -627,13 +627,13 @@ def parse_unified_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "-" and (r != old_len or r == 0):
+ if kind == '-' and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
- elif kind == "+" and (i != new_len or i == 0):
+ elif kind == '+' and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
- elif kind == " ":
+ elif kind == ' ':
if r != old_len and i != new_len:
changes.append(Change(old + r, new + i, line, hunk_n))
r += 1
@@ -667,7 +667,7 @@ def parse_context_diff(text):
k = 0
parts = split_by_regex(hunk, context_hunk_new)
if len(parts) != 2:
- raise exceptions.ParseException("Context diff invalid", hunk_n)
+ raise exceptions.ParseException('Context diff invalid', hunk_n)
old_hunk = parts[0]
new_hunk = parts[1]
@@ -695,7 +695,7 @@ def parse_context_diff(text):
# now have old and new set, can start processing?
if len(old_hunk) > 0 and len(new_hunk) == 0:
- msg = "Got unexpected change in removal hunk: "
+ msg = 'Got unexpected change in removal hunk: '
# only removes left?
while len(old_hunk) > 0:
c = context_change.match(old_hunk[0])
@@ -707,22 +707,22 @@ def parse_context_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "-" and (j != old_len or j == 0):
+ if kind == '-' and (j != old_len or j == 0):
changes.append(Change(old + j, None, line, hunk_n))
j += 1
- elif kind == " " and (
+ elif kind == ' ' and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
- elif kind == "+" or kind == "!":
+ elif kind == '+' or kind == '!':
raise exceptions.ParseException(msg + kind, hunk_n)
continue
if len(old_hunk) == 0 and len(new_hunk) > 0:
- msg = "Got unexpected change in removal hunk: "
+ msg = 'Got unexpected change in insertion hunk: '
# only insertions left?
while len(new_hunk) > 0:
c = context_change.match(new_hunk[0])
@@ -734,16 +734,16 @@ def parse_context_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "+" and (k != new_len or k == 0):
+ if kind == '+' and (k != new_len or k == 0):
changes.append(Change(None, new + k, line, hunk_n))
k += 1
- elif kind == " " and (
+ elif kind == ' ' and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
- elif kind == "-" or kind == "!":
+ elif kind == '-' or kind == '!':
raise exceptions.ParseException(msg + kind, hunk_n)
continue
@@ -765,17 +765,17 @@ def parse_context_diff(text):
if not (oc or nc):
del old_hunk[0]
del new_hunk[0]
- elif okind == " " and nkind == " " and oline == nline:
+ elif okind == ' ' and nkind == ' ' and oline == nline:
changes.append(Change(old + j, new + k, oline, hunk_n))
j += 1
k += 1
del old_hunk[0]
del new_hunk[0]
- elif okind == "-" or okind == "!" and (j != old_len or j == 0):
+ elif okind == '-' or okind == '!' and (j != old_len or j == 0):
changes.append(Change(old + j, None, oline, hunk_n))
j += 1
del old_hunk[0]
- elif nkind == "+" or nkind == "!" and (k != new_len or k == 0):
+ elif nkind == '+' or nkind == '!' and (k != new_len or k == 0):
changes.append(Change(None, new + k, nline, hunk_n))
k += 1
del new_hunk[0]
@@ -821,7 +821,7 @@ def parse_ed_diff(text):
old_end = int(o.group(2)) if len(o.group(2)) else old
hunk_kind = o.group(3)
- if hunk_kind == "d":
+ if hunk_kind == 'd':
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
@@ -832,7 +832,7 @@ def parse_ed_diff(text):
while len(hunk) > 0:
e = ed_hunk_end.match(hunk[0])
- if not e and hunk_kind == "c":
+ if not e and hunk_kind == 'c':
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
@@ -852,7 +852,7 @@ def parse_ed_diff(text):
)
i += 1
j += 1
- if not e and hunk_kind == "a":
+ if not e and hunk_kind == 'a':
changes.append(
Change(
None,
@@ -900,7 +900,7 @@ def parse_rcs_ed_diff(text):
old = int(o.group(2))
size = int(o.group(3))
- if hunk_kind == "a":
+ if hunk_kind == 'a':
old += total_change_size + 1
total_change_size += size
while size > 0 and len(hunk) > 0:
@@ -910,7 +910,7 @@ def parse_rcs_ed_diff(text):
del hunk[0]
- elif hunk_kind == "d":
+ elif hunk_kind == 'd':
total_change_size -= size
while size > 0:
changes.append(Change(old + j, None, None, hunk_n))
@@ -938,8 +938,8 @@ def parse_git_binary_diff(text):
# the sizes are used as latch-up
new_size = 0
old_size = 0
- old_encoded = ""
- new_encoded = ""
+ old_encoded = ''
+ new_encoded = ''
for line in lines:
if cmd_old_path is None and cmd_new_path is None:
hm = git_diffcmd_header.match(line)
@@ -978,11 +978,11 @@ def parse_git_binary_diff(text):
change = Change(None, 0, added_data, None)
changes.append(change)
new_size = 0
- new_encoded = ""
+ new_encoded = ''
else:
# Invalid line format
new_size = 0
- new_encoded = ""
+ new_encoded = ''
# the second is removed file
if old_size == 0:
@@ -1006,10 +1006,10 @@ def parse_git_binary_diff(text):
change = Change(0, None, None, removed_data)
changes.append(change)
old_size = 0
- old_encoded = ""
+ old_encoded = ''
else:
# Invalid line format
old_size = 0
- old_encoded = ""
+ old_encoded = ''
return changes
diff --git a/openhands/resolver/patching/snippets.py b/openhands/resolver/patching/snippets.py
index 710b1191b560..f9d9e620d0f7 100644
--- a/openhands/resolver/patching/snippets.py
+++ b/openhands/resolver/patching/snippets.py
@@ -54,7 +54,7 @@ def is_exe(fpath):
if is_exe(program):
return program
else:
- for path in os.environ["PATH"].split(os.pathsep):
+ for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
diff --git a/tests/unit/resolver/test_guess_success.py b/tests/unit/resolver/test_guess_success.py
index 9bf3da2b3d02..d6b0e946adda 100644
--- a/tests/unit/resolver/test_guess_success.py
+++ b/tests/unit/resolver/test_guess_success.py
@@ -1,22 +1,22 @@
-from openhands.resolver.issue_definitions import IssueHandler
-from openhands.resolver.github_issue import GithubIssue
-from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
+from openhands.events.action.message import MessageAction
+from openhands.resolver.github_issue import GithubIssue
+from openhands.resolver.issue_definitions import IssueHandler
def test_guess_success_multiline_explanation():
# Mock data
issue = GithubIssue(
- owner="test",
- repo="test",
+ owner='test',
+ repo='test',
number=1,
- title="Test Issue",
- body="Test body",
+ title='Test Issue',
+ body='Test body',
thread_comments=None,
review_comments=None,
)
- history = [MessageAction(content="Test message")]
- llm_config = LLMConfig(model="test", api_key="test")
+ history = [MessageAction(content='Test message')]
+ llm_config = LLMConfig(model='test', api_key='test')
# Create a mock response with multi-line explanation
mock_response = """--- success
@@ -31,7 +31,7 @@ def test_guess_success_multiline_explanation():
Automatic fix generated by OpenHands 🙌"""
# Create a handler instance
- handler = IssueHandler("test", "test", "test")
+ handler = IssueHandler('test', 'test', 'test')
# Mock the litellm.completion call
def mock_completion(*args, **kwargs):
@@ -61,11 +61,11 @@ def __init__(self, content):
# Verify the results
assert success is True
- assert "The PR successfully addressed the issue by:" in explanation
- assert "Fixed bug A" in explanation
- assert "Added test B" in explanation
- assert "Updated documentation C" in explanation
- assert "Automatic fix generated by OpenHands" in explanation
+ assert 'The PR successfully addressed the issue by:' in explanation
+ assert 'Fixed bug A' in explanation
+ assert 'Added test B' in explanation
+ assert 'Updated documentation C' in explanation
+ assert 'Automatic fix generated by OpenHands' in explanation
finally:
# Restore the original function
litellm.completion = original_completion
diff --git a/tests/unit/resolver/test_issue_handler.py b/tests/unit/resolver/test_issue_handler.py
index 7df7f0fe4027..e03eb1897655 100644
--- a/tests/unit/resolver/test_issue_handler.py
+++ b/tests/unit/resolver/test_issue_handler.py
@@ -1,17 +1,18 @@
-from unittest.mock import patch, MagicMock
-from openhands.resolver.issue_definitions import IssueHandler, PRHandler
-from openhands.resolver.github_issue import GithubIssue, ReviewThread
-from openhands.events.action.message import MessageAction
+from unittest.mock import MagicMock, patch
+
from openhands.core.config import LLMConfig
+from openhands.events.action.message import MessageAction
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
+from openhands.resolver.issue_definitions import IssueHandler, PRHandler
def test_get_converted_issues_initializes_review_comments():
# Mock the necessary dependencies
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for issues
mock_issues_response = MagicMock()
mock_issues_response.json.return_value = [
- {"number": 1, "title": "Test Issue", "body": "Test Body"}
+ {'number': 1, 'title': 'Test Issue', 'body': 'Test Body'}
]
# Mock the response for comments
mock_comments_response = MagicMock()
@@ -26,7 +27,7 @@ def test_get_converted_issues_initializes_review_comments():
] # Need two comment responses because we make two API calls
# Create an instance of IssueHandler
- handler = IssueHandler("test-owner", "test-repo", "test-token")
+ handler = IssueHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
issues = handler.get_converted_issues()
@@ -39,35 +40,35 @@ def test_get_converted_issues_initializes_review_comments():
# Verify other fields are set correctly
assert issues[0].number == 1
- assert issues[0].title == "Test Issue"
- assert issues[0].body == "Test Body"
- assert issues[0].owner == "test-owner"
- assert issues[0].repo == "test-repo"
+ assert issues[0].title == 'Test Issue'
+ assert issues[0].body == 'Test Body'
+ assert issues[0].owner == 'test-owner'
+ assert issues[0].repo == 'test-repo'
def test_pr_handler_guess_success_with_thread_comments():
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with thread comments but no review comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
- thread_comments=["First comment", "Second comment"],
- closing_issues=["Issue description"],
+ title='Test PR',
+ body='Test Body',
+ thread_comments=['First comment', 'Second comment'],
+ closing_issues=['Issue description'],
review_comments=None,
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history
- history = [MessageAction(content="Fixed the issue by implementing X and Y")]
+ history = [MessageAction(content='Fixed the issue by implementing X and Y')]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -84,7 +85,7 @@ def test_pr_handler_guess_success_with_thread_comments():
]
# Test the guess_success method
- with patch("litellm.completion", return_value=mock_response):
+ with patch('litellm.completion', return_value=mock_response):
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
@@ -92,39 +93,39 @@ def test_pr_handler_guess_success_with_thread_comments():
# Verify the results
assert success is True
assert success_list == [True]
- assert "successfully address" in explanation
+ assert 'successfully address' in explanation
def test_pr_handler_get_converted_issues_with_comments():
# Mock the necessary dependencies
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
- "number": 1,
- "title": "Test PR",
- "body": "Test Body fixes #1",
- "head": {"ref": "test-branch"},
+ 'number': 1,
+ 'title': 'Test PR',
+ 'body': 'Test Body fixes #1',
+ 'head': {'ref': 'test-branch'},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"body": "First comment"},
- {"body": "Second comment"},
+ {'body': 'First comment'},
+ {'body': 'Second comment'},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {"nodes": []},
- "reviewThreads": {"edges": []},
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {'nodes': []},
+ 'reviewThreads': {'edges': []},
}
}
}
@@ -138,7 +139,7 @@ def test_pr_handler_get_converted_issues_with_comments():
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response = MagicMock()
mock_external_issue_response.json.return_value = {
- "body": "This is additional context from an externally referenced issue."
+ 'body': 'This is additional context from an externally referenced issue.'
}
mock_get.side_effect = [
@@ -150,11 +151,11 @@ def test_pr_handler_get_converted_issues_with_comments():
]
# Mock the post request for GraphQL
- with patch("requests.post") as mock_post:
+ with patch('requests.post') as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
prs = handler.get_converted_issues()
@@ -163,43 +164,43 @@ def test_pr_handler_get_converted_issues_with_comments():
assert len(prs) == 1
# Verify that thread_comments are set correctly
- assert prs[0].thread_comments == ["First comment", "Second comment"]
+ assert prs[0].thread_comments == ['First comment', 'Second comment']
# Verify other fields are set correctly
assert prs[0].number == 1
- assert prs[0].title == "Test PR"
- assert prs[0].body == "Test Body fixes #1"
- assert prs[0].owner == "test-owner"
- assert prs[0].repo == "test-repo"
- assert prs[0].head_branch == "test-branch"
+ assert prs[0].title == 'Test PR'
+ assert prs[0].body == 'Test Body fixes #1'
+ assert prs[0].owner == 'test-owner'
+ assert prs[0].repo == 'test-repo'
+ assert prs[0].head_branch == 'test-branch'
assert prs[0].closing_issues == [
- "This is additional context from an externally referenced issue."
+ 'This is additional context from an externally referenced issue.'
]
def test_pr_handler_guess_success_only_review_comments():
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with only review comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue description"],
- review_comments=["Please fix the formatting", "Add more tests"],
+ closing_issues=['Issue description'],
+ review_comments=['Please fix the formatting', 'Add more tests'],
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history
- history = [MessageAction(content="Fixed the formatting and added more tests")]
+ history = [MessageAction(content='Fixed the formatting and added more tests')]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -216,7 +217,7 @@ def test_pr_handler_guess_success_only_review_comments():
]
# Test the guess_success method
- with patch("litellm.completion", return_value=mock_response):
+ with patch('litellm.completion', return_value=mock_response):
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
@@ -224,32 +225,32 @@ def test_pr_handler_guess_success_only_review_comments():
# Verify the results
assert success is True
assert success_list == [True]
- assert "successfully address" in explanation
+ assert 'successfully address' in explanation
def test_pr_handler_guess_success_no_comments():
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with no comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue description"],
+ closing_issues=['Issue description'],
review_comments=None,
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history
- history = [MessageAction(content="Fixed the issue")]
+ history = [MessageAction(content='Fixed the issue')]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Test that it returns appropriate message when no comments are present
success, success_list, explanation = handler.guess_success(
@@ -257,29 +258,29 @@ def test_pr_handler_guess_success_no_comments():
)
assert success is False
assert success_list is None
- assert explanation == "No feedback was found to process"
+ assert explanation == 'No feedback was found to process'
def test_get_issue_comments_with_specific_comment_id():
# Mock the necessary dependencies
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"id": 123, "body": "First comment"},
- {"id": 456, "body": "Second comment"},
+ {'id': 123, 'body': 'First comment'},
+ {'id': 456, 'body': 'Second comment'},
]
mock_get.return_value = mock_comments_response
# Create an instance of IssueHandler
- handler = IssueHandler("test-owner", "test-repo", "test-token")
+ handler = IssueHandler('test-owner', 'test-repo', 'test-token')
# Get comments with a specific comment_id
specific_comment = handler._get_issue_comments(issue_number=1, comment_id=123)
# Verify only the specific comment is returned
- assert specific_comment == ["First comment"]
+ assert specific_comment == ['First comment']
def test_pr_handler_get_converted_issues_with_specific_thread_comment():
@@ -287,50 +288,50 @@ def test_pr_handler_get_converted_issues_with_specific_thread_comment():
specific_comment_id = 123
# Mock GraphQL response for review threads
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
- "number": 1,
- "title": "Test PR",
- "body": "Test Body",
- "head": {"ref": "test-branch"},
+ 'number': 1,
+ 'title': 'Test PR',
+ 'body': 'Test Body',
+ 'head': {'ref': 'test-branch'},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"body": "First comment", "id": 123},
- {"body": "Second comment", "id": 124},
+ {'body': 'First comment', 'id': 123},
+ {'body': 'Second comment', 'id': 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {"nodes": []},
- "reviewThreads": {
- "edges": [
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {'nodes': []},
+ 'reviewThreads': {
+ 'edges': [
{
- "node": {
- "id": "review-thread-1",
- "isResolved": False,
- "comments": {
- "nodes": [
+ 'node': {
+ 'id': 'review-thread-1',
+ 'isResolved': False,
+ 'comments': {
+ 'nodes': [
{
- "fullDatabaseId": 121,
- "body": "Specific review comment",
- "path": "file1.txt",
+ 'fullDatabaseId': 121,
+ 'body': 'Specific review comment',
+ 'path': 'file1.txt',
},
{
- "fullDatabaseId": 456,
- "body": "Another review comment",
- "path": "file2.txt",
+ 'fullDatabaseId': 456,
+ 'body': 'Another review comment',
+ 'path': 'file2.txt',
},
]
},
@@ -356,11 +357,11 @@ def test_pr_handler_get_converted_issues_with_specific_thread_comment():
]
# Mock the post request for GraphQL
- with patch("requests.post") as mock_post:
+ with patch('requests.post') as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
@@ -369,17 +370,17 @@ def test_pr_handler_get_converted_issues_with_specific_thread_comment():
assert len(prs) == 1
# Verify that thread_comments are set correctly
- assert prs[0].thread_comments == ["First comment"]
+ assert prs[0].thread_comments == ['First comment']
assert prs[0].review_comments == []
assert prs[0].review_threads == []
# Verify other fields are set correctly
assert prs[0].number == 1
- assert prs[0].title == "Test PR"
- assert prs[0].body == "Test Body"
- assert prs[0].owner == "test-owner"
- assert prs[0].repo == "test-repo"
- assert prs[0].head_branch == "test-branch"
+ assert prs[0].title == 'Test PR'
+ assert prs[0].body == 'Test Body'
+ assert prs[0].owner == 'test-owner'
+ assert prs[0].repo == 'test-repo'
+ assert prs[0].head_branch == 'test-branch'
def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
@@ -387,50 +388,50 @@ def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
specific_comment_id = 123
# Mock GraphQL response for review threads
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
- "number": 1,
- "title": "Test PR",
- "body": "Test Body",
- "head": {"ref": "test-branch"},
+ 'number': 1,
+ 'title': 'Test PR',
+ 'body': 'Test Body',
+ 'head': {'ref': 'test-branch'},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"body": "First comment", "id": 120},
- {"body": "Second comment", "id": 124},
+ {'body': 'First comment', 'id': 120},
+ {'body': 'Second comment', 'id': 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {"nodes": []},
- "reviewThreads": {
- "edges": [
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {'nodes': []},
+ 'reviewThreads': {
+ 'edges': [
{
- "node": {
- "id": "review-thread-1",
- "isResolved": False,
- "comments": {
- "nodes": [
+ 'node': {
+ 'id': 'review-thread-1',
+ 'isResolved': False,
+ 'comments': {
+ 'nodes': [
{
- "fullDatabaseId": specific_comment_id,
- "body": "Specific review comment",
- "path": "file1.txt",
+ 'fullDatabaseId': specific_comment_id,
+ 'body': 'Specific review comment',
+ 'path': 'file1.txt',
},
{
- "fullDatabaseId": 456,
- "body": "Another review comment",
- "path": "file1.txt",
+ 'fullDatabaseId': 456,
+ 'body': 'Another review comment',
+ 'path': 'file1.txt',
},
]
},
@@ -456,11 +457,11 @@ def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
]
# Mock the post request for GraphQL
- with patch("requests.post") as mock_post:
+ with patch('requests.post') as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
@@ -475,17 +476,17 @@ def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
assert isinstance(prs[0].review_threads[0], ReviewThread)
assert (
prs[0].review_threads[0].comment
- == "Specific review comment\n---\nlatest feedback:\nAnother review comment\n"
+ == 'Specific review comment\n---\nlatest feedback:\nAnother review comment\n'
)
- assert prs[0].review_threads[0].files == ["file1.txt"]
+ assert prs[0].review_threads[0].files == ['file1.txt']
# Verify other fields are set correctly
assert prs[0].number == 1
- assert prs[0].title == "Test PR"
- assert prs[0].body == "Test Body"
- assert prs[0].owner == "test-owner"
- assert prs[0].repo == "test-repo"
- assert prs[0].head_branch == "test-branch"
+ assert prs[0].title == 'Test PR'
+ assert prs[0].body == 'Test Body'
+ assert prs[0].owner == 'test-owner'
+ assert prs[0].repo == 'test-repo'
+ assert prs[0].head_branch == 'test-branch'
def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
@@ -493,50 +494,50 @@ def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
specific_comment_id = 123
# Mock GraphQL response for review threads
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
- "number": 1,
- "title": "Test PR fixes #3",
- "body": "Test Body",
- "head": {"ref": "test-branch"},
+ 'number': 1,
+ 'title': 'Test PR fixes #3',
+ 'body': 'Test Body',
+ 'head': {'ref': 'test-branch'},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"body": "First comment", "id": 120},
- {"body": "Second comment", "id": 124},
+ {'body': 'First comment', 'id': 120},
+ {'body': 'Second comment', 'id': 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {"nodes": []},
- "reviewThreads": {
- "edges": [
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {'nodes': []},
+ 'reviewThreads': {
+ 'edges': [
{
- "node": {
- "id": "review-thread-1",
- "isResolved": False,
- "comments": {
- "nodes": [
+ 'node': {
+ 'id': 'review-thread-1',
+ 'isResolved': False,
+ 'comments': {
+ 'nodes': [
{
- "fullDatabaseId": specific_comment_id,
- "body": "Specific review comment that references #6",
- "path": "file1.txt",
+ 'fullDatabaseId': specific_comment_id,
+ 'body': 'Specific review comment that references #6',
+ 'path': 'file1.txt',
},
{
- "fullDatabaseId": 456,
- "body": "Another review comment referencing #7",
- "path": "file2.txt",
+ 'fullDatabaseId': 456,
+ 'body': 'Another review comment referencing #7',
+ 'path': 'file2.txt',
},
]
},
@@ -557,13 +558,13 @@ def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response_in_body = MagicMock()
mock_external_issue_response_in_body.json.return_value = {
- "body": "External context #1."
+ 'body': 'External context #1.'
}
# Mock the response for fetching the external issue referenced in review thread
mock_external_issue_response_review_thread = MagicMock()
mock_external_issue_response_review_thread.json.return_value = {
- "body": "External context #2."
+ 'body': 'External context #2.'
}
mock_get.side_effect = [
@@ -576,11 +577,11 @@ def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
]
# Mock the post request for GraphQL
- with patch("requests.post") as mock_post:
+ with patch('requests.post') as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
@@ -595,52 +596,52 @@ def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
assert isinstance(prs[0].review_threads[0], ReviewThread)
assert (
prs[0].review_threads[0].comment
- == "Specific review comment that references #6\n---\nlatest feedback:\nAnother review comment referencing #7\n"
+ == 'Specific review comment that references #6\n---\nlatest feedback:\nAnother review comment referencing #7\n'
)
assert prs[0].closing_issues == [
- "External context #1.",
- "External context #2.",
+ 'External context #1.',
+ 'External context #2.',
] # Only includes references inside comment ID and body PR
# Verify other fields are set correctly
assert prs[0].number == 1
- assert prs[0].title == "Test PR fixes #3"
- assert prs[0].body == "Test Body"
- assert prs[0].owner == "test-owner"
- assert prs[0].repo == "test-repo"
- assert prs[0].head_branch == "test-branch"
+ assert prs[0].title == 'Test PR fixes #3'
+ assert prs[0].body == 'Test Body'
+ assert prs[0].owner == 'test-owner'
+ assert prs[0].repo == 'test-repo'
+ assert prs[0].head_branch == 'test-branch'
def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
# Mock the necessary dependencies
- with patch("requests.get") as mock_get:
+ with patch('requests.get') as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
- "number": 1,
- "title": "Test PR",
- "body": "Test Body fixes #1",
- "head": {"ref": "test-branch"},
+ 'number': 1,
+ 'title': 'Test PR',
+ 'body': 'Test Body fixes #1',
+ 'head': {'ref': 'test-branch'},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
- {"body": "First comment addressing #1"},
- {"body": "Second comment addressing #2"},
+ {'body': 'First comment addressing #1'},
+ {'body': 'Second comment addressing #2'},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {"nodes": []},
- "reviewThreads": {"edges": []},
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {'nodes': []},
+ 'reviewThreads': {'edges': []},
}
}
}
@@ -654,13 +655,13 @@ def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response_in_body = MagicMock()
mock_external_issue_response_in_body.json.return_value = {
- "body": "External context #1."
+ 'body': 'External context #1.'
}
# Mock the response for fetching the external issue referenced in review thread
mock_external_issue_response_in_comment = MagicMock()
mock_external_issue_response_in_comment.json.return_value = {
- "body": "External context #2."
+ 'body': 'External context #2.'
}
mock_get.side_effect = [
@@ -673,11 +674,11 @@ def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
]
# Mock the post request for GraphQL
- with patch("requests.post") as mock_post:
+ with patch('requests.post') as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Get converted issues
prs = handler.get_converted_issues()
@@ -687,18 +688,18 @@ def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
# Verify that thread_comments are set correctly
assert prs[0].thread_comments == [
- "First comment addressing #1",
- "Second comment addressing #2",
+ 'First comment addressing #1',
+ 'Second comment addressing #2',
]
# Verify other fields are set correctly
assert prs[0].number == 1
- assert prs[0].title == "Test PR"
- assert prs[0].body == "Test Body fixes #1"
- assert prs[0].owner == "test-owner"
- assert prs[0].repo == "test-repo"
- assert prs[0].head_branch == "test-branch"
+ assert prs[0].title == 'Test PR'
+ assert prs[0].body == 'Test Body fixes #1'
+ assert prs[0].owner == 'test-owner'
+ assert prs[0].repo == 'test-repo'
+ assert prs[0].head_branch == 'test-branch'
assert prs[0].closing_issues == [
- "External context #1.",
- "External context #2.",
+ 'External context #1.',
+ 'External context #2.',
]
diff --git a/tests/unit/resolver/test_issue_handler_error_handling.py b/tests/unit/resolver/test_issue_handler_error_handling.py
index 54adff3466fd..d80cd8df48e6 100644
--- a/tests/unit/resolver/test_issue_handler_error_handling.py
+++ b/tests/unit/resolver/test_issue_handler_error_handling.py
@@ -1,94 +1,93 @@
-import pytest
+from unittest.mock import MagicMock, patch
+
import requests
-from unittest.mock import patch, MagicMock
from openhands.resolver.issue_definitions import PRHandler
-from openhands.resolver.github_issue import ReviewThread
def test_handle_nonexistent_issue_reference():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a 404 error
mock_response = MagicMock()
- mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Client Error: Not Found")
-
+ mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError('404 Client Error: Not Found')
+
with patch('requests.get', return_value=mock_response):
# Call the method with a non-existent issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #999999", # Non-existent issue
+ issue_body='This references #999999', # Non-existent issue
review_comments=[],
review_threads=[],
thread_comments=None
)
-
+
# The method should return an empty list since the referenced issue couldn't be fetched
assert result == []
def test_handle_rate_limit_error():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a rate limit error
mock_response = MagicMock()
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
- "403 Client Error: Rate Limit Exceeded"
+ '403 Client Error: Rate Limit Exceeded'
)
-
+
with patch('requests.get', return_value=mock_response):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
thread_comments=None
)
-
+
# The method should return an empty list since the request was rate limited
assert result == []
def test_handle_network_error():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a network error
- with patch('requests.get', side_effect=requests.exceptions.ConnectionError("Network Error")):
+ with patch('requests.get', side_effect=requests.exceptions.ConnectionError('Network Error')):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
thread_comments=None
)
-
+
# The method should return an empty list since the network request failed
assert result == []
def test_successful_issue_reference():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock a successful response
mock_response = MagicMock()
mock_response.raise_for_status.return_value = None
- mock_response.json.return_value = {"body": "This is the referenced issue body"}
-
+ mock_response.json.return_value = {'body': 'This is the referenced issue body'}
+
with patch('requests.get', return_value=mock_response):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
thread_comments=None
)
-
+
# The method should return a list with the referenced issue body
- assert result == ["This is the referenced issue body"]
\ No newline at end of file
+ assert result == ['This is the referenced issue body']
diff --git a/tests/unit/resolver/test_issue_references.py b/tests/unit/resolver/test_issue_references.py
index e4da644983db..de843b03255a 100644
--- a/tests/unit/resolver/test_issue_references.py
+++ b/tests/unit/resolver/test_issue_references.py
@@ -2,13 +2,13 @@
def test_extract_issue_references():
- handler = IssueHandler("test-owner", "test-repo", "test-token")
+ handler = IssueHandler('test-owner', 'test-repo', 'test-token')
# Test basic issue reference
- assert handler._extract_issue_references("Fixes #123") == [123]
+ assert handler._extract_issue_references('Fixes #123') == [123]
# Test multiple issue references
- assert handler._extract_issue_references("Fixes #123, #456") == [123, 456]
+ assert handler._extract_issue_references('Fixes #123, #456') == [123, 456]
# Test issue references in code blocks should be ignored
assert handler._extract_issue_references("""
@@ -22,13 +22,13 @@ def func():
""") == [789]
# Test issue references in inline code should be ignored
- assert handler._extract_issue_references("This `#123` should be ignored but #456 should be extracted") == [456]
+ assert handler._extract_issue_references('This `#123` should be ignored but #456 should be extracted') == [456]
# Test issue references in URLs should be ignored
- assert handler._extract_issue_references("Check http://example.com/#123 but #456 should be extracted") == [456]
+ assert handler._extract_issue_references('Check http://example.com/#123 but #456 should be extracted') == [456]
# Test issue references in markdown links should be extracted
- assert handler._extract_issue_references("[Link to #123](http://example.com) and #456") == [123, 456]
+ assert handler._extract_issue_references('[Link to #123](http://example.com) and #456') == [123, 456]
# Test issue references with text around them
- assert handler._extract_issue_references("Issue #123 is fixed and #456 is pending") == [123, 456]
+ assert handler._extract_issue_references('Issue #123 is fixed and #456 is pending') == [123, 456]
diff --git a/tests/unit/resolver/test_pr_handler_guess_success.py b/tests/unit/resolver/test_pr_handler_guess_success.py
index bc29fbe2632e..e7e7705e8747 100644
--- a/tests/unit/resolver/test_pr_handler_guess_success.py
+++ b/tests/unit/resolver/test_pr_handler_guess_success.py
@@ -1,39 +1,39 @@
import json
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
-from openhands.resolver.issue_definitions import PRHandler
-from openhands.resolver.github_issue import GithubIssue, ReviewThread
-from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
+from openhands.events.action.message import MessageAction
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
+from openhands.resolver.issue_definitions import PRHandler
def test_guess_success_review_threads_litellm_call():
"""Test that the litellm.completion() call for review threads contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with review threads
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=None,
review_threads=[
ReviewThread(
- comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
- files=["/src/file1.py", "/src/file2.py"],
+ comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
+ files=['/src/file1.py', '/src/file2.py'],
),
ReviewThread(
- comment="Add more tests\n---\nlatest feedback:\nAdd test cases",
- files=["/tests/test_file.py"],
+ comment='Add more tests\n---\nlatest feedback:\nAdd test cases',
+ files=['/tests/test_file.py'],
),
],
- thread_ids=["1", "2"],
- head_branch="test-branch",
+ thread_ids=['1', '2'],
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -47,7 +47,7 @@ def test_guess_success_review_threads_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -64,7 +64,7 @@ def test_guess_success_review_threads_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -75,63 +75,63 @@ def test_guess_success_review_threads_litellm_call():
# Check first call
first_call = mock_completion.call_args_list[0]
- first_prompt = first_call[1]["messages"][0]["content"]
+ first_prompt = first_call[1]['messages'][0]['content']
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in first_prompt
)
assert (
- "Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings"
+ 'Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings'
in first_prompt
)
assert (
- "Files locations:\n"
- + json.dumps(["/src/file1.py", "/src/file2.py"], indent=4)
+ 'Files locations:\n'
+ + json.dumps(['/src/file1.py', '/src/file2.py'], indent=4)
in first_prompt
)
- assert "Last message from AI agent:\n" + history[0].content in first_prompt
+ assert 'Last message from AI agent:\n' + history[0].content in first_prompt
# Check second call
second_call = mock_completion.call_args_list[1]
- second_prompt = second_call[1]["messages"][0]["content"]
+ second_prompt = second_call[1]['messages'][0]['content']
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in second_prompt
)
assert (
- "Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases"
+ 'Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases'
in second_prompt
)
assert (
- "Files locations:\n" + json.dumps(["/tests/test_file.py"], indent=4)
+ 'Files locations:\n' + json.dumps(['/tests/test_file.py'], indent=4)
in second_prompt
)
- assert "Last message from AI agent:\n" + history[0].content in second_prompt
+ assert 'Last message from AI agent:\n' + history[0].content in second_prompt
def test_guess_success_thread_comments_litellm_call():
"""Test that the litellm.completion() call for thread comments contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with thread comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=[
- "Please improve error handling",
- "Add input validation",
- "latest feedback:\nHandle edge cases",
+ 'Please improve error handling',
+ 'Add input validation',
+ 'latest feedback:\nHandle edge cases',
],
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=None,
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -145,7 +145,7 @@ def test_guess_success_thread_comments_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -162,7 +162,7 @@ def test_guess_success_thread_comments_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -171,77 +171,77 @@ def test_guess_success_thread_comments_litellm_call():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in prompt
)
- assert "PR Thread Comments:\n" + "\n---\n".join(issue.thread_comments) in prompt
- assert "Last message from AI agent:\n" + history[0].content in prompt
+ assert 'PR Thread Comments:\n' + '\n---\n'.join(issue.thread_comments) in prompt
+ assert 'Last message from AI agent:\n' + history[0].content in prompt
def test_check_feedback_with_llm():
"""Test the _check_feedback_with_llm helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Test cases for different LLM responses
test_cases = [
{
- "response": "--- success\ntrue\n--- explanation\nChanges look good",
- "expected": (True, "Changes look good"),
+ 'response': '--- success\ntrue\n--- explanation\nChanges look good',
+ 'expected': (True, 'Changes look good'),
},
{
- "response": "--- success\nfalse\n--- explanation\nNot all issues fixed",
- "expected": (False, "Not all issues fixed"),
+ 'response': '--- success\nfalse\n--- explanation\nNot all issues fixed',
+ 'expected': (False, 'Not all issues fixed'),
},
{
- "response": "Invalid response format",
- "expected": (
+ 'response': 'Invalid response format',
+ 'expected': (
False,
- "Failed to decode answer from LLM response: Invalid response format",
+ 'Failed to decode answer from LLM response: Invalid response format',
),
},
{
- "response": "--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere",
- "expected": (True, "Multiline\nexplanation\nhere"),
+ 'response': '--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere',
+ 'expected': (True, 'Multiline\nexplanation\nhere'),
},
]
for case in test_cases:
# Mock the LLM response
mock_response = MagicMock()
- mock_response.choices = [MagicMock(message=MagicMock(content=case["response"]))]
+ mock_response.choices = [MagicMock(message=MagicMock(content=case['response']))]
# Test the function
- with patch("litellm.completion", return_value=mock_response):
+ with patch('litellm.completion', return_value=mock_response):
success, explanation = handler._check_feedback_with_llm(
- "test prompt", llm_config
+ 'test prompt', llm_config
)
- assert (success, explanation) == case["expected"]
+ assert (success, explanation) == case['expected']
def test_check_review_thread():
"""Test the _check_review_thread helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
review_thread = ReviewThread(
- comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
- files=["/src/file1.py", "/src/file2.py"],
+ comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
+ files=['/src/file1.py', '/src/file2.py'],
)
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have fixed the formatting and added docstrings"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have fixed the formatting and added docstrings'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -258,7 +258,7 @@ def test_check_review_thread():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_thread(
review_thread, issues_context, last_message, llm_config
@@ -267,37 +267,37 @@ def test_check_review_thread():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "Feedback:\n" + review_thread.comment in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'Feedback:\n' + review_thread.comment in prompt
assert (
- "Files locations:\n" + json.dumps(review_thread.files, indent=4) in prompt
+ 'Files locations:\n' + json.dumps(review_thread.files, indent=4) in prompt
)
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_check_thread_comments():
"""Test the _check_thread_comments helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
thread_comments = [
- "Please improve error handling",
- "Add input validation",
- "latest feedback:\nHandle edge cases",
+ 'Please improve error handling',
+ 'Add input validation',
+ 'latest feedback:\nHandle edge cases',
]
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have added error handling and input validation"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have added error handling and input validation'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -314,7 +314,7 @@ def test_check_thread_comments():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_thread_comments(
thread_comments, issues_context, last_message, llm_config
@@ -323,34 +323,34 @@ def test_check_thread_comments():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "PR Thread Comments:\n" + "\n---\n".join(thread_comments) in prompt
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'PR Thread Comments:\n' + '\n---\n'.join(thread_comments) in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_check_review_comments():
"""Test the _check_review_comments helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
review_comments = [
- "Please improve code readability",
- "Add comments to complex functions",
- "Follow PEP 8 style guide",
+ 'Please improve code readability',
+ 'Add comments to complex functions',
+ 'Follow PEP 8 style guide',
]
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have improved code readability and added comments"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have improved code readability and added comments'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -367,7 +367,7 @@ def test_check_review_comments():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_comments(
review_comments, issues_context, last_message, llm_config
@@ -376,39 +376,39 @@ def test_check_review_comments():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "PR Review Comments:\n" + "\n---\n".join(review_comments) in prompt
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'PR Review Comments:\n' + '\n---\n'.join(review_comments) in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_guess_success_review_comments_litellm_call():
"""Test that the litellm.completion() call for review comments contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with review comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=[
- "Please improve code readability",
- "Add comments to complex functions",
- "Follow PEP 8 style guide",
+ 'Please improve code readability',
+ 'Add comments to complex functions',
+ 'Follow PEP 8 style guide',
],
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -422,7 +422,7 @@ def test_guess_success_review_comments_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -439,7 +439,7 @@ def test_guess_success_review_comments_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -448,13 +448,13 @@ def test_guess_success_review_comments_litellm_call():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in prompt
)
- assert "PR Review Comments:\n" + "\n---\n".join(issue.review_comments) in prompt
- assert "Last message from AI agent:\n" + history[0].content in prompt
+ assert 'PR Review Comments:\n' + '\n---\n'.join(issue.review_comments) in prompt
+ assert 'Last message from AI agent:\n' + history[0].content in prompt
diff --git a/tests/unit/resolver/test_pr_title_escaping.py b/tests/unit/resolver/test_pr_title_escaping.py
index 03f2b7104807..45dd523b036a 100644
--- a/tests/unit/resolver/test_pr_title_escaping.py
+++ b/tests/unit/resolver/test_pr_title_escaping.py
@@ -1,45 +1,46 @@
-from openhands.resolver.github_issue import GithubIssue
-from openhands.resolver.send_pull_request import make_commit
import os
-import tempfile
import subprocess
+import tempfile
+
+from openhands.resolver.github_issue import GithubIssue
+from openhands.resolver.send_pull_request import make_commit
def test_commit_message_with_quotes():
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
- subprocess.run(["git", "init", temp_dir], check=True)
+ subprocess.run(['git', 'init', temp_dir], check=True)
# Create a test file and add it to git
- test_file = os.path.join(temp_dir, "test.txt")
- with open(test_file, "w") as f:
- f.write("test content")
+ test_file = os.path.join(temp_dir, 'test.txt')
+ with open(test_file, 'w') as f:
+ f.write('test content')
- subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
+ subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)
# Create a test issue with problematic title
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and ",
- body="Test body",
+ body='Test body',
labels=[],
assignees=[],
- state="open",
- created_at="2024-01-01T00:00:00Z",
- updated_at="2024-01-01T00:00:00Z",
+ state='open',
+ created_at='2024-01-01T00:00:00Z',
+ updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Make the commit
- make_commit(temp_dir, issue, "issue")
+ make_commit(temp_dir, issue, 'issue')
# Get the commit message
result = subprocess.run(
- ["git", "-C", temp_dir, "log", "-1", "--pretty=%B"],
+ ['git', '-C', temp_dir, 'log', '-1', '--pretty=%B'],
capture_output=True,
text=True,
check=True,
@@ -48,7 +49,7 @@ def test_commit_message_with_quotes():
# The commit message should contain the quotes without excessive escaping
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and "
- assert commit_msg == expected, f"Expected: {expected}\nGot: {commit_msg}"
+ assert commit_msg == expected, f'Expected: {expected}\nGot: {commit_msg}'
def test_pr_title_with_quotes(monkeypatch):
@@ -56,39 +57,39 @@ def test_pr_title_with_quotes(monkeypatch):
class MockResponse:
def __init__(self, status_code=201):
self.status_code = status_code
- self.text = ""
+ self.text = ''
def json(self):
- return {"html_url": "https://github.com/test/test/pull/1"}
+ return {'html_url': 'https://github.com/test/test/pull/1'}
def raise_for_status(self):
pass
def mock_post(*args, **kwargs):
# Verify that the PR title is not over-escaped
- data = kwargs.get("json", {})
- title = data.get("title", "")
+ data = kwargs.get('json', {})
+ title = data.get('title', '')
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and "
assert (
title == expected
- ), f"PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}"
+ ), f'PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}'
return MockResponse()
class MockGetResponse:
def __init__(self, status_code=200):
self.status_code = status_code
- self.text = ""
+ self.text = ''
def json(self):
- return {"default_branch": "main"}
+ return {'default_branch': 'main'}
def raise_for_status(self):
pass
- monkeypatch.setattr("requests.post", mock_post)
- monkeypatch.setattr("requests.get", lambda *args, **kwargs: MockGetResponse())
+ monkeypatch.setattr('requests.post', mock_post)
+ monkeypatch.setattr('requests.get', lambda *args, **kwargs: MockGetResponse())
monkeypatch.setattr(
- "openhands.resolver.send_pull_request.branch_exists",
+ 'openhands.resolver.send_pull_request.branch_exists',
lambda *args, **kwargs: False,
)
@@ -97,69 +98,69 @@ def raise_for_status(self):
def mock_run(*args, **kwargs):
print(f"Running command: {args[0] if args else kwargs.get('args', [])}")
- if isinstance(args[0], list) and args[0][0] == "git":
- if "push" in args[0]:
+ if isinstance(args[0], list) and args[0][0] == 'git':
+ if 'push' in args[0]:
return subprocess.CompletedProcess(
- args[0], returncode=0, stdout="", stderr=""
+ args[0], returncode=0, stdout='', stderr=''
)
return original_run(*args, **kwargs)
return original_run(*args, **kwargs)
- monkeypatch.setattr("subprocess.run", mock_run)
+ monkeypatch.setattr('subprocess.run', mock_run)
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
- print("Initializing git repo...")
- subprocess.run(["git", "init", temp_dir], check=True)
+ print('Initializing git repo...')
+ subprocess.run(['git', 'init', temp_dir], check=True)
# Add these lines to configure git
subprocess.run(
- ["git", "-C", temp_dir, "config", "user.name", "Test User"], check=True
+ ['git', '-C', temp_dir, 'config', 'user.name', 'Test User'], check=True
)
subprocess.run(
- ["git", "-C", temp_dir, "config", "user.email", "test@example.com"],
+ ['git', '-C', temp_dir, 'config', 'user.email', 'test@example.com'],
check=True,
)
# Create a test file and add it to git
- test_file = os.path.join(temp_dir, "test.txt")
- with open(test_file, "w") as f:
- f.write("test content")
+ test_file = os.path.join(temp_dir, 'test.txt')
+ with open(test_file, 'w') as f:
+ f.write('test content')
- print("Adding and committing test file...")
- subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
+ print('Adding and committing test file...')
+ subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)
subprocess.run(
- ["git", "-C", temp_dir, "commit", "-m", "Initial commit"], check=True
+ ['git', '-C', temp_dir, 'commit', '-m', 'Initial commit'], check=True
)
# Create a test issue with problematic title
- print("Creating test issue...")
+ print('Creating test issue...')
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and ",
- body="Test body",
+ body='Test body',
labels=[],
assignees=[],
- state="open",
- created_at="2024-01-01T00:00:00Z",
- updated_at="2024-01-01T00:00:00Z",
+ state='open',
+ created_at='2024-01-01T00:00:00Z',
+ updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Try to send a PR - this will fail if the title is incorrectly escaped
- print("Sending PR...")
- from openhands.resolver.send_pull_request import send_pull_request
+ print('Sending PR...')
from openhands.core.config import LLMConfig
+ from openhands.resolver.send_pull_request import send_pull_request
send_pull_request(
github_issue=issue,
- github_token="dummy-token",
- github_username="test-user",
+ github_token='dummy-token',
+ github_username='test-user',
patch_dir=temp_dir,
- llm_config=LLMConfig(model="test-model", api_key="test-key"),
- pr_type="ready",
+ llm_config=LLMConfig(model='test-model', api_key='test-key'),
+ pr_type='ready',
)
diff --git a/tests/unit/resolver/test_resolve_issues.py b/tests/unit/resolver/test_resolve_issues.py
index bcf5f36481e7..6c6c3975bba8 100644
--- a/tests/unit/resolver/test_resolve_issues.py
+++ b/tests/unit/resolver/test_resolve_issues.py
@@ -1,57 +1,57 @@
import os
import tempfile
-import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+import pytest
-from unittest.mock import AsyncMock, patch, MagicMock
+from openhands.core.config import LLMConfig
+from openhands.events.action import CmdRunAction
+from openhands.events.observation import CmdOutputObservation, NullObservation
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
from openhands.resolver.issue_definitions import IssueHandler, PRHandler
from openhands.resolver.resolve_issue import (
- initialize_runtime,
complete_runtime,
+ initialize_runtime,
process_issue,
)
-from openhands.resolver.github_issue import GithubIssue, ReviewThread
-from openhands.events.action import CmdRunAction
-from openhands.events.observation import CmdOutputObservation, NullObservation
from openhands.resolver.resolver_output import ResolverOutput
-from openhands.core.config import LLMConfig
@pytest.fixture
def mock_output_dir():
with tempfile.TemporaryDirectory() as temp_dir:
- repo_path = os.path.join(temp_dir, "repo")
+ repo_path = os.path.join(temp_dir, 'repo')
# Initialize a GitHub repo in "repo" and add a commit with "README.md"
os.makedirs(repo_path)
- os.system(f"git init {repo_path}")
- readme_path = os.path.join(repo_path, "README.md")
- with open(readme_path, "w") as f:
- f.write("hello world")
- os.system(f"git -C {repo_path} add README.md")
+ os.system(f'git init {repo_path}')
+ readme_path = os.path.join(repo_path, 'README.md')
+ with open(readme_path, 'w') as f:
+ f.write('hello world')
+ os.system(f'git -C {repo_path} add README.md')
os.system(f"git -C {repo_path} commit -m 'Initial commit'")
yield temp_dir
@pytest.fixture
def mock_subprocess():
- with patch("subprocess.check_output") as mock_check_output:
+ with patch('subprocess.check_output') as mock_check_output:
yield mock_check_output
@pytest.fixture
def mock_os():
- with patch("os.system") as mock_system, patch("os.path.join") as mock_join:
+ with patch('os.system') as mock_system, patch('os.path.join') as mock_join:
yield mock_system, mock_join
@pytest.fixture
def mock_prompt_template():
- return "Issue: {{ body }}\n\nPlease fix this issue."
+ return 'Issue: {{ body }}\n\nPlease fix this issue.'
@pytest.fixture
def mock_followup_prompt_template():
- return "Issue context: {{ issues }}\n\nReview comments: {{ review_comments }}\n\nReview threads: {{ review_threads }}\n\nFiles: {{ files }}\n\nPlease fix this issue."
+ return 'Issue context: {{ issues }}\n\nReview comments: {{ review_comments }}\n\nReview threads: {{ review_threads }}\n\nFiles: {{ files }}\n\nPlease fix this issue.'
def create_cmd_output(exit_code: int, content: str, command_id: int, command: str):
@@ -64,11 +64,11 @@ def test_initialize_runtime():
mock_runtime = MagicMock()
mock_runtime.run_action.side_effect = [
create_cmd_output(
- exit_code=0, content="", command_id=1, command="cd /workspace"
+ exit_code=0, content='', command_id=1, command='cd /workspace'
),
create_cmd_output(
exit_code=0,
- content="",
+ content='',
command_id=2,
command='git config --global core.pager ""',
),
@@ -77,26 +77,26 @@ def test_initialize_runtime():
initialize_runtime(mock_runtime)
assert mock_runtime.run_action.call_count == 2
- mock_runtime.run_action.assert_any_call(CmdRunAction(command="cd /workspace"))
+ mock_runtime.run_action.assert_any_call(CmdRunAction(command='cd /workspace'))
mock_runtime.run_action.assert_any_call(
CmdRunAction(command='git config --global core.pager ""')
)
def test_download_issues_from_github():
- handler = IssueHandler("owner", "repo", "token")
+ handler = IssueHandler('owner', 'repo', 'token')
mock_issues_response = MagicMock()
mock_issues_response.json.side_effect = [
[
- {"number": 1, "title": "Issue 1", "body": "This is an issue"},
+ {'number': 1, 'title': 'Issue 1', 'body': 'This is an issue'},
{
- "number": 2,
- "title": "PR 1",
- "body": "This is a pull request",
- "pull_request": {},
+ 'number': 2,
+ 'title': 'PR 1',
+ 'body': 'This is a pull request',
+ 'pull_request': {},
},
- {"number": 3, "title": "Issue 2", "body": "This is another issue"},
+ {'number': 3, 'title': 'Issue 2', 'body': 'This is another issue'},
],
None,
]
@@ -107,41 +107,41 @@ def test_download_issues_from_github():
mock_comments_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
- if "/comments" in url:
+ if '/comments' in url:
return mock_comments_response
return mock_issues_response
- with patch("requests.get", side_effect=get_mock_response):
+ with patch('requests.get', side_effect=get_mock_response):
issues = handler.get_converted_issues()
assert len(issues) == 2
- assert handler.issue_type == "issue"
+ assert handler.issue_type == 'issue'
assert all(isinstance(issue, GithubIssue) for issue in issues)
assert [issue.number for issue in issues] == [1, 3]
- assert [issue.title for issue in issues] == ["Issue 1", "Issue 2"]
+ assert [issue.title for issue in issues] == ['Issue 1', 'Issue 2']
assert [issue.review_comments for issue in issues] == [None, None]
assert [issue.closing_issues for issue in issues] == [None, None]
assert [issue.thread_ids for issue in issues] == [None, None]
def test_download_pr_from_github():
- handler = PRHandler("owner", "repo", "token")
+ handler = PRHandler('owner', 'repo', 'token')
mock_pr_response = MagicMock()
mock_pr_response.json.side_effect = [
[
{
- "number": 1,
- "title": "PR 1",
- "body": "This is a pull request",
- "head": {"ref": "b1"},
+ 'number': 1,
+ 'title': 'PR 1',
+ 'body': 'This is a pull request',
+ 'head': {'ref': 'b1'},
},
{
- "number": 2,
- "title": "My PR",
- "body": "This is another pull request",
- "head": {"ref": "b2"},
+ 'number': 2,
+ 'title': 'My PR',
+ 'body': 'This is another pull request',
+ 'head': {'ref': 'b2'},
},
- {"number": 3, "title": "PR 3", "body": "Final PR", "head": {"ref": "b3"}},
+ {'number': 3, 'title': 'PR 3', 'body': 'Final PR', 'head': {'ref': 'b3'}},
],
None,
]
@@ -155,55 +155,55 @@ def test_download_pr_from_github():
# Mock for GraphQL request (for download_pr_metadata)
mock_graphql_response = MagicMock()
mock_graphql_response.json.side_effect = lambda: {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {
- "edges": [
- {"node": {"body": "Issue 1 body", "number": 1}},
- {"node": {"body": "Issue 2 body", "number": 2}},
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {
+ 'edges': [
+ {'node': {'body': 'Issue 1 body', 'number': 1}},
+ {'node': {'body': 'Issue 2 body', 'number': 2}},
]
},
- "reviewThreads": {
- "edges": [
+ 'reviewThreads': {
+ 'edges': [
{
- "node": {
- "isResolved": False,
- "id": "1",
- "comments": {
- "nodes": [
+ 'node': {
+ 'isResolved': False,
+ 'id': '1',
+ 'comments': {
+ 'nodes': [
{
- "body": "Unresolved comment 1",
- "path": "/frontend/header.tsx",
+ 'body': 'Unresolved comment 1',
+ 'path': '/frontend/header.tsx',
},
- {"body": "Follow up thread"},
+ {'body': 'Follow up thread'},
]
},
}
},
{
- "node": {
- "isResolved": True,
- "id": "2",
- "comments": {
- "nodes": [
+ 'node': {
+ 'isResolved': True,
+ 'id': '2',
+ 'comments': {
+ 'nodes': [
{
- "body": "Resolved comment 1",
- "path": "/some/file.py",
+ 'body': 'Resolved comment 1',
+ 'path': '/some/file.py',
}
]
},
}
},
{
- "node": {
- "isResolved": False,
- "id": "3",
- "comments": {
- "nodes": [
+ 'node': {
+ 'isResolved': False,
+ 'id': '3',
+ 'comments': {
+ 'nodes': [
{
- "body": "Unresolved comment 3",
- "path": "/another/file.py",
+ 'body': 'Unresolved comment 3',
+ 'path': '/another/file.py',
}
]
},
@@ -219,34 +219,34 @@ def test_download_pr_from_github():
mock_graphql_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
- if "/comments" in url:
+ if '/comments' in url:
return mock_comments_response
return mock_pr_response
- with patch("requests.get", side_effect=get_mock_response):
- with patch("requests.post", return_value=mock_graphql_response):
+ with patch('requests.get', side_effect=get_mock_response):
+ with patch('requests.post', return_value=mock_graphql_response):
issues = handler.get_converted_issues()
assert len(issues) == 3
- assert handler.issue_type == "pr"
+ assert handler.issue_type == 'pr'
assert all(isinstance(issue, GithubIssue) for issue in issues)
assert [issue.number for issue in issues] == [1, 2, 3]
- assert [issue.title for issue in issues] == ["PR 1", "My PR", "PR 3"]
- assert [issue.head_branch for issue in issues] == ["b1", "b2", "b3"]
+ assert [issue.title for issue in issues] == ['PR 1', 'My PR', 'PR 3']
+ assert [issue.head_branch for issue in issues] == ['b1', 'b2', 'b3']
assert len(issues[0].review_threads) == 2 # Only unresolved threads
assert (
issues[0].review_threads[0].comment
- == "Unresolved comment 1\n---\nlatest feedback:\nFollow up thread\n"
+ == 'Unresolved comment 1\n---\nlatest feedback:\nFollow up thread\n'
)
- assert issues[0].review_threads[0].files == ["/frontend/header.tsx"]
+ assert issues[0].review_threads[0].files == ['/frontend/header.tsx']
assert (
issues[0].review_threads[1].comment
- == "latest feedback:\nUnresolved comment 3\n"
+ == 'latest feedback:\nUnresolved comment 3\n'
)
- assert issues[0].review_threads[1].files == ["/another/file.py"]
- assert issues[0].closing_issues == ["Issue 1 body", "Issue 2 body"]
- assert issues[0].thread_ids == ["1", "3"]
+ assert issues[0].review_threads[1].files == ['/another/file.py']
+ assert issues[0].closing_issues == ['Issue 1 body', 'Issue 2 body']
+ assert issues[0].thread_ids == ['1', '3']
@pytest.mark.asyncio
@@ -254,34 +254,34 @@ async def test_complete_runtime():
mock_runtime = MagicMock()
mock_runtime.run_action.side_effect = [
create_cmd_output(
- exit_code=0, content="", command_id=1, command="cd /workspace"
+ exit_code=0, content='', command_id=1, command='cd /workspace'
),
create_cmd_output(
exit_code=0,
- content="",
+ content='',
command_id=2,
command='git config --global core.pager ""',
),
create_cmd_output(
exit_code=0,
- content="",
+ content='',
command_id=3,
- command="git config --global --add safe.directory /workspace",
+ command='git config --global --add safe.directory /workspace',
),
create_cmd_output(
exit_code=0,
- content="",
+ content='',
command_id=4,
- command="git diff base_commit_hash fix",
+ command='git diff base_commit_hash fix',
),
create_cmd_output(
- exit_code=0, content="git diff content", command_id=5, command="git apply"
+ exit_code=0, content='git diff content', command_id=5, command='git apply'
),
]
- result = await complete_runtime(mock_runtime, "base_commit_hash")
+ result = await complete_runtime(mock_runtime, 'base_commit_hash')
- assert result == {"git_patch": "git diff content"}
+ assert result == {'git_patch': 'git diff content'}
assert mock_runtime.run_action.call_count == 5
@@ -296,65 +296,65 @@ async def test_process_issue(mock_output_dir, mock_prompt_template):
# Set up test data
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
)
- base_commit = "abcdef1234567890"
- repo_instruction = "Resolve this repo"
+ base_commit = 'abcdef1234567890'
+ repo_instruction = 'Resolve this repo'
max_iterations = 5
- llm_config = LLMConfig(model="test_model", api_key="test_api_key")
- runtime_container_image = "test_image:latest"
+ llm_config = LLMConfig(model='test_model', api_key='test_api_key')
+ runtime_container_image = 'test_image:latest'
# Test cases for different scenarios
test_cases = [
{
- "name": "successful_run",
- "run_controller_return": MagicMock(
- history=[NullObservation(content="")],
+ 'name': 'successful_run',
+ 'run_controller_return': MagicMock(
+ history=[NullObservation(content='')],
metrics=MagicMock(
- get=MagicMock(return_value={"test_result": "passed"})
+ get=MagicMock(return_value={'test_result': 'passed'})
),
last_error=None,
),
- "run_controller_raises": None,
- "expected_success": True,
- "expected_error": None,
- "expected_explanation": "Issue resolved successfully",
+ 'run_controller_raises': None,
+ 'expected_success': True,
+ 'expected_error': None,
+ 'expected_explanation': 'Issue resolved successfully',
},
{
- "name": "value_error",
- "run_controller_return": None,
- "run_controller_raises": ValueError("Test value error"),
- "expected_success": False,
- "expected_error": "Agent failed to run or crashed",
- "expected_explanation": "Agent failed to run",
+ 'name': 'value_error',
+ 'run_controller_return': None,
+ 'run_controller_raises': ValueError('Test value error'),
+ 'expected_success': False,
+ 'expected_error': 'Agent failed to run or crashed',
+ 'expected_explanation': 'Agent failed to run',
},
{
- "name": "runtime_error",
- "run_controller_return": None,
- "run_controller_raises": RuntimeError("Test runtime error"),
- "expected_success": False,
- "expected_error": "Agent failed to run or crashed",
- "expected_explanation": "Agent failed to run",
+ 'name': 'runtime_error',
+ 'run_controller_return': None,
+ 'run_controller_raises': RuntimeError('Test runtime error'),
+ 'expected_success': False,
+ 'expected_error': 'Agent failed to run or crashed',
+ 'expected_explanation': 'Agent failed to run',
},
{
- "name": "json_decode_error",
- "run_controller_return": MagicMock(
- history=[NullObservation(content="")],
+ 'name': 'json_decode_error',
+ 'run_controller_return': MagicMock(
+ history=[NullObservation(content='')],
metrics=MagicMock(
- get=MagicMock(return_value={"test_result": "passed"})
+ get=MagicMock(return_value={'test_result': 'passed'})
),
last_error=None,
),
- "run_controller_raises": None,
- "expected_success": True,
- "expected_error": None,
- "expected_explanation": "Non-JSON explanation",
- "is_pr": True,
- "comment_success": [
+ 'run_controller_raises': None,
+ 'expected_success': True,
+ 'expected_error': None,
+ 'expected_explanation': 'Non-JSON explanation',
+ 'is_pr': True,
+ 'comment_success': [
True,
False,
], # To trigger the PR success logging code path
@@ -371,31 +371,31 @@ async def test_process_issue(mock_output_dir, mock_prompt_template):
# Mock return values
mock_create_runtime.return_value = MagicMock(connect=AsyncMock())
- if test_case["run_controller_raises"]:
- mock_run_controller.side_effect = test_case["run_controller_raises"]
+ if test_case['run_controller_raises']:
+ mock_run_controller.side_effect = test_case['run_controller_raises']
else:
- mock_run_controller.return_value = test_case["run_controller_return"]
+ mock_run_controller.return_value = test_case['run_controller_return']
mock_run_controller.side_effect = None
- mock_complete_runtime.return_value = {"git_patch": "test patch"}
+ mock_complete_runtime.return_value = {'git_patch': 'test patch'}
handler_instance.guess_success.return_value = (
- test_case["expected_success"],
- test_case.get("comment_success", None),
- test_case["expected_explanation"],
+ test_case['expected_success'],
+ test_case.get('comment_success', None),
+ test_case['expected_explanation'],
)
- handler_instance.get_instruction.return_value = ("Test instruction", [])
- handler_instance.issue_type = "pr" if test_case.get("is_pr", False) else "issue"
+ handler_instance.get_instruction.return_value = ('Test instruction', [])
+ handler_instance.issue_type = 'pr' if test_case.get('is_pr', False) else 'issue'
with patch(
- "openhands.resolver.resolve_issue.create_runtime", mock_create_runtime
+ 'openhands.resolver.resolve_issue.create_runtime', mock_create_runtime
), patch(
- "openhands.resolver.resolve_issue.initialize_runtime",
+ 'openhands.resolver.resolve_issue.initialize_runtime',
mock_initialize_runtime,
), patch(
- "openhands.resolver.resolve_issue.run_controller", mock_run_controller
+ 'openhands.resolver.resolve_issue.run_controller', mock_run_controller
), patch(
- "openhands.resolver.resolve_issue.complete_runtime", mock_complete_runtime
- ), patch("openhands.resolver.resolve_issue.logger"):
+ 'openhands.resolver.resolve_issue.complete_runtime', mock_complete_runtime
+ ), patch('openhands.resolver.resolve_issue.logger'):
# Call the function
result = await process_issue(
issue,
@@ -411,15 +411,15 @@ async def test_process_issue(mock_output_dir, mock_prompt_template):
)
# Assert the result
- expected_issue_type = "pr" if test_case.get("is_pr", False) else "issue"
+ expected_issue_type = 'pr' if test_case.get('is_pr', False) else 'issue'
assert handler_instance.issue_type == expected_issue_type
assert isinstance(result, ResolverOutput)
assert result.issue == issue
assert result.base_commit == base_commit
- assert result.git_patch == "test patch"
- assert result.success == test_case["expected_success"]
- assert result.success_explanation == test_case["expected_explanation"]
- assert result.error == test_case["expected_error"]
+ assert result.git_patch == 'test patch'
+ assert result.success == test_case['expected_success']
+ assert result.success_explanation == test_case['expected_explanation']
+ assert result.error == test_case['expected_error']
# Assert that the mocked functions were called
mock_create_runtime.assert_called_once()
@@ -428,7 +428,7 @@ async def test_process_issue(mock_output_dir, mock_prompt_template):
mock_complete_runtime.assert_called_once()
# Assert that guess_success was called only for successful runs
- if test_case["expected_success"]:
+ if test_case['expected_success']:
handler_instance.guess_success.assert_called_once()
else:
handler_instance.guess_success.assert_not_called()
@@ -436,29 +436,29 @@ async def test_process_issue(mock_output_dir, mock_prompt_template):
def test_get_instruction(mock_prompt_template, mock_followup_prompt_template):
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=123,
- title="Test Issue",
- body="This is a test issue refer to image ",
+ title='Test Issue',
+ body='This is a test issue refer to image ',
)
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
instruction, images_urls = issue_handler.get_instruction(
issue, mock_prompt_template, None
)
- expected_instruction = "Issue: Test Issue\n\nThis is a test issue refer to image \n\nPlease fix this issue."
+ expected_instruction = 'Issue: Test Issue\n\nThis is a test issue refer to image \n\nPlease fix this issue.'
- assert images_urls == ["https://sampleimage.com/image1.png"]
- assert issue_handler.issue_type == "issue"
+ assert images_urls == ['https://sampleimage.com/image1.png']
+ assert issue_handler.issue_type == 'issue'
assert instruction == expected_instruction
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=123,
- title="Test Issue",
- body="This is a test issue",
- closing_issues=["Issue 1 fix the type"],
+ title='Test Issue',
+ body='This is a test issue',
+ closing_issues=['Issue 1 fix the type'],
review_threads=[
ReviewThread(
comment="There is still a typo 'pthon' instead of 'python'", files=[]
@@ -466,30 +466,30 @@ def test_get_instruction(mock_prompt_template, mock_followup_prompt_template):
],
)
- pr_handler = PRHandler("owner", "repo", "token")
+ pr_handler = PRHandler('owner', 'repo', 'token')
instruction, images_urls = pr_handler.get_instruction(
issue, mock_followup_prompt_template, None
)
expected_instruction = "Issue context: [\n \"Issue 1 fix the type\"\n]\n\nReview comments: None\n\nReview threads: [\n \"There is still a typo 'pthon' instead of 'python'\"\n]\n\nFiles: []\n\nPlease fix this issue."
assert images_urls == []
- assert pr_handler.issue_type == "pr"
+ assert pr_handler.issue_type == 'pr'
assert instruction == expected_instruction
def test_file_instruction():
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=123,
- title="Test Issue",
- body="This is a test issue ",
+ title='Test Issue',
+ body='This is a test issue ',
)
# load prompt from openhands/resolver/prompts/resolve/basic.jinja
- with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
+ with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
prompt = f.read()
# Test without thread comments
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)
expected_instruction = """Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
@@ -505,28 +505,28 @@ def test_file_instruction():
When you think you have fixed the issue through code changes, please finish the interaction."""
assert instruction == expected_instruction
- assert images_urls == ["https://sampleimage.com/sample.png"]
+ assert images_urls == ['https://sampleimage.com/sample.png']
def test_file_instruction_with_repo_instruction():
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=123,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
)
# load prompt from openhands/resolver/prompts/resolve/basic.jinja
- with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
+ with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
prompt = f.read()
# load repo instruction from openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
with open(
- "openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt",
- "r",
+ 'openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt',
+ 'r',
) as f:
repo_instruction = f.read()
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
instruction, image_urls = issue_handler.get_instruction(
issue, prompt, repo_instruction
)
@@ -549,226 +549,226 @@ def test_file_instruction_with_repo_instruction():
When you think you have fixed the issue through code changes, please finish the interaction."""
assert instruction == expected_instruction
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert image_urls == []
def test_guess_success():
mock_issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
)
mock_history = [
create_cmd_output(
- exit_code=0, content="", command_id=1, command="cd /workspace"
+ exit_code=0, content='', command_id=1, command='cd /workspace'
)
]
- mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
+ mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
- content="--- success\ntrue\n--- explanation\nIssue resolved successfully"
+ content='--- success\ntrue\n--- explanation\nIssue resolved successfully'
)
)
]
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
- with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
+ with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert comment_success is None
assert success
- assert explanation == "Issue resolved successfully"
+ assert explanation == 'Issue resolved successfully'
def test_guess_success_with_thread_comments():
mock_issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
thread_comments=[
- "First comment",
- "Second comment",
- "latest feedback:\nPlease add tests",
+ 'First comment',
+ 'Second comment',
+ 'latest feedback:\nPlease add tests',
],
)
- mock_history = [MagicMock(message="I have added tests for this case")]
- mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
+ mock_history = [MagicMock(message='I have added tests for this case')]
+ mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
- content="--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling"
+ content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling'
)
)
]
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
- with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
+ with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert comment_success is None
assert success
- assert "Tests have been added" in explanation
+ assert 'Tests have been added' in explanation
def test_instruction_with_thread_comments():
# Create an issue with thread comments
issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=123,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
thread_comments=[
- "First comment",
- "Second comment",
- "latest feedback:\nPlease add tests",
+ 'First comment',
+ 'Second comment',
+ 'latest feedback:\nPlease add tests',
],
)
# Load the basic prompt template
- with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
+ with open('openhands/resolver/prompts/resolve/basic.jinja', 'r') as f:
prompt = f.read()
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)
# Verify that thread comments are included in the instruction
- assert "First comment" in instruction
- assert "Second comment" in instruction
- assert "Please add tests" in instruction
- assert "Issue Thread Comments:" in instruction
+ assert 'First comment' in instruction
+ assert 'Second comment' in instruction
+ assert 'Please add tests' in instruction
+ assert 'Issue Thread Comments:' in instruction
assert images_urls == []
def test_guess_success_failure():
mock_issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
thread_comments=[
- "First comment",
- "Second comment",
- "latest feedback:\nPlease add tests",
+ 'First comment',
+ 'Second comment',
+ 'latest feedback:\nPlease add tests',
],
)
- mock_history = [MagicMock(message="I have added tests for this case")]
- mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
+ mock_history = [MagicMock(message='I have added tests for this case')]
+ mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
- content="--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling"
+ content='--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling'
)
)
]
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
- with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
+ with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert comment_success is None
assert success
- assert "Tests have been added" in explanation
+ assert 'Tests have been added' in explanation
def test_guess_success_negative_case():
mock_issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
)
mock_history = [
create_cmd_output(
- exit_code=0, content="", command_id=1, command="cd /workspace"
+ exit_code=0, content='', command_id=1, command='cd /workspace'
)
]
- mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
+ mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
- content="--- success\nfalse\n--- explanation\nIssue not resolved"
+ content='--- success\nfalse\n--- explanation\nIssue not resolved'
)
)
]
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
- with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
+ with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert comment_success is None
assert not success
- assert explanation == "Issue not resolved"
+ assert explanation == 'Issue not resolved'
def test_guess_success_invalid_output():
mock_issue = GithubIssue(
- owner="test_owner",
- repo="test_repo",
+ owner='test_owner',
+ repo='test_repo',
number=1,
- title="Test Issue",
- body="This is a test issue",
+ title='Test Issue',
+ body='This is a test issue',
)
mock_history = [
create_cmd_output(
- exit_code=0, content="", command_id=1, command="cd /workspace"
+ exit_code=0, content='', command_id=1, command='cd /workspace'
)
]
- mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
+ mock_llm_config = LLMConfig(model='test_model', api_key='test_api_key')
mock_completion_response = MagicMock()
mock_completion_response.choices = [
- MagicMock(message=MagicMock(content="This is not a valid output"))
+ MagicMock(message=MagicMock(content='This is not a valid output'))
]
- issue_handler = IssueHandler("owner", "repo", "token")
+ issue_handler = IssueHandler('owner', 'repo', 'token')
- with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
+ with patch('litellm.completion', MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
- assert issue_handler.issue_type == "issue"
+ assert issue_handler.issue_type == 'issue'
assert comment_success is None
assert not success
assert (
explanation
- == "Failed to decode answer from LLM response: This is not a valid output"
+ == 'Failed to decode answer from LLM response: This is not a valid output'
)
def test_download_pr_with_review_comments():
- handler = PRHandler("owner", "repo", "token")
+ handler = PRHandler('owner', 'repo', 'token')
mock_pr_response = MagicMock()
mock_pr_response.json.side_effect = [
[
{
- "number": 1,
- "title": "PR 1",
- "body": "This is a pull request",
- "head": {"ref": "b1"},
+ 'number': 1,
+ 'title': 'PR 1',
+ 'body': 'This is a pull request',
+ 'head': {'ref': 'b1'},
},
],
None,
@@ -783,14 +783,14 @@ def test_download_pr_with_review_comments():
# Mock for GraphQL request with review comments but no threads
mock_graphql_response = MagicMock()
mock_graphql_response.json.side_effect = lambda: {
- "data": {
- "repository": {
- "pullRequest": {
- "closingIssuesReferences": {"edges": []},
- "reviews": {
- "nodes": [
- {"body": "Please fix this typo"},
- {"body": "Add more tests"},
+ 'data': {
+ 'repository': {
+ 'pullRequest': {
+ 'closingIssuesReferences': {'edges': []},
+ 'reviews': {
+ 'nodes': [
+ {'body': 'Please fix this typo'},
+ {'body': 'Add more tests'},
]
},
}
@@ -801,32 +801,32 @@ def test_download_pr_with_review_comments():
mock_graphql_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
- if "/comments" in url:
+ if '/comments' in url:
return mock_comments_response
return mock_pr_response
- with patch("requests.get", side_effect=get_mock_response):
- with patch("requests.post", return_value=mock_graphql_response):
+ with patch('requests.get', side_effect=get_mock_response):
+ with patch('requests.post', return_value=mock_graphql_response):
issues = handler.get_converted_issues()
assert len(issues) == 1
- assert handler.issue_type == "pr"
+ assert handler.issue_type == 'pr'
assert isinstance(issues[0], GithubIssue)
assert issues[0].number == 1
- assert issues[0].title == "PR 1"
- assert issues[0].head_branch == "b1"
+ assert issues[0].title == 'PR 1'
+ assert issues[0].head_branch == 'b1'
# Verify review comments are set but threads are empty
assert len(issues[0].review_comments) == 2
- assert issues[0].review_comments[0] == "Please fix this typo"
- assert issues[0].review_comments[1] == "Add more tests"
+ assert issues[0].review_comments[0] == 'Please fix this typo'
+ assert issues[0].review_comments[1] == 'Add more tests'
assert not issues[0].review_threads
assert not issues[0].closing_issues
assert not issues[0].thread_ids
def test_download_issue_with_specific_comment():
- handler = IssueHandler("owner", "repo", "token")
+ handler = IssueHandler('owner', 'repo', 'token')
# Define the specific comment_id to filter
specific_comment_id = 101
@@ -835,7 +835,7 @@ def test_download_issue_with_specific_comment():
mock_issue_response = MagicMock()
mock_issue_response.json.side_effect = [
[
- {"number": 1, "title": "Issue 1", "body": "This is an issue"},
+ {'number': 1, 'title': 'Issue 1', 'body': 'This is an issue'},
],
None,
]
@@ -844,32 +844,32 @@ def test_download_issue_with_specific_comment():
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{
- "id": specific_comment_id,
- "body": "Specific comment body",
- "issue_url": "https://api.github.com/repos/owner/repo/issues/1",
+ 'id': specific_comment_id,
+ 'body': 'Specific comment body',
+ 'issue_url': 'https://api.github.com/repos/owner/repo/issues/1',
},
{
- "id": 102,
- "body": "Another comment body",
- "issue_url": "https://api.github.com/repos/owner/repo/issues/2",
+ 'id': 102,
+ 'body': 'Another comment body',
+ 'issue_url': 'https://api.github.com/repos/owner/repo/issues/2',
},
]
mock_comments_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
- if "/comments" in url:
+ if '/comments' in url:
return mock_comments_response
return mock_issue_response
- with patch("requests.get", side_effect=get_mock_response):
+ with patch('requests.get', side_effect=get_mock_response):
issues = handler.get_converted_issues(comment_id=specific_comment_id)
assert len(issues) == 1
assert issues[0].number == 1
- assert issues[0].title == "Issue 1"
- assert issues[0].thread_comments == ["Specific comment body"]
+ assert issues[0].title == 'Issue 1'
+ assert issues[0].thread_comments == ['Specific comment body']
-if __name__ == "__main__":
+if __name__ == '__main__':
pytest.main()