diff --git a/frontend/__tests__/components/chat/chat-interface.test.tsx b/frontend/__tests__/components/chat/chat-interface.test.tsx
index f4e1915c4960..cabf2a0f112b 100644
--- a/frontend/__tests__/components/chat/chat-interface.test.tsx
+++ b/frontend/__tests__/components/chat/chat-interface.test.tsx
@@ -40,6 +40,7 @@ describe("Empty state", () => {
const { store } = renderWithProviders(<ChatInterface />, {
preloadedState: {
chat: { messages: [] },
+ initialQuery: { selectedRepository: null, files: [], initialQuery: null, importedProjectZip: null },
},
});
@@ -62,6 +63,7 @@ describe("Empty state", () => {
renderWithProviders(<ChatInterface />, {
preloadedState: {
chat: { messages: [] },
+ initialQuery: { selectedRepository: null, files: [], initialQuery: null, importedProjectZip: null },
},
});
@@ -91,6 +93,7 @@ describe("Empty state", () => {
const { store } = renderWithProviders(<ChatInterface />, {
preloadedState: {
chat: { messages: [] },
+ initialQuery: { selectedRepository: null, files: [], initialQuery: null, importedProjectZip: null },
},
});
@@ -119,6 +122,7 @@ describe("Empty state", () => {
const { rerender } = renderWithProviders(<ChatInterface />, {
preloadedState: {
chat: { messages: [] },
+ initialQuery: { selectedRepository: null, files: [], initialQuery: null, importedProjectZip: null },
},
});
diff --git a/frontend/__tests__/initial-query.test.tsx b/frontend/__tests__/initial-query.test.tsx
index f0e0a169dbf2..6e6e24e53a00 100644
--- a/frontend/__tests__/initial-query.test.tsx
+++ b/frontend/__tests__/initial-query.test.tsx
@@ -6,12 +6,12 @@ describe("Initial Query Behavior", () => {
it("should clear initial query when clearInitialQuery is dispatched", () => {
// Set up initial query in the store
store.dispatch(setInitialQuery("test query"));
- expect(store.getState().initalQuery.initialQuery).toBe("test query");
+ expect(store.getState().initialQuery.initialQuery).toBe("test query");
// Clear the initial query
store.dispatch(clearInitialQuery());
// Verify initial query is cleared
- expect(store.getState().initalQuery.initialQuery).toBeNull();
+ expect(store.getState().initialQuery.initialQuery).toBeNull();
});
});
diff --git a/frontend/public/config.json b/frontend/public/config.json
index 94900dcbaf31..7dbb7e1d966c 100644
--- a/frontend/public/config.json
+++ b/frontend/public/config.json
@@ -2,4 +2,4 @@
"APP_MODE": "oss",
"GITHUB_CLIENT_ID": "",
"POSTHOG_CLIENT_KEY": "phc_3ESMmY9SgqEAGBB6sMGK5ayYHkeUuknH2vP6FmWH9RA"
-}
\ No newline at end of file
+}
diff --git a/frontend/src/components/chat-interface.tsx b/frontend/src/components/chat-interface.tsx
index f0004bd749d4..88d5766f6efe 100644
--- a/frontend/src/components/chat-interface.tsx
+++ b/frontend/src/components/chat-interface.tsx
@@ -46,6 +46,7 @@ export function ChatInterface() {
const { messages } = useSelector((state: RootState) => state.chat);
const { curAgentState } = useSelector((state: RootState) => state.agent);
+ const { selectedRepository } = useSelector((state: RootState) => state.initialQuery);
const [feedbackPolarity, setFeedbackPolarity] = React.useState<
"positive" | "negative"
@@ -175,7 +176,7 @@ export function ChatInterface() {
{(curAgentState === AgentState.AWAITING_USER_INPUT ||
curAgentState === AgentState.FINISHED) && (
- {rootLoaderData?.ghToken ? (
+ {rootLoaderData?.ghToken && selectedRepository ? (
- (state: RootState) => state.initalQuery,
+ (state: RootState) => state.initialQuery,
);
const { ghToken, repo } = useLoaderData();
diff --git a/frontend/src/components/project-menu/ProjectMenuCard.tsx b/frontend/src/components/project-menu/ProjectMenuCard.tsx
index 1a32c2f802d1..b3db61aaa6f5 100644
--- a/frontend/src/components/project-menu/ProjectMenuCard.tsx
+++ b/frontend/src/components/project-menu/ProjectMenuCard.tsx
@@ -77,6 +77,7 @@ Please push the changes to GitHub and open a pull request.
{!working && contextMenuIsOpen && (
onConnectToGitHub={() => setConnectToGitHubModalOpen(true)}
onPushToGitHub={handlePushToGitHub}
onDownloadWorkspace={handleDownloadWorkspace}
diff --git a/frontend/src/components/project-menu/project.menu-card-context-menu.tsx b/frontend/src/components/project-menu/project.menu-card-context-menu.tsx
index 8beaf05c1de2..e56554eb29a5 100644
--- a/frontend/src/components/project-menu/project.menu-card-context-menu.tsx
+++ b/frontend/src/components/project-menu/project.menu-card-context-menu.tsx
@@ -6,6 +6,11 @@ import { I18nKey } from "#/i18n/declaration";
interface ProjectMenuCardContextMenuProps {
isConnectedToGitHub: boolean;
+ githubData: {
+ avatar: string;
+ repoName: string;
+ lastCommit: GitHubCommit;
+ } | null;
onConnectToGitHub: () => void;
onPushToGitHub: () => void;
onDownloadWorkspace: () => void;
@@ -14,6 +19,7 @@ interface ProjectMenuCardContextMenuProps {
export function ProjectMenuCardContextMenu({
isConnectedToGitHub,
+ githubData,
onConnectToGitHub,
onPushToGitHub,
onDownloadWorkspace,
@@ -31,7 +37,7 @@ export function ProjectMenuCardContextMenu({
{t(I18nKey.PROJECT_MENU_CARD_CONTEXT_MENU$CONNECT_TO_GITHUB_LABEL)}
)}
- {isConnectedToGitHub && (
+ {isConnectedToGitHub && githubData && (
{t(I18nKey.PROJECT_MENU_CARD_CONTEXT_MENU$PUSH_TO_GITHUB_LABEL)}
diff --git a/frontend/src/routes/_oh._index/task-form.tsx b/frontend/src/routes/_oh._index/task-form.tsx
index 15d06907033f..b7a7f55141ab 100644
--- a/frontend/src/routes/_oh._index/task-form.tsx
+++ b/frontend/src/routes/_oh._index/task-form.tsx
@@ -18,7 +18,7 @@ export const TaskForm = React.forwardRef((_, ref) => {
const navigation = useNavigation();
const { selectedRepository, files } = useSelector(
- (state: RootState) => state.initalQuery,
+ (state: RootState) => state.initialQuery,
);
const [text, setText] = React.useState("");
diff --git a/frontend/src/routes/_oh.app.tsx b/frontend/src/routes/_oh.app.tsx
index 664b6124af41..e73483392ec0 100644
--- a/frontend/src/routes/_oh.app.tsx
+++ b/frontend/src/routes/_oh.app.tsx
@@ -28,7 +28,7 @@ import { EventHandler } from "#/components/event-handler";
export const clientLoader = async () => {
const ghToken = localStorage.getItem("ghToken");
const repo =
- store.getState().initalQuery.selectedRepository ||
+ store.getState().initialQuery.selectedRepository ||
localStorage.getItem("repo");
const settings = getSettings();
diff --git a/frontend/src/store.ts b/frontend/src/store.ts
index 77d5dfc6fbf1..036bd2cfc909 100644
--- a/frontend/src/store.ts
+++ b/frontend/src/store.ts
@@ -12,7 +12,7 @@ import statusReducer from "./state/statusSlice";
export const rootReducer = combineReducers({
fileState: fileStateReducer,
- initalQuery: initialQueryReducer,
+ initialQuery: initialQueryReducer,
browser: browserReducer,
chat: chatReducer,
code: codeReducer,
diff --git a/openhands/agenthub/codeact_agent/codeact_agent.py b/openhands/agenthub/codeact_agent/codeact_agent.py
index 39b9e69247be..6743de87ade6 100644
--- a/openhands/agenthub/codeact_agent/codeact_agent.py
+++ b/openhands/agenthub/codeact_agent/codeact_agent.py
@@ -187,7 +187,9 @@ def get_action_message(
)
]
elif isinstance(action, CmdRunAction) and action.source == 'user':
- content = [TextContent(text=f'User executed the command:\n{action.command}')]
+ content = [
+ TextContent(text=f'User executed the command:\n{action.command}')
+ ]
return [
Message(
role='user',
diff --git a/openhands/events/action/message.py b/openhands/events/action/message.py
index 86d7c439e936..d86526419664 100644
--- a/openhands/events/action/message.py
+++ b/openhands/events/action/message.py
@@ -24,6 +24,7 @@ def images_urls(self):
@images_urls.setter
def images_urls(self, value):
self.image_urls = value
+
def __str__(self) -> str:
ret = f'**MessageAction** (source={self.source})\n'
ret += f'CONTENT: {self.content}'
diff --git a/openhands/events/serialization/action.py b/openhands/events/serialization/action.py
index defac3b5dda6..f34b4b0ec0cf 100644
--- a/openhands/events/serialization/action.py
+++ b/openhands/events/serialization/action.py
@@ -69,7 +69,7 @@ def action_from_dict(action: dict) -> Action:
# images_urls has been renamed to image_urls
if 'images_urls' in args:
args['image_urls'] = args.pop('images_urls')
-
+
try:
decoded_action = action_class(**args)
if 'timeout' in action:
diff --git a/openhands/resolver/patching/__init__.py b/openhands/resolver/patching/__init__.py
index 5c31f160a0a0..165a623af537 100644
--- a/openhands/resolver/patching/__init__.py
+++ b/openhands/resolver/patching/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-from .patch import parse_patch
from .apply import apply_diff
+from .patch import parse_patch
-__all__ = ["parse_patch", "apply_diff"]
+__all__ = ['parse_patch', 'apply_diff']
diff --git a/openhands/resolver/patching/apply.py b/openhands/resolver/patching/apply.py
index f13e814292cb..24f2266f56cf 100644
--- a/openhands/resolver/patching/apply.py
+++ b/openhands/resolver/patching/apply.py
@@ -10,33 +10,33 @@
def _apply_diff_with_subprocess(diff, lines, reverse=False):
# call out to patch program
- patchexec = which("patch")
+ patchexec = which('patch')
if not patchexec:
- raise SubprocessException("cannot find patch program", code=-1)
+ raise SubprocessException('cannot find patch program', code=-1)
tempdir = tempfile.gettempdir()
- filepath = os.path.join(tempdir, "wtp-" + str(hash(diff.header)))
- oldfilepath = filepath + ".old"
- newfilepath = filepath + ".new"
- rejfilepath = filepath + ".rej"
- patchfilepath = filepath + ".patch"
- with open(oldfilepath, "w") as f:
- f.write("\n".join(lines) + "\n")
+ filepath = os.path.join(tempdir, 'wtp-' + str(hash(diff.header)))
+ oldfilepath = filepath + '.old'
+ newfilepath = filepath + '.new'
+ rejfilepath = filepath + '.rej'
+ patchfilepath = filepath + '.patch'
+ with open(oldfilepath, 'w') as f:
+ f.write('\n'.join(lines) + '\n')
- with open(patchfilepath, "w") as f:
+ with open(patchfilepath, 'w') as f:
f.write(diff.text)
args = [
patchexec,
- "--reverse" if reverse else "--forward",
- "--quiet",
- "--no-backup-if-mismatch",
- "-o",
+ '--reverse' if reverse else '--forward',
+ '--quiet',
+ '--no-backup-if-mismatch',
+ '-o',
newfilepath,
- "-i",
+ '-i',
patchfilepath,
- "-r",
+ '-r',
rejfilepath,
oldfilepath,
]
@@ -58,7 +58,7 @@ def _apply_diff_with_subprocess(diff, lines, reverse=False):
# do this last to ensure files get cleaned up
if ret != 0:
- raise SubprocessException("patch program failed", code=ret)
+ raise SubprocessException('patch program failed', code=ret)
return lines, rejlines
diff --git a/openhands/resolver/patching/exceptions.py b/openhands/resolver/patching/exceptions.py
index 594b079e8365..30653c56da18 100644
--- a/openhands/resolver/patching/exceptions.py
+++ b/openhands/resolver/patching/exceptions.py
@@ -7,7 +7,7 @@ def __init__(self, msg, hunk=None):
self.hunk = hunk
if hunk is not None:
super(HunkException, self).__init__(
- "{msg}, in hunk #{n}".format(msg=msg, n=hunk)
+ '{msg}, in hunk #{n}'.format(msg=msg, n=hunk)
)
else:
super(HunkException, self).__init__(msg)
diff --git a/openhands/resolver/patching/patch.py b/openhands/resolver/patching/patch.py
index c0304e06543b..7e3b98ed0883 100644
--- a/openhands/resolver/patching/patch.py
+++ b/openhands/resolver/patching/patch.py
@@ -8,67 +8,67 @@
from .snippets import findall_regex, split_by_regex
header = namedtuple(
- "header",
- "index_path old_path old_version new_path new_version",
+ 'header',
+ 'index_path old_path old_version new_path new_version',
)
-diffobj = namedtuple("diffobj", "header changes text")
-Change = namedtuple("Change", "old new line hunk")
+diffobj = namedtuple('diffobj', 'header changes text')
+Change = namedtuple('Change', 'old new line hunk')
-file_timestamp_str = "(.+?)(?:\t|:| +)(.*)"
+file_timestamp_str = '(.+?)(?:\t|:| +)(.*)'
# .+? was previously [^:\t\n\r\f\v]+
# general diff regex
-diffcmd_header = re.compile("^diff.* (.+) (.+)$")
-unified_header_index = re.compile("^Index: (.+)$")
-unified_header_old_line = re.compile(r"^--- " + file_timestamp_str + "$")
-unified_header_new_line = re.compile(r"^\+\+\+ " + file_timestamp_str + "$")
-unified_hunk_start = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$")
-unified_change = re.compile("^([-+ ])(.*)$")
-
-context_header_old_line = re.compile(r"^\*\*\* " + file_timestamp_str + "$")
-context_header_new_line = re.compile("^--- " + file_timestamp_str + "$")
-context_hunk_start = re.compile(r"^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$")
-context_hunk_old = re.compile(r"^\*\*\* (\d+),?(\d*) \*\*\*\*$")
-context_hunk_new = re.compile(r"^--- (\d+),?(\d*) ----$")
-context_change = re.compile("^([-+ !]) (.*)$")
-
-ed_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])$")
-ed_hunk_end = re.compile("^.$")
+diffcmd_header = re.compile('^diff.* (.+) (.+)$')
+unified_header_index = re.compile('^Index: (.+)$')
+unified_header_old_line = re.compile(r'^--- ' + file_timestamp_str + '$')
+unified_header_new_line = re.compile(r'^\+\+\+ ' + file_timestamp_str + '$')
+unified_hunk_start = re.compile(r'^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$')
+unified_change = re.compile('^([-+ ])(.*)$')
+
+context_header_old_line = re.compile(r'^\*\*\* ' + file_timestamp_str + '$')
+context_header_new_line = re.compile('^--- ' + file_timestamp_str + '$')
+context_hunk_start = re.compile(r'^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$')
+context_hunk_old = re.compile(r'^\*\*\* (\d+),?(\d*) \*\*\*\*$')
+context_hunk_new = re.compile(r'^--- (\d+),?(\d*) ----$')
+context_change = re.compile('^([-+ !]) (.*)$')
+
+ed_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])$')
+ed_hunk_end = re.compile('^.$')
# much like forward ed, but no 'c' type
-rcs_ed_hunk_start = re.compile(r"^([ad])(\d+) ?(\d*)$")
+rcs_ed_hunk_start = re.compile(r'^([ad])(\d+) ?(\d*)$')
-default_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])(\d+),?(\d*)$")
-default_hunk_mid = re.compile("^---$")
-default_change = re.compile("^([><]) (.*)$")
+default_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])(\d+),?(\d*)$')
+default_hunk_mid = re.compile('^---$')
+default_change = re.compile('^([><]) (.*)$')
# Headers
# git has a special index header and no end part
-git_diffcmd_header = re.compile("^diff --git a/(.+) b/(.+)$")
-git_header_index = re.compile(r"^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$")
-git_header_old_line = re.compile("^--- (.+)$")
-git_header_new_line = re.compile(r"^\+\+\+ (.+)$")
-git_header_file_mode = re.compile(r"^(new|deleted) file mode \d{6}$")
-git_header_binary_file = re.compile("^Binary files (.+) and (.+) differ")
-git_binary_patch_start = re.compile(r"^GIT binary patch$")
-git_binary_literal_start = re.compile(r"^literal (\d+)$")
-git_binary_delta_start = re.compile(r"^delta (\d+)$")
-base85string = re.compile(r"^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$")
-
-bzr_header_index = re.compile("=== (.+)")
+git_diffcmd_header = re.compile('^diff --git a/(.+) b/(.+)$')
+git_header_index = re.compile(r'^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$')
+git_header_old_line = re.compile('^--- (.+)$')
+git_header_new_line = re.compile(r'^\+\+\+ (.+)$')
+git_header_file_mode = re.compile(r'^(new|deleted) file mode \d{6}$')
+git_header_binary_file = re.compile('^Binary files (.+) and (.+) differ')
+git_binary_patch_start = re.compile(r'^GIT binary patch$')
+git_binary_literal_start = re.compile(r'^literal (\d+)$')
+git_binary_delta_start = re.compile(r'^delta (\d+)$')
+base85string = re.compile(r'^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$')
+
+bzr_header_index = re.compile('=== (.+)')
bzr_header_old_line = unified_header_old_line
bzr_header_new_line = unified_header_new_line
svn_header_index = unified_header_index
-svn_header_timestamp_version = re.compile(r"\((?:working copy|revision (\d+))\)")
-svn_header_timestamp = re.compile(r".*(\(.*\))$")
+svn_header_timestamp_version = re.compile(r'\((?:working copy|revision (\d+))\)')
+svn_header_timestamp = re.compile(r'.*(\(.*\))$')
cvs_header_index = unified_header_index
-cvs_header_rcs = re.compile(r"^RCS file: (.+)(?:,\w{1}$|$)")
-cvs_header_timestamp = re.compile(r"(.+)\t([\d.]+)")
-cvs_header_timestamp_colon = re.compile(r":([\d.]+)\t(.+)")
-old_cvs_diffcmd_header = re.compile("^diff.* (.+):(.*) (.+):(.*)$")
+cvs_header_rcs = re.compile(r'^RCS file: (.+)(?:,\w{1}$|$)')
+cvs_header_timestamp = re.compile(r'(.+)\t([\d.]+)')
+cvs_header_timestamp_colon = re.compile(r':([\d.]+)\t(.+)')
+old_cvs_diffcmd_header = re.compile('^diff.* (.+):(.*) (.+):(.*)$')
def parse_patch(text):
@@ -97,7 +97,7 @@ def parse_patch(text):
break
for diff in diffs:
- difftext = "\n".join(diff) + "\n"
+ difftext = '\n'.join(diff) + '\n'
h = parse_header(diff)
d = parse_diff(diff)
if h or d:
@@ -133,10 +133,10 @@ def parse_scm_header(text):
if res:
old_path = res.old_path
new_path = res.new_path
- if old_path.startswith("a/"):
+ if old_path.startswith('a/'):
old_path = old_path[2:]
- if new_path.startswith("b/"):
+ if new_path.startswith('b/'):
new_path = new_path[2:]
return header(
@@ -240,10 +240,10 @@ def parse_git_header(text):
new_path = binary.group(2)
if old_path and new_path:
- if old_path.startswith("a/"):
+ if old_path.startswith('a/'):
old_path = old_path[2:]
- if new_path.startswith("b/"):
+ if new_path.startswith('b/'):
new_path = new_path[2:]
return header(
index_path=None,
@@ -256,19 +256,19 @@ def parse_git_header(text):
# if we go through all of the text without finding our normal info,
# use the cmd if available
if cmd_old_path and cmd_new_path and old_version and new_version:
- if cmd_old_path.startswith("a/"):
+ if cmd_old_path.startswith('a/'):
cmd_old_path = cmd_old_path[2:]
- if cmd_new_path.startswith("b/"):
+ if cmd_new_path.startswith('b/'):
cmd_new_path = cmd_new_path[2:]
return header(
index_path=None,
# wow, I kind of hate this:
# assume /dev/null if the versions are zeroed out
- old_path="/dev/null" if old_version == "0000000" else cmd_old_path,
+ old_path='/dev/null' if old_version == '0000000' else cmd_old_path,
old_version=old_version,
- new_path="/dev/null" if new_version == "0000000" else cmd_new_path,
+ new_path='/dev/null' if new_version == '0000000' else cmd_new_path,
new_version=new_version,
)
@@ -569,10 +569,10 @@ def parse_default_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "<" and (r != old_len or r == 0):
+ if kind == '<' and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
- elif kind == ">" and (i != new_len or i == 0):
+ elif kind == '>' and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
@@ -627,13 +627,13 @@ def parse_unified_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "-" and (r != old_len or r == 0):
+ if kind == '-' and (r != old_len or r == 0):
changes.append(Change(old + r, None, line, hunk_n))
r += 1
- elif kind == "+" and (i != new_len or i == 0):
+ elif kind == '+' and (i != new_len or i == 0):
changes.append(Change(None, new + i, line, hunk_n))
i += 1
- elif kind == " ":
+ elif kind == ' ':
if r != old_len and i != new_len:
changes.append(Change(old + r, new + i, line, hunk_n))
r += 1
@@ -667,7 +667,7 @@ def parse_context_diff(text):
k = 0
parts = split_by_regex(hunk, context_hunk_new)
if len(parts) != 2:
- raise exceptions.ParseException("Context diff invalid", hunk_n)
+ raise exceptions.ParseException('Context diff invalid', hunk_n)
old_hunk = parts[0]
new_hunk = parts[1]
@@ -695,7 +695,7 @@ def parse_context_diff(text):
# now have old and new set, can start processing?
if len(old_hunk) > 0 and len(new_hunk) == 0:
- msg = "Got unexpected change in removal hunk: "
+ msg = 'Got unexpected change in removal hunk: '
# only removes left?
while len(old_hunk) > 0:
c = context_change.match(old_hunk[0])
@@ -707,22 +707,22 @@ def parse_context_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "-" and (j != old_len or j == 0):
+ if kind == '-' and (j != old_len or j == 0):
changes.append(Change(old + j, None, line, hunk_n))
j += 1
- elif kind == " " and (
+ elif kind == ' ' and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
- elif kind == "+" or kind == "!":
+ elif kind == '+' or kind == '!':
raise exceptions.ParseException(msg + kind, hunk_n)
continue
if len(old_hunk) == 0 and len(new_hunk) > 0:
- msg = "Got unexpected change in removal hunk: "
+ msg = 'Got unexpected change in removal hunk: '
# only insertions left?
while len(new_hunk) > 0:
c = context_change.match(new_hunk[0])
@@ -734,16 +734,16 @@ def parse_context_diff(text):
kind = c.group(1)
line = c.group(2)
- if kind == "+" and (k != new_len or k == 0):
+ if kind == '+' and (k != new_len or k == 0):
changes.append(Change(None, new + k, line, hunk_n))
k += 1
- elif kind == " " and (
+ elif kind == ' ' and (
(j != old_len and k != new_len) or (j == 0 or k == 0)
):
changes.append(Change(old + j, new + k, line, hunk_n))
j += 1
k += 1
- elif kind == "-" or kind == "!":
+ elif kind == '-' or kind == '!':
raise exceptions.ParseException(msg + kind, hunk_n)
continue
@@ -765,17 +765,17 @@ def parse_context_diff(text):
if not (oc or nc):
del old_hunk[0]
del new_hunk[0]
- elif okind == " " and nkind == " " and oline == nline:
+ elif okind == ' ' and nkind == ' ' and oline == nline:
changes.append(Change(old + j, new + k, oline, hunk_n))
j += 1
k += 1
del old_hunk[0]
del new_hunk[0]
- elif okind == "-" or okind == "!" and (j != old_len or j == 0):
+ elif okind == '-' or okind == '!' and (j != old_len or j == 0):
changes.append(Change(old + j, None, oline, hunk_n))
j += 1
del old_hunk[0]
- elif nkind == "+" or nkind == "!" and (k != new_len or k == 0):
+ elif nkind == '+' or nkind == '!' and (k != new_len or k == 0):
changes.append(Change(None, new + k, nline, hunk_n))
k += 1
del new_hunk[0]
@@ -821,7 +821,7 @@ def parse_ed_diff(text):
old_end = int(o.group(2)) if len(o.group(2)) else old
hunk_kind = o.group(3)
- if hunk_kind == "d":
+ if hunk_kind == 'd':
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
@@ -832,7 +832,7 @@ def parse_ed_diff(text):
while len(hunk) > 0:
e = ed_hunk_end.match(hunk[0])
- if not e and hunk_kind == "c":
+ if not e and hunk_kind == 'c':
k = 0
while old_end >= old:
changes.append(Change(old + k, None, None, hunk_n))
@@ -852,7 +852,7 @@ def parse_ed_diff(text):
)
i += 1
j += 1
- if not e and hunk_kind == "a":
+ if not e and hunk_kind == 'a':
changes.append(
Change(
None,
@@ -900,7 +900,7 @@ def parse_rcs_ed_diff(text):
old = int(o.group(2))
size = int(o.group(3))
- if hunk_kind == "a":
+ if hunk_kind == 'a':
old += total_change_size + 1
total_change_size += size
while size > 0 and len(hunk) > 0:
@@ -910,7 +910,7 @@ def parse_rcs_ed_diff(text):
del hunk[0]
- elif hunk_kind == "d":
+ elif hunk_kind == 'd':
total_change_size -= size
while size > 0:
changes.append(Change(old + j, None, None, hunk_n))
@@ -938,8 +938,8 @@ def parse_git_binary_diff(text):
# the sizes are used as latch-up
new_size = 0
old_size = 0
- old_encoded = ""
- new_encoded = ""
+ old_encoded = ''
+ new_encoded = ''
for line in lines:
if cmd_old_path is None and cmd_new_path is None:
hm = git_diffcmd_header.match(line)
@@ -978,11 +978,11 @@ def parse_git_binary_diff(text):
change = Change(None, 0, added_data, None)
changes.append(change)
new_size = 0
- new_encoded = ""
+ new_encoded = ''
else:
# Invalid line format
new_size = 0
- new_encoded = ""
+ new_encoded = ''
# the second is removed file
if old_size == 0:
@@ -1006,10 +1006,10 @@ def parse_git_binary_diff(text):
change = Change(0, None, None, removed_data)
changes.append(change)
old_size = 0
- old_encoded = ""
+ old_encoded = ''
else:
# Invalid line format
old_size = 0
- old_encoded = ""
+ old_encoded = ''
return changes
diff --git a/openhands/resolver/patching/snippets.py b/openhands/resolver/patching/snippets.py
index 710b1191b560..f9d9e620d0f7 100644
--- a/openhands/resolver/patching/snippets.py
+++ b/openhands/resolver/patching/snippets.py
@@ -54,7 +54,7 @@ def is_exe(fpath):
if is_exe(program):
return program
else:
- for path in os.environ["PATH"].split(os.pathsep):
+ for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
diff --git a/openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt b/openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
index ca040d591683..e4bc3b165d28 100644
--- a/openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
+++ b/openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
@@ -1,4 +1,4 @@
This is a Python repo for openhands-resolver, a library that attempts to resolve github issues with the AI agent OpenHands.
- Setup: `poetry install --with test --with dev`
-- Testing: `poetry run pytest tests/test_*.py`
\ No newline at end of file
+- Testing: `poetry run pytest tests/test_*.py`
diff --git a/openhands/resolver/prompts/repo_instructions/rbren___rss-parser.txt b/openhands/resolver/prompts/repo_instructions/rbren___rss-parser.txt
index b6e8fba1a200..5ef52d64c35d 100644
--- a/openhands/resolver/prompts/repo_instructions/rbren___rss-parser.txt
+++ b/openhands/resolver/prompts/repo_instructions/rbren___rss-parser.txt
@@ -1,4 +1,4 @@
This is a node repo for an RSS parser.
- Setup: `yes | npm install`
- Testing: `SKIP_BROWSER_TESTS=1 npm test`
-- Writing Tests: Add to the `test` directory.
\ No newline at end of file
+- Writing Tests: Add to the `test` directory.
diff --git a/openhands/resolver/prompts/resolve/basic-with-tests.jinja b/openhands/resolver/prompts/resolve/basic-with-tests.jinja
index 54c35910ec62..595489c42841 100644
--- a/openhands/resolver/prompts/resolve/basic-with-tests.jinja
+++ b/openhands/resolver/prompts/resolve/basic-with-tests.jinja
@@ -14,4 +14,4 @@ For all changes to actual application code (e.g. in Python or Javascript), add a
Run the tests, and if they pass you are done!
You do NOT need to write new tests if there are only changes to documentation or configuration files.
-When you think you have fixed the issue through code changes, please call the finish action to end the interaction.
\ No newline at end of file
+When you think you have fixed the issue through code changes, please call the finish action to end the interaction.
diff --git a/openhands/resolver/prompts/resolve/basic.jinja b/openhands/resolver/prompts/resolve/basic.jinja
index b3bec7ef7f53..a5bb806cc4df 100644
--- a/openhands/resolver/prompts/resolve/basic.jinja
+++ b/openhands/resolver/prompts/resolve/basic.jinja
@@ -10,4 +10,4 @@ You SHOULD INCLUDE PROPER INDENTATION in your edit commands.{% if repo_instructi
Some basic information about this repository:
{{ repo_instruction }}{% endif %}
-When you think you have fixed the issue through code changes, please finish the interaction.
\ No newline at end of file
+When you think you have fixed the issue through code changes, please finish the interaction.
diff --git a/pyproject.toml b/pyproject.toml
index fc1807f72fc3..b33e0fe77ff4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -95,6 +95,7 @@ reportlab = "*"
[tool.coverage.run]
concurrency = ["gevent"]
+
[tool.poetry.group.runtime.dependencies]
jupyterlab = "*"
notebook = "*"
@@ -125,6 +126,7 @@ ignore = ["D1"]
[tool.ruff.lint.pydocstyle]
convention = "google"
+
[tool.poetry.group.evaluation.dependencies]
streamlit = "*"
whatthepatch = "*"
diff --git a/tests/unit/resolver/test_guess_success.py b/tests/unit/resolver/test_guess_success.py
index 9bf3da2b3d02..d6b0e946adda 100644
--- a/tests/unit/resolver/test_guess_success.py
+++ b/tests/unit/resolver/test_guess_success.py
@@ -1,22 +1,22 @@
-from openhands.resolver.issue_definitions import IssueHandler
-from openhands.resolver.github_issue import GithubIssue
-from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
+from openhands.events.action.message import MessageAction
+from openhands.resolver.github_issue import GithubIssue
+from openhands.resolver.issue_definitions import IssueHandler
def test_guess_success_multiline_explanation():
# Mock data
issue = GithubIssue(
- owner="test",
- repo="test",
+ owner='test',
+ repo='test',
number=1,
- title="Test Issue",
- body="Test body",
+ title='Test Issue',
+ body='Test body',
thread_comments=None,
review_comments=None,
)
- history = [MessageAction(content="Test message")]
- llm_config = LLMConfig(model="test", api_key="test")
+ history = [MessageAction(content='Test message')]
+ llm_config = LLMConfig(model='test', api_key='test')
# Create a mock response with multi-line explanation
mock_response = """--- success
@@ -31,7 +31,7 @@ def test_guess_success_multiline_explanation():
Automatic fix generated by OpenHands 🙌"""
# Create a handler instance
- handler = IssueHandler("test", "test", "test")
+ handler = IssueHandler('test', 'test', 'test')
# Mock the litellm.completion call
def mock_completion(*args, **kwargs):
@@ -61,11 +61,11 @@ def __init__(self, content):
# Verify the results
assert success is True
- assert "The PR successfully addressed the issue by:" in explanation
- assert "Fixed bug A" in explanation
- assert "Added test B" in explanation
- assert "Updated documentation C" in explanation
- assert "Automatic fix generated by OpenHands" in explanation
+ assert 'The PR successfully addressed the issue by:' in explanation
+ assert 'Fixed bug A' in explanation
+ assert 'Added test B' in explanation
+ assert 'Updated documentation C' in explanation
+ assert 'Automatic fix generated by OpenHands' in explanation
finally:
# Restore the original function
litellm.completion = original_completion
diff --git a/tests/unit/resolver/test_issue_handler_error_handling.py b/tests/unit/resolver/test_issue_handler_error_handling.py
index 54adff3466fd..2b04e3b13111 100644
--- a/tests/unit/resolver/test_issue_handler_error_handling.py
+++ b/tests/unit/resolver/test_issue_handler_error_handling.py
@@ -1,94 +1,97 @@
-import pytest
+from unittest.mock import MagicMock, patch
+
import requests
-from unittest.mock import patch, MagicMock
from openhands.resolver.issue_definitions import PRHandler
-from openhands.resolver.github_issue import ReviewThread
def test_handle_nonexistent_issue_reference():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a 404 error
mock_response = MagicMock()
- mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Client Error: Not Found")
-
+ mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
+ '404 Client Error: Not Found'
+ )
+
with patch('requests.get', return_value=mock_response):
# Call the method with a non-existent issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #999999", # Non-existent issue
+ issue_body='This references #999999', # Non-existent issue
review_comments=[],
review_threads=[],
- thread_comments=None
+ thread_comments=None,
)
-
+
# The method should return an empty list since the referenced issue couldn't be fetched
assert result == []
def test_handle_rate_limit_error():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a rate limit error
mock_response = MagicMock()
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
- "403 Client Error: Rate Limit Exceeded"
+ '403 Client Error: Rate Limit Exceeded'
)
-
+
with patch('requests.get', return_value=mock_response):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
- thread_comments=None
+ thread_comments=None,
)
-
+
# The method should return an empty list since the request was rate limited
assert result == []
def test_handle_network_error():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock the requests.get to simulate a network error
- with patch('requests.get', side_effect=requests.exceptions.ConnectionError("Network Error")):
+ with patch(
+ 'requests.get', side_effect=requests.exceptions.ConnectionError('Network Error')
+ ):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
- thread_comments=None
+ thread_comments=None,
)
-
+
# The method should return an empty list since the network request failed
assert result == []
def test_successful_issue_reference():
- handler = PRHandler("test-owner", "test-repo", "test-token")
-
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
+
# Mock a successful response
mock_response = MagicMock()
mock_response.raise_for_status.return_value = None
- mock_response.json.return_value = {"body": "This is the referenced issue body"}
-
+ mock_response.json.return_value = {'body': 'This is the referenced issue body'}
+
with patch('requests.get', return_value=mock_response):
# Call the method with an issue reference
result = handler._PRHandler__get_context_from_external_issues_references(
closing_issues=[],
closing_issue_numbers=[],
- issue_body="This references #123",
+ issue_body='This references #123',
review_comments=[],
review_threads=[],
- thread_comments=None
+ thread_comments=None,
)
-
+
# The method should return a list with the referenced issue body
- assert result == ["This is the referenced issue body"]
\ No newline at end of file
+ assert result == ['This is the referenced issue body']
diff --git a/tests/unit/resolver/test_issue_references.py b/tests/unit/resolver/test_issue_references.py
index e4da644983db..1252f8555540 100644
--- a/tests/unit/resolver/test_issue_references.py
+++ b/tests/unit/resolver/test_issue_references.py
@@ -2,13 +2,13 @@
def test_extract_issue_references():
- handler = IssueHandler("test-owner", "test-repo", "test-token")
+ handler = IssueHandler('test-owner', 'test-repo', 'test-token')
# Test basic issue reference
- assert handler._extract_issue_references("Fixes #123") == [123]
+ assert handler._extract_issue_references('Fixes #123') == [123]
# Test multiple issue references
- assert handler._extract_issue_references("Fixes #123, #456") == [123, 456]
+ assert handler._extract_issue_references('Fixes #123, #456') == [123, 456]
# Test issue references in code blocks should be ignored
assert handler._extract_issue_references("""
@@ -22,13 +22,21 @@ def func():
""") == [789]
# Test issue references in inline code should be ignored
- assert handler._extract_issue_references("This `#123` should be ignored but #456 should be extracted") == [456]
+ assert handler._extract_issue_references(
+ 'This `#123` should be ignored but #456 should be extracted'
+ ) == [456]
# Test issue references in URLs should be ignored
- assert handler._extract_issue_references("Check http://example.com/#123 but #456 should be extracted") == [456]
+ assert handler._extract_issue_references(
+ 'Check http://example.com/#123 but #456 should be extracted'
+ ) == [456]
# Test issue references in markdown links should be extracted
- assert handler._extract_issue_references("[Link to #123](http://example.com) and #456") == [123, 456]
+ assert handler._extract_issue_references(
+ '[Link to #123](http://example.com) and #456'
+ ) == [123, 456]
# Test issue references with text around them
- assert handler._extract_issue_references("Issue #123 is fixed and #456 is pending") == [123, 456]
+ assert handler._extract_issue_references(
+ 'Issue #123 is fixed and #456 is pending'
+ ) == [123, 456]
diff --git a/tests/unit/resolver/test_pr_handler_guess_success.py b/tests/unit/resolver/test_pr_handler_guess_success.py
index bc29fbe2632e..e7e7705e8747 100644
--- a/tests/unit/resolver/test_pr_handler_guess_success.py
+++ b/tests/unit/resolver/test_pr_handler_guess_success.py
@@ -1,39 +1,39 @@
import json
-from unittest.mock import patch, MagicMock
+from unittest.mock import MagicMock, patch
-from openhands.resolver.issue_definitions import PRHandler
-from openhands.resolver.github_issue import GithubIssue, ReviewThread
-from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
+from openhands.events.action.message import MessageAction
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
+from openhands.resolver.issue_definitions import PRHandler
def test_guess_success_review_threads_litellm_call():
"""Test that the litellm.completion() call for review threads contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with review threads
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=None,
review_threads=[
ReviewThread(
- comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
- files=["/src/file1.py", "/src/file2.py"],
+ comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
+ files=['/src/file1.py', '/src/file2.py'],
),
ReviewThread(
- comment="Add more tests\n---\nlatest feedback:\nAdd test cases",
- files=["/tests/test_file.py"],
+ comment='Add more tests\n---\nlatest feedback:\nAdd test cases',
+ files=['/tests/test_file.py'],
),
],
- thread_ids=["1", "2"],
- head_branch="test-branch",
+ thread_ids=['1', '2'],
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -47,7 +47,7 @@ def test_guess_success_review_threads_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -64,7 +64,7 @@ def test_guess_success_review_threads_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -75,63 +75,63 @@ def test_guess_success_review_threads_litellm_call():
# Check first call
first_call = mock_completion.call_args_list[0]
- first_prompt = first_call[1]["messages"][0]["content"]
+ first_prompt = first_call[1]['messages'][0]['content']
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in first_prompt
)
assert (
- "Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings"
+ 'Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings'
in first_prompt
)
assert (
- "Files locations:\n"
- + json.dumps(["/src/file1.py", "/src/file2.py"], indent=4)
+ 'Files locations:\n'
+ + json.dumps(['/src/file1.py', '/src/file2.py'], indent=4)
in first_prompt
)
- assert "Last message from AI agent:\n" + history[0].content in first_prompt
+ assert 'Last message from AI agent:\n' + history[0].content in first_prompt
# Check second call
second_call = mock_completion.call_args_list[1]
- second_prompt = second_call[1]["messages"][0]["content"]
+ second_prompt = second_call[1]['messages'][0]['content']
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in second_prompt
)
assert (
- "Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases"
+ 'Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases'
in second_prompt
)
assert (
- "Files locations:\n" + json.dumps(["/tests/test_file.py"], indent=4)
+ 'Files locations:\n' + json.dumps(['/tests/test_file.py'], indent=4)
in second_prompt
)
- assert "Last message from AI agent:\n" + history[0].content in second_prompt
+ assert 'Last message from AI agent:\n' + history[0].content in second_prompt
def test_guess_success_thread_comments_litellm_call():
"""Test that the litellm.completion() call for thread comments contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with thread comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=[
- "Please improve error handling",
- "Add input validation",
- "latest feedback:\nHandle edge cases",
+ 'Please improve error handling',
+ 'Add input validation',
+ 'latest feedback:\nHandle edge cases',
],
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=None,
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -145,7 +145,7 @@ def test_guess_success_thread_comments_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -162,7 +162,7 @@ def test_guess_success_thread_comments_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -171,77 +171,77 @@ def test_guess_success_thread_comments_litellm_call():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in prompt
)
- assert "PR Thread Comments:\n" + "\n---\n".join(issue.thread_comments) in prompt
- assert "Last message from AI agent:\n" + history[0].content in prompt
+ assert 'PR Thread Comments:\n' + '\n---\n'.join(issue.thread_comments) in prompt
+ assert 'Last message from AI agent:\n' + history[0].content in prompt
def test_check_feedback_with_llm():
"""Test the _check_feedback_with_llm helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Test cases for different LLM responses
test_cases = [
{
- "response": "--- success\ntrue\n--- explanation\nChanges look good",
- "expected": (True, "Changes look good"),
+ 'response': '--- success\ntrue\n--- explanation\nChanges look good',
+ 'expected': (True, 'Changes look good'),
},
{
- "response": "--- success\nfalse\n--- explanation\nNot all issues fixed",
- "expected": (False, "Not all issues fixed"),
+ 'response': '--- success\nfalse\n--- explanation\nNot all issues fixed',
+ 'expected': (False, 'Not all issues fixed'),
},
{
- "response": "Invalid response format",
- "expected": (
+ 'response': 'Invalid response format',
+ 'expected': (
False,
- "Failed to decode answer from LLM response: Invalid response format",
+ 'Failed to decode answer from LLM response: Invalid response format',
),
},
{
- "response": "--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere",
- "expected": (True, "Multiline\nexplanation\nhere"),
+ 'response': '--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere',
+ 'expected': (True, 'Multiline\nexplanation\nhere'),
},
]
for case in test_cases:
# Mock the LLM response
mock_response = MagicMock()
- mock_response.choices = [MagicMock(message=MagicMock(content=case["response"]))]
+ mock_response.choices = [MagicMock(message=MagicMock(content=case['response']))]
# Test the function
- with patch("litellm.completion", return_value=mock_response):
+ with patch('litellm.completion', return_value=mock_response):
success, explanation = handler._check_feedback_with_llm(
- "test prompt", llm_config
+ 'test prompt', llm_config
)
- assert (success, explanation) == case["expected"]
+ assert (success, explanation) == case['expected']
def test_check_review_thread():
"""Test the _check_review_thread helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
review_thread = ReviewThread(
- comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
- files=["/src/file1.py", "/src/file2.py"],
+ comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
+ files=['/src/file1.py', '/src/file2.py'],
)
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have fixed the formatting and added docstrings"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have fixed the formatting and added docstrings'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -258,7 +258,7 @@ def test_check_review_thread():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_thread(
review_thread, issues_context, last_message, llm_config
@@ -267,37 +267,37 @@ def test_check_review_thread():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "Feedback:\n" + review_thread.comment in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'Feedback:\n' + review_thread.comment in prompt
assert (
- "Files locations:\n" + json.dumps(review_thread.files, indent=4) in prompt
+ 'Files locations:\n' + json.dumps(review_thread.files, indent=4) in prompt
)
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_check_thread_comments():
"""Test the _check_thread_comments helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
thread_comments = [
- "Please improve error handling",
- "Add input validation",
- "latest feedback:\nHandle edge cases",
+ 'Please improve error handling',
+ 'Add input validation',
+ 'latest feedback:\nHandle edge cases',
]
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have added error handling and input validation"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have added error handling and input validation'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -314,7 +314,7 @@ def test_check_thread_comments():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_thread_comments(
thread_comments, issues_context, last_message, llm_config
@@ -323,34 +323,34 @@ def test_check_thread_comments():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "PR Thread Comments:\n" + "\n---\n".join(thread_comments) in prompt
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'PR Thread Comments:\n' + '\n---\n'.join(thread_comments) in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_check_review_comments():
"""Test the _check_review_comments helper function."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create test data
review_comments = [
- "Please improve code readability",
- "Add comments to complex functions",
- "Follow PEP 8 style guide",
+ 'Please improve code readability',
+ 'Add comments to complex functions',
+ 'Follow PEP 8 style guide',
]
issues_context = json.dumps(
- ["Issue 1 description", "Issue 2 description"], indent=4
+ ['Issue 1 description', 'Issue 2 description'], indent=4
)
- last_message = "I have improved code readability and added comments"
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ last_message = 'I have improved code readability and added comments'
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -367,7 +367,7 @@ def test_check_review_comments():
]
# Test the function
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_comments(
review_comments, issues_context, last_message, llm_config
@@ -376,39 +376,39 @@ def test_check_review_comments():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
- assert "Issue descriptions:\n" + issues_context in prompt
- assert "PR Review Comments:\n" + "\n---\n".join(review_comments) in prompt
- assert "Last message from AI agent:\n" + last_message in prompt
+ assert 'Issue descriptions:\n' + issues_context in prompt
+ assert 'PR Review Comments:\n' + '\n---\n'.join(review_comments) in prompt
+ assert 'Last message from AI agent:\n' + last_message in prompt
# Check result
assert success is True
- assert explanation == "Changes look good"
+ assert explanation == 'Changes look good'
def test_guess_success_review_comments_litellm_call():
"""Test that the litellm.completion() call for review comments contains the expected content."""
# Create a PR handler instance
- handler = PRHandler("test-owner", "test-repo", "test-token")
+ handler = PRHandler('test-owner', 'test-repo', 'test-token')
# Create a mock issue with review comments
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=1,
- title="Test PR",
- body="Test Body",
+ title='Test PR',
+ body='Test Body',
thread_comments=None,
- closing_issues=["Issue 1 description", "Issue 2 description"],
+ closing_issues=['Issue 1 description', 'Issue 2 description'],
review_comments=[
- "Please improve code readability",
- "Add comments to complex functions",
- "Follow PEP 8 style guide",
+ 'Please improve code readability',
+ 'Add comments to complex functions',
+ 'Follow PEP 8 style guide',
],
thread_ids=None,
- head_branch="test-branch",
+ head_branch='test-branch',
)
# Create mock history with a detailed response
@@ -422,7 +422,7 @@ def test_guess_success_review_comments_litellm_call():
]
# Create mock LLM config
- llm_config = LLMConfig(model="test-model", api_key="test-key")
+ llm_config = LLMConfig(model='test-model', api_key='test-key')
# Mock the LLM response
mock_response = MagicMock()
@@ -439,7 +439,7 @@ def test_guess_success_review_comments_litellm_call():
]
# Test the guess_success method
- with patch("litellm.completion") as mock_completion:
+ with patch('litellm.completion') as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
@@ -448,13 +448,13 @@ def test_guess_success_review_comments_litellm_call():
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
- prompt = call_args[1]["messages"][0]["content"]
+ prompt = call_args[1]['messages'][0]['content']
# Check prompt content
assert (
- "Issue descriptions:\n"
- + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ 'Issue descriptions:\n'
+ + json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
in prompt
)
- assert "PR Review Comments:\n" + "\n---\n".join(issue.review_comments) in prompt
- assert "Last message from AI agent:\n" + history[0].content in prompt
+ assert 'PR Review Comments:\n' + '\n---\n'.join(issue.review_comments) in prompt
+ assert 'Last message from AI agent:\n' + history[0].content in prompt
diff --git a/tests/unit/resolver/test_pr_title_escaping.py b/tests/unit/resolver/test_pr_title_escaping.py
index 03f2b7104807..45dd523b036a 100644
--- a/tests/unit/resolver/test_pr_title_escaping.py
+++ b/tests/unit/resolver/test_pr_title_escaping.py
@@ -1,45 +1,46 @@
-from openhands.resolver.github_issue import GithubIssue
-from openhands.resolver.send_pull_request import make_commit
import os
-import tempfile
import subprocess
+import tempfile
+
+from openhands.resolver.github_issue import GithubIssue
+from openhands.resolver.send_pull_request import make_commit
def test_commit_message_with_quotes():
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
- subprocess.run(["git", "init", temp_dir], check=True)
+ subprocess.run(['git', 'init', temp_dir], check=True)
# Create a test file and add it to git
- test_file = os.path.join(temp_dir, "test.txt")
- with open(test_file, "w") as f:
- f.write("test content")
+ test_file = os.path.join(temp_dir, 'test.txt')
+ with open(test_file, 'w') as f:
+ f.write('test content')
- subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
+ subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)
# Create a test issue with problematic title
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and ",
- body="Test body",
+ body='Test body',
labels=[],
assignees=[],
- state="open",
- created_at="2024-01-01T00:00:00Z",
- updated_at="2024-01-01T00:00:00Z",
+ state='open',
+ created_at='2024-01-01T00:00:00Z',
+ updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Make the commit
- make_commit(temp_dir, issue, "issue")
+ make_commit(temp_dir, issue, 'issue')
# Get the commit message
result = subprocess.run(
- ["git", "-C", temp_dir, "log", "-1", "--pretty=%B"],
+ ['git', '-C', temp_dir, 'log', '-1', '--pretty=%B'],
capture_output=True,
text=True,
check=True,
@@ -48,7 +49,7 @@ def test_commit_message_with_quotes():
# The commit message should contain the quotes without excessive escaping
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and "
- assert commit_msg == expected, f"Expected: {expected}\nGot: {commit_msg}"
+ assert commit_msg == expected, f'Expected: {expected}\nGot: {commit_msg}'
def test_pr_title_with_quotes(monkeypatch):
@@ -56,39 +57,39 @@ def test_pr_title_with_quotes(monkeypatch):
class MockResponse:
def __init__(self, status_code=201):
self.status_code = status_code
- self.text = ""
+ self.text = ''
def json(self):
- return {"html_url": "https://github.com/test/test/pull/1"}
+ return {'html_url': 'https://github.com/test/test/pull/1'}
def raise_for_status(self):
pass
def mock_post(*args, **kwargs):
# Verify that the PR title is not over-escaped
- data = kwargs.get("json", {})
- title = data.get("title", "")
+ data = kwargs.get('json', {})
+ title = data.get('title', '')
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and "
assert (
title == expected
- ), f"PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}"
+ ), f'PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}'
return MockResponse()
class MockGetResponse:
def __init__(self, status_code=200):
self.status_code = status_code
- self.text = ""
+ self.text = ''
def json(self):
- return {"default_branch": "main"}
+ return {'default_branch': 'main'}
def raise_for_status(self):
pass
- monkeypatch.setattr("requests.post", mock_post)
- monkeypatch.setattr("requests.get", lambda *args, **kwargs: MockGetResponse())
+ monkeypatch.setattr('requests.post', mock_post)
+ monkeypatch.setattr('requests.get', lambda *args, **kwargs: MockGetResponse())
monkeypatch.setattr(
- "openhands.resolver.send_pull_request.branch_exists",
+ 'openhands.resolver.send_pull_request.branch_exists',
lambda *args, **kwargs: False,
)
@@ -97,69 +98,69 @@ def raise_for_status(self):
def mock_run(*args, **kwargs):
print(f"Running command: {args[0] if args else kwargs.get('args', [])}")
- if isinstance(args[0], list) and args[0][0] == "git":
- if "push" in args[0]:
+ if isinstance(args[0], list) and args[0][0] == 'git':
+ if 'push' in args[0]:
return subprocess.CompletedProcess(
- args[0], returncode=0, stdout="", stderr=""
+ args[0], returncode=0, stdout='', stderr=''
)
return original_run(*args, **kwargs)
return original_run(*args, **kwargs)
- monkeypatch.setattr("subprocess.run", mock_run)
+ monkeypatch.setattr('subprocess.run', mock_run)
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
- print("Initializing git repo...")
- subprocess.run(["git", "init", temp_dir], check=True)
+ print('Initializing git repo...')
+ subprocess.run(['git', 'init', temp_dir], check=True)
# Add these lines to configure git
subprocess.run(
- ["git", "-C", temp_dir, "config", "user.name", "Test User"], check=True
+ ['git', '-C', temp_dir, 'config', 'user.name', 'Test User'], check=True
)
subprocess.run(
- ["git", "-C", temp_dir, "config", "user.email", "test@example.com"],
+ ['git', '-C', temp_dir, 'config', 'user.email', 'test@example.com'],
check=True,
)
# Create a test file and add it to git
- test_file = os.path.join(temp_dir, "test.txt")
- with open(test_file, "w") as f:
- f.write("test content")
+ test_file = os.path.join(temp_dir, 'test.txt')
+ with open(test_file, 'w') as f:
+ f.write('test content')
- print("Adding and committing test file...")
- subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
+ print('Adding and committing test file...')
+ subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)
subprocess.run(
- ["git", "-C", temp_dir, "commit", "-m", "Initial commit"], check=True
+ ['git', '-C', temp_dir, 'commit', '-m', 'Initial commit'], check=True
)
# Create a test issue with problematic title
- print("Creating test issue...")
+ print('Creating test issue...')
issue = GithubIssue(
- owner="test-owner",
- repo="test-repo",
+ owner='test-owner',
+ repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and ",
- body="Test body",
+ body='Test body',
labels=[],
assignees=[],
- state="open",
- created_at="2024-01-01T00:00:00Z",
- updated_at="2024-01-01T00:00:00Z",
+ state='open',
+ created_at='2024-01-01T00:00:00Z',
+ updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Try to send a PR - this will fail if the title is incorrectly escaped
- print("Sending PR...")
- from openhands.resolver.send_pull_request import send_pull_request
+ print('Sending PR...')
from openhands.core.config import LLMConfig
+ from openhands.resolver.send_pull_request import send_pull_request
send_pull_request(
github_issue=issue,
- github_token="dummy-token",
- github_username="test-user",
+ github_token='dummy-token',
+ github_username='test-user',
patch_dir=temp_dir,
- llm_config=LLMConfig(model="test-model", api_key="test-key"),
- pr_type="ready",
+ llm_config=LLMConfig(model='test-model', api_key='test-key'),
+ pr_type='ready',
)