+
Pull Request Viewer
+
+ );
+};
+
+export default PullRequestViewer;
diff --git a/tests/unit/resolver/test_guess_success.py b/tests/unit/resolver/test_guess_success.py
new file mode 100644
index 000000000000..9bf3da2b3d02
--- /dev/null
+++ b/tests/unit/resolver/test_guess_success.py
@@ -0,0 +1,71 @@
+from openhands.resolver.issue_definitions import IssueHandler
+from openhands.resolver.github_issue import GithubIssue
+from openhands.events.action.message import MessageAction
+from openhands.core.config import LLMConfig
+
+
+def test_guess_success_multiline_explanation():
+ # Mock data
+ issue = GithubIssue(
+ owner="test",
+ repo="test",
+ number=1,
+ title="Test Issue",
+ body="Test body",
+ thread_comments=None,
+ review_comments=None,
+ )
+ history = [MessageAction(content="Test message")]
+ llm_config = LLMConfig(model="test", api_key="test")
+
+ # Create a mock response with multi-line explanation
+ mock_response = """--- success
+true
+
+--- explanation
+The PR successfully addressed the issue by:
+- Fixed bug A
+- Added test B
+- Updated documentation C
+
+Automatic fix generated by OpenHands 🙌"""
+
+ # Create a handler instance
+ handler = IssueHandler("test", "test", "test")
+
+ # Mock the litellm.completion call
+ def mock_completion(*args, **kwargs):
+ class MockResponse:
+ class Choice:
+ class Message:
+ def __init__(self, content):
+ self.content = content
+
+ def __init__(self, content):
+ self.message = self.Message(content)
+
+ def __init__(self, content):
+ self.choices = [self.Choice(content)]
+
+ return MockResponse(mock_response)
+
+ # Patch the litellm.completion function
+ import litellm
+
+ original_completion = litellm.completion
+ litellm.completion = mock_completion
+
+ try:
+ # Call guess_success
+ success, _, explanation = handler.guess_success(issue, history, llm_config)
+
+ # Verify the results
+ assert success is True
+ assert "The PR successfully addressed the issue by:" in explanation
+ assert "Fixed bug A" in explanation
+ assert "Added test B" in explanation
+ assert "Updated documentation C" in explanation
+ assert "Automatic fix generated by OpenHands" in explanation
+ finally:
+ # Restore the original function
+ litellm.completion = original_completion
diff --git a/tests/unit/resolver/test_issue_handler.py b/tests/unit/resolver/test_issue_handler.py
new file mode 100644
index 000000000000..7df7f0fe4027
--- /dev/null
+++ b/tests/unit/resolver/test_issue_handler.py
@@ -0,0 +1,704 @@
+from unittest.mock import patch, MagicMock
+from openhands.resolver.issue_definitions import IssueHandler, PRHandler
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
+from openhands.events.action.message import MessageAction
+from openhands.core.config import LLMConfig
+
+
+def test_get_converted_issues_initializes_review_comments():
+ # Mock the necessary dependencies
+ with patch("requests.get") as mock_get:
+ # Mock the response for issues
+ mock_issues_response = MagicMock()
+ mock_issues_response.json.return_value = [
+ {"number": 1, "title": "Test Issue", "body": "Test Body"}
+ ]
+ # Mock the response for comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = []
+
+ # Set up the mock to return different responses for different calls
+        # First call fetches the issues; the next two calls fetch their comments
+ mock_get.side_effect = [
+ mock_issues_response,
+ mock_comments_response,
+ mock_comments_response,
+ ] # Need two comment responses because we make two API calls
+
+ # Create an instance of IssueHandler
+ handler = IssueHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ issues = handler.get_converted_issues()
+
+ # Verify that we got exactly one issue
+ assert len(issues) == 1
+
+ # Verify that review_comments is initialized as None
+ assert issues[0].review_comments is None
+
+ # Verify other fields are set correctly
+ assert issues[0].number == 1
+ assert issues[0].title == "Test Issue"
+ assert issues[0].body == "Test Body"
+ assert issues[0].owner == "test-owner"
+ assert issues[0].repo == "test-repo"
+
+
+def test_pr_handler_guess_success_with_thread_comments():
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create a mock issue with thread comments but no review comments
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=1,
+ title="Test PR",
+ body="Test Body",
+ thread_comments=["First comment", "Second comment"],
+ closing_issues=["Issue description"],
+ review_comments=None,
+ thread_ids=None,
+ head_branch="test-branch",
+ )
+
+ # Create mock history
+ history = [MessageAction(content="Fixed the issue by implementing X and Y")]
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [
+ MagicMock(
+ message=MagicMock(
+ content="""--- success
+true
+
+--- explanation
+The changes successfully address the feedback."""
+ )
+ )
+ ]
+
+ # Test the guess_success method
+ with patch("litellm.completion", return_value=mock_response):
+ success, success_list, explanation = handler.guess_success(
+ issue, history, llm_config
+ )
+
+ # Verify the results
+ assert success is True
+ assert success_list == [True]
+ assert "successfully address" in explanation
+
+
+def test_pr_handler_get_converted_issues_with_comments():
+ # Mock the necessary dependencies
+ with patch("requests.get") as mock_get:
+ # Mock the response for PRs
+ mock_prs_response = MagicMock()
+ mock_prs_response.json.return_value = [
+ {
+ "number": 1,
+ "title": "Test PR",
+ "body": "Test Body fixes #1",
+ "head": {"ref": "test-branch"},
+ }
+ ]
+
+ # Mock the response for PR comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"body": "First comment"},
+ {"body": "Second comment"},
+ ]
+
+ # Mock the response for PR metadata (GraphQL)
+ mock_graphql_response = MagicMock()
+ mock_graphql_response.json.return_value = {
+ "data": {
+ "repository": {
+ "pullRequest": {
+ "closingIssuesReferences": {"edges": []},
+ "reviews": {"nodes": []},
+ "reviewThreads": {"edges": []},
+ }
+ }
+ }
+ }
+
+ # Set up the mock to return different responses
+ # We need to return empty responses for subsequent pages
+ mock_empty_response = MagicMock()
+ mock_empty_response.json.return_value = []
+
+ # Mock the response for fetching the external issue referenced in PR body
+ mock_external_issue_response = MagicMock()
+ mock_external_issue_response.json.return_value = {
+ "body": "This is additional context from an externally referenced issue."
+ }
+
+ mock_get.side_effect = [
+ mock_prs_response, # First call for PRs
+ mock_empty_response, # Second call for PRs (empty page)
+ mock_comments_response, # Third call for PR comments
+ mock_empty_response, # Fourth call for PR comments (empty page)
+ mock_external_issue_response, # Mock response for the external issue reference #1
+ ]
+
+ # Mock the post request for GraphQL
+ with patch("requests.post") as mock_post:
+ mock_post.return_value = mock_graphql_response
+
+ # Create an instance of PRHandler
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ prs = handler.get_converted_issues()
+
+ # Verify that we got exactly one PR
+ assert len(prs) == 1
+
+ # Verify that thread_comments are set correctly
+ assert prs[0].thread_comments == ["First comment", "Second comment"]
+
+ # Verify other fields are set correctly
+ assert prs[0].number == 1
+ assert prs[0].title == "Test PR"
+ assert prs[0].body == "Test Body fixes #1"
+ assert prs[0].owner == "test-owner"
+ assert prs[0].repo == "test-repo"
+ assert prs[0].head_branch == "test-branch"
+ assert prs[0].closing_issues == [
+ "This is additional context from an externally referenced issue."
+ ]
+
+
+def test_pr_handler_guess_success_only_review_comments():
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create a mock issue with only review comments
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=1,
+ title="Test PR",
+ body="Test Body",
+ thread_comments=None,
+ closing_issues=["Issue description"],
+ review_comments=["Please fix the formatting", "Add more tests"],
+ thread_ids=None,
+ head_branch="test-branch",
+ )
+
+ # Create mock history
+ history = [MessageAction(content="Fixed the formatting and added more tests")]
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [
+ MagicMock(
+ message=MagicMock(
+ content="""--- success
+true
+
+--- explanation
+The changes successfully address the review comments."""
+ )
+ )
+ ]
+
+ # Test the guess_success method
+ with patch("litellm.completion", return_value=mock_response):
+ success, success_list, explanation = handler.guess_success(
+ issue, history, llm_config
+ )
+
+ # Verify the results
+ assert success is True
+ assert success_list == [True]
+ assert "successfully address" in explanation
+
+
+def test_pr_handler_guess_success_no_comments():
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create a mock issue with no comments
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=1,
+ title="Test PR",
+ body="Test Body",
+ thread_comments=None,
+ closing_issues=["Issue description"],
+ review_comments=None,
+ thread_ids=None,
+ head_branch="test-branch",
+ )
+
+ # Create mock history
+ history = [MessageAction(content="Fixed the issue")]
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Test that it returns appropriate message when no comments are present
+ success, success_list, explanation = handler.guess_success(
+ issue, history, llm_config
+ )
+ assert success is False
+ assert success_list is None
+ assert explanation == "No feedback was found to process"
+
+
+def test_get_issue_comments_with_specific_comment_id():
+ # Mock the necessary dependencies
+ with patch("requests.get") as mock_get:
+ # Mock the response for comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"id": 123, "body": "First comment"},
+ {"id": 456, "body": "Second comment"},
+ ]
+
+ mock_get.return_value = mock_comments_response
+
+ # Create an instance of IssueHandler
+ handler = IssueHandler("test-owner", "test-repo", "test-token")
+
+ # Get comments with a specific comment_id
+ specific_comment = handler._get_issue_comments(issue_number=1, comment_id=123)
+
+ # Verify only the specific comment is returned
+ assert specific_comment == ["First comment"]
+
+
+def test_pr_handler_get_converted_issues_with_specific_thread_comment():
+ # Define the specific comment_id to filter
+ specific_comment_id = 123
+
+ # Mock GraphQL response for review threads
+ with patch("requests.get") as mock_get:
+ # Mock the response for PRs
+ mock_prs_response = MagicMock()
+ mock_prs_response.json.return_value = [
+ {
+ "number": 1,
+ "title": "Test PR",
+ "body": "Test Body",
+ "head": {"ref": "test-branch"},
+ }
+ ]
+
+ # Mock the response for PR comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"body": "First comment", "id": 123},
+ {"body": "Second comment", "id": 124},
+ ]
+
+ # Mock the response for PR metadata (GraphQL)
+ mock_graphql_response = MagicMock()
+ mock_graphql_response.json.return_value = {
+ "data": {
+ "repository": {
+ "pullRequest": {
+ "closingIssuesReferences": {"edges": []},
+ "reviews": {"nodes": []},
+ "reviewThreads": {
+ "edges": [
+ {
+ "node": {
+ "id": "review-thread-1",
+ "isResolved": False,
+ "comments": {
+ "nodes": [
+ {
+ "fullDatabaseId": 121,
+ "body": "Specific review comment",
+ "path": "file1.txt",
+ },
+ {
+ "fullDatabaseId": 456,
+ "body": "Another review comment",
+ "path": "file2.txt",
+ },
+ ]
+ },
+ }
+ }
+ ]
+ },
+ }
+ }
+ }
+ }
+
+ # Set up the mock to return different responses
+ # We need to return empty responses for subsequent pages
+ mock_empty_response = MagicMock()
+ mock_empty_response.json.return_value = []
+
+ mock_get.side_effect = [
+ mock_prs_response, # First call for PRs
+ mock_empty_response, # Second call for PRs (empty page)
+ mock_comments_response, # Third call for PR comments
+ mock_empty_response, # Fourth call for PR comments (empty page)
+ ]
+
+ # Mock the post request for GraphQL
+ with patch("requests.post") as mock_post:
+ mock_post.return_value = mock_graphql_response
+
+ # Create an instance of PRHandler
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ prs = handler.get_converted_issues(comment_id=specific_comment_id)
+
+ # Verify that we got exactly one PR
+ assert len(prs) == 1
+
+ # Verify that thread_comments are set correctly
+ assert prs[0].thread_comments == ["First comment"]
+ assert prs[0].review_comments == []
+ assert prs[0].review_threads == []
+
+ # Verify other fields are set correctly
+ assert prs[0].number == 1
+ assert prs[0].title == "Test PR"
+ assert prs[0].body == "Test Body"
+ assert prs[0].owner == "test-owner"
+ assert prs[0].repo == "test-repo"
+ assert prs[0].head_branch == "test-branch"
+
+
+def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
+ # Define the specific comment_id to filter
+ specific_comment_id = 123
+
+ # Mock GraphQL response for review threads
+ with patch("requests.get") as mock_get:
+ # Mock the response for PRs
+ mock_prs_response = MagicMock()
+ mock_prs_response.json.return_value = [
+ {
+ "number": 1,
+ "title": "Test PR",
+ "body": "Test Body",
+ "head": {"ref": "test-branch"},
+ }
+ ]
+
+ # Mock the response for PR comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"body": "First comment", "id": 120},
+ {"body": "Second comment", "id": 124},
+ ]
+
+ # Mock the response for PR metadata (GraphQL)
+ mock_graphql_response = MagicMock()
+ mock_graphql_response.json.return_value = {
+ "data": {
+ "repository": {
+ "pullRequest": {
+ "closingIssuesReferences": {"edges": []},
+ "reviews": {"nodes": []},
+ "reviewThreads": {
+ "edges": [
+ {
+ "node": {
+ "id": "review-thread-1",
+ "isResolved": False,
+ "comments": {
+ "nodes": [
+ {
+ "fullDatabaseId": specific_comment_id,
+ "body": "Specific review comment",
+ "path": "file1.txt",
+ },
+ {
+ "fullDatabaseId": 456,
+ "body": "Another review comment",
+ "path": "file1.txt",
+ },
+ ]
+ },
+ }
+ }
+ ]
+ },
+ }
+ }
+ }
+ }
+
+ # Set up the mock to return different responses
+ # We need to return empty responses for subsequent pages
+ mock_empty_response = MagicMock()
+ mock_empty_response.json.return_value = []
+
+ mock_get.side_effect = [
+ mock_prs_response, # First call for PRs
+ mock_empty_response, # Second call for PRs (empty page)
+ mock_comments_response, # Third call for PR comments
+ mock_empty_response, # Fourth call for PR comments (empty page)
+ ]
+
+ # Mock the post request for GraphQL
+ with patch("requests.post") as mock_post:
+ mock_post.return_value = mock_graphql_response
+
+ # Create an instance of PRHandler
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ prs = handler.get_converted_issues(comment_id=specific_comment_id)
+
+ # Verify that we got exactly one PR
+ assert len(prs) == 1
+
+ # Verify that thread_comments are set correctly
+ assert prs[0].thread_comments is None
+ assert prs[0].review_comments == []
+ assert len(prs[0].review_threads) == 1
+ assert isinstance(prs[0].review_threads[0], ReviewThread)
+ assert (
+ prs[0].review_threads[0].comment
+ == "Specific review comment\n---\nlatest feedback:\nAnother review comment\n"
+ )
+ assert prs[0].review_threads[0].files == ["file1.txt"]
+
+ # Verify other fields are set correctly
+ assert prs[0].number == 1
+ assert prs[0].title == "Test PR"
+ assert prs[0].body == "Test Body"
+ assert prs[0].owner == "test-owner"
+ assert prs[0].repo == "test-repo"
+ assert prs[0].head_branch == "test-branch"
+
+
+def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
+ # Define the specific comment_id to filter
+ specific_comment_id = 123
+
+ # Mock GraphQL response for review threads
+ with patch("requests.get") as mock_get:
+ # Mock the response for PRs
+ mock_prs_response = MagicMock()
+ mock_prs_response.json.return_value = [
+ {
+ "number": 1,
+ "title": "Test PR fixes #3",
+ "body": "Test Body",
+ "head": {"ref": "test-branch"},
+ }
+ ]
+
+ # Mock the response for PR comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"body": "First comment", "id": 120},
+ {"body": "Second comment", "id": 124},
+ ]
+
+ # Mock the response for PR metadata (GraphQL)
+ mock_graphql_response = MagicMock()
+ mock_graphql_response.json.return_value = {
+ "data": {
+ "repository": {
+ "pullRequest": {
+ "closingIssuesReferences": {"edges": []},
+ "reviews": {"nodes": []},
+ "reviewThreads": {
+ "edges": [
+ {
+ "node": {
+ "id": "review-thread-1",
+ "isResolved": False,
+ "comments": {
+ "nodes": [
+ {
+ "fullDatabaseId": specific_comment_id,
+ "body": "Specific review comment that references #6",
+ "path": "file1.txt",
+ },
+ {
+ "fullDatabaseId": 456,
+ "body": "Another review comment referencing #7",
+ "path": "file2.txt",
+ },
+ ]
+ },
+ }
+ }
+ ]
+ },
+ }
+ }
+ }
+ }
+
+ # Set up the mock to return different responses
+ # We need to return empty responses for subsequent pages
+ mock_empty_response = MagicMock()
+ mock_empty_response.json.return_value = []
+
+ # Mock the response for fetching the external issue referenced in PR body
+ mock_external_issue_response_in_body = MagicMock()
+ mock_external_issue_response_in_body.json.return_value = {
+ "body": "External context #1."
+ }
+
+ # Mock the response for fetching the external issue referenced in review thread
+ mock_external_issue_response_review_thread = MagicMock()
+ mock_external_issue_response_review_thread.json.return_value = {
+ "body": "External context #2."
+ }
+
+ mock_get.side_effect = [
+ mock_prs_response, # First call for PRs
+ mock_empty_response, # Second call for PRs (empty page)
+ mock_comments_response, # Third call for PR comments
+ mock_empty_response, # Fourth call for PR comments (empty page)
+ mock_external_issue_response_in_body,
+ mock_external_issue_response_review_thread,
+ ]
+
+ # Mock the post request for GraphQL
+ with patch("requests.post") as mock_post:
+ mock_post.return_value = mock_graphql_response
+
+ # Create an instance of PRHandler
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ prs = handler.get_converted_issues(comment_id=specific_comment_id)
+
+ # Verify that we got exactly one PR
+ assert len(prs) == 1
+
+ # Verify that thread_comments are set correctly
+ assert prs[0].thread_comments is None
+ assert prs[0].review_comments == []
+ assert len(prs[0].review_threads) == 1
+ assert isinstance(prs[0].review_threads[0], ReviewThread)
+ assert (
+ prs[0].review_threads[0].comment
+ == "Specific review comment that references #6\n---\nlatest feedback:\nAnother review comment referencing #7\n"
+ )
+ assert prs[0].closing_issues == [
+ "External context #1.",
+ "External context #2.",
+        ] # Only includes references found in the filtered comment and in the PR body
+
+ # Verify other fields are set correctly
+ assert prs[0].number == 1
+ assert prs[0].title == "Test PR fixes #3"
+ assert prs[0].body == "Test Body"
+ assert prs[0].owner == "test-owner"
+ assert prs[0].repo == "test-repo"
+ assert prs[0].head_branch == "test-branch"
+
+
+def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
+ # Mock the necessary dependencies
+ with patch("requests.get") as mock_get:
+ # Mock the response for PRs
+ mock_prs_response = MagicMock()
+ mock_prs_response.json.return_value = [
+ {
+ "number": 1,
+ "title": "Test PR",
+ "body": "Test Body fixes #1",
+ "head": {"ref": "test-branch"},
+ }
+ ]
+
+ # Mock the response for PR comments
+ mock_comments_response = MagicMock()
+ mock_comments_response.json.return_value = [
+ {"body": "First comment addressing #1"},
+ {"body": "Second comment addressing #2"},
+ ]
+
+ # Mock the response for PR metadata (GraphQL)
+ mock_graphql_response = MagicMock()
+ mock_graphql_response.json.return_value = {
+ "data": {
+ "repository": {
+ "pullRequest": {
+ "closingIssuesReferences": {"edges": []},
+ "reviews": {"nodes": []},
+ "reviewThreads": {"edges": []},
+ }
+ }
+ }
+ }
+
+ # Set up the mock to return different responses
+ # We need to return empty responses for subsequent pages
+ mock_empty_response = MagicMock()
+ mock_empty_response.json.return_value = []
+
+ # Mock the response for fetching the external issue referenced in PR body
+ mock_external_issue_response_in_body = MagicMock()
+ mock_external_issue_response_in_body.json.return_value = {
+ "body": "External context #1."
+ }
+
+ # Mock the response for fetching the external issue referenced in review thread
+ mock_external_issue_response_in_comment = MagicMock()
+ mock_external_issue_response_in_comment.json.return_value = {
+ "body": "External context #2."
+ }
+
+ mock_get.side_effect = [
+ mock_prs_response, # First call for PRs
+ mock_empty_response, # Second call for PRs (empty page)
+ mock_comments_response, # Third call for PR comments
+ mock_empty_response, # Fourth call for PR comments (empty page)
+ mock_external_issue_response_in_body, # Mock response for the external issue reference #1
+ mock_external_issue_response_in_comment,
+ ]
+
+ # Mock the post request for GraphQL
+ with patch("requests.post") as mock_post:
+ mock_post.return_value = mock_graphql_response
+
+ # Create an instance of PRHandler
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Get converted issues
+ prs = handler.get_converted_issues()
+
+ # Verify that we got exactly one PR
+ assert len(prs) == 1
+
+ # Verify that thread_comments are set correctly
+ assert prs[0].thread_comments == [
+ "First comment addressing #1",
+ "Second comment addressing #2",
+ ]
+
+ # Verify other fields are set correctly
+ assert prs[0].number == 1
+ assert prs[0].title == "Test PR"
+ assert prs[0].body == "Test Body fixes #1"
+ assert prs[0].owner == "test-owner"
+ assert prs[0].repo == "test-repo"
+ assert prs[0].head_branch == "test-branch"
+ assert prs[0].closing_issues == [
+ "External context #1.",
+ "External context #2.",
+ ]
diff --git a/tests/unit/resolver/test_pr_handler_guess_success.py b/tests/unit/resolver/test_pr_handler_guess_success.py
new file mode 100644
index 000000000000..bc29fbe2632e
--- /dev/null
+++ b/tests/unit/resolver/test_pr_handler_guess_success.py
@@ -0,0 +1,460 @@
+import json
+from unittest.mock import patch, MagicMock
+
+from openhands.resolver.issue_definitions import PRHandler
+from openhands.resolver.github_issue import GithubIssue, ReviewThread
+from openhands.events.action.message import MessageAction
+from openhands.core.config import LLMConfig
+
+
+def test_guess_success_review_threads_litellm_call():
+ """Test that the litellm.completion() call for review threads contains the expected content."""
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create a mock issue with review threads
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=1,
+ title="Test PR",
+ body="Test Body",
+ thread_comments=None,
+ closing_issues=["Issue 1 description", "Issue 2 description"],
+ review_comments=None,
+ review_threads=[
+ ReviewThread(
+ comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
+ files=["/src/file1.py", "/src/file2.py"],
+ ),
+ ReviewThread(
+ comment="Add more tests\n---\nlatest feedback:\nAdd test cases",
+ files=["/tests/test_file.py"],
+ ),
+ ],
+ thread_ids=["1", "2"],
+ head_branch="test-branch",
+ )
+
+ # Create mock history with a detailed response
+ history = [
+ MessageAction(
+ content="""I have made the following changes:
+1. Fixed formatting in file1.py and file2.py
+2. Added docstrings to all functions
+3. Added test cases in test_file.py"""
+ )
+ ]
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [
+ MagicMock(
+ message=MagicMock(
+ content="""--- success
+true
+
+--- explanation
+The changes successfully address the feedback."""
+ )
+ )
+ ]
+
+ # Test the guess_success method
+ with patch("litellm.completion") as mock_completion:
+ mock_completion.return_value = mock_response
+ success, success_list, explanation = handler.guess_success(
+ issue, history, llm_config
+ )
+
+ # Verify the litellm.completion() calls
+ assert mock_completion.call_count == 2 # One call per review thread
+
+ # Check first call
+ first_call = mock_completion.call_args_list[0]
+ first_prompt = first_call[1]["messages"][0]["content"]
+ assert (
+ "Issue descriptions:\n"
+ + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ in first_prompt
+ )
+ assert (
+ "Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings"
+ in first_prompt
+ )
+ assert (
+ "Files locations:\n"
+ + json.dumps(["/src/file1.py", "/src/file2.py"], indent=4)
+ in first_prompt
+ )
+ assert "Last message from AI agent:\n" + history[0].content in first_prompt
+
+ # Check second call
+ second_call = mock_completion.call_args_list[1]
+ second_prompt = second_call[1]["messages"][0]["content"]
+ assert (
+ "Issue descriptions:\n"
+ + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ in second_prompt
+ )
+ assert (
+ "Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases"
+ in second_prompt
+ )
+ assert (
+ "Files locations:\n" + json.dumps(["/tests/test_file.py"], indent=4)
+ in second_prompt
+ )
+ assert "Last message from AI agent:\n" + history[0].content in second_prompt
+
+
+def test_guess_success_thread_comments_litellm_call():
+ """Test that the litellm.completion() call for thread comments contains the expected content."""
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create a mock issue with thread comments
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=1,
+ title="Test PR",
+ body="Test Body",
+ thread_comments=[
+ "Please improve error handling",
+ "Add input validation",
+ "latest feedback:\nHandle edge cases",
+ ],
+ closing_issues=["Issue 1 description", "Issue 2 description"],
+ review_comments=None,
+ thread_ids=None,
+ head_branch="test-branch",
+ )
+
+ # Create mock history with a detailed response
+ history = [
+ MessageAction(
+ content="""I have made the following changes:
+1. Added try/catch blocks for error handling
+2. Added input validation checks
+3. Added handling for edge cases"""
+ )
+ ]
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [
+ MagicMock(
+ message=MagicMock(
+ content="""--- success
+true
+
+--- explanation
+The changes successfully address the feedback."""
+ )
+ )
+ ]
+
+ # Test the guess_success method
+ with patch("litellm.completion") as mock_completion:
+ mock_completion.return_value = mock_response
+ success, success_list, explanation = handler.guess_success(
+ issue, history, llm_config
+ )
+
+ # Verify the litellm.completion() call
+ mock_completion.assert_called_once()
+ call_args = mock_completion.call_args
+ prompt = call_args[1]["messages"][0]["content"]
+
+ # Check prompt content
+ assert (
+ "Issue descriptions:\n"
+ + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
+ in prompt
+ )
+ assert "PR Thread Comments:\n" + "\n---\n".join(issue.thread_comments) in prompt
+ assert "Last message from AI agent:\n" + history[0].content in prompt
+
+
+def test_check_feedback_with_llm():
+ """Test the _check_feedback_with_llm helper function."""
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create mock LLM config
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Test cases for different LLM responses
+ test_cases = [
+ {
+ "response": "--- success\ntrue\n--- explanation\nChanges look good",
+ "expected": (True, "Changes look good"),
+ },
+ {
+ "response": "--- success\nfalse\n--- explanation\nNot all issues fixed",
+ "expected": (False, "Not all issues fixed"),
+ },
+ {
+ "response": "Invalid response format",
+ "expected": (
+ False,
+ "Failed to decode answer from LLM response: Invalid response format",
+ ),
+ },
+ {
+ "response": "--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere",
+ "expected": (True, "Multiline\nexplanation\nhere"),
+ },
+ ]
+
+ for case in test_cases:
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [MagicMock(message=MagicMock(content=case["response"]))]
+
+ # Test the function
+ with patch("litellm.completion", return_value=mock_response):
+ success, explanation = handler._check_feedback_with_llm(
+ "test prompt", llm_config
+ )
+ assert (success, explanation) == case["expected"]
+
+
+def test_check_review_thread():
+ """Test the _check_review_thread helper function."""
+ # Create a PR handler instance
+ handler = PRHandler("test-owner", "test-repo", "test-token")
+
+ # Create test data
+ review_thread = ReviewThread(
+ comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
+ files=["/src/file1.py", "/src/file2.py"],
+ )
+ issues_context = json.dumps(
+ ["Issue 1 description", "Issue 2 description"], indent=4
+ )
+ last_message = "I have fixed the formatting and added docstrings"
+ llm_config = LLMConfig(model="test-model", api_key="test-key")
+
+ # Mock the LLM response
+ mock_response = MagicMock()
+ mock_response.choices = [
+ MagicMock(
+ message=MagicMock(
+ content="""--- success
+true
+
+--- explanation
+Changes look good"""
+ )
+ )
+ ]
+
+ # Test the function
+ with patch("litellm.completion") as mock_completion:
+ mock_completion.return_value = mock_response
+ success, explanation = handler._check_review_thread(
+ review_thread, issues_context, last_message, llm_config
+ )
+
+ # Verify the litellm.completion() call
+ mock_completion.assert_called_once()
+ call_args = mock_completion.call_args
+ prompt = call_args[1]["messages"][0]["content"]
+
+ # Check prompt content
+ assert "Issue descriptions:\n" + issues_context in prompt
+ assert "Feedback:\n" + review_thread.comment in prompt
+ assert (
+ "Files locations:\n" + json.dumps(review_thread.files, indent=4) in prompt
+ )
+ assert "Last message from AI agent:\n" + last_message in prompt
+
+ # Check result
+ assert success is True
+ assert explanation == "Changes look good"
+
+
def test_check_thread_comments():
    """Verify _check_thread_comments builds the expected prompt and parses the LLM verdict."""
    handler = PRHandler("test-owner", "test-repo", "test-token")

    # Inputs under test
    thread_comments = [
        "Please improve error handling",
        "Add input validation",
        "latest feedback:\nHandle edge cases",
    ]
    issues_context = json.dumps(
        ["Issue 1 description", "Issue 2 description"], indent=4
    )
    last_message = "I have added error handling and input validation"
    llm_config = LLMConfig(model="test-model", api_key="test-key")

    # Canned LLM verdict: success flag plus a one-line explanation
    llm_reply = "--- success\ntrue\n\n--- explanation\nChanges look good"
    fake_response = MagicMock(
        choices=[MagicMock(message=MagicMock(content=llm_reply))]
    )

    with patch("litellm.completion", return_value=fake_response) as mock_completion:
        success, explanation = handler._check_thread_comments(
            thread_comments, issues_context, last_message, llm_config
        )

    # Exactly one LLM call was made; inspect the prompt it received
    mock_completion.assert_called_once()
    prompt = mock_completion.call_args[1]["messages"][0]["content"]

    # Every context section must appear in the prompt
    assert "Issue descriptions:\n" + issues_context in prompt
    assert "PR Thread Comments:\n" + "\n---\n".join(thread_comments) in prompt
    assert "Last message from AI agent:\n" + last_message in prompt

    # The verdict was parsed out of the canned reply
    assert success is True
    assert explanation == "Changes look good"
+
def test_check_review_comments():
    """Verify _check_review_comments builds the expected prompt and parses the LLM verdict."""
    handler = PRHandler("test-owner", "test-repo", "test-token")

    # Inputs under test
    review_comments = [
        "Please improve code readability",
        "Add comments to complex functions",
        "Follow PEP 8 style guide",
    ]
    issues_context = json.dumps(
        ["Issue 1 description", "Issue 2 description"], indent=4
    )
    last_message = "I have improved code readability and added comments"
    llm_config = LLMConfig(model="test-model", api_key="test-key")

    # Canned LLM verdict: success flag plus a one-line explanation
    message_mock = MagicMock()
    message_mock.content = "--- success\ntrue\n\n--- explanation\nChanges look good"
    fake_response = MagicMock()
    fake_response.choices = [MagicMock(message=message_mock)]

    with patch("litellm.completion", return_value=fake_response) as mock_completion:
        success, explanation = handler._check_review_comments(
            review_comments, issues_context, last_message, llm_config
        )

    # Exactly one LLM call was made; inspect the prompt it received
    mock_completion.assert_called_once()
    prompt = mock_completion.call_args[1]["messages"][0]["content"]

    # Every context section must appear in the prompt
    assert "Issue descriptions:\n" + issues_context in prompt
    assert "PR Review Comments:\n" + "\n---\n".join(review_comments) in prompt
    assert "Last message from AI agent:\n" + last_message in prompt

    # The verdict was parsed out of the canned reply
    assert success is True
    assert explanation == "Changes look good"
+
def test_guess_success_review_comments_litellm_call():
    """Test that the litellm.completion() call for review comments contains the
    expected content, and that the returned verdict is parsed correctly.

    Fix: the original test unpacked ``success, success_list, explanation`` from
    ``guess_success`` but never asserted on them, so the verdict-parsing path
    was unverified and the variables were dead. Result assertions added below.
    """
    # Create a PR handler instance
    handler = PRHandler("test-owner", "test-repo", "test-token")

    # Create a mock issue with review comments
    issue = GithubIssue(
        owner="test-owner",
        repo="test-repo",
        number=1,
        title="Test PR",
        body="Test Body",
        thread_comments=None,
        closing_issues=["Issue 1 description", "Issue 2 description"],
        review_comments=[
            "Please improve code readability",
            "Add comments to complex functions",
            "Follow PEP 8 style guide",
        ],
        thread_ids=None,
        head_branch="test-branch",
    )

    # Create mock history with a detailed response
    history = [
        MessageAction(
            content="""I have made the following changes:
1. Improved code readability by breaking down complex functions
2. Added detailed comments to all complex functions
3. Fixed code style to follow PEP 8"""
        )
    ]

    # Create mock LLM config
    llm_config = LLMConfig(model="test-model", api_key="test-key")

    # Mock the LLM response: success verdict plus explanation
    mock_response = MagicMock()
    mock_response.choices = [
        MagicMock(
            message=MagicMock(
                content="""--- success
true

--- explanation
The changes successfully address the feedback."""
            )
        )
    ]

    # Test the guess_success method
    with patch("litellm.completion") as mock_completion:
        mock_completion.return_value = mock_response
        success, success_list, explanation = handler.guess_success(
            issue, history, llm_config
        )

        # Verify the litellm.completion() call
        mock_completion.assert_called_once()
        call_args = mock_completion.call_args
        prompt = call_args[1]["messages"][0]["content"]

        # Check prompt content
        assert (
            "Issue descriptions:\n"
            + json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
            in prompt
        )
        assert "PR Review Comments:\n" + "\n---\n".join(issue.review_comments) in prompt
        assert "Last message from AI agent:\n" + history[0].content in prompt

        # Check the parsed verdict (previously unchecked): the canned reply
        # declares success, so the overall outcome must be True and the
        # explanation must carry the reply's explanation text.
        assert success is True
        assert "The changes successfully address the feedback." in explanation
diff --git a/tests/unit/resolver/test_pr_title_escaping.py b/tests/unit/resolver/test_pr_title_escaping.py
new file mode 100644
index 000000000000..03f2b7104807
--- /dev/null
+++ b/tests/unit/resolver/test_pr_title_escaping.py
@@ -0,0 +1,165 @@
+from openhands.resolver.github_issue import GithubIssue
+from openhands.resolver.send_pull_request import make_commit
+import os
+import tempfile
+import subprocess
+
+
+def test_commit_message_with_quotes():
+ # Create a temporary directory and initialize git repo
+ with tempfile.TemporaryDirectory() as temp_dir:
+ subprocess.run(["git", "init", temp_dir], check=True)
+
+ # Create a test file and add it to git
+ test_file = os.path.join(temp_dir, "test.txt")
+ with open(test_file, "w") as f:
+ f.write("test content")
+
+ subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
+
+ # Create a test issue with problematic title
+ issue = GithubIssue(
+ owner="test-owner",
+ repo="test-repo",
+ number=123,
+ title="Issue with 'quotes' and \"double quotes\" and