Commit
Merge pull request #173 from vvincent1234/fix/adapt_latest_browser-use
Fix/adapt latest browser use
warmshao authored Jan 27, 2025
2 parents 284f0b7 + b9080c3 commit 566bca7
Showing 14 changed files with 454 additions and 486 deletions.
7 changes: 2 additions & 5 deletions requirements.txt
@@ -1,6 +1,3 @@
-browser-use==0.1.19
-langchain-google-genai==2.0.8
+browser-use==0.1.29
 pyperclip==1.9.0
-gradio==5.9.1
-langchain-ollama==0.2.2
-langchain-openai==0.2.14
+gradio==5.10.0
393 changes: 200 additions & 193 deletions src/agent/custom_agent.py

Large diffs are not rendered by default.

58 changes: 17 additions & 41 deletions src/agent/custom_massage_manager.py
@@ -15,6 +15,7 @@
AIMessage,
BaseMessage,
HumanMessage,
ToolMessage
)
from langchain_openai import ChatOpenAI
from ..utils.llm import DeepSeekR1ChatOpenAI
@@ -31,69 +32,44 @@ def __init__(
action_descriptions: str,
system_prompt_class: Type[SystemPrompt],
max_input_tokens: int = 128000,
estimated_tokens_per_character: int = 3,
estimated_characters_per_token: int = 3,
image_tokens: int = 800,
include_attributes: list[str] = [],
max_error_length: int = 400,
max_actions_per_step: int = 10,
tool_call_in_content: bool = False,
use_function_calling: bool = True
message_context: Optional[str] = None,
use_deepseek_r1: bool = False
):
super().__init__(
llm=llm,
task=task,
action_descriptions=action_descriptions,
system_prompt_class=system_prompt_class,
max_input_tokens=max_input_tokens,
estimated_tokens_per_character=estimated_tokens_per_character,
estimated_characters_per_token=estimated_characters_per_token,
image_tokens=image_tokens,
include_attributes=include_attributes,
max_error_length=max_error_length,
max_actions_per_step=max_actions_per_step,
tool_call_in_content=tool_call_in_content,
message_context=message_context
)
self.use_function_calling = use_function_calling
self.tool_id = 1
self.use_deepseek_r1 = use_deepseek_r1
# Custom: Move Task info to state_message
self.history = MessageHistory()
self._add_message_with_tokens(self.system_prompt)

if self.use_function_calling:
tool_calls = [
{
'name': 'CustomAgentOutput',
'args': {
'current_state': {
'prev_action_evaluation': 'Unknown - No previous actions to evaluate.',
'important_contents': '',
'completed_contents': '',
'thought': 'Now Google is open. Need to type OpenAI to search.',
'summary': 'Type OpenAI to search.',
},
'action': [],
},
'id': '',
'type': 'tool_call',
}
]
if self.tool_call_in_content:
# openai throws error if tool_calls are not responded -> move to content
example_tool_call = AIMessage(
content=f'{tool_calls}',
tool_calls=[],
)
else:
example_tool_call = AIMessage(
content=f'',
tool_calls=tool_calls,
)

self._add_message_with_tokens(example_tool_call)
if self.message_context:
context_message = HumanMessage(content=self.message_context)
self._add_message_with_tokens(context_message)

def cut_messages(self):
"""Get current message list, potentially trimmed to max tokens"""
diff = self.history.total_tokens - self.max_input_tokens
while diff > 0 and len(self.history.messages) > 1:
self.history.remove_message(1) # alway remove the oldest one
min_message_len = 2 if self.message_context is not None else 1

while diff > 0 and len(self.history.messages) > min_message_len:
self.history.remove_message(min_message_len) # alway remove the oldest message
diff = self.history.total_tokens - self.max_input_tokens

def add_state_message(
@@ -119,10 +95,10 @@ def _count_text_tokens(self, text: str) -> int:
tokens = self.llm.get_num_tokens(text)
except Exception:
tokens = (
len(text) // self.ESTIMATED_TOKENS_PER_CHARACTER
len(text) // self.estimated_characters_per_token
) # Rough estimate if no tokenizer available
else:
tokens = (
len(text) // self.ESTIMATED_TOKENS_PER_CHARACTER
len(text) // self.estimated_characters_per_token
) # Rough estimate if no tokenizer available
return tokens
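
The cut_messages change above keeps the trimming loop from ever dropping the context message: when message_context is set, that message sits in the slot right after the system prompt, so removal starts at index 2 instead of 1. A minimal standalone sketch of the idea (the Message and History classes here are illustrative stand-ins, not the project's MessageHistory):

```python
from dataclasses import dataclass, field


@dataclass
class Message:
    content: str
    tokens: int


@dataclass
class History:
    messages: list[Message] = field(default_factory=list)

    @property
    def total_tokens(self) -> int:
        return sum(m.tokens for m in self.messages)


def cut_messages(history: History, max_input_tokens: int, has_context: bool) -> None:
    # Index 0 holds the system prompt; index 1 holds the optional context message.
    # Trimming starts after whichever of those are present.
    min_message_len = 2 if has_context else 1
    diff = history.total_tokens - max_input_tokens
    while diff > 0 and len(history.messages) > min_message_len:
        history.messages.pop(min_message_len)  # always drop the oldest trimmable message
        diff = history.total_tokens - max_input_tokens


h = History([Message("system prompt", 10), Message("context", 10),
             Message("old state", 50), Message("new state", 50)])
cut_messages(h, max_input_tokens=80, has_context=True)
print([m.content for m in h.messages])  # ['system prompt', 'context', 'new state']
```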
65 changes: 40 additions & 25 deletions src/agent/custom_prompts.py
@@ -1,7 +1,7 @@
import pdb
from typing import List, Optional

from browser_use.agent.prompts import SystemPrompt
from browser_use.agent.prompts import SystemPrompt, AgentMessagePrompt
from browser_use.agent.views import ActionResult
from browser_use.browser.views import BrowserState
from langchain_core.messages import HumanMessage, SystemMessage
@@ -19,19 +19,14 @@ def important_rules(self) -> str:
{
"current_state": {
"prev_action_evaluation": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Ignore the action result. The website is the ground truth. Also mention if something unexpected happened like new suggestions in an input field. Shortly state why/why not. Note that the result you output must be consistent with the reasoning you output afterwards. If you consider it to be 'Failed,' you should reflect on this during your thought.",
"important_contents": "Output important contents closely related to user\'s instruction or task on the current page. If there is, please output the contents. If not, please output empty string ''.",
"important_contents": "Output important contents closely related to user\'s instruction on the current page. If there is, please output the contents. If not, please output empty string ''.",
"task_progress": "Task Progress is a general summary of the current contents that have been completed. Just summarize the contents that have been actually completed based on the content at current step and the history operations. Please list each completed item individually, such as: 1. Input username. 2. Input Password. 3. Click confirm button. Please return string type not a list.",
"future_plans": "Based on the user's request and the current state, outline the remaining steps needed to complete the task. This should be a concise list of actions yet to be performed, such as: 1. Select a date. 2. Choose a specific time slot. 3. Confirm booking. Please return string type not a list.",
"thought": "Think about the requirements that have been completed in previous operations and the requirements that need to be completed in the next one operation. If your output of prev_action_evaluation is 'Failed', please reflect and output your reflection here.",
"summary": "Please generate a brief natural language description for the operation in next actions based on your Thought."
},
"action": [
{
"action_name": {
// action-specific parameters
}
},
// ... more actions in sequence
* actions in sequences, please refer to **Common action sequences**. Each output action MUST be formated as: \{action_name\: action_params\}*
]
}
@@ -44,7 +39,6 @@ def important_rules(self) -> str:
{"click_element": {"index": 3}}
]
- Navigation and extraction: [
{"open_new_tab": {}},
{"go_to_url": {"url": "https://example.com"}},
{"extract_page_content": {}}
]
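
For illustration only (this example is not part of the commit), a complete model response following the format and action conventions described above could look like the dict below; the input_text action and the element indices are assumptions patterned on the click_element and go_to_url examples:

```python
example_response = {
    "current_state": {
        "prev_action_evaluation": "Success - Google is open and the search box is available as intended.",
        "important_contents": "",
        "task_progress": "1. Opened google.com.",
        "future_plans": "1. Type the search query. 2. Click the search button.",
        "thought": "The page is ready; the next step is to type the query and submit it.",
        "summary": "Type the query into the search box and click search.",
    },
    "action": [
        {"input_text": {"index": 2, "text": "OpenAI"}},  # hypothetical action name and index
        {"click_element": {"index": 3}},
    ],
}
```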
@@ -127,7 +121,7 @@ def get_system_message(self) -> SystemMessage:
AGENT_PROMPT = f"""You are a precise browser automation agent that interacts with websites through structured commands. Your role is to:
1. Analyze the provided webpage elements and structure
2. Plan a sequence of actions to accomplish the given task
3. Respond with valid JSON containing your action sequence and state assessment
3. Your final result MUST be a valid JSON as the **RESPONSE FORMAT** described, containing your action sequence and state assessment, No need extra content to expalin.
Current date and time: {time_str}
@@ -142,7 +136,7 @@ def get_system_message(self) -> SystemMessage:
return SystemMessage(content=AGENT_PROMPT)


class CustomAgentMessagePrompt:
class CustomAgentMessagePrompt(AgentMessagePrompt):
def __init__(
self,
state: BrowserState,
@@ -151,11 +145,12 @@ def __init__(
max_error_length: int = 400,
step_info: Optional[CustomAgentStepInfo] = None,
):
self.state = state
self.result = result
self.max_error_length = max_error_length
self.include_attributes = include_attributes
self.step_info = step_info
super(CustomAgentMessagePrompt, self).__init__(state=state,
result=result,
include_attributes=include_attributes,
max_error_length=max_error_length,
step_info=step_info
)

def get_user_message(self) -> HumanMessage:
if self.step_info:
@@ -164,8 +159,26 @@ def get_user_message(self) -> HumanMessage:
step_info_description = ''

elements_text = self.state.element_tree.clickable_elements_to_string(include_attributes=self.include_attributes)
if not elements_text:

has_content_above = (self.state.pixels_above or 0) > 0
has_content_below = (self.state.pixels_below or 0) > 0

if elements_text != '':
if has_content_above:
elements_text = (
f'... {self.state.pixels_above} pixels above - scroll or extract content to see more ...\n{elements_text}'
)
else:
elements_text = f'[Start of page]\n{elements_text}'
if has_content_below:
elements_text = (
f'{elements_text}\n... {self.state.pixels_below} pixels below - scroll or extract content to see more ...'
)
else:
elements_text = f'{elements_text}\n[End of page]'
else:
elements_text = 'empty page'

state_description = f"""
{step_info_description}
1. Task: {self.step_info.task}
@@ -181,15 +194,17 @@
"""

if self.result:

for i, result in enumerate(self.result):
if result.extracted_content:
state_description += f"\nResult of action {i + 1}/{len(self.result)}: {result.extracted_content}"
if result.error:
# only use last 300 characters of error
error = result.error[-self.max_error_length:]
state_description += (
f"\nError of action {i + 1}/{len(self.result)}: ...{error}"
)
if result.include_in_memory:
if result.extracted_content:
state_description += f"\nResult of previous action {i + 1}/{len(self.result)}: {result.extracted_content}"
if result.error:
# only use last 300 characters of error
error = result.error[-self.max_error_length:]
state_description += (
f"\nError of previous action {i + 1}/{len(self.result)}: ...{error}"
)

if self.state.screenshot:
# Format message for vision model
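
The get_user_message change above wraps the clickable-element list with scroll hints built from the browser state's pixels_above and pixels_below counts, so the model is told when content exists off-screen. Condensed into a standalone function for clarity (a sketch of the same logic, not the committed code; the element string in the usage line is made up):

```python
def frame_elements(elements_text: str, pixels_above: int | None, pixels_below: int | None) -> str:
    if elements_text == '':
        return 'empty page'
    if (pixels_above or 0) > 0:
        elements_text = f'... {pixels_above} pixels above - scroll or extract content to see more ...\n{elements_text}'
    else:
        elements_text = f'[Start of page]\n{elements_text}'
    if (pixels_below or 0) > 0:
        elements_text = f'{elements_text}\n... {pixels_below} pixels below - scroll or extract content to see more ...'
    else:
        elements_text = f'{elements_text}\n[End of page]'
    return elements_text


print(frame_elements('[3]<button>Search</button>', pixels_above=0, pixels_below=1200))
# [Start of page]
# [3]<button>Search</button>
# ... 1200 pixels below - scroll or extract content to see more ...
```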
2 changes: 1 addition & 1 deletion src/agent/custom_views.py
@@ -45,7 +45,7 @@ def type_with_custom_actions(
) -> Type["CustomAgentOutput"]:
"""Extend actions with custom actions"""
return create_model(
"AgentOutput",
"CustomAgentOutput",
__base__=CustomAgentOutput,
action=(
list[custom_actions],
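
The one-line change in custom_views.py only affects the name passed to pydantic's create_model, so the dynamically generated output model now reports itself as CustomAgentOutput rather than AgentOutput. A rough sketch of that mechanism with simplified stand-in types (not the project's real action models):

```python
from pydantic import BaseModel, create_model


class ClickAction(BaseModel):
    index: int


class CustomAgentOutput(BaseModel):
    current_state: dict = {}
    action: list = []


ExtendedOutput = create_model(
    "CustomAgentOutput",              # first argument becomes the generated class's __name__
    __base__=CustomAgentOutput,       # inherit the custom output fields
    action=(list[ClickAction], ...),  # narrow 'action' to the concrete action type (required)
)

print(ExtendedOutput.__name__)        # CustomAgentOutput
print(ExtendedOutput(current_state={}, action=[{"index": 3}]).action[0].index)  # 3
```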