diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 1c2642e89479..de97ff5fa845 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "openhands-frontend",
-  "version": "0.14.1",
+  "version": "0.14.2",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "openhands-frontend",
-      "version": "0.14.1",
+      "version": "0.14.2",
       "dependencies": {
         "@monaco-editor/react": "^4.6.0",
         "@nextui-org/react": "^2.4.8",
diff --git a/frontend/package.json b/frontend/package.json
index 480070de4525..b64d07daa114 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -1,6 +1,6 @@
 {
   "name": "openhands-frontend",
-  "version": "0.14.1",
+  "version": "0.14.2",
   "private": true,
   "type": "module",
   "engines": {
diff --git a/openhands/core/message.py b/openhands/core/message.py
index a707ea3881ea..a5b67917eaee 100644
--- a/openhands/core/message.py
+++ b/openhands/core/message.py
@@ -56,6 +56,7 @@ class Message(BaseModel):
     cache_enabled: bool = False
     vision_enabled: bool = False
     # function calling
+    function_calling_enabled: bool = False
     # - tool calls (from LLM)
     tool_calls: list[ChatCompletionMessageToolCall] | None = None
     # - tool execution result (to LLM)
@@ -72,22 +73,22 @@ def serialize_model(self) -> dict:
         # - into a single string: for providers that don't support list of content items (e.g. no vision, no tool calls)
         # - into a list of content items: the new APIs of providers with vision/prompt caching/tool calls
         # NOTE: remove this when litellm or providers support the new API
-        if (
-            self.cache_enabled
-            or self.vision_enabled
-            or self.tool_call_id is not None
-            or self.tool_calls is not None
-        ):
+        if self.cache_enabled or self.vision_enabled or self.function_calling_enabled:
             return self._list_serializer()
+        # some providers, like HF and Groq/llama, don't support a list here, but a single string
         return self._string_serializer()

-    def _string_serializer(self):
+    def _string_serializer(self) -> dict:
+        # convert content to a single string
         content = '\n'.join(
             item.text for item in self.content if isinstance(item, TextContent)
         )
-        return {'content': content, 'role': self.role}
+        message_dict: dict = {'content': content, 'role': self.role}
+
+        # add tool call keys if we have a tool call or response
+        return self._add_tool_call_keys(message_dict)

-    def _list_serializer(self):
+    def _list_serializer(self) -> dict:
         content: list[dict] = []
         role_tool_with_prompt_caching = False
         for item in self.content:
@@ -102,24 +103,37 @@ def _list_serializer(self):
             elif isinstance(item, ImageContent) and self.vision_enabled:
                 content.extend(d)

-        ret: dict = {'content': content, 'role': self.role}
+        message_dict: dict = {'content': content, 'role': self.role}

+        # pop content if it's empty
         if not content or (
             len(content) == 1
             and content[0]['type'] == 'text'
             and content[0]['text'] == ''
         ):
-            ret.pop('content')
+            message_dict.pop('content')

         if role_tool_with_prompt_caching:
-            ret['cache_control'] = {'type': 'ephemeral'}
+            message_dict['cache_control'] = {'type': 'ephemeral'}
+
+        # add tool call keys if we have a tool call or response
+        return self._add_tool_call_keys(message_dict)

+    def _add_tool_call_keys(self, message_dict: dict) -> dict:
+        """Add tool call keys if we have a tool call or response.
+
+        NOTE: this is necessary for both native and non-native tool calling"""
+
+        # an assistant message calling a tool
+        if self.tool_calls is not None:
+            message_dict['tool_calls'] = self.tool_calls
+
+        # an observation message with tool response
         if self.tool_call_id is not None:
             assert (
                 self.name is not None
             ), 'name is required when tool_call_id is not None'
-            ret['tool_call_id'] = self.tool_call_id
-            ret['name'] = self.name
-        if self.tool_calls:
-            ret['tool_calls'] = self.tool_calls
-        return ret
+            message_dict['tool_call_id'] = self.tool_call_id
+            message_dict['name'] = self.name
+
+        return message_dict
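The message.py change above routes both serializers through the new `_add_tool_call_keys` helper, so tool-call linkage keys survive even on the single-string path used by providers without list-content support. A minimal runnable sketch of that flow, using a simplified, hypothetical `FakeMessage` stand-in rather than the real pydantic `Message` model:

```python
# Hypothetical, simplified stand-in for openhands.core.message.Message;
# it only mirrors the serialization flow introduced in this diff.
from dataclasses import dataclass


@dataclass
class FakeMessage:
    role: str
    text: str
    tool_call_id: str | None = None
    name: str | None = None
    tool_calls: list | None = None

    def _add_tool_call_keys(self, message_dict: dict) -> dict:
        # an assistant message calling a tool
        if self.tool_calls is not None:
            message_dict['tool_calls'] = self.tool_calls
        # an observation message with a tool response
        if self.tool_call_id is not None:
            assert self.name is not None, 'name is required with tool_call_id'
            message_dict['tool_call_id'] = self.tool_call_id
            message_dict['name'] = self.name
        return message_dict

    def _string_serializer(self) -> dict:
        # the string path now also carries the tool-call keys
        return self._add_tool_call_keys({'content': self.text, 'role': self.role})


# A tool response serialized via the string path keeps its linkage keys:
obs = FakeMessage(role='tool', text='42', tool_call_id='call_1', name='calculator')
print(obs._string_serializer())
# {'content': '42', 'role': 'tool', 'tool_call_id': 'call_1', 'name': 'calculator'}
```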
diff --git a/openhands/llm/fn_call_converter.py b/openhands/llm/fn_call_converter.py
index 5ddbcb305064..b33c7b43503b 100644
--- a/openhands/llm/fn_call_converter.py
+++ b/openhands/llm/fn_call_converter.py
@@ -320,9 +320,8 @@ def convert_fncall_messages_to_non_fncall_messages(
     converted_messages = []
     first_user_message_encountered = False
    for message in messages:
-        role, content = message['role'], message['content']
-        if content is None:
-            content = ''
+        role = message['role']
+        content = message.get('content', '')

         # 1. SYSTEM MESSAGES
         # append system prompt suffix to content
@@ -339,6 +338,7 @@
                     f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
                 )
             converted_messages.append({'role': 'system', 'content': content})
+
         # 2. USER MESSAGES (no change)
         elif role == 'user':
             # Add in-context learning example for the first user message
@@ -447,10 +447,12 @@
                     f'Unexpected content type {type(content)}. Expected str or list. Content: {content}'
                 )
             converted_messages.append({'role': 'assistant', 'content': content})
+
         # 4. TOOL MESSAGES (tool outputs)
         elif role == 'tool':
-            # Convert tool result as assistant message
-            prefix = f'EXECUTION RESULT of [{message["name"]}]:\n'
+            # Convert tool result as user message
+            tool_name = message.get('name', 'function')
+            prefix = f'EXECUTION RESULT of [{tool_name}]:\n'
             # and omit "tool_call_id" AND "name"
             if isinstance(content, str):
                 content = prefix + content
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index 0f9f6376c79c..2191818f8216 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -122,6 +122,9 @@ def __init__(
             drop_params=self.config.drop_params,
         )

+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')
+            self.init_model_info()
         if self.vision_is_active():
             logger.debug('LLM: model has vision enabled')
         if self.is_caching_prompt_active():
@@ -143,16 +146,6 @@ def __init__(
                 drop_params=self.config.drop_params,
             )

-        with warnings.catch_warnings():
-            warnings.simplefilter('ignore')
-            self.init_model_info()
-        if self.vision_is_active():
-            logger.debug('LLM: model has vision enabled')
-        if self.is_caching_prompt_active():
-            logger.debug('LLM: caching prompt enabled')
-        if self.is_function_calling_active():
-            logger.debug('LLM: model supports function calling')
-
         self._completion_unwrapped = self._completion

         @self.retry_decorator(
@@ -342,6 +335,13 @@ def init_model_info(self):
             pass
         logger.debug(f'Model info: {self.model_info}')

+        if self.config.model.startswith('huggingface'):
+            # HF doesn't support the OpenAI default value for top_p (1)
+            logger.debug(
+                f'Setting top_p to 0.9 for Hugging Face model: {self.config.model}'
+            )
+            self.config.top_p = 0.9 if self.config.top_p == 1 else self.config.top_p
+
         # Set the max tokens in an LM-specific way if not set
         if self.config.max_input_tokens is None:
             if (
@@ -566,6 +566,7 @@ def format_messages_for_llm(self, messages: Message | list[Message]) -> list[dict]:
         for message in messages:
             message.cache_enabled = self.is_caching_prompt_active()
             message.vision_enabled = self.vision_is_active()
+            message.function_calling_enabled = self.is_function_calling_active()

         # let pydantic handle the serialization
         return [message.model_dump() for message in messages]
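Two behavioral notes on the fn_call_converter.py hunks above: tool results are now replayed as user messages rather than assistant messages, and both `content` and `name` are read defensively with `.get()`, so a tool message without a `name` key no longer raises `KeyError`. A sketch of just that conversion step, with a hypothetical `convert_tool_message` helper (the real logic is inline in `convert_fncall_messages_to_non_fncall_messages`):

```python
# Hypothetical helper isolating the tool-message branch of
# convert_fncall_messages_to_non_fncall_messages (simplified: str content only).
def convert_tool_message(message: dict) -> dict:
    content = message.get('content', '')         # tolerate a missing content key
    tool_name = message.get('name', 'function')  # tolerate a missing name key
    prefix = f'EXECUTION RESULT of [{tool_name}]:\n'
    # tool output is replayed to the LLM as a *user* message,
    # with "tool_call_id" and "name" omitted
    return {'role': 'user', 'content': prefix + content}


print(convert_tool_message({'role': 'tool', 'content': 'ok'}))
# {'role': 'user', 'content': 'EXECUTION RESULT of [function]:\nok'}
```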
diff --git a/openhands/server/github.py b/openhands/server/github_utils.py
similarity index 100%
rename from openhands/server/github.py
rename to openhands/server/github_utils.py
diff --git a/openhands/server/listen.py b/openhands/server/listen.py
index ed3124249f2a..06f0d6d00bf4 100644
--- a/openhands/server/listen.py
+++ b/openhands/server/listen.py
@@ -14,7 +14,7 @@
 from openhands.core.schema.action import ActionType
 from openhands.security.options import SecurityAnalyzers
 from openhands.server.data_models.feedback import FeedbackDataModel, store_feedback
-from openhands.server.github import (
+from openhands.server.github_utils import (
     GITHUB_CLIENT_ID,
     GITHUB_CLIENT_SECRET,
     UserVerifier,
diff --git a/pyproject.toml b/pyproject.toml
index 5c990fd2a45f..038012583ec8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openhands-ai"
-version = "0.14.1"
+version = "0.14.2"
 description = "OpenHands: Code Less, Make More"
 authors = ["OpenHands"]
 license = "MIT"
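End-to-end, this release wires native function calling through serialization: `format_messages_for_llm` (llm.py) now sets `function_calling_enabled` on every `Message`, and `serialize_model` (message.py) uses that flag, instead of inspecting `tool_call_id`/`tool_calls`, to pick the list-of-content-items form. A toy sketch of that branch, with a hypothetical `choose_serializer` helper condensed from `Message.serialize_model`:

```python
# Hypothetical condensation of Message.serialize_model's branch after this diff.
def choose_serializer(cache_enabled: bool, vision_enabled: bool,
                      function_calling_enabled: bool) -> str:
    if cache_enabled or vision_enabled or function_calling_enabled:
        return 'list'    # list-of-content-items form (vision/caching/tool calls)
    return 'string'      # single-string form (e.g. HF and Groq/llama)


# function calling alone now selects the list form:
assert choose_serializer(False, False, True) == 'list'
# plain text-only models still get the single string:
assert choose_serializer(False, False, False) == 'string'
```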