
Commit

Merge
SmartManoj committed Sep 18, 2024
1 parent fde9b58 commit 609b9f6
Showing 5 changed files with 93 additions and 62 deletions.
53 changes: 42 additions & 11 deletions openhands/llm/llm.py
@@ -113,18 +113,23 @@ def __init__(
         ):
             self.config.max_input_tokens = self.model_info['max_input_tokens']
         else:
-            # Max input tokens for gpt3.5, so this is a safe fallback for any potentially viable model
+            # Safe fallback for any potentially viable model
             self.config.max_input_tokens = 4096
-        if config.max_output_tokens is None:
-            if (
-                self.model_info is not None
-                and 'max_output_tokens' in self.model_info
-                and isinstance(self.model_info['max_output_tokens'], int)
-            ):
-                self.config.max_output_tokens = self.model_info['max_output_tokens']
-            else:
-                # Max output tokens for gpt3.5, so this is a safe fallback for any potentially viable model
-                self.config.max_output_tokens = 1024
+
+        if self.config.max_output_tokens is None:
+            # Safe default for any potentially viable model
+            self.config.max_output_tokens = 4096
+            if self.model_info is not None:
+                # max_output_tokens has precedence over max_tokens, if either exists.
+                # litellm has models with both, one or none of these 2 parameters!
+                if 'max_output_tokens' in self.model_info and isinstance(
+                    self.model_info['max_output_tokens'], int
+                ):
+                    self.config.max_output_tokens = self.model_info['max_output_tokens']
+                elif 'max_tokens' in self.model_info and isinstance(
+                    self.model_info['max_tokens'], int
+                ):
+                    self.config.max_output_tokens = self.model_info['max_tokens']

         if self.config.drop_params:
             litellm.drop_params = self.config.drop_params
@@ -138,6 +143,30 @@ def __init__(
             litellm.OllamaConfig.num_ctx = total
             logger.info(f'Setting OllamaConfig.num_ctx to {total}')

+        # This only seems to work with Google as the provider, not with OpenRouter!
+        gemini_safety_settings = (
+            [
+                {
+                    'category': 'HARM_CATEGORY_HARASSMENT',
+                    'threshold': 'BLOCK_NONE',
+                },
+                {
+                    'category': 'HARM_CATEGORY_HATE_SPEECH',
+                    'threshold': 'BLOCK_NONE',
+                },
+                {
+                    'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+                    'threshold': 'BLOCK_NONE',
+                },
+                {
+                    'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',
+                    'threshold': 'BLOCK_NONE',
+                },
+            ]
+            if self.config.model.lower().startswith('gemini')
+            else None
+        )
+
         self.completion_unwrapped = partial(
             litellm_completion,
             model=self.config.model,
@@ -150,6 +179,7 @@ def __init__(
             temperature=self.config.temperature,
             top_p=self.config.top_p,
             caching=self.config.enable_cache,
+            safety_settings=gemini_safety_settings,
         )

         if self.vision_is_active():
@@ -294,6 +324,7 @@ def wrapper(*args, **kwargs):
                 temperature=self.config.temperature,
                 top_p=self.config.top_p,
                 drop_params=True,
+                safety_settings=gemini_safety_settings,
             )

     @retry(
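For context on the new `safety_settings` argument wired into the two litellm calls above: a minimal sketch, assuming litellm forwards this provider-specific parameter to the Google Gemini API as its documentation describes. The model id, the message, and the printed reply are placeholders rather than values from this commit, and, as the in-code comment warns, the setting only appears to take effect with Google as the provider, not OpenRouter.

from litellm import completion

safety_settings = [
    {'category': 'HARM_CATEGORY_HARASSMENT', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_HATE_SPEECH', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'BLOCK_NONE'},
    {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'threshold': 'BLOCK_NONE'},
]

response = completion(
    model='gemini/gemini-1.5-flash',  # placeholder Gemini model id, assumed for illustration
    messages=[{'role': 'user', 'content': 'Hello'}],
    safety_settings=safety_settings,  # passed through to the provider by litellm
)
print(response.choices[0].message.content)
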
2 changes: 1 addition & 1 deletion openhands/runtime/client/runtime.py
@@ -51,11 +51,11 @@ def __init__(self, container: docker.models.containers.Container):

         self.buffer: list[str] = []
         self.lock = threading.Lock()
+        self._stop_event = threading.Event()
         self.log_generator = container.logs(stream=True, follow=True)
         self.log_stream_thread = threading.Thread(target=self.stream_logs)
         self.log_stream_thread.daemon = True
         self.log_stream_thread.start()
-        self._stop_event = threading.Event()

     def append(self, log_line: str):
         with self.lock:
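Why the reorder above matters: `stream_logs` runs on a daemon thread and consults `self._stop_event`, so creating the event only after `start()` leaves a window in which the thread can touch an attribute that does not exist yet. A minimal sketch of the fixed ordering follows; the class name, the `stream_logs` body, and the fake log source are assumptions for illustration, since the diff does not show them.

import threading


class LogBuffer:
    """Illustrative only: create every attribute the background thread reads
    before starting that thread."""

    def __init__(self, log_generator):
        self.buffer: list[str] = []
        self.lock = threading.Lock()
        self._stop_event = threading.Event()  # created before the thread that checks it
        self.log_generator = log_generator
        self.log_stream_thread = threading.Thread(target=self.stream_logs, daemon=True)
        self.log_stream_thread.start()

    def stream_logs(self):
        for line in self.log_generator:
            if self._stop_event.is_set():  # would raise AttributeError if created after start()
                break
            with self.lock:
                self.buffer.append(line.decode().rstrip())


# Any iterable of byte lines can stand in for container.logs(stream=True, follow=True)
buf = LogBuffer(iter([b'starting server\n', b'ready\n']))
buf.log_stream_thread.join(timeout=1)
print(buf.buffer)
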
90 changes: 45 additions & 45 deletions poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -27,7 +27,7 @@ uvicorn = "*"
 types-toml = "*"
 numpy = "*"
 json-repair = "*"
-browsergym = "0.6.0" # integrate browsergym as the browsing interface
+browsergym = "0.6.3" # integrate browsergym as the browsing interface
 html2text = "*"
 e2b = "^0.17.1"
 pexpect = "*"
8 changes: 4 additions & 4 deletions tests/unit/test_llm.py
@@ -9,12 +9,12 @@

 @pytest.fixture
 def default_config():
-    return LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
+    return LLMConfig(model='gpt-4o', api_key='test_key')


 def test_llm_init_with_default_config(default_config):
     llm = LLM(default_config)
-    assert llm.config.model == 'gpt-3.5-turbo'
+    assert llm.config.model == 'gpt-4o'
     assert llm.config.api_key == 'test_key'
     assert isinstance(llm.metrics, Metrics)

@@ -35,7 +35,7 @@ def test_llm_init_without_model_info(mock_get_model_info, default_config):
     mock_get_model_info.side_effect = Exception('Model info not available')
     llm = LLM(default_config)
     assert llm.config.max_input_tokens == 4096
-    assert llm.config.max_output_tokens == 1024
+    assert llm.config.max_output_tokens == 4096


 def test_llm_init_with_custom_config():
@@ -57,7 +57,7 @@ def test_llm_init_with_custom_config():


 def test_llm_init_with_metrics():
-    config = LLMConfig(model='gpt-3.5-turbo', api_key='test_key')
+    config = LLMConfig(model='gpt-4o', api_key='test_key')
     metrics = Metrics()
     llm = LLM(config, metrics=metrics)
     assert llm.metrics is metrics
