Commit
fix ollama mode can not work (#154)
wawa0210 authored Jul 1, 2024
1 parent 21d95e0 commit c5fd04e
Showing 2 changed files with 4 additions and 2 deletions.
aios/llm_kernel/llm_classes/ollama_llm.py (3 additions, 1 deletion)

@@ -72,7 +72,9 @@ def process(self,
         response = ollama.chat(
             model=self.model_name.split("/")[-1],
             messages=messages,
-            num_predict=self.max_new_tokens
+            options= ollama.Options(
+                num_predict=self.max_new_tokens
+            )
         )
         agent_process.set_response(
             Response(
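The change routes the token limit through the client's options object: the ollama Python client's chat() call does not take num_predict as a top-level keyword, so it is passed via ollama.Options instead. A minimal sketch of the corrected call pattern outside AIOS, assuming a running local Ollama server; the model tag and prompt below are illustrative, not from the commit:

    import ollama

    # num_predict caps the number of newly generated tokens; passing it
    # directly to chat() fails, so it goes through ollama.Options.
    response = ollama.chat(
        model="llama3",  # illustrative model tag
        messages=[{"role": "user", "content": "Reply with one word."}],
        options=ollama.Options(num_predict=64),
    )
    print(response["message"]["content"])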
aios/llm_kernel/llms.py (1 addition, 1 deletion)

@@ -27,7 +27,7 @@ def __init__(self,
             )
         # For locally-deployed LLM
         else:
-            if use_backend == "ollama" and llm_name.startswith("ollama"):
+            if use_backend == "ollama" or llm_name.startswith("ollama"):
                 self.model = OllamaLLM(
                     llm_name=llm_name,
                     max_gpu_memory=max_gpu_memory,
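The second change relaxes the dispatch condition: with or, the Ollama path is taken either when the backend is explicitly requested via use_backend or when the model name carries an "ollama" prefix, whereas the old and required both at once. A hedged sketch of that selection logic; the function name and the "other-local-backend" label are illustrative stand-ins, not AIOS code:

    from typing import Optional

    def select_backend(llm_name: str, use_backend: Optional[str]) -> str:
        # After the fix, either signal alone is enough to choose Ollama.
        if use_backend == "ollama" or llm_name.startswith("ollama"):
            return "ollama"
        return "other-local-backend"  # placeholder for the remaining branches

    assert select_backend("ollama/llama3", None) == "ollama"
    assert select_backend("llama3", "ollama") == "ollama"  # missed by the old `and`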
