start adding lm_studio
yrobla committed Jan 20, 2025
1 parent 3deb074 commit 79aa4aa
Showing 4 changed files with 25 additions and 4 deletions.
22 changes: 22 additions & 0 deletions src/codegate/pipeline/codegate_context_retriever/codegate.py
@@ -30,6 +30,28 @@ def name(self) -> str:
         Returns the name of this pipeline step.
         """
         return "codegate-context-retriever"
 
+    @staticmethod
+    def process_user_message(user_message: str) -> str:
+        """
+        Process a user message by extracting content inside <task> tags, if present,
+        and removing all <environment_details> tags along with their content.
+
+        Args:
+            user_message (str): The input user message.
+
+        Returns:
+            str: The processed user message.
+        """
+        # Extract content inside <task> tags if present
+        task_match = re.search(r"<task>(.*?)</task>", user_message, re.DOTALL)
+        if task_match:
+            user_message = task_match.group(1).strip()
+
+        # Remove all content inside <environment_details> tags and the tags themselves
+        user_message = re.sub(r"<environment_details>.*?</environment_details>", "", user_message, flags=re.DOTALL).strip()
+
+        return user_message
+
     def generate_context_str(self, objects: list[object], context: PipelineContext) -> str:
         context_str = ""
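For reference, the new helper strips agent scaffolding (the `<task>` wrapper and `<environment_details>` blocks that tools like Cline inject) before the message reaches context retrieval. A minimal standalone sketch of the same behavior, using a made-up prompt:

```python
import re

def process_user_message(user_message: str) -> str:
    # Prefer the body of <task>...</task> when present.
    task_match = re.search(r"<task>(.*?)</task>", user_message, re.DOTALL)
    if task_match:
        user_message = task_match.group(1).strip()
    # Drop <environment_details> blocks entirely.
    return re.sub(
        r"<environment_details>.*?</environment_details>", "", user_message, flags=re.DOTALL
    ).strip()

message = (
    "<task>List the dependencies in requirements.txt</task>\n"
    "<environment_details># cwd: /repo</environment_details>"
)
print(process_user_message(message))  # -> List the dependencies in requirements.txt
```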
1 change: 1 addition & 0 deletions src/codegate/providers/ollama/completion_handler.py
@@ -18,6 +18,7 @@ async def ollama_stream_generator(
     try:
         async for chunk in stream:
             try:
+                yield f"{chunk.model_dump_json()}\n\n"
                 # TODO We should wire in the client info so we can respond with
                 # the correct format and start to handle multiple clients
                 # in a more robust way.
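With this change, each Ollama chunk is re-emitted as its JSON serialization followed by a blank line. A client reading the proxied stream can split on blank lines and parse each record; a rough sketch with fabricated chunk payloads:

```python
import json

def parse_chunks(lines):
    # One JSON document per chunk; blank lines act as separators.
    for line in lines:
        line = line.strip()
        if line:
            yield json.loads(line)

sample = [
    '{"message": {"content": "Hel"}, "done": false}',
    "",
    '{"message": {"content": "lo"}, "done": true}',
]
for chunk in parse_chunks(sample):
    print(chunk["message"]["content"], end="")  # -> Hello
```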
3 changes: 0 additions & 3 deletions src/codegate/providers/ollama/provider.py
@@ -87,7 +87,6 @@ async def create_completion(request: Request):
 
             is_fim_request = self._is_fim_request(request, data)
             try:
-                print("i create completion with data", data)
                 stream = await self.complete(data, api_key=None, is_fim_request=is_fim_request)
             except httpx.ConnectError as e:
                 logger = structlog.get_logger("codegate")
@@ -103,6 +102,4 @@ async def create_completion(request: Request):
                 else:
                     # just continue raising the exception
                     raise e
-            print("result is")
-            print(self._completion_handler.create_response(stream))
             return self._completion_handler.create_response(stream)
3 changes: 2 additions & 1 deletion src/codegate/providers/openai/provider.py
@@ -1,5 +1,6 @@
 import json
 
+import httpx
 import structlog
 from fastapi import Header, HTTPException, Request
 
@@ -35,6 +36,7 @@ def _setup_routes(self):
 
         @self.router.post(f"/{self.provider_route_name}/chat/completions")
         @self.router.post(f"/{self.provider_route_name}/completions")
+        @self.router.post(f"/{self.provider_route_name}/v1/chat/completions")
         async def create_completion(
             request: Request,
             authorization: str = Header(..., description="Bearer token"),
@@ -45,7 +47,6 @@ async def create_completion(
             api_key = authorization.split(" ")[1]
             body = await request.body()
             data = json.loads(body)
-
             is_fim_request = self._is_fim_request(request, data)
             try:
                 stream = await self.complete(data, api_key, is_fim_request=is_fim_request)
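The extra route is what the commit's lm_studio goal hinges on: LM Studio clients speak the OpenAI wire format under a /v1 prefix, so the OpenAI provider must also accept /v1/chat/completions. A quick smoke test against the new path (the port, the "openai" route prefix, and the model name are assumptions for illustration):

```python
import json
import urllib.request

# Assumed local codegate address and provider route; adjust to your deployment.
url = "http://localhost:8989/openai/v1/chat/completions"
payload = {
    "model": "local-model",  # placeholder model name
    "messages": [{"role": "user", "content": "Say hello"}],
    "stream": False,
}
req = urllib.request.Request(
    url,
    data=json.dumps(payload).encode("utf-8"),
    headers={
        "Authorization": "Bearer dummy-key",  # the route requires a bearer token header
        "Content-Type": "application/json",
    },
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)
    print(body["choices"][0]["message"]["content"])
```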
