Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cline Support #463

Merged
merged 9 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,16 @@ With Aider, you can choose from two leading AI model providers:
- 💻 Local LLMs with [Ollama](https://ollama.com/)
- 🧠 [OpenAI API](https://openai.com/api/)

- **[Cline](https://github.com/cline/cline)**

With Cline, you can choose between different leading AI model providers:

- 🤖 [Anthropic API](https://www.anthropic.com/api)
- 🧠 [OpenAI API](https://openai.com/api/)
- 💻 [LM Studio](https://lmstudio.ai/)
- 💻 Local LLMs with [Ollama](https://ollama.com/)


### Privacy first

Unlike E.T., your code never phones home! 🛸 CodeGate is designed with privacy
Expand Down
1 change: 1 addition & 0 deletions src/codegate/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
"anthropic": "https://api.anthropic.com/v1",
"vllm": "http://localhost:8000", # Base URL without /v1 path
"ollama": "http://localhost:11434", # Default Ollama server URL
"lm_studio": "http://localhost:1234",
}


Expand Down
12 changes: 10 additions & 2 deletions src/codegate/pipeline/cli/cli.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import re
import shlex

from litellm import ChatCompletionRequest
Expand Down Expand Up @@ -76,12 +77,19 @@ async def process(

if last_user_message is not None:
last_user_message_str, _ = last_user_message
splitted_message = last_user_message_str.lower().split(" ")
cleaned_message_str = re.sub(r"<.*?>", "", last_user_message_str).strip()
splitted_message = cleaned_message_str.lower().split(" ")
# We expect codegate as the first word in the message
if splitted_message[0] == "codegate":
context.shortcut_response = True
args = shlex.split(last_user_message_str)
args = shlex.split(cleaned_message_str)
cmd_out = await codegate_cli(args[1:])

if cleaned_message_str != last_user_message_str:
# it came from Cline, need to wrap into tags
cmd_out = (
f"<attempt_completion><result>{cmd_out}</result></attempt_completion>\n"
)
return PipelineResult(
response=PipelineResponse(
step_name=self.name,
Expand Down
29 changes: 24 additions & 5 deletions src/codegate/pipeline/codegate_context_retriever/codegate.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def generate_context_str(self, objects: list[object], context: PipelineContext)
matched_packages = []
for obj in objects:
# The object is already a dictionary with 'properties'
package_obj = obj["properties"]
package_obj = obj["properties"] # type: ignore
matched_packages.append(f"{package_obj['name']} ({package_obj['type']})")
# Add one alert for each package found
context.add_alert(
Expand Down Expand Up @@ -91,13 +91,16 @@ async def process(
) # type: ignore
logger.info(f"Found {len(bad_snippet_packages)} bad packages in code snippets.")

# Remove code snippets from the user messages and search for bad packages
# Remove code snippets and file listing from the user messages and search for bad packages
# in the rest of the user query/messages
user_messages = re.sub(r"```.*?```", "", user_message, flags=re.DOTALL)
user_messages = re.sub(r"⋮...*?⋮...\n\n", "", user_messages, flags=re.DOTALL)
user_messages = re.sub(
r"<environment_details>.*?</environment_details>", "", user_messages, flags=re.DOTALL
)

# split messages on double newlines, to avoid passing too much content to the search
split_messages = user_messages.split("\n\n")
split_messages = re.split(r"</?task>|(\n\n)", user_messages)
collected_bad_packages = []
for item_message in split_messages:
# Vector search to find bad packages
Expand Down Expand Up @@ -126,10 +129,26 @@ async def process(
# Make a copy of the request
new_request = request.copy()

# Add the context to the last user message
# Format: "Context: {context_str} \n Query: {last user message content}"
message = new_request["messages"][last_user_idx]
context_msg = f'Context: {context_str} \n\n Query: {message["content"]}' # type: ignore
message_str = str(message["content"]) # type: ignore
# Add the context to the last user message
if message_str.strip().startswith("<task>"):
# formatting of cline
match = re.match(r"(<task>)(.*?)(</task>)(.*)", message_str, re.DOTALL)
if match:
task_start, task_content, task_end, rest_of_message = match.groups()

# Embed the context into the task block
updated_task_content = (
f"{task_start}Context: {context_str}\n"
+ f"Query: {task_content.strip()}</details>{task_end}"
)

# Combine the updated task block with the rest of the message
context_msg = updated_task_content + rest_of_message
else:
context_msg = f"Context: {context_str} \n\n Query: {message_str}" # type: ignore
message["content"] = context_msg

logger.debug("Final context message", context_message=context_msg)
Expand Down
3 changes: 2 additions & 1 deletion src/codegate/pipeline/extract_snippets/extract_snippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,8 @@ def extract_snippets(message: str) -> List[CodeSnippet]:

#  just correct the typescript exception
lang_map = {"typescript": "javascript"}
lang = lang_map.get(lang, lang)
if lang:
lang = lang_map.get(lang, lang)
snippets.append(CodeSnippet(filepath=filename, code=content, language=lang))

return snippets
Expand Down
26 changes: 20 additions & 6 deletions src/codegate/pipeline/secrets/secrets.py
Original file line number Diff line number Diff line change
Expand Up @@ -451,17 +451,31 @@ async def process_chunk(
):
return [chunk]

is_cline_client = any(
"Cline" in str(message.trigger_string or "")
for message in input_context.alerts_raised or []
)

# Check if this is the first chunk (delta role will be present, others will not)
if len(chunk.choices) > 0 and chunk.choices[0].delta.role:
redacted_count = input_context.metadata["redacted_secrets_count"]
secret_text = "secret" if redacted_count == 1 else "secrets"
# Create notification chunk
notification_chunk = self._create_chunk(
chunk,
f"\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
f"by redacting them.\n\n",
)
if is_cline_client:
notification_chunk = self._create_chunk(
chunk,
f"<thinking>\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
f"by redacting them.</thinking>\n\n",
)
notification_chunk.choices[0].delta.role = "assistant"
else:
notification_chunk = self._create_chunk(
chunk,
f"\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
f"by redacting them.\n\n",
)

# Reset the counter
input_context.metadata["redacted_secrets_count"] = 0
Expand Down
14 changes: 10 additions & 4 deletions src/codegate/pipeline/secrets/signatures.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import re
from pathlib import Path
from threading import Lock
from typing import ClassVar, Dict, List, NamedTuple, Optional
from typing import ClassVar, Dict, List, NamedTuple, Optional, Union

import structlog
import yaml
Expand Down Expand Up @@ -215,16 +215,22 @@ def _load_signatures(cls) -> None:
raise

@classmethod
def find_in_string(cls, text: str) -> List[Match]:
"""Search for secrets in the provided string."""
def find_in_string(cls, text: Union[str, List[str]]) -> List[Match]:
"""Search for secrets in the provided string or list of strings."""
if not text:
return []

if not cls._yaml_path:
raise RuntimeError("SecretFinder not initialized.")

matches = []
lines = text.splitlines()

# Split text into lines for processing
try:
lines = text.splitlines()
except Exception as e:
logger.warning(f"Error splitting text into lines: {e}")
return []

for line_num, line in enumerate(lines, start=1):
for group in cls._signature_groups:
Expand Down
13 changes: 12 additions & 1 deletion src/codegate/pipeline/systemmsg.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ def get_existing_system_message(request: ChatCompletionRequest) -> Optional[dict
Returns:
The existing system message if found, otherwise None.
"""

for message in request.get("messages", []):
if message["role"] == "system":
return message
Expand Down Expand Up @@ -50,8 +51,18 @@ def add_or_update_system_message(
context.add_alert("add-system-message", trigger_string=json.dumps(system_message))
new_request["messages"].insert(0, system_message)
else:
# Handle both string and list content types (needed for Cline, which sends a list)
existing_content = request_system_message["content"]
new_content = system_message["content"]

# Convert list to string if necessary (needed for Cline, which sends a list)
if isinstance(existing_content, list):
existing_content = "\n".join(str(item) for item in existing_content)
if isinstance(new_content, list):
new_content = "\n".join(str(item) for item in new_content)

# Update existing system message
updated_content = request_system_message["content"] + "\n\n" + system_message["content"]
updated_content = existing_content + "\n\n" + new_content
context.add_alert("update-system-message", trigger_string=updated_content)
request_system_message["content"] = updated_content

Expand Down
1 change: 1 addition & 0 deletions src/codegate/providers/anthropic/completion_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ async def execute_completion(
api_key: Optional[str],
stream: bool = False,
is_fim_request: bool = False,
base_tool: Optional[str] = "",
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
"""
Ensures the model name is prefixed with 'anthropic/' to explicitly route to Anthropic's API.
Expand Down
5 changes: 5 additions & 0 deletions src/codegate/providers/anthropic/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,14 @@ def _setup_routes(self):
Sets up the /messages route for the provider as expected by the Anthropic
API. Extracts the API key from the "x-api-key" header and passes it to the
completion handler.

There are two routes:
- /messages: This is the route that is used by the Anthropic API with Continue.dev
- /v1/messages: This is the route that is used by the Anthropic API with Cline
"""

@self.router.post(f"/{self.provider_route_name}/messages")
@self.router.post(f"/{self.provider_route_name}/v1/messages")
async def create_message(
request: Request,
x_api_key: str = Header(None),
Expand Down
24 changes: 18 additions & 6 deletions src/codegate/providers/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,20 +220,32 @@ async def complete(
data.get("base_url"),
is_fim_request,
)
if input_pipeline_result.response:
if input_pipeline_result.response and input_pipeline_result.context:
return await self._pipeline_response_formatter.handle_pipeline_response(
input_pipeline_result.response, streaming, context=input_pipeline_result.context
)

provider_request = self._input_normalizer.denormalize(input_pipeline_result.request)
if input_pipeline_result.request:
provider_request = self._input_normalizer.denormalize(input_pipeline_result.request)
if is_fim_request:
provider_request = self._fim_normalizer.denormalize(provider_request)
provider_request = self._fim_normalizer.denormalize(provider_request) # type: ignore

# Execute the completion and translate the response
# This gives us either a single response or a stream of responses
# based on the streaming flag
is_cline_client = any(
"Cline" in str(message.get("content", "")) for message in data.get("messages", [])
)
base_tool = ""
if is_cline_client:
base_tool = "cline"

model_response = await self._completion_handler.execute_completion(
provider_request, api_key=api_key, stream=streaming, is_fim_request=is_fim_request
provider_request,
api_key=api_key,
stream=streaming,
is_fim_request=is_fim_request,
base_tool=base_tool,
)
if not streaming:
normalized_response = self._output_normalizer.normalize(model_response)
Expand All @@ -242,9 +254,9 @@ async def complete(
return self._output_normalizer.denormalize(pipeline_output)

pipeline_output_stream = await self._run_output_stream_pipeline(
input_pipeline_result.context, model_response, is_fim_request=is_fim_request
input_pipeline_result.context, model_response, is_fim_request=is_fim_request # type: ignore
)
return self._cleanup_after_streaming(pipeline_output_stream, input_pipeline_result.context)
return self._cleanup_after_streaming(pipeline_output_stream, input_pipeline_result.context) # type: ignore

def get_routes(self) -> APIRouter:
return self.router
1 change: 1 addition & 0 deletions src/codegate/providers/completion/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ async def execute_completion(
api_key: Optional[str],
stream: bool = False, # TODO: remove this param?
is_fim_request: bool = False,
base_tool: Optional[str] = "",
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
"""Execute the completion request"""
pass
Expand Down
1 change: 1 addition & 0 deletions src/codegate/providers/litellmshim/litellmshim.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ async def execute_completion(
api_key: Optional[str],
stream: bool = False,
is_fim_request: bool = False,
base_tool: Optional[str] = "",
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
"""
Execute the completion request with LiteLLM's API
Expand Down
1 change: 1 addition & 0 deletions src/codegate/providers/llamacpp/completion_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ async def execute_completion(
api_key: Optional[str],
stream: bool = False,
is_fim_request: bool = False,
base_tool: Optional[str] = "",
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
"""
Execute the completion request with inference engine API
Expand Down
Loading
Loading