diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 00cf16f06..297b91930 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -8,6 +8,7 @@ Thanks to these individuals for making reNgine awesome by fixing bugs, resolving * [Suprita-25](https://github.com/Suprita-25) * [TheBinitGhimire](https://github.com/TheBinitGhimire) * [Vinay Leo](https://github.com/vinaynm) +* [Erdem Ozgen](https://github.com/ErdemOzgen) *If you have created a Pull request, feel free to add your name here, because we know you are awesome and deserve thanks from the community!* diff --git a/Makefile b/Makefile index f1b8d1d1d..e37b2af73 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ COMPOSE_PREFIX_CMD := COMPOSE_DOCKER_CLI_BUILD=1 COMPOSE_ALL_FILES := -f docker-compose.yml -SERVICES := db web proxy redis celery celery-beat +SERVICES := db web proxy redis celery celery-beat ollama # -------------------------- @@ -20,7 +20,7 @@ setup: ## Generate certificates. up: ## Build and start all services. ${COMPOSE_PREFIX_CMD} docker-compose ${COMPOSE_ALL_FILES} up -d --build ${SERVICES} - + docker exec -it ollama ollama run llama2-uncensored build: ## Build all services. 
${COMPOSE_PREFIX_CMD} docker-compose ${COMPOSE_ALL_FILES} build ${SERVICES} diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 4fbe8e539..496f210b9 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -115,6 +115,17 @@ services: - celery-beat networks: - rengine_network + ollama: + image: ollama/ollama + container_name: ollama + volumes: + - ollama_data:/root/.ollama + ports: + - "11434:11434" + networks: + - rengine_network + restart: always + command: ["ollama", "run", "llama2-uncensored"] networks: rengine_network: @@ -126,3 +137,4 @@ volumes: github_repos: wordlist: scan_results: + ollama_data: diff --git a/docker-compose.yml b/docker-compose.yml index 9bd3e018a..d575104ae 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -140,7 +140,15 @@ services: - scan_results:/usr/src/scan_results networks: - rengine_network - + ollama: + image: ollama/ollama + container_name: ollama + volumes: + - ollama_data:/root/.ollama + ports: + - "11434:11434" + networks: + - rengine_network networks: rengine_network: @@ -154,6 +162,7 @@ volumes: wordlist: scan_results: static_volume: + ollama_data: secrets: proxy.ca: diff --git a/web/celery-entrypoint.sh b/web/celery-entrypoint.sh index f1d49ff1d..775fd1869 100755 --- a/web/celery-entrypoint.sh +++ b/web/celery-entrypoint.sh @@ -159,6 +159,8 @@ exec "$@" # httpx seems to have issue, use alias instead!!! echo 'alias httpx="/go/bin/httpx"' >> ~/.bashrc +# for localgpt install langchain +python3 -m pip install langchain==0.0.343 # watchmedo auto-restart --recursive --pattern="*.py" --directory="/usr/src/app/reNgine/" -- celery -A reNgine.tasks worker --autoscale=10,0 -l INFO -Q scan_queue & echo "Starting Workers..." 
diff --git a/web/reNgine/gpt.py b/web/reNgine/gpt.py index 4bae2563e..2e6682ff1 100644 --- a/web/reNgine/gpt.py +++ b/web/reNgine/gpt.py @@ -2,13 +2,16 @@ import re from reNgine.common_func import get_open_ai_key, extract_between from reNgine.definitions import VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE, ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT +from langchain.llms import Ollama class GPTVulnerabilityReportGenerator: def __init__(self): self.api_key = get_open_ai_key() self.model_name = 'gpt-3.5-turbo' - + if not self.api_key: + self.ollama = Ollama(base_url='http://ollama:11434', model="llama2-uncensored") + def get_vulnerability_description(self, description): """Generate Vulnerability Description using GPT. @@ -24,93 +27,91 @@ def get_vulnerability_description(self, description): } """ if not self.api_key: - return { - 'status': False, - 'error': 'No OpenAI keys provided.' - } - openai.api_key = self.api_key - try: - gpt_response = openai.ChatCompletion.create( - model=self.model_name, - messages=[ - {'role': 'system', 'content': VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE}, - {'role': 'user', 'content': description} - ] - ) + prompt = ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT + "\nUser: " + input + response_content = self.ollama(prompt) + else: + openai.api_key = self.api_key + try: + gpt_response = openai.ChatCompletion.create( + model=self.model_name, + messages=[ + {'role': 'system', 'content': VULNERABILITY_DESCRIPTION_SYSTEM_MESSAGE}, + {'role': 'user', 'content': description} + ] + ) - response_content = gpt_response['choices'][0]['message']['content'] + response_content = gpt_response['choices'][0]['message']['content'] + except Exception as e: + return { + 'status': False, + 'error': str(e) + } + vuln_description_pattern = re.compile( + r"[Vv]ulnerability [Dd]escription:(.*?)(?:\n\n[Ii]mpact:|$)", + re.DOTALL + ) + impact_pattern = re.compile( + r"[Ii]mpact:(.*?)(?:\n\n[Rr]emediation:|$)", + re.DOTALL + ) + remediation_pattern = re.compile( + 
r"[Rr]emediation:(.*?)(?:\n\n[Rr]eferences:|$)", + re.DOTALL + ) - vuln_description_pattern = re.compile( - r"[Vv]ulnerability [Dd]escription:(.*?)(?:\n\n[Ii]mpact:|$)", - re.DOTALL - ) - impact_pattern = re.compile( - r"[Ii]mpact:(.*?)(?:\n\n[Rr]emediation:|$)", - re.DOTALL - ) - remediation_pattern = re.compile( - r"[Rr]emediation:(.*?)(?:\n\n[Rr]eferences:|$)", - re.DOTALL - ) + description_section = extract_between(response_content, vuln_description_pattern) + impact_section = extract_between(response_content, impact_pattern) + remediation_section = extract_between(response_content, remediation_pattern) + references_start_index = response_content.find("References:") + references_section = response_content[references_start_index + len("References:"):].strip() - description_section = extract_between(response_content, vuln_description_pattern) - impact_section = extract_between(response_content, impact_pattern) - remediation_section = extract_between(response_content, remediation_pattern) - references_start_index = response_content.find("References:") - references_section = response_content[references_start_index + len("References:"):].strip() - - url_pattern = re.compile(r'https://\S+') - urls = url_pattern.findall(references_section) - - return { - 'status': True, - 'description': description_section, - 'impact': impact_section, - 'remediation': remediation_section, - 'references': urls, - } - except Exception as e: - return { - 'status': False, - 'error': str(e) - } + url_pattern = re.compile(r'https://\S+') + urls = url_pattern.findall(references_section) + return { + 'status': True, + 'description': description_section, + 'impact': impact_section, + 'remediation': remediation_section, + 'references': urls, + } class GPTAttackSuggestionGenerator: def __init__(self): self.api_key = get_open_ai_key() self.model_name = 'gpt-3.5-turbo' + if not self.api_key: + self.ollama = Ollama(base_url='http://ollama:11434', model="llama2-uncensored") def 
get_attack_suggestion(self, input): ''' input (str): input for gpt ''' if not self.api_key: - return { - 'status': False, - 'error': 'No OpenAI keys provided.', - 'input': input - } - openai.api_key = self.api_key - print(input) - try: - gpt_response = openai.ChatCompletion.create( - model=self.model_name, - messages=[ - {'role': 'system', 'content': ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT}, - {'role': 'user', 'content': input} - ] - ) - response_content = gpt_response['choices'][0]['message']['content'] - return { - 'status': True, - 'description': response_content, - 'input': input - } - except Exception as e: - return { - 'status': False, - 'error': str(e), - 'input': input - } + prompt = ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT + "\nUser: " + input + response_content = self.ollama(prompt) + else: + openai.api_key = self.api_key + print(input) + try: + gpt_response = openai.ChatCompletion.create( + model=self.model_name, + messages=[ + {'role': 'system', 'content': ATTACK_SUGGESTION_GPT_SYSTEM_PROMPT}, + {'role': 'user', 'content': input} + ] + ) + response_content = gpt_response['choices'][0]['message']['content'] + except Exception as e: + return { + 'status': False, + 'error': str(e), + 'input': input + } + return { + 'status': True, + 'description': response_content, + 'input': input + } + \ No newline at end of file diff --git a/web/requirements.txt b/web/requirements.txt index 49966c967..1d5eb5faa 100644 --- a/web/requirements.txt +++ b/web/requirements.txt @@ -37,3 +37,4 @@ whatportis weasyprint==53.3 wafw00f==2.2.0 xmltodict==0.13.0 +langchain==0.0.343 \ No newline at end of file