From 849c166377a71ffa7790f3a0d13e2bb8f6ec7c99 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Wed, 20 Jul 2022 13:50:41 +0200 Subject: [PATCH 01/42] indicate that DefectDojo is altered by Netcetera --- dojo/templatetags/display_tags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 0095428194e..f876d5992cf 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -127,7 +127,7 @@ def dojo_version(): version = __version__ if settings.FOOTER_VERSION: version = settings.FOOTER_VERSION - return "v. {}".format(version) + return "NCA build, v. {}".format(version) @register.simple_tag From ce0b895c1cca50dbe36f3075a3b4259cc7436197 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Mon, 29 Aug 2022 17:41:43 +0200 Subject: [PATCH 02/42] add LDAP support. LDAP is configured via environment variables, see DD_LDAP_* Original author: Lars Meijers --- Dockerfile.django-debian | 6 ++++ Dockerfile.nginx-debian | 2 ++ dojo/settings/settings.dist.py | 56 ++++++++++++++++++++++++++++++++++ requirements.txt | 2 ++ 4 files changed, 66 insertions(+) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index f58f22b5be2..7bd48942ef4 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -23,6 +23,8 @@ RUN \ uuid-runtime \ # libcurl4-openssl-dev is required for installing pycurl python package libcurl4-openssl-dev \ + libldap2-dev \ + libsasl2-dev \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists && \ @@ -64,6 +66,10 @@ RUN \ true COPY --from=build /tmp/wheels /tmp/wheels COPY requirements.txt ./ +# fixes inability to connect to LDAPS servers +RUN mkdir -p /etc/ldap && \ + echo "TLS_CACERT /etc/ssl/certs/ca-certificates.crt" >> /etc/ldap/ldap.conf + RUN export PYCURL_SSL_LIBRARY=openssl && \ pip3 install \ --no-cache-dir \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 4ace9c1d5c8..70d514246ae 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -18,6 +18,8 @@ RUN \ libmariadb-dev-compat \ libpq-dev \ postgresql-client \ + libldap2-dev \ + libsasl2-dev \ xmlsec1 \ git \ uuid-runtime \ diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 640f59360bc..6d89de9782b 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -3,6 +3,8 @@ from datetime import timedelta from celery.schedules import crontab from dojo import __version__ +import ldap +from django_auth_ldap.config import LDAPSearch, PosixGroupType, ActiveDirectoryGroupType import environ from netaddr import IPNetwork, IPSet import json @@ -281,6 +283,23 @@ DD_ENABLE_AUDITLOG=(bool, True), # Specifies whether the "first seen" date of a given report should be used over the "last seen" date DD_USE_FIRST_SEEN=(bool, False), + # LDAP + DD_LDAP_SERVER_URI=(str, 'ldap://ldap.example.com'), + DD_LDAP_BIND_DN=(str, ''), + DD_LDAP_BIND_PASSWORD=(str, ''), + DD_LDAP_USER_SEARCH_BASE=(str, ''), + DD_LDAP_USER_SEARCH_FILTER=(str, '(uid=%(user)s)'), + DD_LDAP_USER_ATTR_USERNAME=(str, 'uid'), + DD_LDAP_USER_ATTR_FNAME=(str, 'cn'), + DD_LDAP_USER_ATTR_LNAME=(str, 'sn'), + DD_LDAP_USER_ATTR_MAIL=(str, 'mail'), + DD_LDAP_GROUP_SEARCH_BASE=(str, ''), + DD_LDAP_GROUP_SEARCH_FILTER=(str, '(objectClass=posixGroup)'), + DD_LDAP_GROUP_TYPE=(str, 'POSIX'), + DD_LDAP_GROUP_ATTR_NAME=(str, 'cn'), + DD_LDAP_GROUP_REQUIRE=(str, ''), + DD_LDAP_USER_FLAG_ACTIVE=(str, ''), + DD_LDAP_USER_FLAG_SUPERUSER=(str, ''), ) @@ -360,6 +379,37 
@@ def generate_url(scheme, double_slashes, user, password, host, port, path, param TAG_PREFETCHING = env('DD_TAG_PREFETCHING') +# ------------------------------------------------------------------------------ +# LDAP +# ------------------------------------------------------------------------------ +AUTH_LDAP_SERVER_URI = env('DD_LDAP_SERVER_URI') +AUTH_LDAP_BIND_DN = env('DD_LDAP_BIND_DN') +AUTH_LDAP_BIND_PASSWORD = env('DD_LDAP_BIND_PASSWORD') +AUTH_LDAP_USER_SEARCH = LDAPSearch( + env('DD_LDAP_USER_SEARCH_BASE'), ldap.SCOPE_SUBTREE, env('DD_LDAP_USER_SEARCH_FILTER') +) + +AUTH_LDAP_USER_ATTR_MAP = { + "username": env('DD_LDAP_USER_ATTR_USERNAME'), + "first_name": env('DD_LDAP_USER_ATTR_FNAME'), + "last_name": env('DD_LDAP_USER_ATTR_LNAME'), + "email": env('DD_LDAP_USER_ATTR_MAIL'), +} + +AUTH_LDAP_GROUP_SEARCH = LDAPSearch( + env('DD_LDAP_GROUP_SEARCH_BASE'), ldap.SCOPE_SUBTREE, env('DD_LDAP_GROUP_SEARCH_FILTER') +) +AUTH_LDAP_GROUP_TYPE = PosixGroupType(name_attr=env('DD_LDAP_GROUP_ATTR_NAME')) if env('DD_LDAP_GROUP_TYPE') == 'POSIX' else ActiveDirectoryGroupType(name_attr=env('DD_LDAP_GROUP_ATTR_NAME')) + + +# Simple group restrictions +AUTH_LDAP_REQUIRE_GROUP = env('DD_LDAP_GROUP_REQUIRE') + +AUTH_LDAP_USER_FLAGS_BY_GROUP = { + "is_active": env('DD_LDAP_USER_FLAG_ACTIVE'), + "is_superuser": env('DD_LDAP_USER_FLAG_SUPERUSER'), +} + # ------------------------------------------------------------------------------ # DATABASE # ------------------------------------------------------------------------------ @@ -468,6 +518,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # These are the individidual modules supported by social-auth AUTHENTICATION_BACKENDS = ( + 'django_auth_ldap.backend.LDAPBackend', 'social_core.backends.auth0.Auth0OAuth2', 'social_core.backends.google.GoogleOAuth2', 'dojo.okta.OktaOAuth2', @@ -1615,6 +1666,11 @@ def saml2_attrib_map_format(dict): 'level': '%s' % LOG_LEVEL, 'propagate': False, }, + 'django_auth_ldap': { + 'handlers': [r'%s' % LOGGING_HANDLER], + 'level': '%s' % LOG_LEVEL, + 'propagate': False, + }, } } diff --git a/requirements.txt b/requirements.txt index 6090cc59362..111376dc173 100644 --- a/requirements.txt +++ b/requirements.txt @@ -83,3 +83,5 @@ boto3==1.34.32 # Required for Celery Broker AWS (SQS) support netaddr==0.10.1 vulners==2.1.2 fontawesomefree==6.5.1 +python-ldap==3.4.3 +django-auth-ldap==4.5.0 From bdb62dfee0654283c372a1b62e8bf141d7224259 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Tue, 30 Aug 2022 10:38:22 +0200 Subject: [PATCH 03/42] docker: install globally certificates (*.crt) placed into 'docker/certs' directory --- Dockerfile.django-debian | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 7bd48942ef4..faa81f3364b 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -92,6 +92,10 @@ COPY \ COPY wsgi.py manage.py docker/unit-tests.sh ./ COPY dojo/ ./dojo/ +# install custom CA certificates +COPY docker/certs/*.crt /usr/local/share/ca-certificates +RUN update-ca-certificates + # Add extra fixtures to docker image which are loaded by the initializer COPY docker/extra_fixtures/* /app/dojo/fixtures/ From e040a26164cbeff6f890d05d6177bb6ab3d7314a Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Sun, 25 Jun 2023 15:52:49 +0200 Subject: [PATCH 04/42] tools: Tenable: set vuln_id_from_tool property of finding to Nessus plugin ID on import --- dojo/tools/tenable/csv_format.py | 2 ++ dojo/tools/tenable/xml_format.py | 6 
++++++ 2 files changed, 8 insertions(+) diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index a2e2b72d086..bab22eb1ae1 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -93,6 +93,7 @@ def get_findings(self, filename: str, test: Test): mitigation = str(row.get("Solution", "N/A")) impact = row.get("Description", "N/A") references = row.get("See Also", "N/A") + vuln_id_from_tool = row.get("Plugin", "N/A") # Determine if the current row has already been processed dupe_key = ( severity @@ -113,6 +114,7 @@ def get_findings(self, filename: str, test: Test): mitigation=mitigation, impact=impact, references=references, + vuln_id_from_tool=vuln_id_from_tool, ) # manage CVSS vector (only v3.x for now) diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py index aa8b17c9b19..ecc051f9901 100644 --- a/dojo/tools/tenable/xml_format.py +++ b/dojo/tools/tenable/xml_format.py @@ -203,6 +203,11 @@ def get_findings(self, filename: str, test: Test) -> list: if cvssv3_score_element_text is not None: cvssv3_score = cvssv3_score_element_text + vuln_id_from_tool = None + vuln_id_from_tool_text = self.safely_get_element_text(item.find("pluginID")) + if vuln_id_from_tool_text is not None: + vuln_id_from_tool = vuln_id_from_tool_text + # Determine the current entry has already been parsed in # this report dupe_key = severity + title @@ -218,6 +223,7 @@ def get_findings(self, filename: str, test: Test) -> list: cwe=cwe, cvssv3=cvssv3, cvssv3_score=cvssv3_score, + vuln_id_from_tool=vuln_id_from_tool, ) find.unsaved_endpoints = [] find.unsaved_vulnerability_ids = [] From 81508b73a06e7135c6aa31d83adf8b6ae8155bae Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Fri, 19 Aug 2022 14:25:43 +0200 Subject: [PATCH 05/42] tools: introduce NeuVector API client --- dojo/tool_config/factory.py | 2 + dojo/tools/neuvector_api/__init__.py | 0 dojo/tools/neuvector_api/api_client.py | 192 +++++++++++++++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 dojo/tools/neuvector_api/__init__.py create mode 100644 dojo/tools/neuvector_api/api_client.py diff --git a/dojo/tool_config/factory.py b/dojo/tool_config/factory.py index 180164b00b6..b110781957d 100755 --- a/dojo/tool_config/factory.py +++ b/dojo/tool_config/factory.py @@ -4,6 +4,7 @@ from dojo.tools.api_edgescan.api_client import EdgescanAPI from dojo.tools.api_sonarqube.api_client import SonarQubeAPI from dojo.tools.api_vulners.api_client import VulnersAPI +from dojo.tools.neuvector_api.api_client import NeuVectorAPI SCAN_APIS = { @@ -13,6 +14,7 @@ 'Edgescan': EdgescanAPI, 'SonarQube': SonarQubeAPI, 'Vulners': VulnersAPI, + 'NeuVector': NeuVectorAPI, } diff --git a/dojo/tools/neuvector_api/__init__.py b/dojo/tools/neuvector_api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dojo/tools/neuvector_api/api_client.py b/dojo/tools/neuvector_api/api_client.py new file mode 100644 index 00000000000..4a49f5bc13f --- /dev/null +++ b/dojo/tools/neuvector_api/api_client.py @@ -0,0 +1,192 @@ +import json +import requests + +from dojo.models import Tool_Configuration, Tool_Type + + +class NeuVectorAPI: + def __init__(self, tool_config=None): + tool_type, _ = Tool_Type.objects.get_or_create(name='NeuVector') + + if not tool_config: + try: + tool_config = Tool_Configuration.objects.get(tool_type=tool_type) + except Tool_Configuration.DoesNotExist: + raise Exception( + 'No NeuVector tool is configured. 
\n' + 'Create a new Tool at Settings -> Tool Configuration' + ) + except Tool_Configuration.MultipleObjectsReturned: + raise Exception( + 'More than one Tool Configuration for NeuVector exists. \n' + 'Please specify at Product configuration which one should be used.' + ) + + self.nv_api_url = tool_config.url + if tool_config.authentication_type == "Password": + self.nv_user = tool_config.username + self.nv_pass = tool_config.password + else: + raise Exception('NeuVector Authentication type {} not supported'.format(tool_config.authentication_type)) + + self.login() + + def login(self): + """ + Login on NeuVector and get token. + :return: + """ + url = '{}/v1/auth'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json' + } + params = { + "password": { + "username": self.nv_user, + "password": self.nv_pass + } + } + resp = requests.post(url, headers=headers, data=json.dumps(params)) + if resp.ok: + self.nv_token = resp.json().get('token').get('token') + else: + raise Exception("Unable to authenticate on NeuVector due to {} - {}".format( + resp.status_code, resp.content.decode("utf-8") + )) + + def logout(self): + """ + Logout from NeuVector. + :return: + """ + url = '{}/v1/auth'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + requests.delete(url, headers=headers) + + def get_all_vp(self): + """ + Returns all vulnerability profiles with 'default' name. + """ + url = '{}/v1/vulnerability/profile/default'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + resp = requests.get(url, headers=headers) + if resp.ok: + return resp.json().get('profile').get('entries') + else: + raise Exception("Unable to fetch vulnerability profiles due to {} - {}".format( + resp.status_code, resp.content.decode("utf-8") + )) + + def create_vulnerability_profile(self, vp_id, name, comment, namespaces=[], images=[]): + """ + Creates a vulnerability profile in 'default' name with the provided parameters. + """ + url = '{}/v1/vulnerability/profile/default/entry'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + params = { + "config": { + "id": vp_id, + "name": name, + "comment": comment, + "days": 0, + "domains": namespaces, + "images": images + } + } + resp = requests.post(url, headers=headers, data=json.dumps(params)) + if resp.ok: + return + else: + raise Exception("Unable to create a vulnerability profile due to {} - {}. data: {}".format( + resp.status_code, resp.content.decode("utf-8"), json.dumps(params) + )) + + def update_vulnerability_profile(self, vp_id, name, comment, namespaces=[], images=[]): + """ + Updates a vulnerability profile in 'default' name with the provided parameters. 
+ """ + url = '{}/v1/vulnerability/profile/default'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + params = { + "config": { + "name": "default", + "entries": [ + { + "id": vp_id, + "name": name, + "comment": comment, + "days": 0, + "domains": namespaces, + "images": images + } + ] + } + } + resp = requests.post(url, headers=headers, data=json.dumps(params)) + if resp.ok: + return + else: + raise Exception("Unable to update a vulnerability profile due to {} - {}. data: {}".format( + resp.status_code, resp.content.decode("utf-8"), json.dumps(params) + )) + + def delete_vulnerability_profile(self, vp_id): + """ + Deletes vulnerability profile in 'default' name. + """ + url = '{}/v1/vulnerability/profile/default/entry/{}'.format(self.nv_api_url, vp_id) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + resp = requests.delete(url, headers=headers) + if resp.ok: + return + else: + raise Exception("Unable to delete vulnerability profile due to {} - {}".format( + resp.status_code, resp.content.decode("utf-8") + )) + + def test_connection(self): + """ + Returns number of namespaces or raises error. + """ + url = '{}/v1/domain'.format(self.nv_api_url) + headers = { + 'User-Agent': 'DefectDojo', + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'X-Auth-Token': self.nv_token + } + resp = requests.get(url, headers=headers) + if resp.ok: + num_namespaces = len(resp.json().get('domains')) + return f'You have access to {num_namespaces} namespaces' + else: + raise Exception("Unable to connect to NeuVector due to {} - {}, token: {}".format( + resp.status_code, resp.content.decode("utf-8"), self.nv_token + )) From 686f4400bfba161eb1e5128372963499e372454c Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Fri, 19 Aug 2022 14:26:07 +0200 Subject: [PATCH 06/42] trigger NeuVector client API when risk acceptance changed --- dojo/risk_acceptance/helper.py | 15 +++ dojo/tools/neuvector_api/updater.py | 153 ++++++++++++++++++++++++++++ dojo/tools/tool_issue_updater.py | 38 +++++++ 3 files changed, 206 insertions(+) create mode 100644 dojo/tools/neuvector_api/updater.py diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index 8034ce713c4..ba3ee5fcc72 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -8,6 +8,7 @@ from django.urls import reverse from dojo.celery import app from dojo.models import System_Settings, Risk_Acceptance, Finding +from dojo.tools import tool_issue_updater import logging logger = logging.getLogger(__name__) @@ -31,6 +32,8 @@ def expire_now(risk_acceptance): finding.save(dedupe_option=False) reactivated_findings.append(finding) + # best effort external tool integration + tool_issue_updater.async_tool_ra_remove(finding) # findings remain in this risk acceptance for reporting / metrics purposes else: logger.debug('%i:%s already active, no changes made.', finding.id, finding) @@ -68,6 +71,8 @@ def reinstate(risk_acceptance, old_expiration_date): # Update any endpoint statuses on each of the findings update_endpoint_statuses(finding, True) finding.save(dedupe_option=False) + # best effort external tool integration + tool_issue_updater.async_tool_ra_update(finding) reinstated_findings.append(finding) else: logger.debug('%i:%s: already inactive, not making any changes', finding.id, finding) 
@@ -88,6 +93,8 @@ def delete(eng, risk_acceptance): # Update any endpoint statuses on each of the findings update_endpoint_statuses(finding, False) finding.save(dedupe_option=False) + # best effort external tool integration + tool_issue_updater.async_tool_ra_remove(finding) # best effort jira integration, no status changes post_jira_comments(risk_acceptance, findings, unaccepted_message_creator) @@ -113,6 +120,8 @@ def remove_finding_from_risk_acceptance(risk_acceptance, finding): finding.save(dedupe_option=False) # best effort jira integration, no status changes post_jira_comments(risk_acceptance, [finding], unaccepted_message_creator) + # best effort external tool integration + tool_issue_updater.async_tool_ra_remove(finding) def add_findings_to_risk_acceptance(risk_acceptance, findings): @@ -124,6 +133,8 @@ def add_findings_to_risk_acceptance(risk_acceptance, findings): # Update any endpoint statuses on each of the findings update_endpoint_statuses(finding, True) risk_acceptance.accepted_findings.add(finding) + # best effort external tool integration + tool_issue_updater.async_tool_ra_update(finding) risk_acceptance.save() # best effort jira integration, no status changes @@ -284,6 +295,8 @@ def simple_risk_accept(finding, perform_save=True): # post_jira_comment might reload from database so see unaccepted finding. but the comment # only contains some text so that's ok post_jira_comment(finding, accepted_message_creator) + # best effort external tool integration + tool_issue_updater.async_tool_ra_update(finding) def risk_unaccept(finding, perform_save=True): @@ -304,6 +317,8 @@ def risk_unaccept(finding, perform_save=True): # post_jira_comment might reload from database so see unaccepted finding. but the comment # only contains some text so that's ok post_jira_comment(finding, unaccepted_message_creator) + # best effort external tool integration + tool_issue_updater.async_tool_ra_remove(finding) def remove_from_any_risk_acceptance(finding): diff --git a/dojo/tools/neuvector_api/updater.py b/dojo/tools/neuvector_api/updater.py new file mode 100644 index 00000000000..b20a861b52d --- /dev/null +++ b/dojo/tools/neuvector_api/updater.py @@ -0,0 +1,153 @@ +import logging +import json + +from django.core.exceptions import ValidationError +from dojo.tools.neuvector_api.api_client import NeuVectorAPI +from dojo.tools.neuvector.parser import NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME +from dojo.tools.neuvector.parser import NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME +from dojo.models import Risk_Acceptance + +logger = logging.getLogger(__name__) + + +def determine_namespaces(finding): + namespaces = [] + if finding.test.engagement.name != NEUVECTOR_CONTAINER_SCAN_ENGAGEMENT_NAME: + return namespaces + + test = finding.test.title + # test name example: "namespace_name / deployment_name" + t = test.split(" / ") + if len(t) == 2: + namespaces.append(t[0]) + else: + # do not append strings like 'unknown' as it creates incorrect + # filter + pass + return namespaces + +def determine_images(finding): + images = [] + if finding.test.engagement.name != NEUVECTOR_IMAGE_SCAN_ENGAGEMENT_NAME: + return images + + # image name can be extracted from notes only, a note example: + + # {"scan_timestamp":1661429956,"used_by":"workload_name","base_os":"alpine:3.16.2","workload_image":"image_path","labels":{... + notes = finding.test.notes.all() + # apparently, the latest note has index 0 + last_note = notes[0] + note_json = json.loads(last_note.entry) + if 'workload_image' in note_json: + # indeed, only one image. 
initially, it was assumed that there could be + # several images for a single finding/test + images.append(note_json['workload_image']) + + return images + +def find_free_id(client): + vps = client.get_all_vp() + if (not vps) or (len(vps) == 0): + return 1000 + # we assume that identifiers are sorted + vp_id = vps[-1]['id'] + vp_id += 1 + return vp_id + +def find_id(client, name, namespaces=[], images=[]): + ns_set_orig = set(namespaces) + im_set_orig = set(images) + vps = client.get_all_vp() + for vp in vps: + if vp['name'] == name: + if (ns_set_orig == set(vp['domains'])) and (im_set_orig == set(vp['images'])): + return vp['id'] + return -1 + +def produce_comment(finding): + comment = "risk accepted in DefectDojo" + + ra = finding.risk_acceptance + if ra.recommendation: + for i in Risk_Acceptance.TREATMENT_CHOICES: + if i[0] == ra.recommendation: + comment += "; recommendation: " + i[1] + if ra.recommendation_details: + comment += "; recommendation details: " + ra.recommendation_details + if ra.decision: + for i in Risk_Acceptance.TREATMENT_CHOICES: + if i[0] == ra.decision: + comment += "; decision: " + i[1] + if ra.decision_details: + comment += "; decision details: " + ra.decision_details + if ra.accepted_by: + comment += "; accepted by: " + ra.accepted_by + if ra.updated: + comment += "; updated on: " + ra.updated.strftime("%Y-%m-%d, %H:%M:%S") + if ra.owner: + comment += "; owner: " + ra.owner.get_full_name() + + return comment + + +class NeuVectorApiUpdater(): + @staticmethod + def prepare_client(): + return NeuVectorAPI() + + def delete_client(self, client): + client.logout() + + def update_risk_acceptance(self, finding): + client = self.prepare_client() + # finding title is "CVE-XXX: short description" + # we need only CVE part which is vulnerability['name'] in NV scan result + vp_name = finding.title.split(':')[0] + vp_namespaces = determine_namespaces(finding) + vp_images = determine_images(finding) + # find existing profile. we consider profiles as different even if they + # differ only in namespace/image filters + vp_id = find_id(client, name=vp_name, namespaces=vp_namespaces, images=vp_images) + + logger.debug('update_risk_acceptance %s, id: %d', vp_name, vp_id) + if finding.risk_accepted: + vp_comment = produce_comment(finding) + # if there is no existing profile, create one + if vp_id < 0: + logger.debug('creating a vulnerability profile') + vp_id = find_free_id(client) + client.create_vulnerability_profile(vp_id=vp_id, name=vp_name, comment=vp_comment, namespaces=vp_namespaces, images=vp_images) + else: + logger.debug('updating a vulnerability profile') + # update an existing profile + # in fact, only comment will be updated + client.update_vulnerability_profile(vp_id=vp_id, name=vp_name, comment=vp_comment, namespaces=vp_namespaces, images=vp_images) + + if not finding.risk_accepted: + logger.debug('risk not accepted') + # if profile does not exist, nothing to delete + if vp_id > 0: + logger.debug('deleting a vulnerability profile') + client.delete_vulnerability_profile(vp_id) + + self.delete_client(client) + + def check_remove_risk_acceptance(self, finding): + client = self.prepare_client() + # finding title is "CVE-XXX: short description" + # we need only CVE part which is vulnerability['name'] in NV scan result + vp_name = finding.title.split(':')[0] + vp_namespaces = determine_namespaces(finding) + vp_images = determine_images(finding) + # find existing profile. 
we consider profiles as different even if they + # differ only in namespace/image filters + vp_id = find_id(client, name=vp_name, namespaces=vp_namespaces, images=vp_images) + + logger.debug("check_remove_risk_acceptance {}, id: {}, namespaces: {}, images: {}".format(vp_name, vp_id, vp_namespaces, vp_images)) + + # if profile does not exist, nothing to delete + if vp_id > 0: + logger.debug('deleting a vulnerability profile') + client.delete_vulnerability_profile(vp_id) + + self.delete_client(client) diff --git a/dojo/tools/tool_issue_updater.py b/dojo/tools/tool_issue_updater.py index b9c4fe20b1a..52debef0bd7 100644 --- a/dojo/tools/tool_issue_updater.py +++ b/dojo/tools/tool_issue_updater.py @@ -3,6 +3,7 @@ from dojo.decorators import (dojo_async_task, dojo_model_from_id, dojo_model_to_id) from dojo.tools.api_sonarqube.parser import SCAN_SONARQUBE_API +from dojo.tools.neuvector.parser import NEUVECTOR_SCAN_NAME def async_tool_issue_update(finding, *args, **kwargs): @@ -38,3 +39,40 @@ def update_findings_from_source_issues(**kwargs): for finding in findings: SonarQubeApiUpdaterFromSource().update(finding) + + +def async_tool_ra_update(finding, *args, **kwargs): + if is_tool_ra_update_needed(finding): + tool_ra_update(finding) + + +def async_tool_ra_remove(finding, *args, **kwargs): + if is_tool_ra_update_needed(finding): + tool_ra_remove(finding) + + +def is_tool_ra_update_needed(finding, *args, **kwargs): + test_type = finding.test.test_type + return test_type.name == NEUVECTOR_SCAN_NAME + + +@dojo_model_to_id +@dojo_async_task +@app.task +@dojo_model_from_id +def tool_ra_update(finding, *args, **kwargs): + test_type = finding.test.test_type + if test_type.name == NEUVECTOR_SCAN_NAME: + from dojo.tools.neuvector_api.updater import NeuVectorApiUpdater + NeuVectorApiUpdater().update_risk_acceptance(finding) + + +@dojo_model_to_id +@dojo_async_task +@app.task +@dojo_model_from_id +def tool_ra_remove(finding, *args, **kwargs): + test_type = finding.test.test_type + if test_type.name == NEUVECTOR_SCAN_NAME: + from dojo.tools.neuvector_api.updater import NeuVectorApiUpdater + NeuVectorApiUpdater().check_remove_risk_acceptance(finding) From 1df1c97753bca960911b772f0ed6c8f2d70d9dd6 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Fri, 19 Aug 2022 14:20:48 +0200 Subject: [PATCH 07/42] introduce Tenable.SC API client tool --- dojo/tool_config/factory.py | 2 + dojo/tools/tenablesc_api/__init__.py | 0 dojo/tools/tenablesc_api/api_client.py | 134 +++++++++++++++++++++++++ requirements.txt | 1 + 4 files changed, 137 insertions(+) create mode 100644 dojo/tools/tenablesc_api/__init__.py create mode 100644 dojo/tools/tenablesc_api/api_client.py diff --git a/dojo/tool_config/factory.py b/dojo/tool_config/factory.py index b110781957d..b6f6399fa76 100755 --- a/dojo/tool_config/factory.py +++ b/dojo/tool_config/factory.py @@ -5,6 +5,7 @@ from dojo.tools.api_sonarqube.api_client import SonarQubeAPI from dojo.tools.api_vulners.api_client import VulnersAPI from dojo.tools.neuvector_api.api_client import NeuVectorAPI +from dojo.tools.tenablesc_api.api_client import TenableScAPI SCAN_APIS = { @@ -15,6 +16,7 @@ 'SonarQube': SonarQubeAPI, 'Vulners': VulnersAPI, 'NeuVector': NeuVectorAPI, + 'Tenable SC': TenableScAPI, } diff --git a/dojo/tools/tenablesc_api/__init__.py b/dojo/tools/tenablesc_api/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dojo/tools/tenablesc_api/api_client.py b/dojo/tools/tenablesc_api/api_client.py new file mode 100644 index 00000000000..083a0b704d5 --- 
/dev/null +++ b/dojo/tools/tenablesc_api/api_client.py @@ -0,0 +1,134 @@ +import json +import logging +import requests + +from dojo.models import Tool_Configuration, Tool_Type + +from tenable.sc import TenableSC + +logger = logging.getLogger(__name__) + + +class TenableScAPI: + def __init__(self, tool_config=None): + tool_type, _ = Tool_Type.objects.get_or_create(name='Tenable SC') + + if not tool_config: + try: + tool_config = Tool_Configuration.objects.get(tool_type=tool_type) + except Tool_Configuration.DoesNotExist: + raise Exception( + 'No Tenable SC tool is configured. \n' + 'Create a new Tool at Settings -> Tool Configuration' + ) + except Tool_Configuration.MultipleObjectsReturned: + raise Exception( + 'More than one Tool Configuration for Tenable SC exists. \n' + 'Please specify at Product configuration which one should be used.' + ) + + sc_url = tool_config.url + if tool_config.authentication_type == "Password": + sc_a_key = tool_config.username + sc_s_key = tool_config.password + else: + raise Exception('Tenable SC Authentication type {} not supported'.format(tool_config.authentication_type)) + + # due to invalid certificates configuration on sc.netcetera.com we have + # to explicitly do insecure connections + validate_cert = False + self.sc = TenableSC(url=sc_url, retries=1, + ssl_verify=validate_cert, + access_key=sc_a_key, secret_key=sc_s_key) + + try: + self.login() + except: + logger.exception('something bad happened during authentication on Tenable.SC') + raise + + def login(self): + """ + Log in to Tenable.SC and get a token. + :return: + """ + try: + self.sc.login() + except: + logger.exception('something bad happened during authentication on Tenable.SC') + raise + + def logout(self): + """ + Log out from Tenable.SC. + :return: + """ + try: + self.sc.logout() + except: + logger.exception('something bad happened during authentication on Tenable.SC') + raise + + def get_all_ar(self): + """ + Returns all accepted risks from all repos. + """ + ar_list = None + try: + ar_list = self.sc.accept_risks.list() + except: + logger.exception('can not fetch accepted risks from Tenable.SC') + raise + return ar_list + + def create_ar_rule(self, plugin_id, repo_id, asset_list, comment): + """ + Creates an accept risk rule. In all repositories. + """ + try: + rule = None + if len(asset_list) == 0: + rule = self.sc.accept_risks.create(plugin_id, repos=[repo_id], comments=comment) + else: + ips = [] + for asset in asset_list: + ips.append(asset['ip']) + # we just take a port from the first asset, only one port is expected anyway + rule = self.sc.accept_risks.create(plugin_id, repos=[repo_id], ips=ips, port=asset_list[0]['port'], comments=comment) + + self.sc.accept_risks.apply(rule['id'], 0) + except: + logger.exception('can not create accept risk rule on Tenable.SC') + raise + + def update_ar_rule(self, rule_id, plugin_id, repo_id, asset_list, comment): + """ + Updates an accept risk rule. + """ + try: + self.sc.accept_risks.delete(rule_id) + except: + logger.exception('can not delete accept risk rule from Tenable.SC') + raise + + self.create_ar_rule(plugin_id, repo_id, asset_list, comment) + + def delete_ar_rule(self, rule_id): + """ + Deletes an accept risk rule. + """ + try: + self.sc.accept_risks.delete(rule_id) + except: + logger.exception('can not delete accept risk rule from Tenable.SC') + raise + + def test_connection(self): + """ + Returns current user details.
+ """ + try: + user = self.sc.current.user() + return f'User info: {user}' + except: + raise Exception("can't fetch current user details") diff --git a/requirements.txt b/requirements.txt index 111376dc173..f1925e23951 100644 --- a/requirements.txt +++ b/requirements.txt @@ -85,3 +85,4 @@ vulners==2.1.2 fontawesomefree==6.5.1 python-ldap==3.4.3 django-auth-ldap==4.5.0 +pyTenable>=1.4.13 From a63e9661437bce3ede1cf78c4f110e05fc23c086 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Fri, 19 Aug 2022 14:21:27 +0200 Subject: [PATCH 08/42] tools: update accept risk rules in Tenable.SC when the same happens in DD --- dojo/tools/tenablesc_api/updater.py | 177 ++++++++++++++++++++++++++++ dojo/tools/tool_issue_updater.py | 8 +- 2 files changed, 184 insertions(+), 1 deletion(-) create mode 100644 dojo/tools/tenablesc_api/updater.py diff --git a/dojo/tools/tenablesc_api/updater.py b/dojo/tools/tenablesc_api/updater.py new file mode 100644 index 00000000000..253f17b6b4b --- /dev/null +++ b/dojo/tools/tenablesc_api/updater.py @@ -0,0 +1,177 @@ +import logging +import json +import ipaddress + +from dojo.tools.tenablesc_api.api_client import TenableScAPI +from dojo.models import Risk_Acceptance + +logger = logging.getLogger(__name__) + + +def find_id(client, plugin_id, repo_id, asset_list): + ars = client.get_all_ar() + for ar in ars: + if ar['plugin']['id'] != plugin_id: + continue + # we only support assets defined explicitly by their addresses or rules + # that do not specify assets + if ar['hostType'] != 'ip' and ar['hostType'] != 'all': + continue + # if we know repository, compare it + if repo_id: + if ar['repository']['id'] != repo_id: + continue + # validate that the rule covers one of the expected asssets + if ar['hostType'] == 'ip': + # accepted risk may have IP addresses in the following format: + # 192.168.25.27-192.168.25.30,192.168.25.40 *sigh*, we have to + # parse it at first + ipn = [] + for entry in ar['hostValue'].split(','): + ip_range = entry.split('-', 1) + + if len(ip_range) == 2: + start_ip = ipaddress.ip_address(ip_range[0]) + end_ip = ipaddress.ip_address(ip_range[1]) + while start_ip <= end_ip: + ipn.append(start_ip) + start_ip += 1 + else: + ipn.append(ipaddress.ip_address(ip_range[0])) + + for asset in asset_list: + # we do not compare port as Tenable does not support port + # per-host: there is only one port for several IP addresses + if ipaddress.ip_address(asset['ip']) in ipn: + return ar['id'] + # special case of all assets + if len(asset_list) == 0 and ar['hostType'] == 'all': + return ar['id'] + return "" + +def get_repository_id(client, finding): + # find repository ID from notes + notes = finding.test.notes.all() + # apparently, the latest note has index 0 + last_note = notes[0] + # it has JSON structure in it, like that: + # {"scan_instance_id": "3228", "repository_id": "15", "orig_url": + # "https://tenable_host/#scan_results/view/3228"} + note_json = json.loads(last_note.entry) + if note_json and 'repository_id' in note_json: + return note_json['repository_id'] + return None + +def get_assets(client, finding): + assets = [] + + for ep in finding.endpoints.all(): + asset = { + 'ip': ep.host, + 'port': ep.port if ep.port != None else 0, + } + # if endpoint happend to be referenced by DNS, we can't do anything + # about that due to limitation of Tenable.SC: we need an IP address in + # order to reference it properly. on the other hand, DefectDojo does + # not have an additional field for IP address. 
+ try: + ipaddress.ip_address(asset['ip']) + assets.append(asset) + except: + pass + + return assets + +def produce_comment(finding): + comment = "risk accepted in DefectDojo" + + ra = finding.risk_acceptance + if ra.recommendation: + for i in Risk_Acceptance.TREATMENT_CHOICES: + if i[0] == ra.recommendation: + comment += "; recommendation: " + i[1] + if ra.recommendation_details: + comment += "; recommendation details: " + ra.recommendation_details + if ra.decision: + for i in Risk_Acceptance.TREATMENT_CHOICES: + if i[0] == ra.decision: + comment += "; decision: " + i[1] + if ra.decision_details: + comment += "; decision details: " + ra.decision_details + if ra.accepted_by: + comment += "; accepted by: " + ra.accepted_by + if ra.updated: + comment += "; updated on: " + ra.updated.strftime("%Y-%m-%d, %H:%M:%S") + if ra.owner: + comment += "; owner: " + ra.owner.get_full_name() + + return comment + + +class TenableScApiUpdater(): + @staticmethod + def prepare_client(): + return TenableScAPI() + + def delete_client(self, client): + client.logout() + + def update_risk_acceptance(self, finding): + ar_plugin_id = finding.vuln_id_from_tool + if len(ar_plugin_id) == 0: + logger.info("finding does not have ID information") + return + + client = self.prepare_client() + + repo_id = get_repository_id(client, finding) + + asset_list = get_assets(client, finding) + + ar_id = find_id(client, ar_plugin_id, repo_id, asset_list) + + logger.debug("update_risk_acceptance. plugin id: {}, existing ar id: {}, repo id: {}, assets: {}.".format(ar_plugin_id, ar_id, repo_id, asset_list)) + + if finding.risk_accepted: + ar_comment = produce_comment(finding) + # if there is no existing rule, create one + if len(ar_id) == 0: + logger.debug('creating an accept risk rule') + client.create_ar_rule(ar_plugin_id, repo_id, asset_list, ar_comment) + else: + logger.debug('updating an accept risk rule') + # update an existing rule + # in fact, only comment will be updated + client.update_ar_rule(ar_id, ar_plugin_id, repo_id, asset_list, ar_comment) + + if not finding.risk_accepted: + logger.debug('risk not accepted') + # if profile does not exist, nothing to delete + if len(ar_id) > 0: + logger.debug('deleting an accept risk rule') + client.delete_ar_rule(ar_id) + + self.delete_client(client) + + def check_remove_risk_acceptance(self, finding): + ar_plugin_id = finding.vuln_id_from_tool + if len(ar_plugin_id) == 0: + logger.info("finding does not have ID information") + return + + client = self.prepare_client() + + repo_id = get_repository_id(client, finding) + + asset_list = get_assets(client, finding) + + ar_id = find_id(client, ar_plugin_id, repo_id, asset_list) + + logger.debug("check_remove_risk_acceptance. 
plugin id: {}, existing ar id: {}, repo id: {}, assets: {}.".format(ar_plugin_id, ar_id, repo_id, asset_list)) + + # if accept risk rule does not exist, nothing to delete + if len(ar_id) > 0: + logger.debug('deleting an accept risk rule') + client.delete_ar_rule(ar_id) + + self.delete_client(client) diff --git a/dojo/tools/tool_issue_updater.py b/dojo/tools/tool_issue_updater.py index 52debef0bd7..4424f906569 100644 --- a/dojo/tools/tool_issue_updater.py +++ b/dojo/tools/tool_issue_updater.py @@ -53,7 +53,7 @@ def async_tool_ra_remove(finding, *args, **kwargs): def is_tool_ra_update_needed(finding, *args, **kwargs): test_type = finding.test.test_type - return test_type.name == NEUVECTOR_SCAN_NAME + return test_type.name in (NEUVECTOR_SCAN_NAME, "Nessus Scan") @dojo_model_to_id @@ -65,6 +65,9 @@ def tool_ra_update(finding, *args, **kwargs): if test_type.name == NEUVECTOR_SCAN_NAME: from dojo.tools.neuvector_api.updater import NeuVectorApiUpdater NeuVectorApiUpdater().update_risk_acceptance(finding) + if test_type.name == "Nessus Scan": + from dojo.tools.tenablesc_api.updater import TenableScApiUpdater + TenableScApiUpdater().update_risk_acceptance(finding) @dojo_model_to_id @@ -76,3 +79,6 @@ def tool_ra_remove(finding, *args, **kwargs): if test_type.name == NEUVECTOR_SCAN_NAME: from dojo.tools.neuvector_api.updater import NeuVectorApiUpdater NeuVectorApiUpdater().check_remove_risk_acceptance(finding) + if test_type.name == "Nessus Scan": + from dojo.tools.tenablesc_api.updater import TenableScApiUpdater + TenableScApiUpdater().check_remove_risk_acceptance(finding) From e55364c4505aa97e1d152327d2127755192cf1fb Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Mon, 5 Sep 2022 11:22:18 +0200 Subject: [PATCH 09/42] dojo: settings: add DD_DATABASE_OPTIONS env to set DB engine options Django database engine supports OPTIONS entry in DATABASES setting. This entry is the only way to configure specific non-standard database connection settings, for instance: mutual TLS authentication. This commit adds DD_DATABASE_OPTIONS environment variable support as dictionary, empty by default. 
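Illustrative note (not part of the change itself): with the dict cast declared above, the options are supplied as a comma-separated key=value list. For a mutual-TLS setup this could look roughly like
  DD_DATABASE_OPTIONS='sslmode=require,sslcert=/run/defectdojo/client.crt,sslkey=/run/defectdojo/client.key,sslrootcert=/run/defectdojo/ca.crt'
where the paths and file names are deployment-specific; the Helm configmap change later in this series assembles exactly such a value from chart parameters.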
--- dojo/settings/settings.dist.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 6d89de9782b..9061f62a417 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -90,6 +90,7 @@ DD_DATABASE_PASSWORD=(str, 'defectdojo'), DD_DATABASE_PORT=(int, 3306), DD_DATABASE_USER=(str, 'defectdojo'), + DD_DATABASE_OPTIONS=(dict, {}), DD_SECRET_KEY=(str, ''), DD_CREDENTIAL_AES_256_KEY=(str, '.'), DD_DATA_UPLOAD_MAX_MEMORY_SIZE=(int, 8388608), # Max post size set to 8mb @@ -431,6 +432,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param 'PASSWORD': env('DD_DATABASE_PASSWORD'), 'HOST': env('DD_DATABASE_HOST'), 'PORT': env('DD_DATABASE_PORT'), + 'OPTIONS': env('DD_DATABASE_OPTIONS'), } } From cb5deb8bd63203f0bcfe30ed0d91ad2393f0b551 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Wed, 7 Sep 2022 10:56:52 +0200 Subject: [PATCH 10/42] helm: configmap: define DD_DATABASE_OPTIONS from Postgres mTLS connection parameters --- helm/defectdojo/templates/configmap.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/helm/defectdojo/templates/configmap.yaml b/helm/defectdojo/templates/configmap.yaml index 811a9891535..dc67761e0b5 100644 --- a/helm/defectdojo/templates/configmap.yaml +++ b/helm/defectdojo/templates/configmap.yaml @@ -32,6 +32,12 @@ data: DD_DATABASE_PORT: '{{ if eq .Values.database "postgresql" }}{{ .Values.postgresql.primary.service.ports.postgresql }}{{ end }}{{ if eq .Values.database "postgresqlha" }}{{ .Values.postgresqlha.service.ports.postgresql }}{{ end }}{{ if eq .Values.database "mysql" }}{{ .Values.mysql.primary.service.ports.mysql }}{{ end }}' DD_DATABASE_USER: {{ if eq .Values.database "postgresql" }}{{ .Values.postgresql.auth.username }}{{ end }}{{ if eq .Values.database "postgresqlha" }}{{ .Values.postgresqlha.postgresql.username }}{{ end }}{{ if eq .Values.database "mysql" }}{{ .Values.mysql.auth.username }}{{ end }} DD_DATABASE_NAME: {{ if eq .Values.database "postgresql" }}{{ .Values.postgresql.auth.database }}{{ end }}{{ if eq .Values.database "postgresqlha" }}{{ .Values.postgresqlha.postgresql.database }}{{ end }}{{ if eq .Values.database "mysql" }}{{ .Values.mysql.auth.database }}{{ end }} + {{- if .Values.postgresql.tls.enabled }} + # /run/defectdojo is hardcoded here due to + # https://github.com/kubernetes/kubernetes/issues/57923 as we have to have + # a writable directory in order to change permissions for the clientKey file + DD_DATABASE_OPTIONS: 'sslmode=require,sslcert=/run/defectdojo/{{ .Values.postgresql.tls.clientCert }},sslkey=/run/defectdojo/{{ .Values.postgresql.tls.clientKey }},sslrootcert=/run/defectdojo/{{ .Values.postgresql.tls.caCert }}' + {{- end }} DD_INITIALIZE: '{{ .Values.initializer.run }}' DD_UWSGI_ENDPOINT: /run/defectdojo/uwsgi.sock DD_UWSGI_HOST: localhost From fc401b9a9559d11a50cab3297d957e1609693594 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Wed, 7 Sep 2022 10:59:36 +0200 Subject: [PATCH 11/42] helm: support mounting Postgres mTLS client certs (includes fixing client key permissions) Due to https://github.com/kubernetes/kubernetes/issues/57923 we have to copy the provided client secrets to a temporary writable location and adjust private key file permissions (to 0600). This has to be done for all Pods which may connect to the database.
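Illustrative note: assuming the default file names from the chart values added later in this series (client.crt, client.key, ca.crt), the secret mounted here could be created with something along the lines of
  kubectl create secret generic pgsql-certs --from-file=client.crt --from-file=client.key --from-file=ca.crt
The secret name and the key names must match the postgresql.tls.* values.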
--- .../templates/celery-worker-deployment.yaml | 26 +++++++++++++++++- .../templates/django-deployment.yaml | 24 +++++++++++++++++ .../defectdojo/templates/initializer-job.yaml | 27 ++++++++++++++++++- 3 files changed, 75 insertions(+), 2 deletions(-) diff --git a/helm/defectdojo/templates/celery-worker-deployment.yaml b/helm/defectdojo/templates/celery-worker-deployment.yaml index b6ca15e687a..26d86d71607 100644 --- a/helm/defectdojo/templates/celery-worker-deployment.yaml +++ b/helm/defectdojo/templates/celery-worker-deployment.yaml @@ -50,6 +50,8 @@ spec: configMap: name: {{ .Values.django.uwsgi.certificates.configName }} {{- end }} + - name: run + emptyDir: {} {{- range .Values.celery.extraVolumes }} - name: userconfig-{{ .name }} {{ .type }}: @@ -62,6 +64,13 @@ spec: path: {{ .hostPath }} {{- end }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: postgresql-tls-volume + secret: + secretName: {{ .Values.postgresql.tls.secretName }} + # we need it permissive to access as low-privileged user + defaultMode: 0644 + {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy @@ -91,7 +100,15 @@ spec: securityContext: {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} {{- end }} - command: ['/entrypoint-celery-worker.sh'] + command: + - /bin/sh + - -cx + - | + {{- if .Values.postgresql.tls.enabled }} + cp -a /run/defectdojo/{{ .Values.postgresql.tls.secretName }}/..data/* /run/defectdojo/ + chmod 600 /run/defectdojo/{{ .Values.postgresql.tls.clientKey }} + {{- end }} + /entrypoint-celery-worker.sh volumeMounts: {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount @@ -103,6 +120,13 @@ spec: mountPath: {{ .path }} subPath: {{ .subPath }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: run + # here we store client TLS certificates + mountPath: /run/defectdojo + - name: postgresql-tls-volume + mountPath: /run/defectdojo/{{ .Values.postgresql.tls.secretName }} + {{- end }} envFrom: - configMapRef: name: {{ $fullName }} diff --git a/helm/defectdojo/templates/django-deployment.yaml b/helm/defectdojo/templates/django-deployment.yaml index 398aa767cb3..9eca74ddd9b 100644 --- a/helm/defectdojo/templates/django-deployment.yaml +++ b/helm/defectdojo/templates/django-deployment.yaml @@ -82,6 +82,15 @@ spec: emptyDir: {} {{- end }} {{- end }} + # we are mounting a secret which contains client TLS certificates to a + # temporary directory + {{- if .Values.postgresql.tls.enabled }} + - name: postgresql-tls-volume + secret: + secretName: {{ .Values.postgresql.tls.secretName }} + # we need it permissive to access as low-privileged user + defaultMode: 0644 + {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy @@ -147,6 +156,10 @@ spec: - name: {{ .Values.django.mediaPersistentVolume.name }} mountPath: {{.Values.extraConfigs.DD_MEDIA_ROOT | default "/app/media" | quote }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: postgresql-tls-volume + mountPath: /run/defectdojo/{{ .Values.postgresql.tls.secretName }} + {{- end }} ports: - name: http-uwsgi protocol: TCP @@ -207,6 +220,17 @@ spec: {{- if .Values.extraEnv }} {{- toYaml .Values.extraEnv | nindent 8 }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -cx + - | + cp -a /run/defectdojo/{{ .Values.postgresql.tls.secretName }}/..data/* /run/defectdojo/ + chmod 600 /run/defectdojo/{{ .Values.postgresql.tls.clientKey }} + {{- end }} {{- if 
.Values.django.uwsgi.livenessProbe.enabled }} livenessProbe: httpGet: diff --git a/helm/defectdojo/templates/initializer-job.yaml b/helm/defectdojo/templates/initializer-job.yaml index e90015e8756..1ef8ccfc93f 100644 --- a/helm/defectdojo/templates/initializer-job.yaml +++ b/helm/defectdojo/templates/initializer-job.yaml @@ -48,6 +48,17 @@ spec: path: {{ .hostPath }} {{- end }} {{- end }} + # we are mounting a secret which contains client TLS certificates to a + # temporary directory + {{- if .Values.postgresql.tls.enabled }} + - name: run + emptyDir: {} + - name: postgresql-tls-volume + secret: + secretName: {{ .Values.postgresql.tls.secretName }} + # we need it permissive to access as low-privileged user + defaultMode: 0644 + {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy @@ -84,8 +95,22 @@ spec: mountPath: {{ .path }} subPath: {{ .subPath }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: run + # here we store client TLS certificates + mountPath: /run/defectdojo + - name: postgresql-tls-volume + mountPath: /run/defectdojo/{{ .Values.postgresql.tls.secretName }} + {{- end }} command: - - /entrypoint-initializer.sh + - /bin/sh + - -cx + - | + {{- if .Values.postgresql.tls.enabled }} + cp -a /run/defectdojo/{{ .Values.postgresql.tls.secretName }}/..data/* /run/defectdojo/ + chmod 600 /run/defectdojo/{{ .Values.postgresql.tls.clientKey }} + {{- end }} + /entrypoint-initializer.sh envFrom: - configMapRef: name: {{ $fullName }} From 3967e5dc43ec305c42fc910be9bd0ef82d7a2c12 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Wed, 7 Sep 2022 11:01:08 +0200 Subject: [PATCH 12/42] helm: values: add example for Postgres mTLS connection --- helm/defectdojo/values.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index c6eddcba413..c9b098b7706 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -407,6 +407,17 @@ postgresql: shmVolume: chmod: enabled: false + # mTLS support + tls: + # set to true to enable mutual TLS + enabled: false + # if enabled, define a secret containing certificates + secretName: pgsql-certs + # filenames has to correspond to entries in 'secretName' above + clientCert: client.crt + clientKey: client.key + caCert: ca.crt + # To use an external PostgreSQL instance, set enabled to false and uncomment # the line below: From 40f28415c2b4bd7a9738d885e8f9f84fc0118339 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Tue, 13 Sep 2022 15:17:45 +0200 Subject: [PATCH 13/42] dojo: settings: support configuring LDAP FIND_GROUP_PERMS and MIRROR_GROUPS via environment --- dojo/settings/settings.dist.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 9061f62a417..c382047b95f 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -301,6 +301,9 @@ DD_LDAP_GROUP_REQUIRE=(str, ''), DD_LDAP_USER_FLAG_ACTIVE=(str, ''), DD_LDAP_USER_FLAG_SUPERUSER=(str, ''), + DD_LDAP_FIND_GROUP_PERMS=(bool, False), + DD_LDAP_CACHE_TIMEOUT=(int, 3600), + DD_LDAP_MIRROR_GROUPS=(bool, False), ) @@ -411,6 +414,10 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param "is_superuser": env('DD_LDAP_USER_FLAG_SUPERUSER'), } +AUTH_LDAP_FIND_GROUP_PERMS = env('DD_LDAP_FIND_GROUP_PERMS') +AUTH_LDAP_CACHE_TIMEOUT = env('DD_LDAP_CACHE_TIMEOUT') +AUTH_LDAP_MIRROR_GROUPS = env('DD_LDAP_MIRROR_GROUPS') + # 
------------------------------------------------------------------------------ # DATABASE # ------------------------------------------------------------------------------ From cec0d5030750d62b8f16338e22c7a366115d9fe2 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Tue, 13 Sep 2022 15:21:03 +0200 Subject: [PATCH 14/42] dojo: group: create/delete Dojo_Group when auth backend creates/deletes one When using non-DefectDojo native authentication backend, there could be a case when groups are created by this backend. For instance, when LDAP module is configured to mirror groups. This commit handles this case and creates/deletes Dojo_Group instance when "auth_group" is created/deleted. --- dojo/group/utils.py | 45 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/dojo/group/utils.py b/dojo/group/utils.py index be7f5ea1d63..2ddf5e57a2d 100644 --- a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -27,11 +27,19 @@ def group_post_save_handler(sender, **kwargs): created = kwargs.pop('created') group = kwargs.pop('instance') if created: + # will also be called when "auth_group" is created by underlying + # authentication method (see the slot below). at this moment + # "auth_group" is not ready yet on database level, but is already set + # as a class member. + if group.auth_group is not None: + return + # Create authentication group auth_group = Group(name=get_auth_group_name(group)) auth_group.save() group.auth_group = auth_group group.save() + user = get_current_user() if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS: # Add the current user as the owner of the group @@ -41,7 +49,28 @@ def group_post_save_handler(sender, **kwargs): member.role = Role.objects.get(is_owner=True) member.save() # Add user to authentication group as well - auth_group.user_set.add(user) + group.auth_group.user_set.add(user) + else: + # rename auth_group if necessary + if group.auth_group: + group.auth_group.name = group.name + group.auth_group.save() + + +# will be called when underlying authentication model creates a group or it is +# created manually in the slot above +@receiver(post_save, sender=Group) +def auth_group_post_save_handler(sender, **kwargs): + created = kwargs.pop('created') + group = kwargs.pop('instance') + if created: + # at this moment we may have the corresponding Dojo_Group instance if + # auth_group was created in the slot above. otherwize, there is no + # Dojo_Group yet and we need to create one. + try: + Dojo_Group.objects.get(name=group.name) + except: + Dojo_Group.objects.create(name=group.name, auth_group=group) @receiver(post_delete, sender=Dojo_Group) @@ -52,6 +81,20 @@ def group_post_delete_handler(sender, **kwargs): group.auth_group.delete() +@receiver(post_delete, sender=Group) +def auth_group_post_delete_handler(sender, **kwargs): + group = kwargs.pop('instance') + # DefectDojo group doesn't get deleted automatically when underlying group + # is deleted. we need to avoid post_delete recursion. 
+ try: + dgroup = Dojo_Group.objects.get(name=group.name) + dgroup.auth_group = None + dgroup.save() + dgroup.delete() + except: + pass + + @receiver(post_save, sender=Dojo_Group_Member) def group_member_post_save_handler(sender, **kwargs): created = kwargs.pop('created') From 5feed0cd3947b9609ba41f0f44d70876c1b4f503 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Tue, 13 Sep 2022 15:29:38 +0200 Subject: [PATCH 15/42] dojo: utils: update Dojo_Group_Member when auth backend updates one When using non-DefectDojo native authentication backend, there could be a case when group membership is updated by this backend. For instance, when LDAP module is configured to mirror groups it will automatically add users to groups. This commit handles this case and update Dojo native group membership according to internal groups configured by auth backend. --- dojo/utils.py | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/dojo/utils.py b/dojo/utils.py index fe17b240d22..1bbff0a299a 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -28,7 +28,7 @@ from dojo.github import add_external_issue_github, update_external_issue_github, close_external_issue_github, reopen_external_issue_github from dojo.models import Finding, Engagement, Finding_Group, Finding_Template, Product, \ Test, User, Dojo_User, System_Settings, Notifications, Endpoint, Benchmark_Type, \ - Language_Type, Languages, Dojo_Group_Member, NOTIFICATION_CHOICES + Language_Type, Languages, Dojo_Group_Member, Dojo_Group, NOTIFICATION_CHOICES from asteval import Interpreter from dojo.notifications.helper import create_notification import logging @@ -1775,6 +1775,63 @@ def user_post_save(sender, instance, created, **kwargs): instance.save() +# called when underlying authentication backend alters a User, we handle only +# the case when user is NOT created. the purpose of this slot is to update Dojo +# native user group membership from membership managed by underlying +# authentication backend. +@receiver(post_save, sender=User) +def user_post_save_auth_backend(sender, instance, created, **kwargs): + if not created: + # do not do anything if the user does not belong to any groups + backend_groups = instance.groups.all() + if backend_groups.count() == 0: + return + # refreshing user's group membership: keeping only those Dojo groups + # which correspond to internal groups. as Dojo_Group and Group are + # different classes we can't easily compare lists. thus, the code below + # is quite ugly. + dojo_user = Dojo_User.objects.get(username=instance.username) + existing_membership = Dojo_Group_Member.objects.filter(user=dojo_user) + for em in existing_membership: + delete_membership = True + for bg in backend_groups: + if em.group.name == bg.name: + delete_membership = False + break + if delete_membership: + em.delete() + + updated_membership = Dojo_Group_Member.objects.filter(user=dojo_user) + + # need this to set default role + system_settings = System_Settings.objects.get() + for bg in backend_groups: + add_membership = True + for um in updated_membership: + if um.group.name == bg.name: + add_membership = False + break + + if not add_membership: + continue + + # if Dojo group that corresponds to backend authentication group + # does not exist yet, it is an issue, but we have to escape from it + # in a safe way, so we just do not add user to the group without + # throwing 500 error. 
+ try: + dojo_group = Dojo_Group.objects.get(name=bg.name) + gm = None + if system_settings.default_group_role: + gm = Dojo_Group_Member(group=dojo_group, user=dojo_user, + role=system_settings.default_group_role) + else: + gm = Dojo_Group_Member(group=dojo_group, user=dojo_user) + gm.save() + except: + pass + + @receiver(post_save, sender=Engagement) def engagement_post_Save(sender, instance, created, **kwargs): if created: From 6e1e7def62368f004d7ac658152591f2c9c0762e Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 08:56:01 +0200 Subject: [PATCH 16/42] packaging: getting rid of mysql (mariadb) This commit removes mysql requirements from Python and Docker. It also changes the default database engine in settings to avoid failure during dummy startup. This commit does not have chances to be merged into upstream. --- Dockerfile.django-debian | 10 +++++----- Dockerfile.nginx-debian | 6 +++--- dojo/settings/settings.dist.py | 2 +- requirements.txt | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index faa81f3364b..9ce82bae6fb 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -14,9 +14,9 @@ RUN \ gcc \ build-essential \ dnsutils \ - default-mysql-client \ - libmariadb-dev-compat \ - libpq-dev \ + # default-mysql-client \ + # libmariadb-dev-compat \ + # libpq-dev \ postgresql-client \ xmlsec1 \ git \ @@ -50,8 +50,8 @@ RUN \ libjpeg62 \ libtiff5 \ dnsutils \ - default-mysql-client \ - libmariadb3 \ + # default-mysql-client \ + # libmariadb3 \ xmlsec1 \ git \ uuid-runtime \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 70d514246ae..1d9c7676145 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -14,9 +14,9 @@ RUN \ gcc \ build-essential \ dnsutils \ - default-mysql-client \ - libmariadb-dev-compat \ - libpq-dev \ + # default-mysql-client \ + # libmariadb-dev-compat \ + # libpq-dev \ postgresql-client \ libldap2-dev \ libsasl2-dev \ diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index c382047b95f..5abd792004d 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -82,7 +82,7 @@ # models should be passed to celery by ID, default is False (for now) DD_FORCE_LOWERCASE_TAGS=(bool, True), DD_MAX_TAG_LENGTH=(int, 25), - DD_DATABASE_ENGINE=(str, 'django.db.backends.mysql'), + DD_DATABASE_ENGINE=(str, 'django.db.backends.postgresql'), DD_DATABASE_HOST=(str, 'mysql'), DD_DATABASE_NAME=(str, 'defectdojo'), # default django database name for testing is test_ diff --git a/requirements.txt b/requirements.txt index f1925e23951..1a1acc87805 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,7 +33,7 @@ jira==3.6.0 PyGithub==1.58.2 lxml==5.1.0 Markdown==3.5.2 -mysqlclient==2.1.1 +# mysqlclient==2.1.1 openpyxl==3.1.2 xlrd==1.2.0 Pillow==10.2.0 # required by django-imagekit From 8e33a0bb25cf059fac81a6d84fcda1d61af8f4a8 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:16:54 +0200 Subject: [PATCH 17/42] packaging: remove unnecessary packages (dnsutils, uuid-runtime) --- Dockerfile.django-debian | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 9ce82bae6fb..cc7d3aae0d3 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -49,13 +49,14 @@ RUN \ libopenjp2-7 \ libjpeg62 \ libtiff5 \ - dnsutils \ + # dnsutils \ # default-mysql-client \ # libmariadb3 \ xmlsec1 \ + # required 
by gitpython module git \ - uuid-runtime \ - libpq-dev \ + # uuid-runtime \ + # libpq-dev \ # only required for the dbshell (used by the initializer job) postgresql-client \ # libcurl4-openssl-dev is required for installing pycurl python package From 81a6af39a4e6ac72654667982179272d4cbf601a Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:18:11 +0200 Subject: [PATCH 18/42] packaging: add libssl-dev as dependency due to pycurl module --- Dockerfile.django-debian | 4 ++++ Dockerfile.nginx-debian | 2 ++ 2 files changed, 6 insertions(+) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index cc7d3aae0d3..6c24d46a831 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -23,6 +23,8 @@ RUN \ uuid-runtime \ # libcurl4-openssl-dev is required for installing pycurl python package libcurl4-openssl-dev \ + # apparently, libssl-dev is also needed + libssl-dev \ libldap2-dev \ libsasl2-dev \ && \ @@ -61,6 +63,8 @@ RUN \ postgresql-client \ # libcurl4-openssl-dev is required for installing pycurl python package libcurl4-openssl-dev \ + # apparently, libssl-dev is also needed + libssl-dev \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists && \ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 1d9c7676145..2085126b0c9 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -25,6 +25,8 @@ RUN \ uuid-runtime \ # libcurl4-openssl-dev is required for installing pycurl python package libcurl4-openssl-dev \ + # apparently, libssl-dev is also needed + libssl-dev \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists && \ From f483b6d73fe9cc717efeb4bd301bf200e1eba6f0 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:20:14 +0200 Subject: [PATCH 19/42] packaging: remove unnecessary python stuff, test scripts and tests themselves --- Dockerfile.django-debian | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 6c24d46a831..d7149a1accb 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -80,16 +80,18 @@ RUN export PYCURL_SSL_LIBRARY=openssl && \ --no-cache-dir \ --no-index \ --find-links=/tmp/wheels \ - -r ./requirements.txt + -r ./requirements.txt && \ + # remove tests installed by python modules + rm -rf /usr/local/lib/python*/site-packages/*/tests /usr/local/lib/python*/site-packages/slapdtest/certs COPY \ docker/entrypoint-celery-beat.sh \ docker/entrypoint-celery-worker.sh \ docker/entrypoint-initializer.sh \ docker/entrypoint-uwsgi.sh \ - docker/entrypoint-uwsgi-dev.sh \ - docker/entrypoint-unit-tests.sh \ - docker/entrypoint-unit-tests-devDocker.sh \ + # docker/entrypoint-uwsgi-dev.sh \ + # docker/entrypoint-unit-tests.sh \ + # docker/entrypoint-unit-tests-devDocker.sh \ docker/wait-for-it.sh \ docker/secret-file-loader.sh \ docker/certs/* \ @@ -104,7 +106,7 @@ RUN update-ca-certificates # Add extra fixtures to docker image which are loaded by the initializer COPY docker/extra_fixtures/* /app/dojo/fixtures/ -COPY tests/ ./tests/ +# COPY tests/ ./tests/ RUN \ # Remove placeholder copied from docker/certs rm -f /readme.txt && \ From 799e14baa88acbb4aad4c2ba59fe5404938650e9 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:21:06 +0200 Subject: [PATCH 20/42] packaging: use only curl when fetching data --- Dockerfile.nginx-debian | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index 
2085126b0c9..a5a30bf9644 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -46,14 +46,14 @@ ENV \ node="nodejs" RUN \ apt-get -y update && \ - apt-get -y install --no-install-recommends apt-transport-https ca-certificates curl wget gnupg && \ + apt-get -y install --no-install-recommends apt-transport-https ca-certificates curl gnupg && \ curl -sSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add --no-tty - && \ echo 'deb https://deb.nodesource.com/node_20.x bullseye main' > /etc/apt/sources.list.d/nodesource.list && \ echo 'deb-src https://deb.nodesource.com/node_20.x bullseye main' >> /etc/apt/sources.list.d/nodesource.list && \ apt-get update -y -o Dir::Etc::sourcelist="sources.list.d/nodesource.list" \ -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" && \ curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \ - wget https://github.com/yarnpkg/yarn/releases/download/v1.22.10/yarn_1.22.10_all.deb && \ + curl -o yarn_1.22.10_all.deb -sSL https://github.com/yarnpkg/yarn/releases/download/v1.22.10/yarn_1.22.10_all.deb && \ dpkg -i yarn_1.22.10_all.deb && \ echo "$(yarn --version)" && \ apt-get -y install --no-install-recommends nodejs && \ From e374b7c9d828276a68ec3d05f1742e9cfb257a81 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:21:37 +0200 Subject: [PATCH 21/42] packaging: perform yarn autoclean and remove huge bunch of unnecessary docs --- Dockerfile.nginx-debian | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index a5a30bf9644..d350963960c 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -71,8 +71,13 @@ RUN pip3 install \ COPY components/ ./components/ RUN \ - cd components && \ - yarn + cd ./components && \ + yarn && \ + yarn autoclean --init && \ + yarn autoclean --force + +# removing unnecessary bootswatch docs +RUN rm -rf /app/static/bootswatch/docs/3 COPY manage.py ./ COPY dojo/ ./dojo/ From ece5ff93c47a566ab69365fcf847e0bf0fa46e02 Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:22:21 +0200 Subject: [PATCH 22/42] packaging: remove setuid/setgid permissions from binaries --- Dockerfile.django-debian | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index d7149a1accb..cf158eff0cb 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -132,7 +132,9 @@ RUN \ mkdir -p media/threat && chown -R ${uid} media && \ # To avoid warning: (staticfiles.W004) The directory '/app/components/node_modules' in the STATICFILES_DIRS setting does not exist. 
mkdir -p components/node_modules && \ - chown ${appuser} components/node_modules + chown ${appuser} components/node_modules && \ + # removing setuid bits + find / -xdev -perm /6000 -type f -exec chmod a-s {} \; || true USER ${uid} ENV \ # Only variables that are not defined in settings.dist.py From 391e62a17ffaf312ca85d67fec013f8782b5496c Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Thu, 13 Oct 2022 10:23:06 +0200 Subject: [PATCH 23/42] packaging: upgrade system packages during image preparation --- Dockerfile.django-debian | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index cf158eff0cb..985686f7cc5 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -10,6 +10,7 @@ FROM base as build WORKDIR /app RUN \ apt-get -y update && \ + apt-get -y upgrade && \ apt-get -y install --no-install-recommends \ gcc \ build-essential \ @@ -44,6 +45,7 @@ ARG appuser=defectdojo ENV appuser ${appuser} RUN \ apt-get -y update && \ + apt-get -y upgrade && \ # ugly fix to install postgresql-client without errors mkdir -p /usr/share/man/man1 /usr/share/man/man7 && \ apt-get -y install --no-install-recommends \ From 1e9df2207e85d6852dfcfa74e832b6584467864c Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Tue, 18 Oct 2022 17:12:46 +0200 Subject: [PATCH 24/42] helm: celery-beat: support mounting Postgres mTLS client certs and fixing client key permissions --- .../templates/celery-beat-deployment.yaml | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/helm/defectdojo/templates/celery-beat-deployment.yaml b/helm/defectdojo/templates/celery-beat-deployment.yaml index 605e41b5b92..14509a05690 100644 --- a/helm/defectdojo/templates/celery-beat-deployment.yaml +++ b/helm/defectdojo/templates/celery-beat-deployment.yaml @@ -64,6 +64,13 @@ spec: path: {{ .hostPath }} {{- end }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: postgresql-tls-volume + secret: + secretName: {{ .Values.postgresql.tls.secretName }} + # we need it permissive to access as low-privileged user + defaultMode: 0644 + {{- end }} containers: {{- if .Values.cloudsql.enabled }} - name: cloudsql-proxy @@ -87,7 +94,14 @@ spec: {{- end }} {{- end }} - command: - - /entrypoint-celery-beat.sh + - /bin/sh + - -cx + - | + {{- if .Values.postgresql.tls.enabled }} + cp -a /run/defectdojo/{{ .Values.postgresql.tls.secretName }}/..data/* /run/defectdojo/ + chmod 600 /run/defectdojo/{{ .Values.postgresql.tls.clientKey }} + {{- end }} + /entrypoint-celery-beat.sh name: celery image: "{{ template "celery.repository" . 
}}:{{ .Values.tag }}" imagePullPolicy: {{ .Values.imagePullPolicy }} @@ -108,6 +122,10 @@ spec: mountPath: {{ .path }} subPath: {{ .subPath }} {{- end }} + {{- if .Values.postgresql.tls.enabled }} + - name: postgresql-tls-volume + mountPath: /run/defectdojo/{{ .Values.postgresql.tls.secretName }} + {{- end }} envFrom: - configMapRef: name: {{ $fullName }} From 7131fa39f161dfbbf194707357cd929625ccb9ce Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Mon, 14 Nov 2022 21:09:15 +0100 Subject: [PATCH 25/42] github: nca-build-docker-image: automate Docker image build and Helm chart packaging --- .github/workflows/nca-build-docker-image.yml | 229 +++++++++++++++++++ 1 file changed, 229 insertions(+) create mode 100644 .github/workflows/nca-build-docker-image.yml diff --git a/.github/workflows/nca-build-docker-image.yml b/.github/workflows/nca-build-docker-image.yml new file mode 100644 index 00000000000..f8c0a1ec3ac --- /dev/null +++ b/.github/workflows/nca-build-docker-image.yml @@ -0,0 +1,229 @@ +name: "NCA: build Docker images and package Helm chart" + +env: + DD_REGISTRY_INTERNAL: "docker-registry-internal.extranet.netcetera.biz" + DD_REGISTRY_INTERNAL_RELEASE: "docker-registry-internal-release.extranet.netcetera.biz" + DD_PATH: "/nca-459-7/defectdojo" + GIT_USERNAME: "DefectDojo NCA release bot" + GIT_EMAIL: "dojo-nca-release-bot@users.noreply.github.com" + HELM_CHARTS_BRANCH_PREFIX: "helm-charts-nca" + +on: + workflow_dispatch: + push: + branch: + # uncomment this (and update to the current major version) if you want an + # image built for each commit + #- tag-2.16.1-NCA + # nca/X naming is used in actions below, adjust accordingly if needed + - nca/feature/* + tag: + - nca/release/* + +# On each push to the aforementioned branches or tags with names matching the +# pattern above, the following happens: +# +# - Depending on Git reference name (branch or tag name) we determine a release +# for a would-be artifact. +# - nca/release/ +# - nca/feature/ +# - tag-VERSION-NCA --> VERSION-nca-GIT_SHA_FIRST_8_CHARS, i.e. 2.16.1-nca-abcdef +# +# - Note that when pushing to nca/feature branch or creating a nca/release tag, +# the artifacts will have the same name (will overwrite the previous +# versions). However, when pushing to tag-VERSION-NCA branch, each resulting +# artifact will be unique. +# +# - We also determine if it is a release build or just a test build. This +# affects on where the resulting image will be pushed to. +# +# - Docker images (nginx and django) are built. They are tagged with release +# determined above and pushed to repositories stated in environment variables +# above. The same image is pushed to both repositories. +# +# - Helm chart is packaged. +# - Helm package is pushed to the release named as 'helm-'. +# - The package is stored under a subdirectory named in the same way as Docker +# image (described above). +# - index.yaml file is pushed to a branch named 'helm-charts-nca-'. +# The branch is created from helm-charts upstream branch. Changes are pushed +# force flag. 
+ + +jobs: + build_images: + name: build and push DefectDojo Docker images + runs-on: ubuntu-latest + strategy: + matrix: + docker-image: [django, nginx] + steps: + - name: set Docker tag to the release + if: ${{ startsWith(github.ref_name, 'nca/release/') }} + run: | + echo "RELEASE_VERSION=${GITHUB_REF_NAME#nca/release/}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL_RELEASE}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL_RELEASE}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}" >> $GITHUB_ENV + + - name: set Docker tag to the feature branch name + if: ${{ startsWith(github.ref_name, 'nca/feature/') }} + run: | + echo "RELEASE_VERSION=${GITHUB_REF_NAME#nca/feature/}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}-${RELEASE_VERSION}" >> $GITHUB_ENV + + - name: set Docker tag to the main branch name and hash + if: ${{ startsWith(github.ref_name, 'tag-') }} + run: | + RELEASE_VERSION="${GITHUB_REF_NAME#tag-}" + echo "RELEASE_VERSION=$(echo $RELEASE_VERSION | tr '[:upper:]' '[:lower:]')-${GITHUB_SHA:0:8}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}-${RELEASE_VERSION}" >> $GITHUB_ENV + + - name: reporting the resulting versions + run: | + echo building docker image tag ${{ env.RELEASE_VERSION }} + echo packaging Helm chart into ${{ env.NCA_HELM_BRANCH }} branch + + - name: checkout the repo + uses: actions/checkout@v3 + + - name: login to Docker repository + uses: docker/login-action@v2 + with: + registry: ${{ env.DD_REGISTRY }} + username: ${{ secrets.NCA_REPO_USERNAME }} + password: ${{ secrets.NCA_REPO_PASSWORD }} + + - name: setup Docker buildx + id: buildx + uses: docker/setup-buildx-action@v2 + + - name: cache Docker layers + uses: actions/cache@v3 + env: + docker-image: ${{ matrix.docker-image }} + with: + path: /tmp/.buildx-cache-${{ env.docker-image }} + key: ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ github.sha }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-buildx-${{ env.docker-image }}-${{ github.sha }} + ${{ runner.os }}-buildx-${{ env.docker-image }}- + + - name: storing Netcetera Active Directory certificate in would-be container + env: + AD_CERT: ${{ secrets.AD_CERT }} + run: echo "$AD_CERT" > docker/certs/ad-ca.crt + + - name: build and push image + uses: docker/build-push-action@v3 + env: + docker-image: ${{ matrix.docker-image }} + with: + push: true + tags: ${{ env.DD_REPO }}/defectdojo-${{ env.docker-image}}:${{ env.RELEASE_VERSION }} + file: ./Dockerfile.${{ env.docker-image }}-debian + context: . 
+ cache-from: type=local,src=/tmp/.buildx-cache-${{ env.docker-image }} + cache-to: type=local,dest=/tmp/.buildx-cache-${{ env.docker-image }} + + package_helm: + name: package Helm chart + runs-on: ubuntu-latest + steps: + # the first steps must be copy-pasted from above + - name: set Docker tag to the release + if: ${{ startsWith(github.ref_name, 'nca/release/') }} + run: | + echo "RELEASE_VERSION=${GITHUB_REF_NAME#nca/release/}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL_RELEASE}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL_RELEASE}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}" >> $GITHUB_ENV + + - name: set Docker tag to the feature branch name + if: ${{ startsWith(github.ref_name, 'nca/feature/') }} + run: | + echo "RELEASE_VERSION=${GITHUB_REF_NAME#nca/feature/}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}-${RELEASE_VERSION}" >> $GITHUB_ENV + + - name: set Docker tag to the main branch name and hash + if: ${{ startsWith(github.ref_name, 'tag-') }} + run: | + RELEASE_VERSION="${GITHUB_REF_NAME#tag-}" + echo "RELEASE_VERSION=$(echo $RELEASE_VERSION | tr '[:upper:]' '[:lower:]')-${GITHUB_SHA:0:8}" >> $GITHUB_ENV + echo "DD_REGISTRY=${DD_REGISTRY_INTERNAL}" >> $GITHUB_ENV + echo "DD_REPO=${DD_REGISTRY_INTERNAL}${DD_PATH}" >> $GITHUB_ENV + echo "NCA_HELM_BRANCH=${HELM_CHARTS_BRANCH_PREFIX}-${RELEASE_VERSION}" >> $GITHUB_ENV + + - name: reporting the resulting versions + run: | + echo building docker image tag ${{ env.RELEASE_VERSION }} + echo packaging Helm chart into ${{ env.NCA_HELM_BRANCH }} branch + + - name: checkout + uses: actions/checkout@v3 + + - name: install Helm + uses: azure/setup-helm@v3 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: configure Helm repos + run: | + helm repo add bitnami https://charts.bitnami.com/bitnami + helm dependency list ./helm/defectdojo + helm dependency update ./helm/defectdojo + + - name: package Helm chart + id: package-helm-chart + run: | + mkdir build + helm package helm/defectdojo/ --destination ./build + echo "chart_version=$(ls build | sed 's|defectdojo-||' | sed 's|\.tgz||')" >> $GITHUB_ENV + app_version=$(helm show chart helm/defectdojo/ | grep appVersion) + echo "app_version=${app_version#appVersion: }" >> $GITHUB_ENV + echo "NCA_HELM_RELEASE=helm-${{ env.RELEASE_VERSION }}" >> $GITHUB_ENV + + - name: create a release + id: create_release + uses: ncipollo/release-action@v1.11.2 + with: + commit: ${{ github.sha }} + tag: ${{ env.NCA_HELM_RELEASE }} + draft: false + prerelease: false + removeArtifacts: true + name: Release ${{ env.NCA_HELM_RELEASE }} + token: ${{ secrets.GITHUB_TOKEN }} + + - name: upload release asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./build/defectdojo-${{ env.chart_version }}.tgz + asset_name: defectdojo-${{ env.chart_version }}.tgz + asset_content_type: application/tar+gzip + + - name: update Helm repository index + run: | + git config --global user.name "${{ env.GIT_USERNAME }}" + git config --global user.email "${{ env.GIT_EMAIL }}" + git remote update + git fetch --all + git checkout -b "${{ env.NCA_HELM_BRANCH }}" origin/helm-charts + if [ ! 
-f ./index.yaml ]; then + helm repo index ./build --url "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/releases/download/${{ env.NCA_HELM_RELEASE }}/" + else + helm repo index ./build --url "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/releases/download/${{ env.NCA_HELM_RELEASE }}/" --merge ./index.yaml + fi + cp -f ./build/index.yaml ./index.yaml + git add ./index.yaml + git commit -m "Update index.yaml" + git push -f -u origin "${{ env.NCA_HELM_BRANCH }}" From 0ea67a4404460901995870ebd78dfed7d3c62932 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 24 Nov 2023 11:35:27 +0100 Subject: [PATCH 26/42] reimporter: fix and rewrite See also discussion in https://github.com/DefectDojo/django-DefectDojo/pull/9050 --- dojo/importers/reimporter/reimporter.py | 462 ++++++++++++------------ unittests/test_import_reimport.py | 2 +- 2 files changed, 228 insertions(+), 236 deletions(-) diff --git a/dojo/importers/reimporter/reimporter.py b/dojo/importers/reimporter/reimporter.py index 107068d11fa..782c88c0d26 100644 --- a/dojo/importers/reimporter/reimporter.py +++ b/dojo/importers/reimporter/reimporter.py @@ -22,6 +22,18 @@ deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") +def add_note_if_not_exists(finding, test, user, text): + existing_note = finding.notes.filter( + entry=text % test.test_type, author=user + ) + if len(existing_note) == 0: + note = Notes( + entry=text % test.test_type, author=user + ) + note.save() + finding.notes.add(note) + + class DojoDefaultReImporter(object): @dojo_async_task @app.task(ignore_result=False) @@ -46,20 +58,18 @@ def process_parsed_findings( **kwargs, ): - items = parsed_findings - original_items = list(test.finding_set.all()) - new_items = [] + reimported_findings = parsed_findings + original_findings = list(test.finding_set.all()) + new_findings = [] finding_count = 0 - finding_added_count = 0 reactivated_count = 0 reactivated_items = [] unchanged_count = 0 - unchanged_items = [] + unchanged_findings = [] - logger.debug("starting reimport of %i items.", len(items) if items else 0) + logger.debug("starting reimport of %i items.", len(reimported_findings) if reimported_findings else 0) deduplication_algorithm = test.deduplication_algorithm - i = 0 group_names_to_findings_dict = {} logger.debug( "STEP 1: looping over findings from the reimported report and trying to match them to existing findings" @@ -68,35 +78,35 @@ def process_parsed_findings( "Algorithm used for matching new findings to existing findings: %s", deduplication_algorithm, ) - for item in items: + for index, reimported_finding in enumerate(reimported_findings): # FIXME hack to remove when all parsers have unit tests for this attribute - if item.severity.lower().startswith("info") and item.severity != "Info": - item.severity = "Info" + if reimported_finding.severity.lower().startswith("info") and reimported_finding.severity != "Info": + reimported_finding.severity = "Info" - item.numerical_severity = Finding.get_numerical_severity(item.severity) + reimported_finding.numerical_severity = Finding.get_numerical_severity(reimported_finding.severity) if minimum_severity and ( - Finding.SEVERITIES[item.severity] > Finding.SEVERITIES[minimum_severity] + Finding.SEVERITIES[reimported_finding.severity] > Finding.SEVERITIES[minimum_severity] ): # finding's severity is below the configured threshold : ignoring the finding continue # existing findings may be from before we had component_name/version fields component_name = ( - item.component_name if hasattr(item, "component_name") else 
None + reimported_finding.component_name if hasattr(reimported_finding, "component_name") else None ) component_version = ( - item.component_version if hasattr(item, "component_version") else None + reimported_finding.component_version if hasattr(reimported_finding, "component_version") else None ) - if not hasattr(item, "test"): - item.test = test + if not hasattr(reimported_finding, "test"): + reimported_finding.test = test if service: - item.service = service + reimported_finding.service = service - if item.dynamic_finding: - for e in item.unsaved_endpoints: + if reimported_finding.dynamic_finding: + for e in reimported_finding.unsaved_endpoints: try: e.clean() except ValidationError as err: @@ -105,257 +115,243 @@ def process_parsed_findings( "{}".format(err) ) - item.hash_code = item.compute_hash_code() - deduplicationLogger.debug("item's hash_code: %s", item.hash_code) + reimported_finding.hash_code = reimported_finding.compute_hash_code() + deduplicationLogger.debug("item's hash_code: %s", reimported_finding.hash_code) - findings = reimporter_utils.match_new_finding_to_existing_finding( - item, test, deduplication_algorithm + existing_findings = reimporter_utils.match_new_finding_to_existing_finding( + reimported_finding, test, deduplication_algorithm ) deduplicationLogger.debug( - "found %i findings matching with current new finding", len(findings) + "found %i findings matching with current new finding", len(existing_findings) ) - if findings: - # existing finding found - finding = findings[0] - if finding.false_p or finding.out_of_scope or finding.risk_accepted: - logger.debug( - "%i: skipping existing finding (it is marked as false positive:%s and/or out of scope:%s or is a risk accepted:%s): %i:%s:%s:%s", - i, - finding.false_p, - finding.out_of_scope, - finding.risk_accepted, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - if ( - finding.false_p == item.false_p - and finding.out_of_scope == item.out_of_scope - and finding.risk_accepted == item.risk_accepted - ): - unchanged_items.append(finding) - unchanged_count += 1 - continue - elif finding.is_mitigated: + if existing_findings: + # existing findings found + existing_finding = existing_findings[0] + if existing_finding.is_mitigated: # if the reimported item has a mitigation time, we can compare - if item.is_mitigated: - unchanged_items.append(finding) + if reimported_finding.is_mitigated: + unchanged_findings.append(existing_finding) unchanged_count += 1 - if item.mitigated: + if reimported_finding.mitigated: logger.debug( "item mitigated time: " - + str(item.mitigated.timestamp()) + + str(reimported_finding.mitigated.timestamp()) ) logger.debug( "finding mitigated time: " - + str(finding.mitigated.timestamp()) + + str(existing_finding.mitigated.timestamp()) ) - if ( - item.mitigated.timestamp() - == finding.mitigated.timestamp() - ): + if reimported_finding.mitigated.timestamp() == existing_finding.mitigated.timestamp(): logger.debug( "New imported finding and already existing finding have the same mitigation date, will skip as they are the same." 
) - continue - if ( - item.mitigated.timestamp() - != finding.mitigated.timestamp() - ): + else: logger.debug( "New imported finding and already existing finding are both mitigated but have different dates, not taking action" ) # TODO: implement proper date-aware reimporting mechanism, if an imported finding is closed more recently than the defectdojo finding, then there might be details in the scanner that should be added - continue - else: - # even if there is no mitigation time, skip it, because both the current finding and the reimported finding are is_mitigated - continue + # existing_finding is mitigated but reimported_finding is not else: - if not do_not_reactivate: - logger.debug( - "%i: reactivating: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - finding.mitigated = None - finding.is_mitigated = False - finding.mitigated_by = None - finding.active = True - if verified is not None: - finding.verified = verified if do_not_reactivate: logger.debug( "%i: skipping reactivating by user's choice do_not_reactivate: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - existing_note = finding.notes.filter( - entry="Finding has skipped reactivation from %s re-upload with user decision do_not_reactivate." - % scan_type, - author=user, + index, + existing_finding.id, + existing_finding, + existing_finding.component_name, + existing_finding.component_version, ) - if len(existing_note) == 0: + add_note_if_not_exists(existing_finding, test, user, "Finding has skipped reactivation from %s re-upload with user decision do_not_reactivate.") + existing_finding.save(dedupe_option=False) + else: + # i.e. Reactivate findings + if existing_finding.false_p or existing_finding.out_of_scope or existing_finding.risk_accepted: + # If the existing_finding in DD is in one of the above states, + # we no longer sync the scanners state similar to do_not_reactivate=True + unchanged_findings.append(existing_finding) + unchanged_count += 1 + else: + logger.debug( + "%i: reactivating: %i:%s:%s:%s", + index, + existing_finding.id, + existing_finding, + existing_finding.component_name, + existing_finding.component_version, + ) + existing_finding.mitigated = None + existing_finding.is_mitigated = False + existing_finding.mitigated_by = None + existing_finding.active = True + if verified is not None: + existing_finding.verified = verified + # existing findings may be from before we had component_name/version fields + existing_finding.component_name = ( + existing_finding.component_name + if existing_finding.component_name + else component_name + ) + existing_finding.component_version = ( + existing_finding.component_version + if existing_finding.component_version + else component_version + ) + + # don't dedupe before endpoints are added + existing_finding.save(dedupe_option=False) note = Notes( - entry="Finding has skipped reactivation from %s re-upload with user decision do_not_reactivate." - % scan_type, - author=user, + entry="Re-activated by %s re-upload." 
% scan_type, author=user ) note.save() - finding.notes.add(note) - finding.save(dedupe_option=False) - continue - # existing findings may be from before we had component_name/version fields - finding.component_name = ( - finding.component_name - if finding.component_name - else component_name - ) - finding.component_version = ( - finding.component_version - if finding.component_version - else component_version - ) - # don't dedupe before endpoints are added - finding.save(dedupe_option=False) - note = Notes( - entry="Re-activated by %s re-upload." % scan_type, author=user - ) - note.save() - - endpoint_statuses = finding.status_finding.exclude( - Q(false_positive=True) - | Q(out_of_scope=True) - | Q(risk_accepted=True) - ) - reimporter_utils.chunk_endpoints_and_reactivate(endpoint_statuses) + endpoint_statuses = existing_finding.status_finding.exclude( + Q(false_positive=True) + | Q(out_of_scope=True) + | Q(risk_accepted=True) + ) + reimporter_utils.chunk_endpoints_and_reactivate(endpoint_statuses) - finding.notes.add(note) - reactivated_items.append(finding) - reactivated_count += 1 + existing_finding.notes.add(note) + reactivated_items.append(existing_finding) + reactivated_count += 1 + # Existing finding is not mitigated else: - # if finding associated to new item is none of risk accepted, mitigated, false positive or out of scope - # existing findings may be from before we had component_name/version fields logger.debug( - "%i: updating existing finding: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, + "Reimported item matches a finding that is currently open." ) - if not (finding.mitigated and finding.is_mitigated): + if reimported_finding.is_mitigated: logger.debug( - "Reimported item matches a finding that is currently open." + "Reimported mitigated item matches a finding that is currently open, closing." ) - if item.is_mitigated: - logger.debug( - "Reimported mitigated item matches a finding that is currently open, closing." - ) - # TODO: Implement a date comparison for opened defectdojo findings before closing them by reimporting, as they could be force closed by the scanner but a DD user forces it open ? - logger.debug( - "%i: closing: %i:%s:%s:%s", - i, - finding.id, - finding, - finding.component_name, - finding.component_version, - ) - finding.mitigated = item.mitigated - finding.is_mitigated = True - finding.mitigated_by = item.mitigated_by - finding.active = False - if verified is not None: - finding.verified = verified - elif item.risk_accepted or item.false_p or item.out_of_scope: - logger.debug('Reimported mitigated item matches a finding that is currently open, closing.') - logger.debug('%i: closing: %i:%s:%s:%s', i, finding.id, finding, finding.component_name, finding.component_version) - finding.risk_accepted = item.risk_accepted - finding.false_p = item.false_p - finding.out_of_scope = item.out_of_scope - finding.active = False + # TODO: Implement a date comparison for opened defectdojo findings before closing them by reimporting, as they could be force closed by the scanner but a DD user forces it open ? 
+ logger.debug( + "%i: closing: %i:%s:%s:%s", + index, + existing_finding.id, + existing_finding, + existing_finding.component_name, + existing_finding.component_version, + ) + existing_finding.mitigated = reimported_finding.mitigated + existing_finding.is_mitigated = True + existing_finding.mitigated_by = reimported_finding.mitigated_by + existing_finding.active = False + if verified is not None: + existing_finding.verified = verified + add_note_if_not_exists(existing_finding, test, user, "Mitigated by %s re-upload.") + existing_finding.save(dedupe_option=False) + # reimported_finding is not mitigated but is risk accepted by the scanner + elif reimported_finding.risk_accepted: + # A risk accepted finding is not explicitly mitigated, so we need to add it to avoid mitigation + # as otherwise it will get mitigated in close_old_findings + # keeps https://github.com/DefectDojo/django-DefectDojo/pull/7447 behaviour the same + unchanged_findings.append(existing_finding) + unchanged_count += 1 + if not existing_finding.risk_accepted: + # If the existing_finding in DD is not risk accepted + # then we risk accept it and set it to inactive + logger.debug('Reimported risk_accepted item matches ' + 'a finding that is currently not risk_accepted.') + logger.debug('%i: risk accepting: %i:%s:%s:%s', index, existing_finding.id, + existing_finding, existing_finding.component_name, + existing_finding.component_version) + existing_finding.risk_accepted = reimported_finding.risk_accepted + existing_finding.active = False if verified is not None: - finding.verified = verified - else: - # if finding is the same but list of affected was changed, finding is marked as unchanged. This is a known issue - unchanged_items.append(finding) - unchanged_count += 1 + existing_finding.verified = verified + note = Notes( + entry="Risk accepted by %s re-upload." % test.test_type, author=user + ) + note.save() + existing_finding.notes.add(note) + existing_finding.save(dedupe_option=False) + # If the scanner says the reimported_finding is either + # (false positive or out of scope but not risk accepted or mitigated) + # we take over these values and close the finding + elif reimported_finding.false_p or reimported_finding.out_of_scope: + logger.debug('Reimported false positive or out of scope' + ' item matches a finding that is currently open, closing.') + logger.debug('%i: closing: %i:%s:%s:%s', index, existing_finding.id, existing_finding, existing_finding.component_name, existing_finding.component_version) + existing_finding.false_p = reimported_finding.false_p + existing_finding.out_of_scope = reimported_finding.out_of_scope + existing_finding.active = False + if verified is not None: + existing_finding.verified = verified + # because existing_finding is not added to unchanged_items, + # it will get mitigated in close_old_findings + else: + # if finding is the same but list of affected was changed, + # finding is marked as unchanged. 
This is a known issue + unchanged_findings.append(existing_finding) + unchanged_count += 1 - if (component_name is not None and not finding.component_name) or ( - component_version is not None and not finding.component_version + if (component_name is not None and not existing_finding.component_name) or ( + component_version is not None and not existing_finding.component_version ): - finding.component_name = ( - finding.component_name - if finding.component_name + existing_finding.component_name = ( + existing_finding.component_name + if existing_finding.component_name else component_name ) - finding.component_version = ( - finding.component_version - if finding.component_version + existing_finding.component_version = ( + existing_finding.component_version + if existing_finding.component_version else component_version ) - finding.save(dedupe_option=False) + existing_finding.save(dedupe_option=False) - if finding.dynamic_finding: + if existing_finding.dynamic_finding: logger.debug( "Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints" ) - reimporter_utils.update_endpoint_status(finding, item, user) + reimporter_utils.update_endpoint_status(existing_finding, reimported_finding, user) else: - # no existing finding found - item.reporter = user - item.last_reviewed = timezone.now() - item.last_reviewed_by = user + # no existing finding, found + reimported_finding.reporter = user + reimported_finding.last_reviewed = timezone.now() + reimported_finding.last_reviewed_by = user if active is not None: # indicates an override. Otherwise, do not change the value of item.active - item.active = active + reimported_finding.active = active if verified is not None: # indicates an override. Otherwise, do not change the value of verified - item.verified = verified + reimported_finding.verified = verified # if scan_date was provided, override value from parser if scan_date: - item.date = scan_date.date() + reimported_finding.date = scan_date.date() # Save it. Don't dedupe before endpoints are added. 
- item.save(dedupe_option=False) + reimported_finding.save(dedupe_option=False) logger.debug( "%i: reimport created new finding as no existing finding match: %i:%s:%s:%s", - i, - item.id, - item, - item.component_name, - item.component_version, + index, + reimported_finding.id, + reimported_finding, + reimported_finding.component_name, + reimported_finding.component_version, ) # only new items get auto grouped to avoid confusion around already existing items that are already grouped if is_finding_groups_enabled() and group_by: # If finding groups are enabled, group all findings by group name - name = finding_helper.get_group_by_group_name(item, group_by) + name = finding_helper.get_group_by_group_name(reimported_finding, group_by) if name is not None: if name in group_names_to_findings_dict: - group_names_to_findings_dict[name].append(item) + group_names_to_findings_dict[name].append(reimported_finding) else: - group_names_to_findings_dict[name] = [item] + group_names_to_findings_dict[name] = [reimported_finding] - finding_added_count += 1 - new_items.append(item) - finding = item + new_findings.append(reimported_finding) + existing_finding = reimported_finding - if hasattr(item, "unsaved_req_resp"): - for req_resp in item.unsaved_req_resp: + if hasattr(reimported_finding, "unsaved_req_resp"): + for req_resp in reimported_finding.unsaved_req_resp: burp_rr = BurpRawRequestResponse( - finding=finding, + finding=existing_finding, burpRequestBase64=base64.b64encode( req_resp["req"].encode("utf-8") ), @@ -366,35 +362,35 @@ def process_parsed_findings( burp_rr.clean() burp_rr.save() - if item.unsaved_request and item.unsaved_response: + if reimported_finding.unsaved_request and reimported_finding.unsaved_response: burp_rr = BurpRawRequestResponse( - finding=finding, + finding=existing_finding, burpRequestBase64=base64.b64encode( - item.unsaved_request.encode() + reimported_finding.unsaved_request.encode() ), burpResponseBase64=base64.b64encode( - item.unsaved_response.encode() + reimported_finding.unsaved_response.encode() ), ) burp_rr.clean() burp_rr.save() # for existing findings: make sure endpoints are present or created - if finding: + if existing_finding: finding_count += 1 importer_utils.chunk_endpoints_and_disperse( - finding, test, item.unsaved_endpoints + existing_finding, test, reimported_finding.unsaved_endpoints ) if endpoints_to_add: importer_utils.chunk_endpoints_and_disperse( - finding, test, endpoints_to_add + existing_finding, test, endpoints_to_add ) - if item.unsaved_tags: - finding.tags = item.unsaved_tags + if reimported_finding.unsaved_tags: + existing_finding.tags = reimported_finding.unsaved_tags - if item.unsaved_files: - for unsaved_file in item.unsaved_files: + if reimported_finding.unsaved_files: + for unsaved_file in reimported_finding.unsaved_files: data = base64.b64decode(unsaved_file.get("data")) title = unsaved_file.get("title", "") ( @@ -405,30 +401,30 @@ def process_parsed_findings( ) file_upload.file.save(title, ContentFile(data)) file_upload.save() - finding.files.add(file_upload) + existing_finding.files.add(file_upload) - if finding.unsaved_vulnerability_ids: - importer_utils.handle_vulnerability_ids(finding) + if existing_finding.unsaved_vulnerability_ids: + importer_utils.handle_vulnerability_ids(existing_finding) # existing findings may be from before we had component_name/version fields - finding.component_name = ( - finding.component_name if finding.component_name else component_name + existing_finding.component_name = ( + 
existing_finding.component_name if existing_finding.component_name else component_name ) - finding.component_version = ( - finding.component_version - if finding.component_version + existing_finding.component_version = ( + existing_finding.component_version + if existing_finding.component_version else component_version ) # finding = new finding or existing finding still in the upload report # to avoid pushing a finding group multiple times, we push those outside of the loop if is_finding_groups_enabled() and group_by: - finding.save() + existing_finding.save() else: - finding.save(push_to_jira=push_to_jira) + existing_finding.save(push_to_jira=push_to_jira) to_mitigate = ( - set(original_items) - set(reactivated_items) - set(unchanged_items) + set(original_findings) - set(reactivated_items) - set(unchanged_findings) ) # due to #3958 we can have duplicates inside the same report # this could mean that a new finding is created and right after @@ -436,21 +432,21 @@ def process_parsed_findings( # following finding in the same report # this means untouched can have this finding inside it, # while it is in fact a new finding. So we substract new_items - untouched = set(unchanged_items) - set(to_mitigate) - set(new_items) + untouched = set(unchanged_findings) - set(to_mitigate) - set(new_findings) - for (group_name, findings) in group_names_to_findings_dict.items(): - finding_helper.add_findings_to_auto_group(group_name, findings, group_by, create_finding_groups_for_all_findings, **kwargs) + for (group_name, existing_findings) in group_names_to_findings_dict.items(): + finding_helper.add_findings_to_auto_group(group_name, existing_findings, group_by, create_finding_groups_for_all_findings, **kwargs) if push_to_jira: - if findings[0].finding_group is not None: - jira_helper.push_to_jira(findings[0].finding_group) + if existing_findings[0].finding_group is not None: + jira_helper.push_to_jira(existing_findings[0].finding_group) else: - jira_helper.push_to_jira(findings[0]) + jira_helper.push_to_jira(existing_findings[0]) if is_finding_groups_enabled() and push_to_jira: for finding_group in set( [ finding.finding_group - for finding in reactivated_items + unchanged_items + for finding in reactivated_items + unchanged_findings if finding.finding_group is not None and not finding.is_mitigated ] ): @@ -465,7 +461,7 @@ def process_parsed_findings( finding, ], ) - for finding in new_items + for finding in new_findings ] serialized_reactivated_items = [ serializers.serialize( @@ -501,7 +497,7 @@ def process_parsed_findings( serialized_untouched, ) - return new_items, reactivated_items, to_mitigate, untouched + return new_findings, reactivated_items, to_mitigate, untouched def close_old_findings( self, test, to_mitigate, scan_date_time, user, push_to_jira=None @@ -528,11 +524,7 @@ def close_old_findings( else: finding.save(push_to_jira=push_to_jira, dedupe_option=False) - note = Notes( - entry="Mitigated by %s re-upload." 
% test.test_type, author=user - ) - note.save() - finding.notes.add(note) + add_note_if_not_exists(finding, test, user, "Mitigated by %s re-upload.") mitigated_findings.append(finding) if is_finding_groups_enabled() and push_to_jira: diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index d765377b8f6..f185a4f10b9 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -1087,7 +1087,7 @@ def test_import_reimport_keep_false_positive_and_out_of_scope(self): active_findings_before = self.get_test_findings_api(test_id, active=True) self.assert_finding_count_json(0, active_findings_before) - with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1): + with assertTestImportModelsCreated(self, reimports=1, affected_findings=1, created=1, untouched=3): reimport0 = self.reimport_scan_with_params(test_id, self.zap_sample0_filename) self.assertEqual(reimport0['test'], test_id) From 2ca5f906b28b7a438a506cda3c81ad75e2c9d9ff Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Wed, 7 Feb 2024 14:55:09 +0100 Subject: [PATCH 27/42] added healthcheck middleware for ALBs --- dojo/middleware.py | 19 +++++++++++++++++++ dojo/settings/settings.dist.py | 1 + helm/defectdojo/values.yaml | 2 ++ nginx/nginx.conf | 8 +++++++- nginx/nginx_TLS.conf | 7 +++++++ 5 files changed, 36 insertions(+), 1 deletion(-) diff --git a/dojo/middleware.py b/dojo/middleware.py index 733c66f4cd4..8ca34a3d7cb 100644 --- a/dojo/middleware.py +++ b/dojo/middleware.py @@ -6,6 +6,7 @@ from threading import local from django.db import models from django.urls import reverse +from django.http import HttpResponse logger = logging.getLogger(__name__) @@ -164,3 +165,21 @@ def __init__(self, get_response): def __call__(self, request): request.META.update(settings.ADDITIONAL_HEADERS) return self.get_response(request) + + +class HealthCheckMiddleware: + """ + Middleware that will allow for a healthcheck to return UP without the caller being in the + DJANGO ALLOWED_HOSTS list. 
Needed for AWS ALB healthchecks and improves general k8 healthchecks + """ + + def __init__(self, get_response): + + self.get_response = get_response + + def __call__(self, request): + if request.META['PATH_INFO'] == '/health': + return HttpResponse('UP!') + else: + response = self.get_response(request) + return response diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 5abd792004d..7e9c3232d65 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -922,6 +922,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # MIDDLEWARE # ------------------------------------------------------------------------------ DJANGO_MIDDLEWARE_CLASSES = [ + 'dojo.middleware.HealthCheckMiddleware', 'django.middleware.common.CommonMiddleware', 'dojo.middleware.APITrailingSlashMiddleware', 'dojo.middleware.DojoSytemSettingsMiddleware', diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index c9b098b7706..4ee27a4fd92 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -201,6 +201,8 @@ django: # Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts # nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" + # specific for AWS deployments Defectdojo has the /health endpoint for ALB healthchecks + # alb.ingress.kubernetes.io/healthcheck-path: /health nginx: tls: enabled: false diff --git a/nginx/nginx.conf b/nginx/nginx.conf index aaa62e7e431..f0368ed55e4 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -72,7 +72,13 @@ http { include /etc/nginx/wsgi_params; access_log off; } - + # Used by AWS ALB health checks + location = /health { + limit_except GET { deny all; } + include /run/defectdojo/uwsgi_pass; + include /etc/nginx/wsgi_params; + access_log off; + } error_page 500 502 503 504 /50x.html; } diff --git a/nginx/nginx_TLS.conf b/nginx/nginx_TLS.conf index 59edae6e9c0..cac7a890404 100644 --- a/nginx/nginx_TLS.conf +++ b/nginx/nginx_TLS.conf @@ -134,6 +134,13 @@ http { include /etc/nginx/wsgi_params; access_log off; } + # Used by AWS ALB health checks + location = /health { + limit_except GET { deny all; } + include /run/defectdojo/uwsgi_pass; + include /etc/nginx/wsgi_params; + access_log off; + } error_page 500 502 503 504 /50x.html; } } From fc5d598fa94fe19a0a362fe915d806ac3fb34a0a Mon Sep 17 00:00:00 2001 From: Pavel Nakonechnyi Date: Fri, 1 Mar 2024 09:40:21 +0100 Subject: [PATCH 28/42] github workflows: sync nca workflow with the current upstream implementation --- .github/workflows/nca-build-docker-image.yml | 40 ++++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/nca-build-docker-image.yml b/.github/workflows/nca-build-docker-image.yml index f8c0a1ec3ac..44a4c60a66a 100644 --- a/.github/workflows/nca-build-docker-image.yml +++ b/.github/workflows/nca-build-docker-image.yml @@ -57,6 +57,8 @@ jobs: strategy: matrix: docker-image: [django, nginx] + os: [debian] + platform: [amd64] steps: - name: set Docker tag to the release if: ${{ startsWith(github.ref_name, 'nca/release/') }} @@ -89,10 +91,10 @@ jobs: echo packaging Helm chart into ${{ env.NCA_HELM_BRANCH }} branch - name: checkout the repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: login to Docker repository - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ${{ 
env.DD_REGISTRY }} username: ${{ secrets.NCA_REPO_USERNAME }} @@ -100,10 +102,10 @@ jobs: - name: setup Docker buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: cache Docker layers - uses: actions/cache@v3 + uses: actions/cache@v4 env: docker-image: ${{ matrix.docker-image }} with: @@ -119,7 +121,7 @@ jobs: run: echo "$AD_CERT" > docker/certs/ad-ca.crt - name: build and push image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v5 env: docker-image: ${{ matrix.docker-image }} with: @@ -166,10 +168,14 @@ jobs: echo packaging Helm chart into ${{ env.NCA_HELM_BRANCH }} branch - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: install Helm uses: azure/setup-helm@v3 + with: + version: v3.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -191,32 +197,26 @@ jobs: - name: create a release id: create_release - uses: ncipollo/release-action@v1.11.2 + uses: softprops/action-gh-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_REPOSITORY: netceteragroup/django-DefectDojo with: - commit: ${{ github.sha }} - tag: ${{ env.NCA_HELM_RELEASE }} draft: false prerelease: false - removeArtifacts: true + files: ./build/defectdojo-${{ env.chart_version }}.tgz name: Release ${{ env.NCA_HELM_RELEASE }} + tag_name: ${{ env.NCA_HELM_RELEASE }} + target_commitish: ${{ github.sha }} token: ${{ secrets.GITHUB_TOKEN }} - - name: upload release asset - uses: actions/upload-release-asset@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ./build/defectdojo-${{ env.chart_version }}.tgz - asset_name: defectdojo-${{ env.chart_version }}.tgz - asset_content_type: application/tar+gzip - - name: update Helm repository index run: | git config --global user.name "${{ env.GIT_USERNAME }}" git config --global user.email "${{ env.GIT_EMAIL }}" git remote update git fetch --all + git stash git checkout -b "${{ env.NCA_HELM_BRANCH }}" origin/helm-charts if [ ! -f ./index.yaml ]; then helm repo index ./build --url "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/releases/download/${{ env.NCA_HELM_RELEASE }}/" From ee62ae262b7891ae66b5d93da3646ca91e84e487 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 8 Mar 2024 16:43:42 +0100 Subject: [PATCH 29/42] changed Oauth to oicd plugin and configured it.... 
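
This switches login from the Keycloak-specific OAuth2 backend to a generic
OpenID Connect backend: the discovery endpoint is read from the new
DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT variable, the existing
DD_SOCIAL_AUTH_KEYCLOAK_KEY / DD_SOCIAL_AUTH_KEYCLOAK_SECRET variables are
reused as the OIDC client credentials, and
DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED remains the feature toggle.

A minimal configuration sketch (the endpoint URL and client values below are
placeholders, not values taken from this patch series):

    # placeholder values - point the endpoint at your own Keycloak realm
    DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED=True
    DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT=https://keycloak.example.com/realms/example
    DD_SOCIAL_AUTH_KEYCLOAK_KEY=defectdojo
    DD_SOCIAL_AUTH_KEYCLOAK_SECRET=<client secret>
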
--- dojo/settings/settings.dist.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 7e9c3232d65..3381a930e35 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -534,6 +534,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param 'social_core.backends.azuread_tenant.AzureADTenantOAuth2', 'social_core.backends.gitlab.GitLabOAuth2', 'social_core.backends.keycloak.KeycloakOAuth2', + 'social_core.backends.keycloak.OpenIdConnectAuth', 'social_core.backends.github_enterprise.GithubEnterpriseOAuth2', 'dojo.remote_user.RemoteUserBackend', 'django.contrib.auth.backends.RemoteUserBackend', @@ -631,13 +632,16 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param SOCIAL_AUTH_TRAILING_SLASH = env('DD_SOCIAL_AUTH_TRAILING_SLASH') KEYCLOAK_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED') -SOCIAL_AUTH_KEYCLOAK_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_KEY') -SOCIAL_AUTH_KEYCLOAK_SECRET = env('DD_SOCIAL_AUTH_KEYCLOAK_SECRET') -SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY') -SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL') -SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL') +SOCIAL_AUTH_OIDC_OIDC_ENDPOINT = env("DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT") +SOCIAL_AUTH_OIDC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_KEY') +SOCIAL_AUTH_OIDC_SECRET = env('DD_SOCIAL_AUTH_KEYCLOAK_SECRET') SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT = env('DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT') +# SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY') +# SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL') +# SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL') + + GITHUB_ENTERPRISE_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_OAUTH2_ENABLED') SOCIAL_AUTH_GITHUB_ENTERPRISE_URL = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_URL') SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL = env('DD_SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL') From eb9710a4f659f00f796c6d6bc28abe1148381f96 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Mon, 11 Mar 2024 17:15:54 +0100 Subject: [PATCH 30/42] changed to single quotes --- dojo/settings/settings.dist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 3381a930e35..2783612a081 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -632,7 +632,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param SOCIAL_AUTH_TRAILING_SLASH = env('DD_SOCIAL_AUTH_TRAILING_SLASH') KEYCLOAK_OAUTH2_ENABLED = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED') -SOCIAL_AUTH_OIDC_OIDC_ENDPOINT = env("DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT") +SOCIAL_AUTH_OIDC_OIDC_ENDPOINT = env('DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT') SOCIAL_AUTH_OIDC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_KEY') SOCIAL_AUTH_OIDC_SECRET = env('DD_SOCIAL_AUTH_KEYCLOAK_SECRET') SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT = env('DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT') From 1e42642f033bd6a45050a5e2ddd5ec05b8cfd33d Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Mon, 11 Mar 2024 17:21:44 +0100 Subject: [PATCH 31/42] define the new variable --- dojo/settings/settings.dist.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dojo/settings/settings.dist.py 
b/dojo/settings/settings.dist.py index 2783612a081..62238a27e3f 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -133,6 +133,7 @@ DD_SOCIAL_AUTH_GITLAB_API_URL=(str, 'https://gitlab.com'), DD_SOCIAL_AUTH_GITLAB_SCOPE=(list, ['read_user', 'openid']), DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED=(bool, False), + DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT=(str, ''), DD_SOCIAL_AUTH_KEYCLOAK_KEY=(str, ''), DD_SOCIAL_AUTH_KEYCLOAK_SECRET=(str, ''), DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY=(str, ''), From 38951349c444d2961c4c75c8eed10dc88ee07e6b Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Mon, 11 Mar 2024 17:36:36 +0100 Subject: [PATCH 32/42] remove keycloak dependency instead try and use OICD --- dojo/settings/settings.dist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 62238a27e3f..d9c9bd5a75a 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -534,7 +534,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param 'dojo.okta.OktaOAuth2', 'social_core.backends.azuread_tenant.AzureADTenantOAuth2', 'social_core.backends.gitlab.GitLabOAuth2', - 'social_core.backends.keycloak.KeycloakOAuth2', + # 'social_core.backends.keycloak.KeycloakOAuth2', 'social_core.backends.keycloak.OpenIdConnectAuth', 'social_core.backends.github_enterprise.GithubEnterpriseOAuth2', 'dojo.remote_user.RemoteUserBackend', From fb7ee48c4a634aa7a2e1a08484dc97e3d672023b Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Mon, 11 Mar 2024 17:54:05 +0100 Subject: [PATCH 33/42] oops completely wrong class name --- dojo/settings/settings.dist.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index d9c9bd5a75a..c9ff5abdbbc 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -535,7 +535,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param 'social_core.backends.azuread_tenant.AzureADTenantOAuth2', 'social_core.backends.gitlab.GitLabOAuth2', # 'social_core.backends.keycloak.KeycloakOAuth2', - 'social_core.backends.keycloak.OpenIdConnectAuth', + 'social_core.backends.open_id_connect.OpenIdConnectAuth', 'social_core.backends.github_enterprise.GithubEnterpriseOAuth2', 'dojo.remote_user.RemoteUserBackend', 'django.contrib.auth.backends.RemoteUserBackend', From 195e10c3305f1a518d6cf51d8b0383b43021ae4d Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Thu, 14 Mar 2024 17:40:52 +0100 Subject: [PATCH 34/42] set oidc instead of keycloak for redirect on view --- dojo/user/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/user/views.py b/dojo/user/views.py index f021aa00460..3bd7e41cb6e 100644 --- a/dojo/user/views.py +++ b/dojo/user/views.py @@ -127,7 +127,7 @@ def login_view(request): elif settings.GITLAB_OAUTH2_ENABLED: social_auth = 'gitlab' elif settings.KEYCLOAK_OAUTH2_ENABLED: - social_auth = 'keycloak' + social_auth = 'oidc' elif settings.AUTH0_OAUTH2_ENABLED: social_auth = 'auth0' elif settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED: From 178817f4eebbaf7887606dadedb8a1c262d22c92 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Thu, 14 Mar 2024 17:41:08 +0100 Subject: [PATCH 35/42] experiment to see what is in the token and response --- dojo/pipeline.py | 8 ++++++++ dojo/settings/settings.dist.py | 1 + 2 files changed, 9 insertions(+) diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 
0ce76220e98..5756e7777d5 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -65,6 +65,14 @@ def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): pass +def update_keycloak_groups(backend, uid, user=None, social=None, *args, **kwargs): + if settings.KEYCLOAK_OAUTH2_ENABLED: + soc = user.social_auth.order_by("-created").first() + token = soc.extra_data['access_token'] + print("accesstoken: " + str(token)) + print("response raw: " + str(kwargs['response'])) + + def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): if settings.AZUREAD_TENANT_OAUTH2_ENABLED and settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and isinstance(backend, AzureADTenantOAuth2): # In some wild cases, there could be two social auth users diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index c9ff5abdbbc..9574e5a3c6a 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -572,6 +572,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param 'social_core.pipeline.social_auth.load_extra_data', 'social_core.pipeline.user.user_details', 'dojo.pipeline.update_azure_groups', + 'dojo.pipeline.update_keycloak_groups', 'dojo.pipeline.update_product_access', ) From 172642ed191872173986960fc62c783290e063fb Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 08:42:49 +0100 Subject: [PATCH 36/42] fix button link --- dojo/templates/dojo/login.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/templates/dojo/login.html b/dojo/templates/dojo/login.html index 55da0d7f7d5..42ac84368a7 100644 --- a/dojo/templates/dojo/login.html +++ b/dojo/templates/dojo/login.html @@ -88,7 +88,7 @@

{% trans "Login" %}

{% if KEYCLOAK_ENABLED is True %} {% endif %} From b647ba124b0eb9ff8c821dc47394163928475c00 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 09:11:15 +0100 Subject: [PATCH 37/42] let's sync some groups --- dojo/pipeline.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 5756e7777d5..7ad8bf3ca4f 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -66,12 +66,19 @@ def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): def update_keycloak_groups(backend, uid, user=None, social=None, *args, **kwargs): - if settings.KEYCLOAK_OAUTH2_ENABLED: + if settings.KEYCLOAK_OAUTH2_ENABLED: #need another setting to enable syncing soc = user.social_auth.order_by("-created").first() token = soc.extra_data['access_token'] print("accesstoken: " + str(token)) print("response raw: " + str(kwargs['response'])) - + if 'groups' not in kwargs['response'] or kwargs['response']['groups'] == "": + logger.warning("No groups in response. Stopping to update groups of user based on azureAD") + return + group_IDs = kwargs['response']['groups'] # probably need another setting with a regex ? + if len(group_IDs) > 0: + assign_user_to_groups(user, group_IDs, 'Keycloak') + if settings.AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS: + cleanup_old_groups_for_user(user, group_IDs) def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): if settings.AZUREAD_TENANT_OAUTH2_ENABLED and settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and isinstance(backend, AzureADTenantOAuth2): # In some wild cases, there could be two social auth users From b2bd939919d7ef1cb4fcc78102ccf25448639115 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 09:46:56 +0100 Subject: [PATCH 38/42] add Keycloak as a social provider choice for group import --- dojo/models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dojo/models.py b/dojo/models.py index 2440ddd9d97..5d6c3997f88 100755 --- a/dojo/models.py +++ b/dojo/models.py @@ -249,8 +249,10 @@ class UserContactInfo(models.Model): class Dojo_Group(models.Model): AZURE = 'AzureAD' + KEYCLOAK = 'Keycloak' SOCIAL_CHOICES = ( (AZURE, _('AzureAD')), + (KEYCLOAK, _('Keycloak')), ) name = models.CharField(max_length=255, unique=True) description = models.CharField(max_length=4000, null=True, blank=True) From 4ca38574cb7b63e0c1c2d14c6f3d57416efedb2a Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 10:54:13 +0100 Subject: [PATCH 39/42] add database change for keycloak social provider --- .../0202_alter_dojo_group_social_provider.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 dojo/db_migrations/0202_alter_dojo_group_social_provider.py diff --git a/dojo/db_migrations/0202_alter_dojo_group_social_provider.py b/dojo/db_migrations/0202_alter_dojo_group_social_provider.py new file mode 100644 index 00000000000..0d6930ddfce --- /dev/null +++ b/dojo/db_migrations/0202_alter_dojo_group_social_provider.py @@ -0,0 +1,18 @@ +# Generated by Django 4.1.13 on 2024-03-15 09:31 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dojo', '0201_populate_finding_sla_expiration_date'), + ] + + operations = [ + migrations.AlterField( + model_name='dojo_group', + name='social_provider', + field=models.CharField(blank=True, choices=[('AzureAD', 'AzureAD'), ('Keycloak', 'Keycloak')], help_text='Group imported from a social provider.', max_length=10, null=True, verbose_name='Social Authentication Provider'), + ), + ] From
49412f10d70dccd0b923d194cc53ea4691186a64 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 14:02:13 +0100 Subject: [PATCH 40/42] add group filter and Keycloak-specific booleans --- dojo/context_processors.py | 3 +++ dojo/group/utils.py | 2 +- dojo/pipeline.py | 24 ++++++++++++++++-------- dojo/settings/settings.dist.py | 7 +++++++ 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/dojo/context_processors.py b/dojo/context_processors.py index c0bbb250469..9da80ff0b8e 100644 --- a/dojo/context_processors.py +++ b/dojo/context_processors.py @@ -17,6 +17,9 @@ def globalize_vars(request): "AZUREAD_TENANT_OAUTH2_GET_GROUPS": settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS, "AZUREAD_TENANT_OAUTH2_GROUPS_FILTER": settings.AZUREAD_TENANT_OAUTH2_GROUPS_FILTER, "AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS": settings.AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS, + "KEYCLOAK_TENANT_OAUTH2_GET_GROUPS": settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS, + "KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER": settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER, + "KEYCLOAK_TENANT_OAUTH2_CLEANUP_GROUPS": settings.KEYCLOAK_TENANT_OAUTH2_CLEANUP_GROUPS, "KEYCLOAK_ENABLED": settings.KEYCLOAK_OAUTH2_ENABLED, "SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT": settings.SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT, "GITHUB_ENTERPRISE_ENABLED": settings.GITHUB_ENTERPRISE_OAUTH2_ENABLED, diff --git a/dojo/group/utils.py b/dojo/group/utils.py index 2ddf5e57a2d..e0167bc0192 100644 --- a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -41,7 +41,7 @@ def group_post_save_handler(sender, **kwargs): group.save() user = get_current_user() - if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS: + if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and not settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS: # Add the current user as the owner of the group member = Dojo_Group_Member() member.user = user diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 7ad8bf3ca4f..2868e587148 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -7,6 +7,7 @@ from django.conf import settings from dojo.models import Product, Product_Member, Product_Type, Role, Dojo_Group, Dojo_Group_Member from social_core.backends.azuread_tenant import AzureADTenantOAuth2 +from social_core.backends.open_id_connect import OpenIdConnectAuth from social_core.backends.google import GoogleOAuth2 from dojo.authorization.roles_permissions import Permissions, Roles from dojo.product.queries import get_authorized_products @@ -66,19 +67,26 @@ def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): def update_keycloak_groups(backend, uid, user=None, social=None, *args, **kwargs): - if settings.KEYCLOAK_OAUTH2_ENABLED: #need another setting to enable syncing + if settings.KEYCLOAK_OAUTH2_ENABLED and settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS and isinstance(backend, OpenIdConnectAuth): soc = user.social_auth.order_by("-created").first() token = soc.extra_data['access_token'] - print("accesstoken: " + str(token)) - print("response raw: " + str(kwargs['response'])) + #print("accesstoken: " + str(token)) + #print("response raw: " + str(kwargs['response'])) + group_names = [] if 'groups' not in kwargs['response'] or kwargs['response']['groups'] == "": logger.warning("No groups in response. Stopping to update groups of user based on azureAD") return - group_IDs = kwargs['response']['groups'] # probably need another setting with a regex ?
- if len(group_IDs) > 0: - assign_user_to_groups(user, group_IDs, 'Keycloak') - if settings.AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS: - cleanup_old_groups_for_user(user, group_IDs) + group_ids = kwargs['response']['groups'] # probably need another setting with a regex ? + for group_from_response in group_ids: + if settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER == "" or re.search(settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER, group_from_response): + group_names.append(group_from_response) + else: + logger.debug("Skipping group " + group_from_response + " due to KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER " + settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER) + + if len(group_names) > 0: + assign_user_to_groups(user, group_names, 'Keycloak') + if settings.KEYCLOAK_TENANT_OAUTH2_CLEANUP_GROUPS: + cleanup_old_groups_for_user(user, group_names) def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs): if settings.AZUREAD_TENANT_OAUTH2_ENABLED and settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and isinstance(backend, AzureADTenantOAuth2): diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 9574e5a3c6a..81518c60f35 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -123,6 +123,9 @@ DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GET_GROUPS=(bool, False), DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_GROUPS_FILTER=(str, ''), DD_SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_CLEANUP_GROUPS=(bool, True), + DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GET_GROUPS=(bool, False), + DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_CLEANUP_GROUPS=(bool, True), + DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GROUPS_FILTER=(str, ''), DD_SOCIAL_AUTH_GITLAB_OAUTH2_ENABLED=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_AUTO_IMPORT=(bool, False), DD_SOCIAL_AUTH_GITLAB_PROJECT_IMPORT_TAGS=(bool, False), @@ -639,6 +642,10 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param SOCIAL_AUTH_OIDC_SECRET = env('DD_SOCIAL_AUTH_KEYCLOAK_SECRET') SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT = env('DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT') +KEYCLOAK_TENANT_OAUTH2_GET_GROUPS = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GET_GROUPS') +KEYCLOAK_TENANT_OAUTH2_CLEANUP_GROUPS = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_CLEANUP_GROUPS') +KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER = env('DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GROUPS_FILTER') + # SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = env('DD_SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY') # SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL') # SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = env('DD_SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL') From 91306aeedb202810296050616d89da127becb6e4 Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Fri, 15 Mar 2024 14:44:05 +0100 Subject: [PATCH 41/42] cleanup and small improvement --- dojo/group/utils.py | 5 +++-- dojo/pipeline.py | 6 +----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/dojo/group/utils.py b/dojo/group/utils.py index e0167bc0192..b61e422a0b5 100644 --- a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -2,7 +2,7 @@ from django.contrib.auth.models import Group from django.db.models.signals import post_save, post_delete from django.dispatch import receiver -from dojo.models import Dojo_Group, Dojo_Group_Member, Role +from dojo.models import Dojo_Group, Dojo_Group_Member, Role, Dojo_User from django.conf import settings @@ -41,7 +41,8 @@ def group_post_save_handler(sender, **kwargs): group.save() user = get_current_user() - if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and not 
settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS: + if user and isinstance(user, Dojo_User): + #if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and not settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS: # Add the current user as the owner of the group member = Dojo_Group_Member() member.user = user diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 2868e587148..fba14699410 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -68,15 +68,11 @@ def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs): def update_keycloak_groups(backend, uid, user=None, social=None, *args, **kwargs): if settings.KEYCLOAK_OAUTH2_ENABLED and settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS and isinstance(backend, OpenIdConnectAuth): - soc = user.social_auth.order_by("-created").first() - token = soc.extra_data['access_token'] - #print("accesstoken: " + str(token)) - #print("response raw: " + str(kwargs['response'])) group_names = [] if 'groups' not in kwargs['response'] or kwargs['response']['groups'] == "": logger.warning("No groups in response. Stopping to update groups of user based on azureAD") return - group_ids = kwargs['response']['groups'] # probably need another setting with a regex ? + group_ids = kwargs['response']['groups'] for group_from_response in group_ids: if settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER == "" or re.search(settings.KEYCLOAK_TENANT_OAUTH2_GROUPS_FILTER, group_from_response): group_names.append(group_from_response) From 09f7f11ee6a99d4571115bb90038687f56aab14e Mon Sep 17 00:00:00 2001 From: Lars Meijers Date: Mon, 18 Mar 2024 16:48:33 +0100 Subject: [PATCH 42/42] cleanup comment --- dojo/group/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dojo/group/utils.py b/dojo/group/utils.py index b61e422a0b5..07589a5e996 100644 --- a/dojo/group/utils.py +++ b/dojo/group/utils.py @@ -42,7 +42,6 @@ def group_post_save_handler(sender, **kwargs): user = get_current_user() if user and isinstance(user, Dojo_User): - #if user and not settings.AZUREAD_TENANT_OAUTH2_GET_GROUPS and not settings.KEYCLOAK_TENANT_OAUTH2_GET_GROUPS: # Add the current user as the owner of the group member = Dojo_Group_Member() member.user = user
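Taken together, the patches above replace the dedicated social_core Keycloak backend with the generic social_core.backends.open_id_connect.OpenIdConnectAuth backend and add optional synchronisation of DefectDojo groups from the 'groups' claim returned by the identity provider. As a rough sketch only (the realm URL, client ID, secret and filter regex below are placeholders, and the exact deployment mechanism is not part of these patches), an instance built from this branch might be configured with environment variables along these lines:

    # Keycloak login via the generic OIDC backend
    DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_ENABLED=True
    DD_SOCIAL_AUTH_OIDC_OIDC_ENDPOINT=https://keycloak.example.com/realms/example   # placeholder realm URL
    DD_SOCIAL_AUTH_KEYCLOAK_KEY=defectdojo                                          # placeholder client ID
    DD_SOCIAL_AUTH_KEYCLOAK_SECRET=change-me                                        # placeholder client secret
    DD_SOCIAL_AUTH_KEYCLOAK_LOGIN_BUTTON_TEXT=Login with Keycloak

    # Keycloak group synchronisation
    DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GET_GROUPS=True
    DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_GROUPS_FILTER=^defectdojo-.*                     # placeholder regex
    DD_SOCIAL_AUTH_KEYCLOAK_OAUTH2_CLEANUP_GROUPS=True

Group synchronisation only takes effect if the identity provider actually returns a 'groups' claim in its response, since dojo.pipeline.update_keycloak_groups reads kwargs['response']['groups']; configuring Keycloak to emit that claim is outside the scope of these patches.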