From 6d1ae7000803fe58fe65e3444e18c8f4fe12dc79 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 6 Jan 2025 15:12:56 +0000 Subject: [PATCH 01/99] Update versions in application files --- components/package.json | 2 +- docs/content/en/open_source/upgrading/2.43.md | 7 +++++++ dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 4 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 docs/content/en/open_source/upgrading/2.43.md diff --git a/components/package.json b/components/package.json index a8d871e7356..086741e6f2d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.0", + "version": "2.43.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/docs/content/en/open_source/upgrading/2.43.md b/docs/content/en/open_source/upgrading/2.43.md new file mode 100644 index 00000000000..faa443cfea2 --- /dev/null +++ b/docs/content/en/open_source/upgrading/2.43.md @@ -0,0 +1,7 @@ +--- +title: 'Upgrading to DefectDojo Version 2.43.x' +toc_hide: true +weight: -20250106 +description: No special instructions. +--- +There are no special instructions for upgrading to 2.43.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release. diff --git a/dojo/__init__.py b/dojo/__init__.py index 79dc67f72f9..3a2e4a630a2 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.42.0" +__version__ = "2.43.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 3b6c3581684..f64a6e60077 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.0" +appVersion: "2.43.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.167 +version: 1.6.168-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 3a7f24bf0482d9da2fa9294e4a73ed9e59910937 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:53:59 -0600 Subject: [PATCH 02/99] Bump boto3 from 1.35.91 to 1.35.92 (#11508) Bumps [boto3](https://github.com/boto/boto3) from 1.35.91 to 1.35.92. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.91...1.35.92) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e112ad95afa..f1a898d4601 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.35.91 # Required for Celery Broker AWS (SQS) support +boto3==1.35.92 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 0b3ac89ff0fcb72f5b04689cc9e220678d8bd09a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:42:41 -0600 Subject: [PATCH 03/99] Update dependency @tabler/icons from 3.26.0 to v3.27.1 (docs/package.json) (#11519) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 01229f0d6b9..16564bef43e 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -2411,9 +2411,9 @@ } }, "node_modules/@tabler/icons": { - "version": "3.26.0", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.26.0.tgz", - "integrity": "sha512-oO3D4ss+DxzxqU1aDy0f1HmToyrO0gcQWIMpzHAfV1quPUx0BZYvNm5xz1DQb4DxNm/+xNvbBGLJy4pzTLYWag==", + "version": "3.27.1", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.27.1.tgz", + "integrity": "sha512-Utt80OeExBV/Pi9m9Z7ZRHOHQPapMO8t4mCSMb/n5oiLWCEU2HX+9mGzo772SVxyDZ3YQBgmzIYnGKhL3tUSSQ==", "license": "MIT", "funding": { "type": "github", From 7c3d4c118329194baa1411fb5d3005a6e89fae08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:43:22 -0600 Subject: [PATCH 04/99] Bump boto3 from 1.35.92 to 1.35.93 (#11520) Bumps [boto3](https://github.com/boto/boto3) from 1.35.92 to 1.35.93. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.92...1.35.93) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f1a898d4601..d1e0c25b108 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.35.92 # Required for Celery Broker AWS (SQS) support +boto3==1.35.93 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 7e6ef26e6b7918574ad240f1b59b4dcc87c65d42 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 14:22:38 -0600 Subject: [PATCH 05/99] Update postgres:17.2-alpine Docker digest from 17.2 to 17.2-alpine (docker-compose.yml) (#11521) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index c0cc991e6ca..7110b61c13c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -105,7 +105,7 @@ services: source: ./docker/extra_settings target: /app/docker/extra_settings postgres: - image: postgres:17.2-alpine@sha256:d37d2c160d34430877c802e5adc22824a2ad453499db9bab1a2ceb2be6c1a46f + image: postgres:17.2-alpine@sha256:f58b02ec01778a7c590c3dcf869da3f4294d89adc70e6f55d63b0b8c6f78faa1 environment: POSTGRES_DB: ${DD_DATABASE_NAME:-defectdojo} POSTGRES_USER: ${DD_DATABASE_USER:-defectdojo} From 97e38c832fe5c5dcfb93bc206291a643a8110cb9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 21:32:34 -0600 Subject: [PATCH 06/99] Update eps1lon/actions-label-merge-conflict action from v3.0.2 to v3.0.3 (.github/workflows/detect-merge-conflicts.yaml) (#11510) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/detect-merge-conflicts.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/detect-merge-conflicts.yaml b/.github/workflows/detect-merge-conflicts.yaml index 934543cec4e..f3bdda58562 100644 --- a/.github/workflows/detect-merge-conflicts.yaml +++ b/.github/workflows/detect-merge-conflicts.yaml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: check if prs are conflicted - uses: eps1lon/actions-label-merge-conflict@1b1b1fcde06a9b3d089f3464c96417961dde1168 # v3.0.2 + uses: eps1lon/actions-label-merge-conflict@1df065ebe6e3310545d4f4c4e862e43bdca146f0 # v3.0.3 with: dirtyLabel: "conflicts-detected" repoToken: "${{ secrets.GITHUB_TOKEN }}" From a0a41c509db2f203ee9978b14dcf2f2eacd16050 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:22:21 -0600 Subject: [PATCH 07/99] Bump boto3 from 1.35.93 to 1.35.96 (#11538) Bumps [boto3](https://github.com/boto/boto3) from 1.35.93 to 1.35.96. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.93...1.35.96) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d1e0c25b108..c02638a4df9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.35.93 # Required for Celery Broker AWS (SQS) support +boto3==1.35.96 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 7076fcb67a0d095f598ec12d4a5deb70f2b53041 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:22:47 -0600 Subject: [PATCH 08/99] Bump sqlalchemy from 2.0.36 to 2.0.37 (#11537) Bumps [sqlalchemy](https://github.com/sqlalchemy/sqlalchemy) from 2.0.36 to 2.0.37. - [Release notes](https://github.com/sqlalchemy/sqlalchemy/releases) - [Changelog](https://github.com/sqlalchemy/sqlalchemy/blob/main/CHANGES.rst) - [Commits](https://github.com/sqlalchemy/sqlalchemy/commits) --- updated-dependencies: - dependency-name: sqlalchemy dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c02638a4df9..550ffb39bd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -37,7 +37,7 @@ python-dateutil==2.9.0.post0 pytz==2024.2 redis==5.2.1 requests==2.32.3 -sqlalchemy==2.0.36 # Required by Celery broker transport +sqlalchemy==2.0.37 # Required by Celery broker transport urllib3==1.26.18 uWSGI==2.0.28 vobject==0.9.9 From d91983113f3747aa3219f0acbb971b945bc7e0ed Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:23:16 -0600 Subject: [PATCH 09/99] Update nginx/nginx-prometheus-exporter Docker tag from 1.4.0 to v1.4.1 (helm/defectdojo/values.yaml) (#11534) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- helm/defectdojo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 0deb30aaff4..bfab904f663 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -125,7 +125,7 @@ monitoring: # Add the nginx prometheus exporter sidecar prometheus: enabled: false - image: nginx/nginx-prometheus-exporter:1.4.0 + image: nginx/nginx-prometheus-exporter:1.4.1 imagePullPolicy: IfNotPresent annotations: {} From 9a8dd4ae3f35cf528ec5731e80a5b0f7bc21bff4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:23:44 -0600 Subject: [PATCH 10/99] Bump python-gitlab from 5.3.0 to 5.3.1 (#11530) Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 5.3.0 to 5.3.1. - [Release notes](https://github.com/python-gitlab/python-gitlab/releases) - [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md) - [Commits](https://github.com/python-gitlab/python-gitlab/compare/v5.3.0...v5.3.1) --- updated-dependencies: - dependency-name: python-gitlab dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 550ffb39bd9..cc5d9264e73 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ titlecase==2.4.1 social-auth-app-django==5.4.2 social-auth-core==4.5.4 gitpython==3.1.43 -python-gitlab==5.3.0 +python-gitlab==5.3.1 cpe==1.3.1 packageurl-python==0.16.0 django-crum==0.7.9 From a182aab551c6fc4ffe1783a6182530f782dd87d6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:24:41 -0600 Subject: [PATCH 11/99] Update docker/build-push-action action from v6.10.0 to v6.11.0 (.github/workflows/release-x-manual-docker-containers.yml) (#11529) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/build-docker-images-for-testing.yml | 2 +- .github/workflows/release-x-manual-docker-containers.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index 245b340ba36..c3fa84f3845 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -35,7 +35,7 @@ jobs: - name: Build id: docker_build - uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v6.10.0 + uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 timeout-minutes: 10 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false diff --git a/.github/workflows/release-x-manual-docker-containers.yml b/.github/workflows/release-x-manual-docker-containers.yml index bf0061c7e6e..adf75cf544b 100644 --- a/.github/workflows/release-x-manual-docker-containers.yml +++ b/.github/workflows/release-x-manual-docker-containers.yml @@ -51,7 +51,7 @@ jobs: - name: Build and push images with debian if: ${{ matrix.os == 'debian' }} - uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v6.10.0 + uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} @@ -64,7 +64,7 @@ jobs: - name: Build and push images with alpine if: ${{ matrix.os == 'alpine' }} - uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v6.10.0 + uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} From ab19ed41db2b4862bba9fee3298de699aaeb21ee Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:25:10 -0600 Subject: [PATCH 12/99] Update dependency @tabler/icons from 3.27.1 to v3.28.1 (docs/package.json) (#11527) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 16564bef43e..12994fca578 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -2411,9 +2411,9 @@ } }, "node_modules/@tabler/icons": { - "version": "3.27.1", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.27.1.tgz", - "integrity": "sha512-Utt80OeExBV/Pi9m9Z7ZRHOHQPapMO8t4mCSMb/n5oiLWCEU2HX+9mGzo772SVxyDZ3YQBgmzIYnGKhL3tUSSQ==", + 
"version": "3.28.1", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.28.1.tgz", + "integrity": "sha512-h7nqKEvFooLtFxhMOC1/2eiV+KRXhBUuDUUJrJlt6Ft6tuMw2eU/9GLQgrTk41DNmIEzp/LI83K9J9UUU8YBYQ==", "license": "MIT", "funding": { "type": "github", From 2e073e40fb02cd515254c27a59fe34b8527f28b9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 10 Jan 2025 16:25:31 -0600 Subject: [PATCH 13/99] Update postgres:17.2-alpine Docker digest from 17.2 to 17.2-alpine (docker-compose.yml) (#11526) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 7110b61c13c..3043ed27852 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -105,7 +105,7 @@ services: source: ./docker/extra_settings target: /app/docker/extra_settings postgres: - image: postgres:17.2-alpine@sha256:f58b02ec01778a7c590c3dcf869da3f4294d89adc70e6f55d63b0b8c6f78faa1 + image: postgres:17.2-alpine@sha256:0bcc5bbbb2aa9c9b4c6505845918c7eb55d783cf5c1f434fac33012579fb149d environment: POSTGRES_DB: ${DD_DATABASE_NAME:-defectdojo} POSTGRES_USER: ${DD_DATABASE_USER:-defectdojo} From f32f6190b6b61dac9d5428111f90c4e1e9568327 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 13 Jan 2025 15:46:46 +0000 Subject: [PATCH 14/99] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 340fcde82b0..086741e6f2d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.1", + "version": "2.43.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index c5a06c6f17c..3a2e4a630a2 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.42.1" +__version__ = "2.43.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 5f5ce6805e6..682a3d78bcc 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.1" +appVersion: "2.43.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.168 +version: 1.6.169-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From bd9bc46b6d898ffeac3fd70a782413bdd1e622f0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:18:13 -0600 Subject: [PATCH 15/99] Update stefanzweifel/git-auto-commit-action action from v5.0.1 to v5.1.0 (.github/workflows/release-3-master-into-dev.yml) (#11550) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/plantuml.yml | 2 +- .github/workflows/release-1-create-pr.yml | 2 +- .github/workflows/release-3-master-into-dev.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/plantuml.yml b/.github/workflows/plantuml.yml index 6beb590899b..8b48a694cab 100644 --- a/.github/workflows/plantuml.yml +++ b/.github/workflows/plantuml.yml @@ -33,7 +33,7 @@ jobs: with: args: -v -tpng ${{ steps.getfile.outputs.files }} - name: Push Local Changes - uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 + uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0 with: commit_user_name: "PlantUML_bot" commit_user_email: "noreply@defectdojo.org" diff --git a/.github/workflows/release-1-create-pr.yml b/.github/workflows/release-1-create-pr.yml index 5b65c02ec93..e21cbcb55a2 100644 --- a/.github/workflows/release-1-create-pr.yml +++ b/.github/workflows/release-1-create-pr.yml @@ -75,7 +75,7 @@ jobs: grep -H version helm/defectdojo/Chart.yaml - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 + uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml index ede4cf33d37..337a61fe310 100644 --- a/.github/workflows/release-3-master-into-dev.yml +++ b/.github/workflows/release-3-master-into-dev.yml @@ -73,7 +73,7 @@ jobs: if: endsWith(github.event.inputs.release_number_new, '.0') && endsWith(github.event.inputs.release_number_dev, '.0-dev') - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 + uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" @@ -139,7 +139,7 @@ jobs: grep version components/package.json - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 + uses: stefanzweifel/git-auto-commit-action@e348103e9026cc0eee72ae06630dbe30c8bf7a79 # v5.1.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" From 
5635daa7ab20a26a29395f873e7aad8f3914dc83 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 14:56:44 -0600 Subject: [PATCH 16/99] Update softprops/action-gh-release action from v2.0.9 to v2.2.1 (.github/workflows/release-x-manual-helm-chart.yml) (#11515) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/release-x-manual-helm-chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-x-manual-helm-chart.yml b/.github/workflows/release-x-manual-helm-chart.yml index b2df7a45ce5..b435c6a45cd 100644 --- a/.github/workflows/release-x-manual-helm-chart.yml +++ b/.github/workflows/release-x-manual-helm-chart.yml @@ -73,7 +73,7 @@ jobs: echo "chart_version=$(ls build | cut -d '-' -f 2 | sed 's|\.tgz||')" >> $GITHUB_ENV - name: Create release ${{ github.event.inputs.release_number }} - uses: softprops/action-gh-release@e7a8f85e1c67a31e6ed99a94b41bd0b71bbee6b8 # v2.0.9 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2.2.1 with: name: '${{ github.event.inputs.release_number }} 🌈' tag_name: ${{ github.event.inputs.release_number }} From 0e2c82885d605174557795220c2083cfdd878b66 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 14:57:34 -0600 Subject: [PATCH 17/99] Update actions/upload-artifact action from v4.5.0 to v4.6.0 (.github/workflows/fetch-oas.yml) (#11547) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/build-docker-images-for-testing.yml | 2 +- .github/workflows/fetch-oas.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index c3fa84f3845..2d7032546fc 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -49,7 +49,7 @@ jobs: # export docker images to be used in next jobs below - name: Upload image ${{ matrix.docker-image }} as artifact timeout-minutes: 10 - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: built-docker-image-${{ matrix.docker-image }}-${{ matrix.os }} path: ${{ matrix.docker-image }}-${{ matrix.os }}_img diff --git a/.github/workflows/fetch-oas.yml b/.github/workflows/fetch-oas.yml index cc5c499f22b..8f2b5514436 100644 --- a/.github/workflows/fetch-oas.yml +++ b/.github/workflows/fetch-oas.yml @@ -51,7 +51,7 @@ jobs: run: docker compose down - name: Upload oas.${{ matrix.file-type }} as artifact - uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: oas-${{ matrix.file-type }} path: oas.${{ matrix.file-type }} From 3dacf4d7f0c50956cfa5406d8d14630af8739e32 Mon Sep 17 00:00:00 2001 From: "J. Q." 
<55899496+jawadqur@users.noreply.github.com> Date: Tue, 14 Jan 2025 16:44:18 -0600 Subject: [PATCH 18/99] Make django service type configurable (#10660) * Add service type in values for django * Add service type via values * Update helm/defectdojo/templates/django-service.yaml Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- helm/defectdojo/templates/django-service.yaml | 7 +++---- helm/defectdojo/values.yaml | 1 + 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/helm/defectdojo/templates/django-service.yaml b/helm/defectdojo/templates/django-service.yaml index 3823886bbd2..f8c20aa092f 100644 --- a/helm/defectdojo/templates/django-service.yaml +++ b/helm/defectdojo/templates/django-service.yaml @@ -38,9 +38,8 @@ spec: port: 9113 targetPort: http-metrics {{- end }} -{{- if .Values.django.serviceType }} - type: {{ .Values.django.serviceType }} -{{- end }} -{{- if .Values.gke.useGKEIngress }} +{{- if .Values.django.service.type }} + type: {{ .Values.django.service.type }} +{{- else if .Values.gke.useGKEIngress }} type: NodePort {{- end }} diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index bfab904f663..3fe30de95ef 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -209,6 +209,7 @@ django: annotations: {} service: annotations: {} + type: "" affinity: {} ingress: enabled: true From 34bdf0b598783e6d507683e22de41a6c5a05613b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 17:19:05 -0600 Subject: [PATCH 19/99] Bump nginx from `4152318` to `814a8e8` (#11556) Bumps nginx from `4152318` to `814a8e8`. --- updated-dependencies: - dependency-name: nginx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.nginx-alpine | 2 +- Dockerfile.nginx-debian | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.nginx-alpine b/Dockerfile.nginx-alpine index 9c266b9734a..96f32eaa3bf 100644 --- a/Dockerfile.nginx-alpine +++ b/Dockerfile.nginx-alpine @@ -140,7 +140,7 @@ COPY manage.py ./ COPY dojo/ ./dojo/ RUN env DD_SECRET_KEY='.' python3 manage.py collectstatic --noinput && true -FROM nginx:1.27.3-alpine@sha256:41523187cf7d7a2f2677a80609d9caa14388bf5c1fbca9c410ba3de602aaaab4 +FROM nginx:1.27.3-alpine@sha256:814a8e88df978ade80e584cc5b333144b9372a8e3c98872d07137dbf3b44d0e4 ARG uid=1001 ARG appuser=defectdojo COPY --from=collectstatic /app/static/ /usr/share/nginx/html/static/ diff --git a/Dockerfile.nginx-debian b/Dockerfile.nginx-debian index f55d77bfe8f..978e74dfb43 100644 --- a/Dockerfile.nginx-debian +++ b/Dockerfile.nginx-debian @@ -73,7 +73,7 @@ COPY dojo/ ./dojo/ RUN env DD_SECRET_KEY='.' 
python3 manage.py collectstatic --noinput && true -FROM nginx:1.27.3-alpine@sha256:41523187cf7d7a2f2677a80609d9caa14388bf5c1fbca9c410ba3de602aaaab4 +FROM nginx:1.27.3-alpine@sha256:814a8e88df978ade80e584cc5b333144b9372a8e3c98872d07137dbf3b44d0e4 ARG uid=1001 ARG appuser=defectdojo COPY --from=collectstatic /app/static/ /usr/share/nginx/html/static/ From 742514315dd5244840f5ddc152a4ab6f200527ad Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 17:21:57 -0600 Subject: [PATCH 20/99] Update mikefarah/yq action from v4.44.6 to v4.45.1 (.github/workflows/release-x-manual-helm-chart.yml) (#11551) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/release-x-manual-helm-chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-x-manual-helm-chart.yml b/.github/workflows/release-x-manual-helm-chart.yml index b435c6a45cd..1a969e788f5 100644 --- a/.github/workflows/release-x-manual-helm-chart.yml +++ b/.github/workflows/release-x-manual-helm-chart.yml @@ -56,7 +56,7 @@ jobs: helm dependency update ./helm/defectdojo - name: Add yq - uses: mikefarah/yq@4839dbbf80445070a31c7a9c1055da527db2d5ee # v4.44.6 + uses: mikefarah/yq@8bf425b4d1344db7cd469a8d10a390876e0c77fd # v4.45.1 - name: Pin version docker version id: pin_image From b1fa85ca6e68a2da9c4a0524f87eec9bb2964d83 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 04:45:58 +0100 Subject: [PATCH 21/99] Ruff: Fix RUF046 (#11492) --- dojo/product/views.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/product/views.py b/dojo/product/views.py index 654169363dc..f6c5af6c418 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -546,7 +546,7 @@ def view_product_metrics(request, pid): end_date = filters["end_date"] r = relativedelta(end_date, start_date) - weeks_between = int(ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7))) + weeks_between = ceil((((r.years * 12) + r.months) * 4.33) + (r.days / 7)) if weeks_between <= 0: weeks_between += 2 From a9a3642225db8cd29babff5427fa5d59be28fb47 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 04:48:45 +0100 Subject: [PATCH 22/99] Ruff: Fix RUF051 (#11497) --- dojo/forms.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/dojo/forms.py b/dojo/forms.py index 334a958e93f..3837b4d3933 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -1951,9 +1951,7 @@ class MetricsFilterForm(forms.Form): # add the ability to exclude the exclude_product_types field def __init__(self, *args, **kwargs): - exclude_product_types = kwargs.get("exclude_product_types", False) - if "exclude_product_types" in kwargs: - del kwargs["exclude_product_types"] + exclude_product_types = kwargs.pop("exclude_product_types", False) super().__init__(*args, **kwargs) if exclude_product_types: del self.fields["exclude_product_types"] @@ -3208,10 +3206,7 @@ def __init__(self, *args, **kwargs): self.helper.form_method = "post" # If true crispy-forms will render a
<form>..</form>
tags - self.helper.form_tag = kwargs.get("form_tag", True) - - if "form_tag" in kwargs: - del kwargs["form_tag"] + self.helper.form_tag = kwargs.pop("form_tag", True) self.engagement_survey = kwargs.get("engagement_survey") @@ -3223,13 +3218,12 @@ def __init__(self, *args, **kwargs): self.helper.form_class = kwargs.get("form_class", "") - self.question = kwargs.get("question") + self.question = kwargs.pop("question", None) if not self.question: msg = "Need a question to render" raise ValueError(msg) - del kwargs["question"] super().__init__(*args, **kwargs) From d959bdd30b12638536fb1d38e6b8822cb86c42ee Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 04:49:38 +0100 Subject: [PATCH 23/99] Ruff: Fix D403 (#11498) --- dojo/importers/base_importer.py | 2 +- dojo/jira_link/views.py | 4 ++-- dojo/tools/scantist/parser.py | 4 ++-- dojo/tools/utils.py | 2 +- dojo/tools/wiz/parser.py | 2 +- unittests/test_finding_model.py | 10 +++++----- unittests/tools/test_sonarqube_parser.py | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py index cab58fd718b..53f55205c83 100644 --- a/dojo/importers/base_importer.py +++ b/dojo/importers/base_importer.py @@ -182,7 +182,7 @@ def parse_dynamic_test_type_findings_from_tests( tests: list[Test], ) -> list[Finding]: """ - currently we only support import one Test + Currently we only support import one Test so for parser that support multiple tests (like SARIF) we aggregate all the findings into one uniq test """ diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py index 372b48fbfbe..bb4d4ce3146 100644 --- a/dojo/jira_link/views.py +++ b/dojo/jira_link/views.py @@ -50,7 +50,7 @@ def webhook_responser_handler( @require_POST def webhook(request, secret=None): """ - for examples of incoming json, see the unit tests for the webhook: + For examples of incoming json, see the unit tests for the webhook: https://github.com/DefectDojo/django-DefectDojo/blob/master/unittests/test_jira_webhook.py or the officials docs (which are not always clear): https://developer.atlassian.com/server/jira/platform/webhooks/ @@ -168,7 +168,7 @@ def webhook(request, secret=None): def check_for_and_create_comment(parsed_json): """ - example incoming requests from JIRA Server 8.14.0 + Example incoming requests from JIRA Server 8.14.0 { "timestamp":1610269967824, "webhookEvent":"comment_created", diff --git a/dojo/tools/scantist/parser.py b/dojo/tools/scantist/parser.py index 6b8721d3080..31db1d6e6d5 100644 --- a/dojo/tools/scantist/parser.py +++ b/dojo/tools/scantist/parser.py @@ -34,14 +34,14 @@ def get_findings(self, file, test): def get_items(self, tree, test): """ - tree list: input tree list of all the vulnerability findings + Tree list: input tree list of all the vulnerability findings test: : purpose: parses input rawto extract dojo """ def get_findings(vuln, test): """ - vuln : input vulnerable node + Vuln : input vulnerable node test : """ vulnerability_id = vuln.get("Public ID") diff --git a/dojo/tools/utils.py b/dojo/tools/utils.py index 19f9841ae50..23049469cec 100644 --- a/dojo/tools/utils.py +++ b/dojo/tools/utils.py @@ -6,7 +6,7 @@ def get_npm_cwe(item_node): """ - possible values: + Possible values: "cwe": null "cwe": ["CWE-173", "CWE-200","CWE-601"] (or []) "cwe": "CWE-1234" diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py index 219cf715149..ea6ab50fbfe 100644 --- a/dojo/tools/wiz/parser.py +++ 
b/dojo/tools/wiz/parser.py @@ -156,7 +156,7 @@ def _construct_string_field(self, fields: dict[str, str], row: dict) -> str: def _parse_tags(self, tags: str) -> list[str]: """ - parse the Tag string dict, and convert to a list of strings. + Parse the Tag string dict, and convert to a list of strings. The format of the tags is is "{""key"":""value""}" format """ diff --git a/unittests/test_finding_model.py b/unittests/test_finding_model.py index a156301274a..2e2be817b7e 100644 --- a/unittests/test_finding_model.py +++ b/unittests/test_finding_model.py @@ -332,7 +332,7 @@ def run(self, result=None): def test_sla_expiration_date(self): """ - tests if the SLA expiration date and SLA days remaining are calculated correctly + Tests if the SLA expiration date and SLA days remaining are calculated correctly after a finding's severity is updated """ user, _ = User.objects.get_or_create(username="admin") @@ -357,7 +357,7 @@ def test_sla_expiration_date(self): def test_sla_expiration_date_after_finding_severity_updated(self): """ - tests if the SLA expiration date and SLA days remaining are calculated correctly + Tests if the SLA expiration date and SLA days remaining are calculated correctly after a finding's severity is updated """ user, _ = User.objects.get_or_create(username="admin") @@ -389,7 +389,7 @@ def test_sla_expiration_date_after_finding_severity_updated(self): def test_sla_expiration_date_after_product_updated(self): """ - tests if the SLA expiration date and SLA days remaining are calculated correctly + Tests if the SLA expiration date and SLA days remaining are calculated correctly after a product changed from one SLA configuration to another """ user, _ = User.objects.get_or_create(username="admin") @@ -428,7 +428,7 @@ def test_sla_expiration_date_after_product_updated(self): def test_sla_expiration_date_after_sla_configuration_updated(self): """ - tests if the SLA expiration date and SLA days remaining are calculated correctly + Tests if the SLA expiration date and SLA days remaining are calculated correctly after the SLA configuration on a product was updated to a different number of SLA days """ user, _ = User.objects.get_or_create(username="admin") @@ -461,7 +461,7 @@ def test_sla_expiration_date_after_sla_configuration_updated(self): def test_sla_expiration_date_after_sla_not_enforced(self): """ - tests if the SLA expiration date is none after the after the SLA configuration on a + Tests if the SLA expiration date is none after the after the SLA configuration on a product was updated to not enforce all SLA remediation days """ user, _ = User.objects.get_or_create(username="admin") diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py index ef4912510b0..0b93fa4bb05 100644 --- a/unittests/tools/test_sonarqube_parser.py +++ b/unittests/tools/test_sonarqube_parser.py @@ -380,7 +380,7 @@ def test_detailed_parse_file_with_vuln_issue_3725(self): def test_detailed_parse_file_table_has_whitespace(self): """ - from version 3.1.1: sonarqube-report has new template with some change. + From version 3.1.1: sonarqube-report has new template with some change. see: https://github.com/soprasteria/sonar-report/commit/7dab559e7ecf9ed319345e9262a8b160bd3af94f Data table will have some whitespaces, parser should strip it before compare or use these properties. 
""" From 67da86634ee3832c231bdfeb9491fc5f6426a75e Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 04:55:15 +0100 Subject: [PATCH 24/99] Ruff: Fix RUF056 (#11501) --- dojo/finding/views.py | 2 +- dojo/importers/default_reimporter.py | 2 +- dojo/product/views.py | 6 +++--- dojo/test/views.py | 2 +- unittests/tools/test_tfsec_parser.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 625cb090901..13025afc4b3 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -685,7 +685,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict): def get_form(self, request: HttpRequest, context: dict): return ( self.get_typed_note_form(request, context) - if context.get("note_type_activation", 0) + if context.get("note_type_activation") else self.get_note_form(request) ) diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py index 0c4159ed669..dfe7fe4027e 100644 --- a/dojo/importers/default_reimporter.py +++ b/dojo/importers/default_reimporter.py @@ -737,7 +737,7 @@ def process_results( Determine how to to return the results based on whether the process was ran asynchronous or not """ - if not kwargs.get("sync", False): + if not kwargs.get("sync"): serialized_new_items = [ serialize("json", [finding]) for finding in self.new_items ] diff --git a/dojo/product/views.py b/dojo/product/views.py index f6c5af6c418..58c21853523 100644 --- a/dojo/product/views.py +++ b/dojo/product/views.py @@ -598,7 +598,7 @@ def view_product_metrics(request, pid): unix_timestamp = (tcalendar.timegm(date.timetuple()) * 1000) # Open findings - if open_findings_dict.get(finding.get("id", None), None): + if open_findings_dict.get(finding.get("id", None)): if unix_timestamp not in critical_weekly: critical_weekly[unix_timestamp] = {"count": 0, "week": html_date} if unix_timestamp not in high_weekly: @@ -651,7 +651,7 @@ def view_product_metrics(request, pid): open_objs_by_severity[finding.get("severity")] += 1 # Close findings - elif closed_findings_dict.get(finding.get("id", None), None): + elif closed_findings_dict.get(finding.get("id", None)): if unix_timestamp in open_close_weekly: open_close_weekly[unix_timestamp]["closed"] += 1 else: @@ -662,7 +662,7 @@ def view_product_metrics(request, pid): closed_objs_by_severity[finding.get("severity")] += 1 # Risk Accepted findings - if accepted_findings_dict.get(finding.get("id", None), None): + if accepted_findings_dict.get(finding.get("id", None)): if unix_timestamp in open_close_weekly: open_close_weekly[unix_timestamp]["accepted"] += 1 else: diff --git a/dojo/test/views.py b/dojo/test/views.py index 8c9f42d586a..2ea9b249058 100644 --- a/dojo/test/views.py +++ b/dojo/test/views.py @@ -173,7 +173,7 @@ def get_typed_note_form(self, request: HttpRequest, context: dict): def get_form(self, request: HttpRequest, context: dict): return ( self.get_typed_note_form(request, context) - if context.get("note_type_activation", 0) + if context.get("note_type_activation") else self.get_note_form(request) ) diff --git a/unittests/tools/test_tfsec_parser.py b/unittests/tools/test_tfsec_parser.py index 814a5154d61..c7ce3927771 100644 --- a/unittests/tools/test_tfsec_parser.py +++ b/unittests/tools/test_tfsec_parser.py @@ -96,7 +96,7 @@ def test_parse_many_findings_current(self): severities = {} for finding in findings: - if severities.get(finding.severity, None): + if severities.get(finding.severity): numSeverity = 
severities.get(finding.severity) numSeverity += 1 severities[finding.severity] = numSeverity From ad5040ea0fe972d49c05f9a344edb0dea9e9c799 Mon Sep 17 00:00:00 2001 From: maxi-bee <84531851+maxi-bee@users.noreply.github.com> Date: Wed, 15 Jan 2025 04:56:05 +0100 Subject: [PATCH 25/99] Update kubescape parser.py (#11542) - removes the resource objects (a whole manifest) from "steps to reproduce" as it is often so long (thousands of lines) that conflicts with the default Jira configurations - potentially, the Jira integration should validate that, but that isn't possibly the case - note that also, there is arguably little value on storing this very large objects on the database (for duplicates and originals) --- dojo/tools/kubescape/parser.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py index daa6635870a..53140f30584 100644 --- a/dojo/tools/kubescape/parser.py +++ b/dojo/tools/kubescape/parser.py @@ -98,8 +98,6 @@ def get_findings(self, filename, test): steps_to_reproduce = "The following rules have failed :" + "\n" steps_to_reproduce += "\t**Rules:** " + str(json.dumps(control["rules"], indent=4)) + "\n" - steps_to_reproduce += "Resource object may contain evidence:" + "\n" - steps_to_reproduce += "\t**Resource object:** " + str(json.dumps(resource["object"], indent=4)) find = Finding( title=textwrap.shorten(title, 150), From cd12513257f5131948be948694848906714842da Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 05:08:55 +0100 Subject: [PATCH 26/99] Ruff: Fix PTH100, merge PTH10 (#11502) --- dojo/management/commands/import_surveys.py | 3 +-- dojo/settings/settings.dist.py | 2 +- ruff.toml | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/dojo/management/commands/import_surveys.py b/dojo/management/commands/import_surveys.py index 6fcdd53bde7..d1577648806 100644 --- a/dojo/management/commands/import_surveys.py +++ b/dojo/management/commands/import_surveys.py @@ -1,4 +1,3 @@ -import os from pathlib import Path from django.core.management.base import BaseCommand @@ -28,7 +27,7 @@ def handle(self, *args, **options): row = cursor.fetchone() ctype_id = row[0] # Find the current id in the surveys file - path = Path(os.path.abspath(__file__)).parent + path = Path(__file__).parent.absolute() path = path[:-19] + "fixtures/initial_surveys.json" contents = open(path, encoding="utf-8").readlines() for line in contents: diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 4f72fa171ce..ccc7cc5752e 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -935,7 +935,7 @@ def saml2_attrib_map_format(dict): SAML_ATTRIBUTE_MAPPING = saml2_attrib_map_format(env("DD_SAML2_ATTRIBUTES_MAP")) SAML_FORCE_AUTH = env("DD_SAML2_FORCE_AUTH") SAML_ALLOW_UNKNOWN_ATTRIBUTES = env("DD_SAML2_ALLOW_UNKNOWN_ATTRIBUTE") - BASEDIR = Path(path.abspath(__file__)).parent + BASEDIR = Path(__file__).parent.absolute() if len(env("DD_SAML2_ENTITY_ID")) == 0: SAML2_ENTITY_ID = f"{SITE_URL}/saml2/metadata/" else: diff --git a/ruff.toml b/ruff.toml index 12b556d5cf3..a4a2daa69ff 100644 --- a/ruff.toml +++ b/ruff.toml @@ -66,7 +66,7 @@ select = [ "TCH", "INT", "ARG003", "ARG004", "ARG005", - "PTH2", "PTH101", "PTH102", "PTH103", "PTH104", "PTH105", "PTH106", "PTH107", "PTH108", "PTH109", "PTH110", "PTH111", "PTH112", "PTH113", "PTH114", "PTH115", "PTH116", "PTH117", "PTH119", "PTH120", "PTH121", "PTH122", "PTH124", + "PTH2", "PTH10", 
"PTH110", "PTH111", "PTH112", "PTH113", "PTH114", "PTH115", "PTH116", "PTH117", "PTH119", "PTH120", "PTH121", "PTH122", "PTH124", "TD001", "TD004", "TD005", "PD", "PGH", From 7ecef22c76d0886cef55beb7ea1982aebe6146f9 Mon Sep 17 00:00:00 2001 From: Nguyen Dinh Bien <44922242+biennd279@users.noreply.github.com> Date: Wed, 15 Jan 2025 23:54:39 +0700 Subject: [PATCH 27/99] Feature: Checkmarx Cxflow SAST parser (#9719) * draf parser * fix typo * draft parser path node * add parser * add dedup aglo * integration docs * commented unused var * Revert "Merge remote-tracking branch 'upstream/dev' into feature-checkmarx-cxflow-sast" This reverts commit b167f2b5205b427ac0b26ae7fd3f6b4667a01cde, reversing changes made to 5257a25204dbc9e6603b3b64bc1d78eddb824140. * Revert "Revert "Merge remote-tracking branch 'upstream/dev' into feature-checkmarx-cxflow-sast"" This reverts commit f9cdafb72881454741e8fbb3dd2358dfb2c79fd5. * update doc and remove unused var * update parser * update parser test * Revert "update parser test" This reverts commit c1592332c6d8b1d6da513ca1a7f3732bee755084. * fix ruff * Update .settings.dist.py.sha256sum * fix ruff * fix ruff * fix ruff #n * trigger ci * trigger ci * Fix ruff --------- Co-authored-by: biennd4 Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .../parsers/file/checkmarx_cxflow_sast.md | 22 + dojo/settings/settings.dist.py | 2 + dojo/tools/checkmarx_cxflow_sast/__init__.py | 0 dojo/tools/checkmarx_cxflow_sast/parser.py | 149 ++ .../checkmarx_cxflow_sast/1-finding.json | 192 +++ .../checkmarx_cxflow_sast/4-findings.json | 1220 +++++++++++++++++ .../checkmarx_cxflow_sast/no_finding.json | 21 + .../test_checkmarx_cxflow_sast_parser.py | 74 + 8 files changed, 1680 insertions(+) create mode 100644 docs/content/en/connecting_your_tools/parsers/file/checkmarx_cxflow_sast.md create mode 100644 dojo/tools/checkmarx_cxflow_sast/__init__.py create mode 100644 dojo/tools/checkmarx_cxflow_sast/parser.py create mode 100644 unittests/scans/checkmarx_cxflow_sast/1-finding.json create mode 100644 unittests/scans/checkmarx_cxflow_sast/4-findings.json create mode 100644 unittests/scans/checkmarx_cxflow_sast/no_finding.json create mode 100644 unittests/tools/test_checkmarx_cxflow_sast_parser.py diff --git a/docs/content/en/connecting_your_tools/parsers/file/checkmarx_cxflow_sast.md b/docs/content/en/connecting_your_tools/parsers/file/checkmarx_cxflow_sast.md new file mode 100644 index 00000000000..b984b7dd694 --- /dev/null +++ b/docs/content/en/connecting_your_tools/parsers/file/checkmarx_cxflow_sast.md @@ -0,0 +1,22 @@ +--- +title: "Checkmarx CxFlow SAST" +toc_hide: true +--- + +CxFlow is a Spring Boot application written by Checkmarx that enables initiations of scans and result orchestration. +CxFlow support interactive with various Checkmarx product. +This parser support JSON format export by bug tracker. + +``` +#YAML +cx-flow: + bug-tracker:Json + +#CLI +--cx-flow.bug-tracker=json +``` + +- `Checkmarx CxFlow SAST`: JSON report from Checkmarx Cxflow. + +### Sample Scan Data +Sample Checkmarx CxFlow SAST scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/checkmarx_cxflow_sast). 
diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index ccc7cc5752e..ed1ee76c5c9 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1288,6 +1288,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": ["title", "endpoints", "severity"], "ThreatComposer Scan": ["title", "description"], "Invicti Scan": ["title", "description", "severity"], + "Checkmarx CxFlow SAST": ["vuln_id_from_tool", "file_path", "line"], "HackerOne Cases": ["title", "severity"], "KrakenD Audit Scan": ["description", "mitigation", "severity"], "Red Hat Satellite": ["description", "severity"], @@ -1535,6 +1536,7 @@ def saml2_attrib_map_format(dict): "Legitify Scan": DEDUPE_ALGO_HASH_CODE, "ThreatComposer Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, "Invicti Scan": DEDUPE_ALGO_HASH_CODE, + "Checkmarx CxFlow SAST": DEDUPE_ALGO_HASH_CODE, "KrakenD Audit Scan": DEDUPE_ALGO_HASH_CODE, "PTART Report": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "Red Hat Satellite": DEDUPE_ALGO_HASH_CODE, diff --git a/dojo/tools/checkmarx_cxflow_sast/__init__.py b/dojo/tools/checkmarx_cxflow_sast/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dojo/tools/checkmarx_cxflow_sast/parser.py b/dojo/tools/checkmarx_cxflow_sast/parser.py new file mode 100644 index 00000000000..292bbfc7c5c --- /dev/null +++ b/dojo/tools/checkmarx_cxflow_sast/parser.py @@ -0,0 +1,149 @@ +import json +import logging + +import dateutil.parser + +from dojo.models import Finding + +logger = logging.getLogger(__name__) + + +class _PathNode: + def __init__(self, file: str, line: str, column: str, node_object: str, length: str, snippet: str): + self.file = file + self.line = line + self.column = int(column) + self.node_object = node_object + self.length = int(length) + self.snippet = snippet + + @classmethod + def from_json_object(cls, data): + return _PathNode( + data.get("file"), + data.get("line"), + data.get("column"), + data.get("object"), + data.get("length"), + data.get("snippet"), + ) + + +class _Path: + def __init__(self, sink: _PathNode, source: _PathNode, state: str, paths: [_PathNode]): + self.sink = sink + self.source = source + self.state = state + self.paths = paths + + +class CheckmarxCXFlowSastParser: + def __init__(self): + pass + + def get_scan_types(self): + return ["Checkmarx CxFlow SAST"] + + def get_label_for_scan_types(self, scan_type): + return scan_type # no custom label for now + + def get_description_for_scan_types(self, scan_type): + return "Detailed Report. 
Import all vulnerabilities from checkmarx without aggregation"
+
+    def get_findings(self, file, test):
+        if file.name.strip().lower().endswith(".json"):
+            return self._get_findings_json(file, test)
+        # TODO: support CxXML format
+        logger.warning(f"Unsupported file format {file}")
+        return []
+
+    def _get_findings_json(self, file, test):
+        data = json.load(file)
+        findings = []
+        additional_details = data.get("additionalDetails")
+        scan_start_date = additional_details.get("scanStartDate")
+
+        issues = data.get("xissues", [])
+
+        for issue in issues:
+            vulnerability = issue.get("vulnerability")
+            status = issue.get("vulnerabilityStatus")
+            cwe = issue.get("cwe")
+            description = issue.get("description")
+            language = issue.get("language")
+            severity = issue.get("severity")
+            link = issue.get("link")
+            filename = issue.get("filename")
+            similarity_id = issue.get("similarityId")
+
+            issue_additional_details = issue.get("additionalDetails")
+            categories = issue_additional_details.get("categories")
+            results = issue_additional_details.get("results")
+
+            map_paths = {}
+
+            for result in results:
+                # all path node keys, excluding sink, source and state
+                path_keys = sorted(filter(lambda k: isinstance(k, str) and k.isnumeric(), result.keys()))
+
+                path = _Path(
+                    sink=_PathNode.from_json_object(result.get("sink")),
+                    source=_PathNode.from_json_object(result.get("source")),
+                    state=result.get("state"),
+                    paths=[result[k] for k in path_keys],
+                )
+
+                map_paths[str(path.source.line)] = path
+
+            for detail_key in issue.get("details"):
+                if detail_key not in map_paths:
+                    logger.warning(f"{detail_key} not found in path, ignoring")
+                else:
+                    detail = map_paths[detail_key]
+
+                    finding_detail = f"**Category:** {categories}\n"
+                    finding_detail += f"**Language:** {language}\n"
+                    finding_detail += f"**Status:** {status}\n"
+                    finding_detail += f"**Finding link:** [{link}]({link})\n"
+                    finding_detail += f"**Description:** {description}\n"
+                    finding_detail += f"**Source snippet:** `{detail.source.snippet if detail.source is not None else ''}`\n"
+                    finding_detail += f"**Sink snippet:** `{detail.sink.snippet if detail.sink is not None else ''}`\n"
+
+                    finding = Finding(
+                        title=vulnerability.replace("_", " ") + " " + detail.sink.file.split("/")[
+                            -1] if detail.sink is not None else "",
+                        cwe=int(cwe),
+                        date=dateutil.parser.parse(scan_start_date),
+                        static_finding=True,
+                        test=test,
+                        sast_source_object=detail.source.node_object if detail.source is not None else None,
+                        sast_sink_object=detail.sink.node_object if detail.sink is not None else None,
+                        sast_source_file_path=detail.source.file if detail.source is not None else None,
+                        sast_source_line=detail.source.line if detail.source is not None else None,
+                        vuln_id_from_tool=similarity_id,
+                        severity=severity,
+                        file_path=filename,
+                        line=detail.sink.line,
+                        false_p=issue.get("details")[detail_key].get("falsePositive") or self.is_not_exploitable(
+                            detail.state),
+                        description=finding_detail,
+                        verified=self.is_verify(detail.state),
+                        active=self.is_active(detail.state),
+                    )
+
+                    findings.append(finding)
+
+        return findings
+
+    def is_verify(self, state):
+        # Confirmed, urgent
+        verifiedStates = ["2", "3"]
+        return state in verifiedStates
+
+    def is_active(self, state):
+        # To verify, Confirmed, Urgent, Proposed not exploitable
+        activeStates = ["0", "2", "3", "4"]
+        return state in activeStates
+
+    def is_not_exploitable(self, state):
+        return state == "1"
diff --git a/unittests/scans/checkmarx_cxflow_sast/1-finding.json b/unittests/scans/checkmarx_cxflow_sast/1-finding.json
new file mode 100644 index 00000000000..dc872a2a66c --- /dev/null +++ b/unittests/scans/checkmarx_cxflow_sast/1-finding.json @@ -0,0 +1,192 @@ +{ + "projectId": "6", + "team": "CxServer", + "project": "some-example", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6", + "files": "1", + "loc": "268", + "scanType": "Full", + "version":"8.9.0.210", + "additionalDetails": { + "flow-summary": { + "High": 1 + }, + "scanId": "1000026", + "scanStartDate": "Sunday, January 19, 2020 2:40:11 AM" + }, + "xissues": [ + { + "vulnerability": "Reflected_XSS_All_Clients", + "vulnerabilityStatus": "TO VERIFY", + "similarityId": "14660819", + "cwe": "79", + "description": "", + "language": "Java", + "severity": "High", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=2", + "filename": "DOS_Login.java", + "falsePositiveCount": 0, + "details": { + "88": { + "falsePositive": false, + "codeSnippet": "username = s.getParser().getRawParameter(USERNAME);", + "comment": "" + } + }, + "additionalDetails": { + "recommendedFix": "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=591&queryVersionCode=56110529&queryTitle=Reflected_XSS_All_Clients", + "categories": "PCI DSS v3.2;PCI DSS (3.2) - 6.5.7 - Cross-site scripting (XSS),OWASP Top 10 2013;A3-Cross-Site Scripting (XSS),FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-15 Information Output Filtering (P0),OWASP Top 10 2017;A7-Cross-Site Scripting (XSS)", + "results": [ + { + "sink": { + "file": "AnotherFile.java", + "line": "107", + "column": "9", + "object": "username", + "length" : "8", + "snippet" : "+ username + \"' and password = '\" + password + \"'\";" + }, + "state": "0", + "source": { + "file": "DOS_Login.java", + "line": "88", + "column": "46", + "object": "getRawParameter", + "length" : "1", + "snippet" : "username = s.getParser().getRawParameter(USERNAME);" + }, + "1" : { + "snippet" : "username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "+ username + \"' and password = '\" + password + \"'\";", + "file" : "AnotherFile.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + } + } + ], + "CodeBashingLesson" : "https://cxa.codebashing.com/courses/" + }, + "allFalsePositive": false + } + ], + "unFilteredIssues": [ { + "vulnerability" : "Reflected_XSS_All_Clients", + "vulnerabilityStatus" : "TO VERIFY", + "similarityId" : "14660819", + "cwe" : "79", + "description" : "", + "language" : "Java", + "severity" : "High", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=2", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "88" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + 
"recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=591&queryVersionCode=56110529&queryTitle=Reflected_XSS_All_Clients", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.7 - Cross-site scripting (XSS),OWASP Top 10 2013;A3-Cross-Site Scripting (XSS),FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-15 Information Output Filtering (P0),OWASP Top 10 2017;A7-Cross-Site Scripting (XSS)", + "results" : [ { + "1" : { + "snippet" : "username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "+ username + \"' and password = '\" + password + \"'\";", + "file" : "AnotherFile.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "sink" : { + "snippet" : "+ username + \"' and password = '\" + password + \"'\";", + "file" : "AnotherFile.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "state" : "0", + "source" : { + "snippet" : "username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + } ] + }, + "allFalsePositive" : false + } ], + "reportCreationTime":"Sunday, January 19, 2020 2:41:53 AM", + "deepLink":"http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6", + "scanTime":"00h:01m:30s", + "sastResults": false +} \ No newline at end of file diff --git a/unittests/scans/checkmarx_cxflow_sast/4-findings.json b/unittests/scans/checkmarx_cxflow_sast/4-findings.json new file mode 100644 index 00000000000..f8008d29684 --- /dev/null +++ b/unittests/scans/checkmarx_cxflow_sast/4-findings.json @@ -0,0 +1,1220 @@ +{ + "projectId": "6", + "team": "CxServer", + "project": "some-example", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6", + "files": "1", + "loc": "268", + "scanType": "Full", + "version":"8.9.0.210", + "additionalDetails": { + "flow-summary": { + "High": 4 + }, + "scanId": "1000026", + "scanStartDate": "Sunday, January 19, 2020 2:40:11 AM" + }, + "xissues": [ + { + "vulnerability": "Reflected_XSS_All_Clients", + "vulnerabilityStatus": "TO VERIFY", + "similarityId": "14660819", + "cwe": "79", + "description": "", + "language": "Java", + "severity": "High", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=2", + "filename": "DOS_Login.java", + "gitUrl": "", + "falsePositiveCount": 0, + "details": { + "88": { + "falsePositive": false, + "codeSnippet": "\t username = s.getParser().getRawParameter(USERNAME);", + "comment": "" + }, + "89": { + "falsePositive": false, + "codeSnippet": "\t password = s.getParser().getRawParameter(PASSWORD);", + "comment": "" + } + }, + "additionalDetails": { + "recommendedFix": 
"http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=591&queryVersionCode=56110529&queryTitle=Reflected_XSS_All_Clients", + "categories": "PCI DSS v3.2;PCI DSS (3.2) - 6.5.7 - Cross-site scripting (XSS),OWASP Top 10 2013;A3-Cross-Site Scripting (XSS),FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-15 Information Output Filtering (P0),OWASP Top 10 2017;A7-Cross-Site Scripting (XSS)", + "results": [ + { + "sink": { + "file": "DOS_Login.java", + "line": "108", + "column": "20", + "object": "StringElement", + "length" : "3", + "snippet" : "\t ec.addElement(new StringElement(query));" + }, + "state": "0", + "source": { + "file": "DOS_Login.java", + "line": "88", + "column": "46", + "object": "getRawParameter", + "length" : "1", + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);" + }, + "1" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "6" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "7" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "38", + "length" : "5", + "object" : "query" + }, + "8" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + } + }, + { + "sink": { + "file": "DOS_Login.java", + "line": "108", + "column": "20", + "object": "StringElement", + "length" : "3", + "snippet" : "\t ec.addElement(new StringElement(query));" + }, + "state": "0", + "source": { + "file": "DOS_Login.java", + "line": "89", + "column": "46", + "object": "getRawParameter", + "length" : "1", + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);" + }, + "1" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + }, + "3" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "43", + "length" : "8", + "object" : "password" + }, + "4" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + 
"line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "5" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "38", + "length" : "5", + "object" : "query" + }, + "6" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + } + } + ] + }, + "allFalsePositive": false + }, + { + "vulnerability": "SQL_Injection", + "vulnerabilityStatus": "TO VERIFY", + "similarityId": "-1987639889", + "cwe": "89", + "description": "", + "language": "Java", + "severity": "High", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=4", + "filename": "DOS_Login.java", + "falsePositiveCount": 0, + "details": { + "88": { + "falsePositive": false, + "codeSnippet": "\t username = s.getParser().getRawParameter(USERNAME);", + "comment": "" + }, + "89": { + "falsePositive": false, + "codeSnippet": "\t password = s.getParser().getRawParameter(PASSWORD);", + "comment": "" + } + }, + "additionalDetails": { + "recommendedFix": "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=594&queryVersionCode=56142311&queryTitle=SQL_Injection", + "categories": "PCI DSS v3.2;PCI DSS (3.2) - 6.5.1 - Injection flaws - particularly SQL injection,OWASP Top 10 2013;A1-Injection,FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-10 Information Input Validation (P1),OWASP Top 10 2017;A1-Injection,OWASP Mobile Top 10 2016;M7-Client Code Quality", + "results": [ + { + "sink": { + "file": "DOS_Login.java", + "line": "114", + "column": "45", + "object": "executeQuery", + "length" : "1", + "snippet" : "\t\tResultSet results = statement.executeQuery(query);" + }, + "state": "0", + "source": { + "file": "DOS_Login.java", + "line": "88", + "column": "46", + "object": "getRawParameter", + "length" : "1", + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);" + }, + "1" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "6" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "7" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "46", + "length" : "5", + "object" : "query" + }, + "8" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + 
"line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + } + }, + { + "sink": { + "file": "DOS_Login.java", + "line": "114", + "column": "45", + "object": "executeQuery", + "length" : "1", + "snippet" : "\t\tResultSet results = statement.executeQuery(query);" + }, + "state": "0", + "source": { + "file": "DOS_Login.java", + "line": "89", + "column": "46", + "object": "getRawParameter", + "length" : "1", + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);" + }, + "1" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + }, + "3" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "43", + "length" : "8", + "object" : "password" + }, + "4" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "5" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "46", + "length" : "5", + "object" : "query" + }, + "6" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + } + } + ], + "CodeBashingLesson" : "https://cxa.codebashing.com/courses/" + }, + "allFalsePositive": false + } + ], + "unFilteredIssues": [ { + "vulnerability" : "Reflected_XSS_All_Clients", + "vulnerabilityStatus" : "TO VERIFY", + "similarityId" : "14660819", + "cwe" : "79", + "description" : "", + "language" : "Java", + "severity" : "High", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=2", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "88" : { + "falsePositive" : false, + "comment" : "" + }, + "89" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=591&queryVersionCode=56110529&queryTitle=Reflected_XSS_All_Clients", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.7 - Cross-site scripting (XSS),OWASP Top 10 2013;A3-Cross-Site Scripting (XSS),FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-15 Information Output Filtering (P0),OWASP Top 10 2017;A7-Cross-Site Scripting (XSS)", + "results" : [ { + "1" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : 
"DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "sink" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "6" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "7" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "38", + "length" : "5", + "object" : "query" + }, + "8" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "state" : "0", + "source" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + }, { + "1" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + }, + "3" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "43", + "length" : "8", + "object" : "password" + }, + "4" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "5" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "38", + "length" : "5", + "object" : "query" + }, + "sink" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "6" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "state" : "0", + "source" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "SQL_Injection", + "vulnerabilityStatus" : "TO VERIFY", + "similarityId" : "-1987639889", + "cwe" : "89", + "description" : "", + "language" : "Java", + "severity" : "High", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=4", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "88" : { + "falsePositive" : false, + "comment" : "" + }, + "89" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : 
"http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=594&queryVersionCode=56142311&queryTitle=SQL_Injection", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.1 - Injection flaws - particularly SQL injection,OWASP Top 10 2013;A1-Injection,FISMA 2014;System And Information Integrity,NIST SP 800-53;SI-10 Information Input Validation (P1),OWASP Top 10 2017;A1-Injection,OWASP Mobile Top 10 2016;M7-Client Code Quality", + "results" : [ { + "1" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "9", + "length" : "8", + "object" : "username" + }, + "sink" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + }, + "6" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "7" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "46", + "length" : "5", + "object" : "query" + }, + "8" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + }, + "state" : "0", + "source" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + }, { + "1" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + }, + "3" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "43", + "length" : "8", + "object" : "password" + }, + "4" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "5" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "46", + "length" : "5", + "object" : "query" + }, + "sink" : { + "snippet" : "\t\tResultSet results = 
statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + }, + "6" : { + "snippet" : "\t\tResultSet results = statement.executeQuery(query);", + "file" : "DOS_Login.java", + "line" : "114", + "column" : "45", + "length" : "1", + "object" : "executeQuery" + }, + "state" : "0", + "source" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "Heap_Inspection", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "244", + "description" : "", + "language" : "Java", + "severity" : "Medium", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=1", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "87" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=3771&queryVersionCode=94850879&queryTitle=Heap_Inspection", + "categories" : "OWASP Top 10 2013;A6-Sensitive Data Exposure,FISMA 2014;Media Protection,NIST SP 800-53;SC-4 Information in Shared Resources (P1),OWASP Top 10 2017;A3-Sensitive Data Exposure", + "results" : [ { + "1" : { + "snippet" : "\t String password = \"\";", + "file" : "DOS_Login.java", + "line" : "87", + "column" : "13", + "length" : "8", + "object" : "password" + }, + "sink" : { + "snippet" : "\t String password = \"\";", + "file" : "DOS_Login.java", + "line" : "87", + "column" : "13", + "length" : "8", + "object" : "password" + }, + "state" : "0", + "source" : { + "snippet" : "\t String password = \"\";", + "file" : "DOS_Login.java", + "line" : "87", + "column" : "13", + "length" : "8", + "object" : "password" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "Privacy_Violation", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "359", + "description" : "", + "language" : "Java", + "severity" : "Medium", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=10", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "89" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=639&queryVersionCode=56620121&queryTitle=Privacy_Violation", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.1 - Injection flaws - particularly SQL injection,OWASP Top 10 2013;A6-Sensitive Data Exposure,FISMA 2014;Identification And Authentication,NIST SP 800-53;SC-4 Information in Shared Resources (P1),OWASP Top 10 2017;A3-Sensitive Data Exposure", + "results" : [ { + "1" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + }, + "2" : { + "snippet" : "\t\t + username + \"' and password = '\" + password + \"'\";", + "file" : "DOS_Login.java", + "line" : "107", + "column" : "43", + "length" : "8", + "object" : "password" + }, + "3" : { + "snippet" : "\t String query = \"SELECT * FROM user_system_data WHERE user_name = '\"", + "file" : "DOS_Login.java", + "line" : "106", + "column" : "13", + "length" : "5", + "object" : "query" + }, + "4" : { + "snippet" : "\t 
ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "38", + "length" : "5", + "object" : "query" + }, + "5" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "sink" : { + "snippet" : "\t ec.addElement(new StringElement(query));", + "file" : "DOS_Login.java", + "line" : "108", + "column" : "20", + "length" : "3", + "object" : "StringElement" + }, + "state" : "0", + "source" : { + "snippet" : "\t password = s.getParser().getRawParameter(PASSWORD);", + "file" : "DOS_Login.java", + "line" : "89", + "column" : "6", + "length" : "8", + "object" : "password" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "XSRF", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "352", + "description" : "", + "language" : "Java", + "severity" : "Medium", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=11", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "88" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=648&queryVersionCode=56715926&queryTitle=XSRF", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.9 - Cross-site request forgery,OWASP Top 10 2013;A8-Cross-Site Request Forgery (CSRF),NIST SP 800-53;SC-23 Session Authenticity (P1)", + "results" : [ { + "1" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + }, + "2" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "6", + "length" : "8", + "object" : "username" + }, + "3" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "37", + "length" : "8", + "object" : "username" + }, + "4" : { + "snippet" : "\t if (username.equals(\"jeff\") || username.equals(\"dave\"))", + "file" : "DOS_Login.java", + "line" : "92", + "column" : "10", + "length" : "8", + "object" : "username" + }, + "5" : { + "snippet" : "\t\t\t\t + username", + "file" : "DOS_Login.java", + "line" : "130", + "column" : "11", + "length" : "8", + "object" : "username" + }, + "sink" : { + "snippet" : "\t\t\t statement.executeUpdate(insertData1);", + "file" : "DOS_Login.java", + "line" : "134", + "column" : "31", + "length" : "1", + "object" : "executeUpdate" + }, + "6" : { + "snippet" : "\t\t\t String insertData1 = \"INSERT INTO user_login VALUES ( '\"", + "file" : "DOS_Login.java", + "line" : "129", + "column" : "15", + "length" : "11", + "object" : "insertData1" + }, + "7" : { + "snippet" : "\t\t\t statement.executeUpdate(insertData1);", + "file" : "DOS_Login.java", + "line" : "134", + "column" : "32", + "length" : "11", + "object" : "insertData1" + }, + "8" : { + "snippet" : "\t\t\t statement.executeUpdate(insertData1);", + "file" : "DOS_Login.java", + "line" : "134", + "column" : "31", + "length" : "1", + "object" : "executeUpdate" + }, + "state" : "0", + "source" : { + "snippet" : "\t username = s.getParser().getRawParameter(USERNAME);", + "file" : "DOS_Login.java", + "line" : "88", + "column" : "46", + "length" : "1", + "object" : "getRawParameter" + } + } ] + }, + 
"allFalsePositive" : false + }, { + "vulnerability" : "Information_Exposure_Through_an_Error_Message", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "209", + "description" : "", + "language" : "Java", + "severity" : "Low", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=8", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "169" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=622&queryVersionCode=56439377&queryTitle=Information_Exposure_Through_an_Error_Message", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.5 - Improper error handling,OWASP Top 10 2013;A5-Security Misconfiguration,FISMA 2014;Configuration Management,NIST SP 800-53;SI-11 Error Handling (P2),OWASP Top 10 2017;A6-Security Misconfiguration", + "results" : [ { + "1" : { + "snippet" : "\t catch (SQLException sqle)", + "file" : "DOS_Login.java", + "line" : "169", + "column" : "26", + "length" : "4", + "object" : "sqle" + }, + "2" : { + "snippet" : "\t\tec.addElement(new P().addElement(sqle.getMessage()));", + "file" : "DOS_Login.java", + "line" : "171", + "column" : "36", + "length" : "4", + "object" : "sqle" + }, + "3" : { + "snippet" : "\t\tsqle.printStackTrace();", + "file" : "DOS_Login.java", + "line" : "172", + "column" : "3", + "length" : "4", + "object" : "sqle" + }, + "4" : { + "snippet" : "\t\tsqle.printStackTrace();", + "file" : "DOS_Login.java", + "line" : "172", + "column" : "23", + "length" : "1", + "object" : "printStackTrace" + }, + "sink" : { + "snippet" : "\t\tsqle.printStackTrace();", + "file" : "DOS_Login.java", + "line" : "172", + "column" : "23", + "length" : "1", + "object" : "printStackTrace" + }, + "state" : "0", + "source" : { + "snippet" : "\t catch (SQLException sqle)", + "file" : "DOS_Login.java", + "line" : "169", + "column" : "26", + "length" : "4", + "object" : "sqle" + } + }, { + "1" : { + "snippet" : "\t catch (SQLException sqle)", + "file" : "DOS_Login.java", + "line" : "169", + "column" : "26", + "length" : "4", + "object" : "sqle" + }, + "2" : { + "snippet" : "\t\tec.addElement(new P().addElement(sqle.getMessage()));", + "file" : "DOS_Login.java", + "line" : "171", + "column" : "36", + "length" : "4", + "object" : "sqle" + }, + "3" : { + "snippet" : "\t\tec.addElement(new P().addElement(sqle.getMessage()));", + "file" : "DOS_Login.java", + "line" : "171", + "column" : "51", + "length" : "1", + "object" : "getMessage" + }, + "4" : { + "snippet" : "\t\tec.addElement(new P().addElement(sqle.getMessage()));", + "file" : "DOS_Login.java", + "line" : "171", + "column" : "35", + "length" : "1", + "object" : "addElement" + }, + "sink" : { + "snippet" : "\t\tec.addElement(new P().addElement(sqle.getMessage()));", + "file" : "DOS_Login.java", + "line" : "171", + "column" : "35", + "length" : "1", + "object" : "addElement" + }, + "state" : "0", + "source" : { + "snippet" : "\t catch (SQLException sqle)", + "file" : "DOS_Login.java", + "line" : "169", + "column" : "26", + "length" : "4", + "object" : "sqle" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "Improper_Resource_Shutdown_or_Release", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "404", + "description" : "", + "language" : "Java", + "severity" : "Low", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=6", + "filename" : "DOS_Login.java", + "gitUrl" : "", 
+ "falsePositiveCount" : 0, + "details" : { + "103" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=600&queryVersionCode=56205902&queryTitle=Improper_Resource_Shutdown_or_Release", + "categories" : "NIST SP 800-53;SC-5 Denial of Service Protection (P1)", + "results" : [ { + "1" : { + "snippet" : "\t\tconnection = DatabaseUtilities.makeConnection(s);", + "file" : "DOS_Login.java", + "line" : "103", + "column" : "48", + "length" : "1", + "object" : "makeConnection" + }, + "2" : { + "snippet" : "\t\tconnection = DatabaseUtilities.makeConnection(s);", + "file" : "DOS_Login.java", + "line" : "103", + "column" : "3", + "length" : "10", + "object" : "connection" + }, + "3" : { + "snippet" : "\t\tStatement statement = connection.createStatement(", + "file" : "DOS_Login.java", + "line" : "111", + "column" : "25", + "length" : "10", + "object" : "connection" + }, + "4" : { + "snippet" : "\t\tStatement statement = connection.createStatement(", + "file" : "DOS_Login.java", + "line" : "111", + "column" : "51", + "length" : "1", + "object" : "createStatement" + }, + "sink" : { + "snippet" : "\t\tStatement statement = connection.createStatement(", + "file" : "DOS_Login.java", + "line" : "111", + "column" : "51", + "length" : "1", + "object" : "createStatement" + }, + "state" : "0", + "source" : { + "snippet" : "\t\tconnection = DatabaseUtilities.makeConnection(s);", + "file" : "DOS_Login.java", + "line" : "103", + "column" : "48", + "length" : "1", + "object" : "makeConnection" + } + } ] + }, + "allFalsePositive" : false + }, { + "vulnerability" : "Use_Of_Hardcoded_Password", + "vulnerabilityStatus" : "TO VERIFY", + "cwe" : "259", + "description" : "", + "language" : "Java", + "severity" : "Low", + "link" : "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=7", + "filename" : "DOS_Login.java", + "gitUrl" : "", + "falsePositiveCount" : 0, + "details" : { + "64" : { + "falsePositive" : false, + "comment" : "" + } + }, + "additionalDetails" : { + "recommendedFix" : "http://CX-FLOW-CLEAN/CxWebClient/ScanQueryDescription.aspx?queryID=604&queryVersionCode=56248316&queryTitle=Use_Of_Hardcoded_Password", + "categories" : "PCI DSS v3.2;PCI DSS (3.2) - 6.5.10 - Broken authentication and session management,OWASP Top 10 2013;A2-Broken Authentication and Session Management,FISMA 2014;Identification And Authentication,NIST SP 800-53;SC-28 Protection of Information at Rest (P1),OWASP Top 10 2017;A2-Broken Authentication,OWASP Mobile Top 10 2016;M9-Reverse Engineering", + "results" : [ { + "1" : { + "snippet" : " protected final static String PASSWORD = \"Password\";", + "file" : "DOS_Login.java", + "line" : "64", + "column" : "35", + "length" : "8", + "object" : "PASSWORD" + }, + "sink" : { + "snippet" : " protected final static String PASSWORD = \"Password\";", + "file" : "DOS_Login.java", + "line" : "64", + "column" : "35", + "length" : "8", + "object" : "PASSWORD" + }, + "state" : "0", + "source" : { + "snippet" : " protected final static String PASSWORD = \"Password\";", + "file" : "DOS_Login.java", + "line" : "64", + "column" : "35", + "length" : "8", + "object" : "PASSWORD" + } + } ] + }, + "allFalsePositive" : false + } ], + "reportCreationTime":"Sunday, January 19, 2020 2:41:53 AM", + "deepLink":"http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6", + "scanTime":"00h:01m:30s", + "sastResults": false +} diff --git 
a/unittests/scans/checkmarx_cxflow_sast/no_finding.json b/unittests/scans/checkmarx_cxflow_sast/no_finding.json new file mode 100644 index 00000000000..ba73c156ab1 --- /dev/null +++ b/unittests/scans/checkmarx_cxflow_sast/no_finding.json @@ -0,0 +1,21 @@ +{ + "projectId": "5", + "team": "CxServer", + "project": "EmptyClass", + "link": "http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000024&projectid=5", + "files": "1", + "loc": "6", + "scanType": "Full", + "version":"8.9.0.210", + "additionalDetails": { + "flow-summary": {}, + "scanId": "1000024", + "scanStartDate": "Wednesday, January 15, 2020 1:31:13 PM" + }, + "xissues": [], + "unFilteredIssues": [], + "reportCreationTime":"Wednesday, January 15, 2020 1:32:47 PM", + "deepLink":"http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000024&projectid=5", + "scanTime":"00h:01m:24s", + "sastResults": false +} \ No newline at end of file diff --git a/unittests/tools/test_checkmarx_cxflow_sast_parser.py b/unittests/tools/test_checkmarx_cxflow_sast_parser.py new file mode 100644 index 00000000000..7481002e3d2 --- /dev/null +++ b/unittests/tools/test_checkmarx_cxflow_sast_parser.py @@ -0,0 +1,74 @@ +import dateutil.parser + +from dojo.models import Engagement, Product, Test +from dojo.tools.checkmarx_cxflow_sast.parser import CheckmarxCXFlowSastParser +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path + + +class TestCheckmarxCxflowSast(DojoTestCase): + + def init(self, reportFilename): + my_file_handle = open(reportFilename, encoding="utf-8") + product = Product() + engagement = Engagement() + test = Test() + engagement.product = product + test.engagement = engagement + return my_file_handle, product, engagement, test + + def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self): + my_file_handle, _, _, test = self.init( + get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/no_finding.json", + ) + parser = CheckmarxCXFlowSastParser() + findings = parser.get_findings(my_file_handle, test) + self.assertEqual(0, len(findings)) + + def test_file_name_aggregated_parse_file_with_one_vulnerability_has_1_finding(self): + my_file_handle, _, _, test = self.init( + get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/1-finding.json", + ) + parser = CheckmarxCXFlowSastParser() + findings = parser.get_findings(my_file_handle, test) + self.assertEqual(1, len(findings)) + finding = findings[0] + self.assertIn("Reflected XSS All Clients", finding.title) + self.assertEqual(79, finding.cwe) + self.assertEqual(dateutil.parser.parse("Sunday, January 19, 2020 2:40:11 AM"), finding.date) + self.assertEqual("getRawParameter", finding.sast_source_object) + self.assertEqual("username", finding.sast_sink_object) + self.assertEqual("DOS_Login.java", finding.sast_source_file_path) + self.assertEqual("88", finding.sast_source_line) + self.assertEqual("14660819", finding.vuln_id_from_tool) + self.assertEqual("High", finding.severity) + self.assertEqual("107", finding.line) + self.assertEqual(False, finding.false_p) + self.assertIn("Java", finding.description) + self.assertIn("http://CX-FLOW-CLEAN/CxWebClient/ViewerMain.aspx?scanid=1000026&projectid=6&pathid=2", + finding.description) + self.assertIn("PCI DSS v3.2;PCI DSS (3.2) - 6.5.7 - Cross-site scripting (XSS),OWASP Top 10 " + "2013;A3-Cross-Site Scripting (XSS),FISMA 2014;System And Information Integrity," + "NIST SP 800-53;SI-15 Information Output Filtering (P0),OWASP Top 10 2017;A7-Cross-Site " + "Scripting (XSS)", finding.description) + self.assertEqual(True, finding.active) + self.assertEqual(False, finding.verified) + + def test_file_name_aggregated_parse_file_with_four_vulnerabilities_has_4_findings(self): + my_file_handle, _, _, test = self.init( + get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/4-findings.json", + ) + parser = CheckmarxCXFlowSastParser() + findings = parser.get_findings(my_file_handle, test) + self.assertEqual(4, len(findings)) + for finding in findings: + self.assertIsNotNone(finding.title) + self.assertIsNotNone(finding.date) + self.assertIsNotNone(finding.sast_source_object) + self.assertIsNotNone(finding.sast_sink_object) + self.assertIsNotNone(finding.sast_source_file_path) + self.assertIsNotNone(finding.sast_source_line) + self.assertIsNotNone(finding.vuln_id_from_tool) + self.assertIsNotNone(finding.severity) + self.assertIsNotNone(finding.line) + self.assertIsNotNone(finding.false_p) + self.assertIsNotNone(finding.description) From 018d1895819dc44e8f8a301572f3bb18179c81ec Mon Sep 17 00:00:00 2001 From: Harold Blankenship <36673698+hblankenship@users.noreply.github.com> Date: Wed, 15 Jan 2025 10:58:02 -0600 Subject: [PATCH 28/99] extract first boot portion of script (#11468) * extract first boot portion of script * added first boot script * needs space --- .dryrunsecurity.yaml | 1 + Dockerfile.django-alpine | 1 + Dockerfile.django-debian | 1 + docker/entrypoint-first-boot.sh | 35 +++++++++++++++++++++++++++++++ docker/entrypoint-initializer.sh | 36 ++------------------------------ 5 files changed, 40 insertions(+), 34 deletions(-) create mode 100644 docker/entrypoint-first-boot.sh diff --git a/.dryrunsecurity.yaml b/.dryrunsecurity.yaml index da92963ddac..cca95d16109 100644 --- a/.dryrunsecurity.yaml +++ b/.dryrunsecurity.yaml @@ -52,6 +52,7 @@ sensitiveCodepaths: - 'docker/entrypoint-celery-beat.sh' - 'docker/entrypoint-celery-worker.sh' - 'docker/entrypoint-initializer.sh' + - 'docker/entrypoint-first-boot.sh' - 'docker/entrypoint-nginx.sh' - 'docker/entrypoint-uwsgi.sh' - 'docker/wait-for-it.sh' diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine index 97bb4411796..cfef58fa32c 100644 --- a/Dockerfile.django-alpine +++ b/Dockerfile.django-alpine @@ -68,6 +68,7 @@ COPY \ docker/entrypoint-celery-beat.sh \ docker/entrypoint-celery-worker.sh \ docker/entrypoint-initializer.sh \ + docker/entrypoint-first-boot.sh \ docker/entrypoint-uwsgi.sh \ docker/entrypoint-uwsgi-dev.sh \ docker/entrypoint-unit-tests.sh \ diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index 26c27cad391..663a75e884d 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -71,6 +71,7 @@ COPY \ docker/entrypoint-celery-beat.sh \ docker/entrypoint-celery-worker.sh \ docker/entrypoint-initializer.sh \ + docker/entrypoint-first-boot.sh \ docker/entrypoint-uwsgi.sh \ docker/entrypoint-uwsgi-dev.sh \ docker/entrypoint-unit-tests.sh \ diff --git a/docker/entrypoint-first-boot.sh b/docker/entrypoint-first-boot.sh new file mode 100644 index 00000000000..ffc782b4ccc --- /dev/null +++ b/docker/entrypoint-first-boot.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# called from entrypoint-initializer.sh when no admin user exists (first boot) +cat </dev/null) ; do + echo "Loading $i" + python3 manage.py loaddata "${i%.*}" + done + + echo "Installing watson search index" + python3 manage.py installwatson + + # surveys fixture needs to be modified as it contains an instance dependant polymorphic content id + echo "Migration of textquestions for surveys" + python3 manage.py migrate_textquestions \ No newline
at end of file diff --git a/docker/entrypoint-initializer.sh b/docker/entrypoint-initializer.sh index 08e77dc46ca..45a67105109 100755 --- a/docker/entrypoint-initializer.sh +++ b/docker/entrypoint-initializer.sh @@ -138,40 +138,8 @@ fi if [ -z "${ADMIN_EXISTS}" ] then -cat </dev/null) ; do - echo "Loading $i" - python3 manage.py loaddata "${i%.*}" - done - - echo "Installing watson search index" - python3 manage.py installwatson - - # surveys fixture needs to be modified as it contains an instance dependant polymorphic content id - echo "Migration of textquestions for surveys" - python3 manage.py migrate_textquestions - + . /entrypoint-first-boot.sh + create_announcement_banner initialize_data fi From dabc8f1b261b337471db1d26bd120bb3d23e2c60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:11:57 -0600 Subject: [PATCH 29/99] Bump boto3 from 1.35.96 to 1.35.99 (#11571) Bumps [boto3](https://github.com/boto/boto3) from 1.35.96 to 1.35.99. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.96...1.35.99) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cc5d9264e73..1dae1c01a5e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.35.96 # Required for Celery Broker AWS (SQS) support +boto3==1.35.99 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From fd84a65652ae7552984d2e518654f052d2516a9d Mon Sep 17 00:00:00 2001 From: Harold Blankenship <36673698+hblankenship@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:14:10 -0600 Subject: [PATCH 30/99] call simple risk acceptance (#11482) --- dojo/api_v2/serializers.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index df700e6bf82..5b0913492f2 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -22,6 +22,7 @@ from rest_framework.fields import DictField, MultipleChoiceField import dojo.jira_link.helper as jira_helper +import dojo.risk_acceptance.helper as ra_helper from dojo.authorization.authorization import user_has_permission from dojo.authorization.roles_permissions import Permissions from dojo.endpoint.utils import endpoint_filter, endpoint_meta_import @@ -111,7 +112,6 @@ Vulnerability_Id_Template, get_current_date, ) -from dojo.risk_acceptance.helper import add_findings_to_risk_acceptance, remove_finding_from_risk_acceptance from dojo.tools.factory import ( get_choices_sorted, requires_file, @@ -1532,7 +1532,7 @@ class RiskAcceptanceSerializer(serializers.ModelSerializer): def create(self, validated_data): instance = super().create(validated_data) user = getattr(self.context.get("request", None), "user", None) - add_findings_to_risk_acceptance(user, instance, instance.accepted_findings.all()) + ra_helper.add_findings_to_risk_acceptance(user, instance, instance.accepted_findings.all()) return instance def update(self, instance, validated_data): @@ -1548,10 +1548,10 @@ def 
update(self, instance, validated_data): instance = super().update(instance, validated_data) user = getattr(self.context.get("request", None), "user", None) # Add the new findings - add_findings_to_risk_acceptance(user, instance, findings_to_add) + ra_helper.add_findings_to_risk_acceptance(user, instance, findings_to_add) # Remove the ones that were not present in the payload for finding in findings_to_remove: - remove_finding_from_risk_acceptance(user, instance, finding) + ra_helper.remove_finding_from_risk_acceptance(user, instance, finding) return instance @extend_schema_field(serializers.CharField()) @@ -1767,6 +1767,13 @@ def get_related_fields(self, obj): def get_display_status(self, obj) -> str: return obj.status() + def process_risk_acceptance(self, data): + is_risk_accepted = data.get("risk_accepted", False) + if is_risk_accepted and not self.instance.risk_accepted and self.instance.test.engagement.product.enable_simple_risk_acceptance and not data.get("active", False): + ra_helper.simple_risk_accept(self.context["request"].user, self.instance) + elif not is_risk_accepted and self.instance.risk_accepted: # turning off risk_accepted + ra_helper.risk_unaccept(self.context["request"].user, self.instance) + # Overriding this to add Push to JIRA functionality def update(self, instance, validated_data): # remove tags from validated data and store them separately @@ -1842,6 +1849,10 @@ def validate(self, data): msg = "Active findings cannot be risk accepted." raise serializers.ValidationError(msg) + # assuming we made it past the validations, call risk acceptance properly to make sure notes, etc. get created + # doing it here instead of in update because update doesn't know if the value changed + self.process_risk_acceptance(data) + return data def validate_severity(self, value: str) -> str: From d1e224e0f635114e4e6069eb85784647af8574e1 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 15 Jan 2025 18:15:03 +0100 Subject: [PATCH 31/99] Ruff: Fix RUF052 (#11499) * Ruff: Fix RUF052 * Apply suggestions from code review Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- dojo/decorators.py | 12 +- dojo/tools/qualys/parser.py | 140 +++++++++---------- dojo/tools/qualys_infrascan_webgui/parser.py | 58 ++++---- dojo/tools/veracode/xml_parser.py | 106 +++++++------- 4 files changed, 158 insertions(+), 158 deletions(-) diff --git a/dojo/decorators.py b/dojo/decorators.py index 8f356b0f623..2bdffc37d1a 100644 --- a/dojo/decorators.py +++ b/dojo/decorators.py @@ -148,16 +148,16 @@ def dojo_ratelimit(key="ip", rate=None, method=UNSAFE, block=False): def decorator(fn): @wraps(fn) def _wrapped(request, *args, **kw): - _block = getattr(settings, "RATE_LIMITER_BLOCK", block) - _rate = getattr(settings, "RATE_LIMITER_RATE", rate) - _lockout = getattr(settings, "RATE_LIMITER_ACCOUNT_LOCKOUT", False) + limiter_block = getattr(settings, "RATE_LIMITER_BLOCK", block) + limiter_rate = getattr(settings, "RATE_LIMITER_RATE", rate) + limiter_lockout = getattr(settings, "RATE_LIMITER_ACCOUNT_LOCKOUT", False) old_limited = getattr(request, "limited", False) ratelimited = is_ratelimited(request=request, fn=fn, - key=key, rate=_rate, method=method, + key=key, rate=limiter_rate, method=method, increment=True) request.limited = ratelimited or old_limited - if ratelimited and _block: - if _lockout: + if ratelimited and limiter_block: + if limiter_lockout: username =
request.POST.get("username", None) if username: dojo_user = Dojo_User.objects.filter(username=username).first() diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index 59e2c64afb2..863fe89af9a 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -136,146 +136,146 @@ def parse_finding(host, tree): # Scan details for vuln_details in host.iterfind("VULN_INFO_LIST/VULN_INFO"): - _temp = issue_row.copy() + temp = issue_row.copy() # Port - _gid = vuln_details.find("QID").attrib["id"] - _port = vuln_details.findtext("PORT") - _temp["port_status"] = _port + gid = vuln_details.find("QID").attrib["id"] + port = vuln_details.findtext("PORT") + temp["port_status"] = port - _category = str(vuln_details.findtext("CATEGORY")) - _result = str(vuln_details.findtext("RESULT")) - _first_found = str(vuln_details.findtext("FIRST_FOUND")) - _last_found = str(vuln_details.findtext("LAST_FOUND")) - _times_found = str(vuln_details.findtext("TIMES_FOUND")) + category = str(vuln_details.findtext("CATEGORY")) + result = str(vuln_details.findtext("RESULT")) + first_found = str(vuln_details.findtext("FIRST_FOUND")) + last_found = str(vuln_details.findtext("LAST_FOUND")) + times_found = str(vuln_details.findtext("TIMES_FOUND")) # Get the date based on the first_seen setting try: if settings.USE_FIRST_SEEN: if date := vuln_details.findtext("FIRST_FOUND"): - _temp["date"] = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").date() + temp["date"] = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").date() else: if date := vuln_details.findtext("LAST_FOUND"): - _temp["date"] = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").date() + temp["date"] = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ").date() except Exception: - _temp["date"] = None + temp["date"] = None # Vuln_status status = vuln_details.findtext("VULN_STATUS") if status == "Active" or status == "Re-Opened" or status == "New": - _temp["active"] = True - _temp["mitigated"] = False - _temp["mitigation_date"] = None + temp["active"] = True + temp["mitigated"] = False + temp["mitigation_date"] = None else: - _temp["active"] = False - _temp["mitigated"] = True + temp["active"] = False + temp["mitigated"] = True last_fixed = vuln_details.findtext("LAST_FIXED") if last_fixed is not None: - _temp["mitigation_date"] = datetime.datetime.strptime( + temp["mitigation_date"] = datetime.datetime.strptime( last_fixed, "%Y-%m-%dT%H:%M:%SZ", ) else: - _temp["mitigation_date"] = None + temp["mitigation_date"] = None # read cvss value if present cvss3 = vuln_details.findtext("CVSS3_FINAL") if cvss3 is not None and cvss3 != "-": - split_cvss(cvss3, _temp) + split_cvss(cvss3, temp) else: cvss2 = vuln_details.findtext("CVSS_FINAL") if cvss2 is not None and cvss2 != "-": - split_cvss(cvss2, _temp) + split_cvss(cvss2, temp) # DefectDojo does not support cvssv2 - _temp["CVSS_vector"] = None + temp["CVSS_vector"] = None - search = f".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{_gid}']" + search = f".//GLOSSARY/VULN_DETAILS_LIST/VULN_DETAILS[@id='{gid}']" vuln_item = tree.find(search) if vuln_item is not None: finding = Finding() # Vuln name - _temp["vuln_name"] = vuln_item.findtext("TITLE") + temp["vuln_name"] = vuln_item.findtext("TITLE") # Vuln Description - _description = str(vuln_item.findtext("THREAT")) + description = str(vuln_item.findtext("THREAT")) # Solution Strips Heading Workaround(s) # _temp['solution'] = re.sub('Workaround(s)?:.+\n', '', htmltext(vuln_item.findtext('SOLUTION'))) - _temp["solution"] = 
htmltext(vuln_item.findtext("SOLUTION")) + temp["solution"] = htmltext(vuln_item.findtext("SOLUTION")) # type - _type = TYPE_MAP.get(vuln_details.findtext("TYPE"), "Unknown") + vul_type = TYPE_MAP.get(vuln_details.findtext("TYPE"), "Unknown") # Vuln_description - _temp["vuln_description"] = "\n".join( + temp["vuln_description"] = "\n".join( [ - htmltext(_description), - htmltext("Type: " + _type), - htmltext("Category: " + _category), - htmltext("QID: " + str(_gid)), - htmltext("Port: " + str(_port)), - htmltext("Result Evidence: " + _result), - htmltext("First Found: " + _first_found), - htmltext("Last Found: " + _last_found), - htmltext("Times Found: " + _times_found), + htmltext(description), + htmltext("Type: " + vul_type), + htmltext("Category: " + category), + htmltext("QID: " + str(gid)), + htmltext("Port: " + str(port)), + htmltext("Result Evidence: " + result), + htmltext("First Found: " + first_found), + htmltext("Last Found: " + last_found), + htmltext("Times Found: " + times_found), ], ) # Impact description - _temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT")) + temp["IMPACT"] = htmltext(vuln_item.findtext("IMPACT")) # read cvss value if present and not already read from vuln - if _temp.get("CVSS_value") is None: + if temp.get("CVSS_value") is None: cvss3 = vuln_item.findtext("CVSS3_SCORE/CVSS3_BASE") cvss2 = vuln_item.findtext("CVSS_SCORE/CVSS_BASE") if cvss3 is not None and cvss3 != "-": - split_cvss(cvss3, _temp) + split_cvss(cvss3, temp) else: cvss2 = vuln_item.findtext("CVSS_FINAL") if cvss2 is not None and cvss2 != "-": - split_cvss(cvss2, _temp) + split_cvss(cvss2, temp) # DefectDojo does not support cvssv2 - _temp["CVSS_vector"] = None + temp["CVSS_vector"] = None # CVE and LINKS - _temp_cve_details = vuln_item.iterfind("CVE_ID_LIST/CVE_ID") - if _temp_cve_details: - _cl = { + temp_cve_details = vuln_item.iterfind("CVE_ID_LIST/CVE_ID") + if temp_cve_details: + cl = { cve_detail.findtext("ID"): cve_detail.findtext("URL") - for cve_detail in _temp_cve_details + for cve_detail in temp_cve_details } - _temp["cve"] = "\n".join(list(_cl.keys())) - _temp["links"] = "\n".join(list(_cl.values())) + temp["cve"] = "\n".join(list(cl.keys())) + temp["links"] = "\n".join(list(cl.values())) # Generate severity from number in XML's 'SEVERITY' field, if not present default to 'Informational' sev = get_severity(vuln_item.findtext("SEVERITY")) finding = None - if _temp_cve_details: - refs = "\n".join(list(_cl.values())) + if temp_cve_details: + refs = "\n".join(list(cl.values())) finding = Finding( - title="QID-" + _gid[4:] + " | " + _temp["vuln_name"], - mitigation=_temp["solution"], - description=_temp["vuln_description"], + title="QID-" + gid[4:] + " | " + temp["vuln_name"], + mitigation=temp["solution"], + description=temp["vuln_description"], severity=sev, references=refs, - impact=_temp["IMPACT"], - date=_temp["date"], - vuln_id_from_tool=_gid, + impact=temp["IMPACT"], + date=temp["date"], + vuln_id_from_tool=gid, ) else: finding = Finding( - title="QID-" + _gid[4:] + " | " + _temp["vuln_name"], - mitigation=_temp["solution"], - description=_temp["vuln_description"], + title="QID-" + gid[4:] + " | " + temp["vuln_name"], + mitigation=temp["solution"], + description=temp["vuln_description"], severity=sev, - references=_gid, - impact=_temp["IMPACT"], - date=_temp["date"], - vuln_id_from_tool=_gid, + references=gid, + impact=temp["IMPACT"], + date=temp["date"], + vuln_id_from_tool=gid, ) - finding.mitigated = _temp["mitigation_date"] - finding.is_mitigated = _temp["mitigated"] - 
finding.active = _temp["active"] - if _temp.get("CVSS_vector") is not None: - finding.cvssv3 = _temp.get("CVSS_vector") - if _temp.get("CVSS_value") is not None: - finding.cvssv3_score = _temp.get("CVSS_value") + finding.mitigated = temp["mitigation_date"] + finding.is_mitigated = temp["mitigated"] + finding.active = temp["active"] + if temp.get("CVSS_vector") is not None: + finding.cvssv3 = temp.get("CVSS_vector") + if temp.get("CVSS_value") is not None: + finding.cvssv3_score = temp.get("CVSS_value") finding.verified = True finding.unsaved_endpoints = [] finding.unsaved_endpoints.append(ep) diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py index 047d48ff2bf..6a139e7bc47 100644 --- a/dojo/tools/qualys_infrascan_webgui/parser.py +++ b/dojo/tools/qualys_infrascan_webgui/parser.py @@ -28,7 +28,7 @@ def issue_r(raw_row, vuln, scan_date): if issue_row["fqdn"] == "No registered hostname": issue_row["fqdn"] = None # port - _port = raw_row.get("port") + port = raw_row.get("port") # Create Endpoint ep = Endpoint(host=issue_row["fqdn"]) if issue_row["fqdn"] else Endpoint(host=issue_row["ip_address"]) @@ -39,66 +39,66 @@ def issue_r(raw_row, vuln, scan_date): # Scan details - VULNS//VULN indicates we only care about confirmed # vulnerabilities for vuln_cat in raw_row.findall("VULNS/CAT"): - _category = str(vuln_cat.get("value")) + category = str(vuln_cat.get("value")) for vuln_details in vuln_cat.findall("VULN"): - _temp = issue_row + temp = issue_row - _gid = vuln_details.get("number") + gid = vuln_details.get("number") - _temp["port_status"] = _port + temp["port_status"] = port - _result = str(vuln_details.findtext("RESULT")) + result = str(vuln_details.findtext("RESULT")) # Vuln name - _temp["vuln_name"] = vuln_details.findtext("TITLE") + temp["vuln_name"] = vuln_details.findtext("TITLE") # Vuln Description - _description = str(vuln_details.findtext("DIAGNOSIS")) + description = str(vuln_details.findtext("DIAGNOSIS")) # Solution Strips Heading Workaround(s) - _temp["solution"] = htmltext( + temp["solution"] = htmltext( str(vuln_details.findtext("SOLUTION")), ) # Vuln_description - _temp["vuln_description"] = "\n".join( + temp["vuln_description"] = "\n".join( [ - htmltext(_description), - htmltext("**Category:** " + _category), - htmltext("**QID:** " + str(_gid)), - htmltext("**Port:** " + str(_port)), - htmltext("**Result Evidence:** " + _result), + htmltext(description), + htmltext("**Category:** " + category), + htmltext("**QID:** " + str(gid)), + htmltext("**Port:** " + str(port)), + htmltext("**Result Evidence:** " + result), ], ) # Impact description - _temp["IMPACT"] = htmltext( + temp["IMPACT"] = htmltext( str(vuln_details.findtext("CONSEQUENCE")), ) # CVE and LINKS - _cl = [] - _temp_cve_details = vuln_details.iterfind("CVE_ID_LIST/CVE_ID") - if _temp_cve_details: - _cl = { + cl = [] + temp_cve_details = vuln_details.iterfind("CVE_ID_LIST/CVE_ID") + if temp_cve_details: + cl = { cve_detail.findtext("ID"): cve_detail.findtext("URL") - for cve_detail in _temp_cve_details + for cve_detail in temp_cve_details } - _temp["cve"] = "\n".join(list(_cl.keys())) - _temp["links"] = "\n".join(list(_cl.values())) + temp["cve"] = "\n".join(list(cl.keys())) + temp["links"] = "\n".join(list(cl.values())) # The CVE in Qualys report might not have a CVSS score, so findings are informational by default # unless we can find map to a Severity OR a CVSS score from the # findings detail. 
sev = qualys_convert_severity(vuln_details.get("severity")) - refs = "\n".join(list(_cl.values())) + refs = "\n".join(list(cl.values())) finding = Finding( - title=_temp["vuln_name"], - mitigation=_temp["solution"], - description=_temp["vuln_description"], + title=temp["vuln_name"], + mitigation=temp["solution"], + description=temp["vuln_description"], severity=sev, references=refs, - impact=_temp["IMPACT"], - vuln_id_from_tool=_gid, + impact=temp["IMPACT"], + vuln_id_from_tool=gid, date=scan_date, ) finding.unsaved_endpoints = [] diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py index 1e53b5545c4..0a9d3f21561 100644 --- a/dojo/tools/veracode/xml_parser.py +++ b/dojo/tools/veracode/xml_parser.py @@ -87,7 +87,7 @@ def get_findings(self, filename, test): "/x:component", namespaces=XML_NAMESPACE, ): - _library = component.attrib["library"] + library = component.attrib["library"] if "library_id" in component.attrib and component.attrib[ "library_id" ].startswith("maven:"): @@ -95,9 +95,9 @@ def get_findings(self, filename, test): # available to align with CycloneDX + Veracode SCA split_library_id = component.attrib["library_id"].split(":") if len(split_library_id) > 2: - _library = split_library_id[2] - _vendor = component.attrib["vendor"] - _version = component.attrib["version"] + library = split_library_id[2] + vendor = component.attrib["vendor"] + version = component.attrib["version"] for vulnerability in component.findall( "x:vulnerabilities/x:vulnerability", namespaces=XML_NAMESPACE, @@ -107,9 +107,9 @@ def get_findings(self, filename, test): dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding( test, report_date, - _vendor, - _library, - _version, + vendor, + library, + version, vulnerability, ) @@ -147,16 +147,16 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # Note that DD's legacy dedupe hashing uses the description field, # so for compatibility, description field should contain very static # info. - _description = xml_node.attrib["description"].replace(". ", ".\n") - finding.description = _description + description = xml_node.attrib["description"].replace(". ", ".\n") + finding.description = description - _references = "None" - if "References:" in _description: - _references = _description[ - _description.index("References:") + 13: + references = "None" + if "References:" in description: + references = description[ + description.index("References:") + 13: ].replace(") ", ")\n") finding.references = ( - _references + references + "\n\nVulnerable Module: " + xml_node.attrib["module"] + "\nType: " @@ -176,8 +176,8 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): except Exception: finding.date = test.target_start - _is_mitigated = False - _mitigated_date = None + is_mitigated = False + mitigated_date = None if ( "mitigation_status" in xml_node.attrib and xml_node.attrib["mitigation_status"].lower() == "accepted" @@ -186,34 +186,34 @@ def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): "remediation_status" in xml_node.attrib and xml_node.attrib["remediation_status"].lower() == "fixed" ): - _is_mitigated = True + is_mitigated = True else: # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. 
for mitigation in xml_node.findall( "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE, ): - _is_mitigated = True - _mitigated_date = datetime.strptime( + is_mitigated = True + mitigated_date = datetime.strptime( mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z", ) - finding.is_mitigated = _is_mitigated - finding.mitigated = _mitigated_date - finding.active = not _is_mitigated + finding.is_mitigated = is_mitigated + finding.mitigated = mitigated_date + finding.active = not is_mitigated # Check if it's a FP in veracode. # Only check in case finding was mitigated, since DD doesn't allow # both `verified` and `false_p` to be true, while `verified` is implied on the import # level, not on the finding-level. - _false_positive = False - if _is_mitigated: - _remediation_status = xml_node.attrib["remediation_status"].lower() + false_positive = False + if is_mitigated: + remediation_status = xml_node.attrib["remediation_status"].lower() if ( - "false positive" in _remediation_status - or "falsepositive" in _remediation_status + "false positive" in remediation_status + or "falsepositive" in remediation_status ): - _false_positive = True - finding.false_p = _false_positive + false_positive = True + finding.false_p = false_positive return finding @@ -227,25 +227,25 @@ def __xml_static_flaw_to_finding( finding.static_finding = True finding.dynamic_finding = False - _line_number = xml_node.attrib["line"] - _functionrelativelocation = xml_node.attrib["functionrelativelocation"] + line_number = xml_node.attrib["line"] + functionrelativelocation = xml_node.attrib["functionrelativelocation"] if ( - _line_number is not None - and _line_number.isdigit() - and _functionrelativelocation is not None - and _functionrelativelocation.isdigit() + line_number is not None + and line_number.isdigit() + and functionrelativelocation is not None + and functionrelativelocation.isdigit() ): - finding.line = int(_line_number) + int(_functionrelativelocation) + finding.line = int(line_number) + int(functionrelativelocation) finding.sast_source_line = finding.line - _source_file = xml_node.attrib.get("sourcefile") - _sourcefilepath = xml_node.attrib.get("sourcefilepath") - finding.file_path = _sourcefilepath + _source_file - finding.sast_source_file_path = _sourcefilepath + _source_file + source_file = xml_node.attrib.get("sourcefile") + sourcefilepath = xml_node.attrib.get("sourcefilepath") + finding.file_path = sourcefilepath + source_file + finding.sast_source_file_path = sourcefilepath + source_file - _sast_source_obj = xml_node.attrib.get("functionprototype") - if isinstance(_sast_source_obj, str): - finding.sast_source_object = _sast_source_obj or None + sast_source_obj = xml_node.attrib.get("functionprototype") + if isinstance(sast_source_obj, str): + finding.sast_source_object = sast_source_obj or None finding.unsaved_tags = ["sast"] @@ -300,8 +300,8 @@ def __xml_sca_flaw_to_finding( # overwrite old matching SCA findings. 
finding.date = report_date - _description = "This library has known vulnerabilities.\n" - _description += ( + description = "This library has known vulnerabilities.\n" + description += ( "**CVE:** {} ({})\n" "CVS Score: {} ({})\n" "Summary: \n>{}" @@ -315,12 +315,12 @@ def __xml_sca_flaw_to_finding( xml_node.attrib["cve_summary"], ) ) - finding.description = _description + finding.description = description finding.unsaved_tags = ["sca"] - _is_mitigated = False - _mitigated_date = None + is_mitigated = False + mitigated_date = None if ( "mitigation" in xml_node.attrib and xml_node.attrib["mitigation"].lower() == "true" @@ -330,12 +330,12 @@ def __xml_sca_flaw_to_finding( for mitigation in xml_node.findall( "x:mitigations/x:mitigation", namespaces=XML_NAMESPACE, ): - _is_mitigated = True - _mitigated_date = datetime.strptime( + is_mitigated = True + mitigated_date = datetime.strptime( mitigation.attrib["date"], "%Y-%m-%d %H:%M:%S %Z", ) - finding.is_mitigated = _is_mitigated - finding.mitigated = _mitigated_date - finding.active = not _is_mitigated + finding.is_mitigated = is_mitigated + finding.mitigated = mitigated_date + finding.active = not is_mitigated return finding From 64956da5bc48de1a754cc4715b65b805e8bf9aa1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:24:19 -0600 Subject: [PATCH 32/99] Bump django-environ from 0.11.2 to 0.12.0 (#11564) Bumps [django-environ](https://github.com/joke2k/django-environ) from 0.11.2 to 0.12.0. - [Release notes](https://github.com/joke2k/django-environ/releases) - [Changelog](https://github.com/joke2k/django-environ/blob/main/CHANGELOG.rst) - [Commits](https://github.com/joke2k/django-environ/commits) --- updated-dependencies: - dependency-name: django-environ dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1dae1c01a5e..43823ee023f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ defusedxml==0.7.1 django_celery_results==2.5.1 django-auditlog==2.3.0 django-dbbackup==4.2.1 -django-environ==0.11.2 +django-environ==0.12.0 django-filter==24.3 django-imagekit==5.0.0 django-multiselectfield==0.1.13 From 19377a0fc712fa319f32de11e7e63d588a23cafe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:26:06 -0600 Subject: [PATCH 33/99] Bump django-debug-toolbar from 4.4.6 to 5.0.1 (#11563) Bumps [django-debug-toolbar](https://github.com/django-commons/django-debug-toolbar) from 4.4.6 to 5.0.1. - [Release notes](https://github.com/django-commons/django-debug-toolbar/releases) - [Changelog](https://github.com/django-commons/django-debug-toolbar/blob/main/docs/changes.rst) - [Commits](https://github.com/django-commons/django-debug-toolbar/compare/4.4.6...5.0.1) --- updated-dependencies: - dependency-name: django-debug-toolbar dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 43823ee023f..0b0bea69666 100644 --- a/requirements.txt +++ b/requirements.txt @@ -52,7 +52,7 @@ packageurl-python==0.16.0 django-crum==0.7.9 JSON-log-formatter==1.1 django-split-settings==1.3.2 -django-debug-toolbar==4.4.6 +django-debug-toolbar==5.0.1 django-debug-toolbar-request-history==0.1.4 vcrpy==6.0.2 vcrpy-unittest==0.1.7 From eed7ebcafe59b77d7f934af548946096f0de8330 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:53:38 -0600 Subject: [PATCH 34/99] Bump pdfmake from 0.2.17 to 0.2.18 in /components (#11485) Bumps [pdfmake](https://github.com/bpampuch/pdfmake) from 0.2.17 to 0.2.18. - [Release notes](https://github.com/bpampuch/pdfmake/releases) - [Changelog](https://github.com/bpampuch/pdfmake/blob/0.2.18/CHANGELOG.md) - [Commits](https://github.com/bpampuch/pdfmake/compare/0.2.17...0.2.18) --- updated-dependencies: - dependency-name: pdfmake dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- components/package.json | 2 +- components/yarn.lock | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/components/package.json b/components/package.json index 086741e6f2d..15ddb09f1bb 100644 --- a/components/package.json +++ b/components/package.json @@ -35,7 +35,7 @@ "metismenu": "~3.0.7", "moment": "^2.30.1", "morris.js": "morrisjs/morris.js", - "pdfmake": "^0.2.17", + "pdfmake": "^0.2.18", "startbootstrap-sb-admin-2": "1.0.7" }, "engines": { diff --git a/components/yarn.lock b/components/yarn.lock index b3615a80643..8b3f3e8921e 100644 --- a/components/yarn.lock +++ b/components/yarn.lock @@ -24,10 +24,10 @@ base64-js "1.3.1" unicode-trie "^2.0.0" -"@foliojs-fork/pdfkit@^0.15.2": - version "0.15.2" - resolved "https://registry.yarnpkg.com/@foliojs-fork/pdfkit/-/pdfkit-0.15.2.tgz#6dbe57ed45f1dc022d0219f3810071b9007e347e" - integrity sha512-Wpj6BH4DGn+zAWmCk9agdbAw3Zxt+MpemjssLfYdnretWpZ014uR6Zo51E4ftVP75UA8a7mtt4TiCu09lIKsBw== +"@foliojs-fork/pdfkit@^0.15.3": + version "0.15.3" + resolved "https://registry.yarnpkg.com/@foliojs-fork/pdfkit/-/pdfkit-0.15.3.tgz#590b31e770a98e2af62ce44f268a0d06b41ff32f" + integrity sha512-Obc0Wmy3bm7BINFVvPhcl2rnSSK61DQrlHU8aXnAqDk9LCjWdUOPwhgD8Ywz5VtuFjRxmVOM/kQ/XLIBjDvltw== dependencies: "@foliojs-fork/fontkit" "^1.9.2" "@foliojs-fork/linebreak" "^1.1.1" @@ -503,13 +503,13 @@ pako@~1.0.2: resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw== -pdfmake@^0.2.17: - version "0.2.17" - resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.17.tgz#64beeb0b09c7e0ade39b6d4b379371818cea3da5" - integrity sha512-ODOp1T232yr/HGjdYCq888paBE7RDCflCOSRDUtR9CyfXneOmnMPZJl8dxP9zEXbKiv9vfk9Z/3eK2V2B/Wx/Q== +pdfmake@^0.2.18: + version "0.2.18" + resolved "https://registry.yarnpkg.com/pdfmake/-/pdfmake-0.2.18.tgz#0be32a9274466494a69285193b64f61f3198ea4e" + integrity sha512-Fe+GnMS8EVZu5rci/CDaQ+xmUoHvx8P+rvIlrwSYM6A5c7Aik8G6lpJbddhjBE2jXGjv6WcUCFCB06uZbjxkMw== dependencies: "@foliojs-fork/linebreak" "^1.1.2" - 
"@foliojs-fork/pdfkit" "^0.15.2" + "@foliojs-fork/pdfkit" "^0.15.3" iconv-lite "^0.6.3" xmldoc "^1.3.0" From 21380a5f81251680ae5094364e046927a17ff455 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:54:20 -0600 Subject: [PATCH 35/99] Bump pillow from 11.0.0 to 11.1.0 (#11488) Bumps [pillow](https://github.com/python-pillow/Pillow) from 11.0.0 to 11.1.0. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/11.0.0...11.1.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0b0bea69666..1007c8f7581 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ PyGithub==1.58.2 lxml==5.3.0 Markdown==3.7 openpyxl==3.1.5 -Pillow==11.0.0 # required by django-imagekit +Pillow==11.1.0 # required by django-imagekit psycopg[c]==3.2.3 cryptography==44.0.0 python-dateutil==2.9.0.post0 From acf044da3f5149186b31284eeb168c00f8b55564 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:54:38 -0600 Subject: [PATCH 36/99] Bump gitpython from 3.1.43 to 3.1.44 (#11489) Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.43 to 3.1.44. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.43...3.1.44) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1007c8f7581..236bfdb7e53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -45,7 +45,7 @@ whitenoise==5.2.0 titlecase==2.4.1 social-auth-app-django==5.4.2 social-auth-core==4.5.4 -gitpython==3.1.43 +gitpython==3.1.44 python-gitlab==5.3.1 cpe==1.3.1 packageurl-python==0.16.0 From 49e0a5c0da30649d3770e417faf1a38bd1bdef4a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 20:40:48 -0600 Subject: [PATCH 37/99] Update docker/build-push-action action from v6.11.0 to v6.12.0 (.github/workflows/release-x-manual-docker-containers.yml) (#11574) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/build-docker-images-for-testing.yml | 2 +- .github/workflows/release-x-manual-docker-containers.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index 2d7032546fc..7e33d6fc87f 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -35,7 +35,7 @@ jobs: - name: Build id: docker_build - uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 + uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 timeout-minutes: 10 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false diff --git a/.github/workflows/release-x-manual-docker-containers.yml b/.github/workflows/release-x-manual-docker-containers.yml index adf75cf544b..40af8e4c617 100644 --- a/.github/workflows/release-x-manual-docker-containers.yml +++ b/.github/workflows/release-x-manual-docker-containers.yml @@ -51,7 +51,7 @@ jobs: - name: Build and push images with debian if: ${{ matrix.os == 'debian' }} - uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 + uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} @@ -64,7 +64,7 @@ jobs: - name: Build and push images with alpine if: ${{ matrix.os == 'alpine' }} - uses: docker/build-push-action@b32b51a8eda65d6793cd0494a773d4f6bcef32dc # v6.11.0 + uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} From 7b5ab4dbd37341c14ca003e6712e5103cb401b56 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:56:58 -0600 Subject: [PATCH 38/99] Fix unit tests (#11583) --- dojo/api_v2/serializers.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index 5b0913492f2..38a36159425 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -1768,7 +1768,11 @@ def get_display_status(self, obj) -> str: return obj.status() def process_risk_acceptance(self, data): - is_risk_accepted = data.get("risk_accepted", False) + is_risk_accepted = data.get("risk_accepted") + # Do not take any action if the `risk_accepted` was not passed + if not isinstance(is_risk_accepted, bool): + return + # Determine how to proceed based on the value of 
`risk_accepted` if is_risk_accepted and not self.instance.risk_accepted and self.instance.test.engagement.product.enable_simple_risk_acceptance and not data.get("active", False): ra_helper.simple_risk_accept(self.context["request"].user, self.instance) elif not is_risk_accepted and self.instance.risk_accepted: # turning off risk_accepted From 2c59d68ac671ed5a27cc5c69e504c691c5ac6d46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 19:13:26 -0600 Subject: [PATCH 39/99] Bump boto3 from 1.35.99 to 1.36.0 (#11578) Bumps [boto3](https://github.com/boto/boto3) from 1.35.99 to 1.36.0. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.35.99...1.36.0) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 236bfdb7e53..9afbe859c14 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.35.99 # Required for Celery Broker AWS (SQS) support +boto3==1.36.0 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From e1dd49dcc779528147c502b672f8fff7d9bd90e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 19:14:18 -0600 Subject: [PATCH 40/99] Bump psycopg[c] from 3.2.3 to 3.2.4 (#11579) Bumps [psycopg[c]](https://github.com/psycopg/psycopg) from 3.2.3 to 3.2.4. - [Changelog](https://github.com/psycopg/psycopg/blob/master/docs/news.rst) - [Commits](https://github.com/psycopg/psycopg/compare/3.2.3...3.2.4) --- updated-dependencies: - dependency-name: psycopg[c] dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9afbe859c14..ec8253eb22b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,7 +31,7 @@ lxml==5.3.0 Markdown==3.7 openpyxl==3.1.5 Pillow==11.1.0 # required by django-imagekit -psycopg[c]==3.2.3 +psycopg[c]==3.2.4 cryptography==44.0.0 python-dateutil==2.9.0.post0 pytz==2024.2 From d99f643e05639895120c07b323d2d18503eb7ad1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 19:26:28 -0600 Subject: [PATCH 41/99] Update manusa/actions-setup-minikube action from v2.13.0 to v2.13.1 (.github/workflows/k8s-tests.yml) (#11582) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/k8s-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml index 3f169002efb..9dec6f1189b 100644 --- a/.github/workflows/k8s-tests.yml +++ b/.github/workflows/k8s-tests.yml @@ -35,7 +35,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Minikube - uses: manusa/actions-setup-minikube@0e8062ceff873bd77979f39cf8fd3621416afe4d # v2.13.0 + uses: manusa/actions-setup-minikube@5d9440a1b535e8b4f541eaac559681a9022df29d # v2.13.1 with: minikube version: 'v1.33.1' kubernetes version: ${{ matrix.k8s }} From 2b5577f73db357ac809969ffa35df0d941374e69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 21:15:38 -0600 Subject: [PATCH 42/99] Bump django from 5.1.4 to 5.1.5 (#11580) Bumps [django](https://github.com/django/django) from 5.1.4 to 5.1.5. - [Commits](https://github.com/django/django/compare/5.1.4...5.1.5) --- updated-dependencies: - dependency-name: django dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ec8253eb22b..6c59808cbe8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ django-slack==5.19.0 git+https://github.com/DefectDojo/django-tagging@develop#egg=django-tagging django-watson==1.6.3 django-prometheus==2.3.1 -Django==5.1.4 +Django==5.1.5 djangorestframework==3.15.2 html2text==2024.2.26 humanize==4.11.0 From f5660615843b97afa1965040a80cb9fb96d2ce24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 21:26:43 -0600 Subject: [PATCH 43/99] Bump vcrpy from 6.0.2 to 7.0.0 (#11481) Bumps [vcrpy](https://github.com/kevin1024/vcrpy) from 6.0.2 to 7.0.0. - [Release notes](https://github.com/kevin1024/vcrpy/releases) - [Changelog](https://github.com/kevin1024/vcrpy/blob/master/docs/changelog.rst) - [Commits](https://github.com/kevin1024/vcrpy/compare/v6.0.2...v7.0.0) --- updated-dependencies: - dependency-name: vcrpy dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6c59808cbe8..d21396ad058 100644 --- a/requirements.txt +++ b/requirements.txt @@ -54,7 +54,7 @@ JSON-log-formatter==1.1 django-split-settings==1.3.2 django-debug-toolbar==5.0.1 django-debug-toolbar-request-history==0.1.4 -vcrpy==6.0.2 +vcrpy==7.0.0 vcrpy-unittest==0.1.7 django-tagulous==2.1.0 PyJWT==2.10.1 From 0d5d2752e52ac82e1cb4273ef37381a776541a99 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:34:29 -0600 Subject: [PATCH 44/99] Update release-drafter/release-drafter action from v6.0.0 to v6.1.0 (.github/workflows/release-drafter.yml) (#11596) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/release-drafter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index 7802bfdc1b2..4f90f3a1d2b 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -27,7 +27,7 @@ jobs: steps: - name: Create Release id: create_release - uses: release-drafter/release-drafter@3f0f87098bd6b5c5b9a36d49c41d998ea58f9348 # v6.0.0 + uses: release-drafter/release-drafter@b1476f6e6eb133afa41ed8589daba6dc69b4d3f5 # v6.1.0 with: version: ${{ github.event.inputs.version }} env: From 23a53f95f2c18e05d59846b6ac2adf3c12052ea9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:32:07 -0600 Subject: [PATCH 45/99] Update dependency vite from 6.0.7 to v6.0.10 (docs/package.json) (#11598) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 12994fca578..a4b2aa3a367 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -4717,9 +4717,9 @@ "license": "MIT" }, "node_modules/vite": { - "version": "6.0.7", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.7.tgz", - "integrity": "sha512-RDt8r/7qx9940f8FcOIAH9PTViRrghKaK2K1jY3RaAURrEUbm9Du1mJ72G+jlhtG3WwodnfzY8ORQZbBavZEAQ==", + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.10.tgz", + "integrity": "sha512-MEszunEcMo6pFsfXN1GhCFQqnE25tWRH0MA4f0Q7uanACi4y1Us+ZGpTMnITwCTnYzB2b9cpmnelTlxgTBmaBA==", "dev": true, "license": "MIT", "dependencies": { From 628db8547648812eafdbe1b296c22a8777e8d1a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:32:30 -0600 Subject: [PATCH 46/99] Bump asteval from 1.0.5 to 1.0.6 (#11599) Bumps [asteval](https://github.com/lmfit/asteval) from 1.0.5 to 1.0.6. - [Release notes](https://github.com/lmfit/asteval/releases) - [Commits](https://github.com/lmfit/asteval/compare/1.0.5...1.0.6) --- updated-dependencies: - dependency-name: asteval dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d21396ad058..63bc8b0c95a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # requirements.txt for DefectDojo using Python 3.x -asteval==1.0.5 +asteval==1.0.6 bleach==6.2.0 bleach[css] celery==5.4.0 From ed7778e46ebd6febe1c5c207d7ce7e6d211b094e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:32:51 -0600 Subject: [PATCH 47/99] Bump boto3 from 1.36.0 to 1.36.2 (#11600) Bumps [boto3](https://github.com/boto/boto3) from 1.36.0 to 1.36.2. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.0...1.36.2) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 63bc8b0c95a..6904db97b4d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.0 # Required for Celery Broker AWS (SQS) support +boto3==1.36.2 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From facb7e74829bac4fd574736e29f187fc366764a5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:33:18 -0600 Subject: [PATCH 48/99] Update helm/chart-testing-action action from v2.6.1 to v2.7.0 (.github/workflows/test-helm-chart.yml) (#11601) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/test-helm-chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index 75cf1186411..b7d30cece1b 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -33,7 +33,7 @@ jobs: helm dependency update ./helm/defectdojo - name: Set up chart-testing - uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1 + uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0 with: yamale_version: 4.0.4 yamllint_version: 1.35.1 From 8aa12016ee5ef22548d9ae1d53c92b9446ddfeb4 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Tue, 21 Jan 2025 16:00:07 +0000 Subject: [PATCH 49/99] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 1859493eb90..086741e6f2d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.2", + "version": "2.43.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 5eec1f14dd1..3a2e4a630a2 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this 
app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.42.2" +__version__ = "2.43.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 4248b0ec222..859f8982ecf 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.2" +appVersion: "2.43.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.169 +version: 1.6.170-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 4308d26d77b9fb43a838018cde76540c0254d33b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:24:28 -0600 Subject: [PATCH 50/99] chore(deps): update helm release postgresql from 16.3.5 to ~16.4.0 (helm/defectdojo/chart.yaml) (#11609) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- helm/defectdojo/Chart.lock | 6 +++--- helm/defectdojo/Chart.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/helm/defectdojo/Chart.lock b/helm/defectdojo/Chart.lock index 591ab3ae6e9..708a5953c0d 100644 --- a/helm/defectdojo/Chart.lock +++ b/helm/defectdojo/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: postgresql repository: https://charts.bitnami.com/bitnami - version: 16.3.0 + version: 16.4.5 - name: postgresql-ha repository: https://charts.bitnami.com/bitnami version: 9.4.11 - name: redis repository: https://charts.bitnami.com/bitnami version: 19.6.4 -digest: sha256:896db01c8521d42f6830a84190fb0a679afb2a999a79e3d82226d0b871f7778d -generated: "2024-12-11T06:49:40.425726453Z" +digest: sha256:0ea2056121ece33daa13f76dd1ba99bd627f2b144289396c969101f33acf7223 +generated: "2025-01-21T16:41:50.646103279Z" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 859f8982ecf..c3f61e2a5ca 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -10,7 +10,7 @@ maintainers: url: https://github.com/DefectDojo/django-DefectDojo dependencies: - name: postgresql - version: ~16.3.0 + version: ~16.4.0 repository: "https://charts.bitnami.com/bitnami" condition: postgresql.enabled - name: postgresql-ha From 5a528f25a43d5059f45e43d6b7e37bc4c7644064 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 22:04:03 -0600 Subject: [PATCH 51/99] Update gcr.io/cloudsql-docker/gce-proxy Docker tag from 1.37.3 to v1.37.4 (helm/defectdojo/values.yaml) (#11587) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- helm/defectdojo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 4a66e65fa62..09f5f525e41 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -481,7 +481,7 @@ cloudsql: image: # set repo and image tag of gce-proxy repository: gcr.io/cloudsql-docker/gce-proxy - tag: 1.37.3 + tag: 1.37.4 pullPolicy: IfNotPresent # set CloudSQL instance: 'project:zone:instancename' instance: "" From 6f72620377eb961ab68cc1c696e136d5e1821d73 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 22:04:27 -0600 Subject: [PATCH 52/99] Update dependency vite from 6.0.10 to v6.0.11 (docs/package.json) (#11604) Co-authored-by: renovate[bot] 
<29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index a4b2aa3a367..ae641bbc8e1 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -4717,9 +4717,9 @@ "license": "MIT" }, "node_modules/vite": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.10.tgz", - "integrity": "sha512-MEszunEcMo6pFsfXN1GhCFQqnE25tWRH0MA4f0Q7uanACi4y1Us+ZGpTMnITwCTnYzB2b9cpmnelTlxgTBmaBA==", + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.11.tgz", + "integrity": "sha512-4VL9mQPKoHy4+FE0NnRE/kbY51TOfaknxAjt3fJbGJxhIpBZiqVzlZDEesWWsuREXHwNdAoOFZ9MkPEVXczHwg==", "dev": true, "license": "MIT", "dependencies": { From 1e4ac11a752049b33da370821e96cd497aeb8992 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 14:07:52 -0600 Subject: [PATCH 53/99] Bump boto3 from 1.36.2 to 1.36.3 (#11616) Bumps [boto3](https://github.com/boto/boto3) from 1.36.2 to 1.36.3. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.2...1.36.3) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6904db97b4d..33358945689 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.2 # Required for Celery Broker AWS (SQS) support +boto3==1.36.3 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 99ba0381c1deaaf50f3e863313e746e733284a51 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 21:45:45 -0600 Subject: [PATCH 54/99] fix(deps): update dependency @tabler/icons from 3.28.1 to v3.29.0 (docs/package.json) (#11612) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docs/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index ae641bbc8e1..99e924fe2c1 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -2411,9 +2411,9 @@ } }, "node_modules/@tabler/icons": { - "version": "3.28.1", - "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.28.1.tgz", - "integrity": "sha512-h7nqKEvFooLtFxhMOC1/2eiV+KRXhBUuDUUJrJlt6Ft6tuMw2eU/9GLQgrTk41DNmIEzp/LI83K9J9UUU8YBYQ==", + "version": "3.29.0", + "resolved": "https://registry.npmjs.org/@tabler/icons/-/icons-3.29.0.tgz", + "integrity": "sha512-VWNINymdmhay3MDvWVREmRwuWLSrX3YiInKvs5L4AHRF4bAfJabLlEReE0BW/XFsBt22ff8/C8Eam/LXlF97mA==", "license": "MIT", "funding": { "type": "github", From ae03b0908ae88758cab43516ac27f5f6c08d2642 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 13:17:26 -0600 Subject: [PATCH 55/99] Bump boto3 from 1.36.3 to 1.36.4 (#11624) Bumps [boto3](https://github.com/boto/boto3) from 1.36.3 to 1.36.4. 
- [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.3...1.36.4) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 33358945689..4bddc071da4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.3 # Required for Celery Broker AWS (SQS) support +boto3==1.36.4 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 8cce31d57777bc2d26e7c6a614ab097f7caabf0c Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Thu, 23 Jan 2025 20:32:09 +0100 Subject: [PATCH 56/99] Ruff: Fix DJ012 (#11543) --- dojo/models.py | 54 +++++++++++++++++++++---------------------- requirements-lint.txt | 2 +- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/dojo/models.py b/dojo/models.py index ff34cde034a..bc878e9cf15 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -1702,6 +1702,23 @@ class Meta: models.Index(fields=["product"]), ] + def __hash__(self): + return self.__str__().__hash__() + + def __eq__(self, other): + if isinstance(other, Endpoint): + # Check if the contents of the endpoint match + contents_match = str(self) == str(other) + # Determine if products should be used in the equation + if self.product is not None and other.product is not None: + # Check if the products are the same + products_match = (self.product) == other.product + # Check if the contents match + return products_match and contents_match + return contents_match + + return NotImplemented + def __str__(self): try: if self.host: @@ -1833,23 +1850,6 @@ def clean(self): if errors: raise ValidationError(errors) - def __hash__(self): - return self.__str__().__hash__() - - def __eq__(self, other): - if isinstance(other, Endpoint): - # Check if the contents of the endpoint match - contents_match = str(self) == str(other) - # Determine if products should be used in the equation - if self.product is not None and other.product is not None: - # Check if the products are the same - products_match = (self.product) == other.product - # Check if the contents match - return products_match and contents_match - return contents_match - - return NotImplemented - @property def is_broken(self): try: @@ -2651,6 +2651,16 @@ class Meta: models.Index(fields=["duplicate_finding", "id"]), ] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.unsaved_endpoints = [] + self.unsaved_request = None + self.unsaved_response = None + self.unsaved_tags = None + self.unsaved_files = None + self.unsaved_vulnerability_ids = None + def __str__(self): return self.title @@ -2725,16 +2735,6 @@ def get_absolute_url(self): from django.urls import reverse return reverse("view_finding", args=[str(self.id)]) - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.unsaved_endpoints = [] - self.unsaved_request = None - self.unsaved_response = None - self.unsaved_tags = None - self.unsaved_files = None - self.unsaved_vulnerability_ids = None - def copy(self, test=None): copy = self # Save the necessary 
ManyToMany relationships diff --git a/requirements-lint.txt b/requirements-lint.txt index 8f161d317ec..e3b98ed7307 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1 +1 @@ -ruff==0.8.1 \ No newline at end of file +ruff==0.9.2 \ No newline at end of file From a8040b41b1dfe3b936de506aec579787b58285f2 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Thu, 23 Jan 2025 20:37:24 +0100 Subject: [PATCH 57/99] feat(disclaimers): Split disclaimers (#10902) * feat(disclaimers): Split disclaimers * fix existing * Add disclaimer_notes to form_fields.html * Add to other places * fix rendering of disc. in report * Add reverse_code to DB migration * Add info to upgrade notes * Inc db-mig --- docs/content/en/open_source/upgrading/2.43.md | 7 +++- .../0220_system_settings_disclaimer_notif.py | 38 +++++++++++++++++++ .../0221_system_settings_disclaimer_notif.py | 21 ++++++++++ dojo/fixtures/defect_dojo_sample_data.json | 2 +- dojo/forms.py | 35 +++++++++++++++++ dojo/models.py | 17 +++++++-- dojo/reports/views.py | 17 ++++++--- dojo/templates/dojo/custom_html_report.html | 6 +++ dojo/templates/dojo/endpoint_pdf_report.html | 2 +- .../templates/dojo/engagement_pdf_report.html | 2 +- dojo/templates/dojo/finding_pdf_report.html | 2 +- .../templates/dojo/findings_list_snippet.html | 6 +++ dojo/templates/dojo/form_fields.html | 9 ++++- .../dojo/product_endpoint_pdf_report.html | 2 +- dojo/templates/dojo/product_pdf_report.html | 2 +- .../dojo/product_type_pdf_report.html | 2 +- dojo/templates/dojo/test_pdf_report.html | 2 +- dojo/templates/dojo/view_test.html | 6 +++ .../notifications/alert/review_requested.tpl | 4 +- .../notifications/mail/engagement_added.tpl | 4 +- dojo/templates/notifications/mail/other.tpl | 4 +- .../notifications/mail/product_added.tpl | 4 +- .../notifications/mail/product_type_added.tpl | 4 +- .../notifications/mail/report_created.tpl | 4 +- .../notifications/mail/review_requested.tpl | 4 +- .../mail/risk_acceptance_expiration.tpl | 4 +- .../notifications/mail/scan_added.tpl | 4 +- .../notifications/mail/sla_breach.tpl | 4 +- .../mail/sla_breach_combined.tpl | 4 +- .../notifications/mail/test_added.tpl | 4 +- .../mail/upcoming_engagement.tpl | 4 +- .../notifications/mail/user_mentioned.tpl | 4 +- .../msteams/engagement_added.tpl | 4 +- .../templates/notifications/msteams/other.tpl | 4 +- .../notifications/msteams/product_added.tpl | 4 +- .../msteams/product_type_added.tpl | 4 +- .../notifications/msteams/report_created.tpl | 4 +- .../msteams/review_requested.tpl | 4 +- .../msteams/risk_acceptance_expiration.tpl | 4 +- .../notifications/msteams/scan_added.tpl | 4 +- .../notifications/msteams/sla_breach.tpl | 4 +- .../notifications/msteams/test_added.tpl | 4 +- .../msteams/upcoming_engagement.tpl | 4 +- .../notifications/msteams/user_mentioned.tpl | 4 +- .../notifications/slack/engagement_added.tpl | 4 +- dojo/templates/notifications/slack/other.tpl | 4 +- .../notifications/slack/product_added.tpl | 4 +- .../slack/product_type_added.tpl | 4 +- .../notifications/slack/report_created.tpl | 4 +- .../notifications/slack/review_requested.tpl | 4 +- .../slack/risk_acceptance_expiration.tpl | 4 +- .../notifications/slack/scan_added.tpl | 4 +- .../notifications/slack/sla_breach.tpl | 4 +- .../notifications/slack/test_added.tpl | 4 +- .../slack/upcoming_engagement.tpl | 4 +- .../notifications/slack/user_mentioned.tpl | 4 +- .../webhooks/subtemplates/base.tpl | 4 +- 57 files changed, 237 insertions(+), 97 deletions(-) create mode 100644 
dojo/db_migrations/0220_system_settings_disclaimer_notif.py create mode 100644 dojo/db_migrations/0221_system_settings_disclaimer_notif.py diff --git a/docs/content/en/open_source/upgrading/2.43.md b/docs/content/en/open_source/upgrading/2.43.md index faa443cfea2..596b2a0080d 100644 --- a/docs/content/en/open_source/upgrading/2.43.md +++ b/docs/content/en/open_source/upgrading/2.43.md @@ -2,6 +2,9 @@ title: 'Upgrading to DefectDojo Version 2.43.x' toc_hide: true weight: -20250106 -description: No special instructions. +description: Disclaimer field renamed/split. --- -There are no special instructions for upgrading to 2.43.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release. + +[Pull request #10902](https://github.com/DefectDojo/django-DefectDojo/pull/10902) introduced different kinds of disclaimers within the DefectDojo instance. The original content of the disclaimer was copied to all new fields where it had been used until now (so this change does not require any action on the user's side). However, if users were managing the original disclaimer via API (endpoint `/api/v2/system_settings/1/`, field `disclaimer`), be aware that the fields are now called `disclaimer_notifications` and `disclaimer_reports` (plus there is one additional, previously unused field called `disclaimer_notes`). + +Otherwise, there are no special instructions for upgrading to 2.43.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release. diff --git a/dojo/db_migrations/0220_system_settings_disclaimer_notif.py b/dojo/db_migrations/0220_system_settings_disclaimer_notif.py new file mode 100644 index 00000000000..77a9d836c7b --- /dev/null +++ b/dojo/db_migrations/0220_system_settings_disclaimer_notif.py @@ -0,0 +1,38 @@ +# Generated by Django 5.0.8 on 2024-09-12 18:22 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('dojo', '0219_system_settings_enforce_verified_status_jira_and_more'), + ] + + operations = [ + migrations.RenameField( + model_name='system_settings', + old_name='disclaimer', + new_name='disclaimer_notifications', + ), + migrations.AlterField( + model_name='system_settings', + name='disclaimer_notifications', + field=models.TextField(blank=True, default='', help_text='Include this custom disclaimer on all notifications', max_length=3000, verbose_name='Custom Disclaimer for Notifications'), + ), + migrations.AddField( + model_name='system_settings', + name='disclaimer_reports', + field=models.TextField(blank=True, default='', help_text='Include this custom disclaimer on generated reports', max_length=5000, verbose_name='Custom Disclaimer for Reports'), + ), + migrations.AddField( + model_name='system_settings', + name='disclaimer_notes', + field=models.TextField(blank=True, default='', help_text='Include this custom disclaimer next to input form for notes', max_length=3000, verbose_name='Custom Disclaimer for Notes'), + ), + migrations.AddField( + model_name='system_settings', + name='disclaimer_reports_forced', + field=models.BooleanField(default=False, help_text="Disclaimer will be added to all reports even if the user didn't select 'Include disclaimer'.", verbose_name='Force to add disclaimer reports'), + ), + ] diff --git a/dojo/db_migrations/0221_system_settings_disclaimer_notif.py b/dojo/db_migrations/0221_system_settings_disclaimer_notif.py new file mode 100644 index 00000000000..8a979350640
--- /dev/null +++ b/dojo/db_migrations/0221_system_settings_disclaimer_notif.py @@ -0,0 +1,21 @@ +# Generated by Django 5.0.8 on 2024-09-12 18:22 + +from django.db import migrations + + +def copy_notif_field(apps, schema_editor): + system_settings_model = apps.get_model('dojo', 'System_Settings').objects.get() + if system_settings_model.disclaimer_notifications: + system_settings_model.disclaimer_reports = system_settings_model.disclaimer_notifications + system_settings_model.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ('dojo', '0220_system_settings_disclaimer_notif'), + ] + + operations = [ + migrations.RunPython(copy_notif_field, reverse_code=migrations.RunPython.noop), + ] diff --git a/dojo/fixtures/defect_dojo_sample_data.json b/dojo/fixtures/defect_dojo_sample_data.json index 2d0ece6cb16..a0c5414058c 100644 --- a/dojo/fixtures/defect_dojo_sample_data.json +++ b/dojo/fixtures/defect_dojo_sample_data.json @@ -7119,7 +7119,7 @@ "enable_finding_sla": true, "allow_anonymous_survey_repsonse": false, "credentials": "", - "disclaimer": "", + "disclaimer_notifications": "", "risk_acceptance_form_default_days": 180, "risk_acceptance_notify_before_expiration": 10, "enable_credentials": true, diff --git a/dojo/forms.py b/dojo/forms.py index 04ed0d424d7..f9a52f9530f 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -860,6 +860,8 @@ def __init__(self, *args, **kwargs): self.fields["expiration_date"].initial = expiration_date # self.fields['path'].help_text = 'Existing proof uploaded: %s' % self.instance.filename() if self.instance.filename() else 'None' self.fields["accepted_findings"].queryset = get_authorized_findings(Permissions.Risk_Acceptance) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() class BaseManageFileFormSet(forms.BaseModelFormSet): @@ -1569,6 +1571,8 @@ def __init__(self, *args, **kwargs): self.fields["severity"].required = False # we need to defer initialization to prevent multiple initializations if other forms are shown self.fields["tags"].widget.tag_options = tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True}) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() def clean(self): cleaned_data = super().clean() @@ -1712,6 +1716,11 @@ class Meta: model = Notes fields = ["entry", "private"] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() + class TypedNoteForm(NoteForm): @@ -1763,6 +1772,8 @@ def __init__(self, *args, **kwargs): self.fields["mitigated_by"].queryset = get_authorized_users(Permissions.Test_Edit) self.fields["mitigated"].initial = self.instance.mitigated self.fields["mitigated_by"].initial = self.instance.mitigated_by + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() def _post_clean(self): super()._post_clean() @@ -1815,6 +1826,11 @@ class Meta: model = Notes fields = ["entry"] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() + class ClearFindingReviewForm(forms.ModelForm): entry = forms.CharField( @@ -1829,6 +1845,11 @@ class Meta: model = Finding fields = ["active", "verified", "false_p", "out_of_scope", "duplicate", "is_mitigated"] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if 
disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() + class ReviewFindingForm(forms.Form): reviewers = forms.MultipleChoiceField( @@ -1866,6 +1887,8 @@ def __init__(self, *args, **kwargs): self.reviewer_queryset = users # Set the users in the form self.fields["reviewers"].choices = self._get_choices(self.reviewer_queryset) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() @staticmethod def _get_choices(queryset): @@ -2303,6 +2326,13 @@ class ReportOptionsForm(forms.Form): include_disclaimer = forms.ChoiceField(choices=yes_no, label="Disclaimer") report_type = forms.ChoiceField(choices=(("HTML", "HTML"),)) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if get_system_setting("disclaimer_reports_forced"): + self.fields["include_disclaimer"].disabled = True + self.fields["include_disclaimer"].initial = "1" # represents yes + self.fields["include_disclaimer"].help_text = "Administrator of the system enforced placement of disclaimer in all reports. You are not able to exclude the disclaimer from this report." + class CustomReportOptionsForm(forms.Form): yes_no = (("0", "No"), ("1", "Yes")) @@ -2738,6 +2768,11 @@ class Meta: model = Engagement_Presets exclude = ["product"] + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if disclaimer := get_system_setting("disclaimer_notes"): + self.disclaimer = disclaimer.strip() + class DeleteEngagementPresetsForm(forms.ModelForm): id = forms.IntegerField(required=True, diff --git a/dojo/models.py b/dojo/models.py index bc878e9cf15..d661e2a397d 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -513,9 +513,20 @@ class System_Settings(models.Model): help_text=_("Enable anyone with a link to the survey to answer a survey"), ) credentials = models.TextField(max_length=3000, blank=True) - disclaimer = models.TextField(max_length=3000, default="", blank=True, - verbose_name=_("Custom Disclaimer"), - help_text=_("Include this custom disclaimer on all notifications and generated reports")) + disclaimer_notifications = models.TextField(max_length=3000, default="", blank=True, + verbose_name=_("Custom Disclaimer for Notifications"), + help_text=_("Include this custom disclaimer on all notifications")) + disclaimer_reports = models.TextField(max_length=5000, default="", blank=True, + verbose_name=_("Custom Disclaimer for Reports"), + help_text=_("Include this custom disclaimer on generated reports")) + disclaimer_reports_forced = models.BooleanField( + default=False, + blank=False, + verbose_name=_("Force to add disclaimer reports"), + help_text=_("Disclaimer will be added to all reports even if the user didn't select 'Include disclaimer'.")) + disclaimer_notes = models.TextField(max_length=3000, default="", blank=True, + verbose_name=_("Custom Disclaimer for Notes"), + help_text=_("Include this custom disclaimer next to input form for notes")) risk_acceptance_form_default_days = models.IntegerField(null=True, blank=True, default=180, help_text=_("Default expiry period for risk acceptance form.")) risk_acceptance_notify_before_expiration = models.IntegerField(null=True, blank=True, default=10, verbose_name=_("Risk acceptance expiration heads up days"), help_text=_("Notify X days before risk acceptance expires.
Leave empty to disable.")) diff --git a/dojo/reports/views.py b/dojo/reports/views.py index 4bcd7386769..c57931df109 100644 --- a/dojo/reports/views.py +++ b/dojo/reports/views.py @@ -132,6 +132,10 @@ def _set_state(self, request: HttpRequest): self.host = report_url_resolver(request) self.selected_widgets = self.get_selected_widgets(request) self.widgets = list(self.selected_widgets.values()) + self.include_disclaimer = get_system_setting("disclaimer_reports_forced", 0) + self.disclaimer = get_system_setting("disclaimer_reports") + if self.include_disclaimer and len(self.disclaimer) == 0: + self.disclaimer = "Please configure in System Settings." def get_selected_widgets(self, request): selected_widgets = report_widget_factory(json_data=request.POST["json"], request=request, host=self.host, @@ -164,7 +168,10 @@ def get_context(self): "host": self.host, "finding_notes": self.finding_notes, "finding_images": self.finding_images, - "user_id": self.request.user.id} + "user_id": self.request.user.id, + "include_disclaimer": self.include_disclaimer, + "disclaimer": self.disclaimer, + } def report_findings(request): @@ -285,8 +292,8 @@ def product_endpoint_report(request, pid): include_finding_images = int(request.GET.get("include_finding_images", 0)) include_executive_summary = int(request.GET.get("include_executive_summary", 0)) include_table_of_contents = int(request.GET.get("include_table_of_contents", 0)) - include_disclaimer = int(request.GET.get("include_disclaimer", 0)) - disclaimer = get_system_setting("disclaimer") + include_disclaimer = int(request.GET.get("include_disclaimer", 0)) or (get_system_setting("disclaimer_reports_forced", 0)) + disclaimer = get_system_setting("disclaimer_reports") if include_disclaimer and len(disclaimer) == 0: disclaimer = "Please configure in System Settings." generate = "_generate" in request.GET @@ -363,8 +370,8 @@ def generate_report(request, obj, host_view=False): include_finding_images = int(request.GET.get("include_finding_images", 0)) include_executive_summary = int(request.GET.get("include_executive_summary", 0)) include_table_of_contents = int(request.GET.get("include_table_of_contents", 0)) - include_disclaimer = int(request.GET.get("include_disclaimer", 0)) - disclaimer = get_system_setting("disclaimer") + include_disclaimer = int(request.GET.get("include_disclaimer", 0)) or (get_system_setting("disclaimer_reports_forced", 0)) + disclaimer = get_system_setting("disclaimer_reports") if include_disclaimer and len(disclaimer) == 0: disclaimer = "Please configure in System Settings." diff --git a/dojo/templates/dojo/custom_html_report.html b/dojo/templates/dojo/custom_html_report.html index de89837f747..db7b4e3c070 100644 --- a/dojo/templates/dojo/custom_html_report.html +++ b/dojo/templates/dojo/custom_html_report.html @@ -3,6 +3,12 @@ {% block content %} {{ block.super }}
+ {% if include_disclaimer %} +
+ Disclaimer
+

{{ disclaimer | safe }}

+
+ {% endif %} {% for widget in widgets %} {{ widget.get_html }} {% endfor %} diff --git a/dojo/templates/dojo/endpoint_pdf_report.html b/dojo/templates/dojo/endpoint_pdf_report.html index b214afadb35..10cf2804c97 100644 --- a/dojo/templates/dojo/endpoint_pdf_report.html +++ b/dojo/templates/dojo/endpoint_pdf_report.html @@ -81,7 +81,7 @@
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
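
A note for readers of the dojo/reports/views.py hunks above: every report view resolves the disclaimer with the same precedence, so a minimal sketch of that logic is collected here. The helper name resolve_report_disclaimer is hypothetical; get_system_setting is assumed to be the usual helper from dojo.utils.

from dojo.utils import get_system_setting  # assumed import path

def resolve_report_disclaimer(request):
    # Hypothetical helper, for illustration only: the admin-side
    # "disclaimer_reports_forced" flag wins over the user's checkbox.
    include_disclaimer = int(request.GET.get("include_disclaimer", 0)) or get_system_setting("disclaimer_reports_forced", 0)
    disclaimer = get_system_setting("disclaimer_reports")
    if include_disclaimer and len(disclaimer) == 0:
        # Forced but not configured yet: fall back to a reminder string.
        disclaimer = "Please configure in System Settings."
    return include_disclaimer, disclaimer
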
diff --git a/dojo/templates/dojo/engagement_pdf_report.html b/dojo/templates/dojo/engagement_pdf_report.html index a9c8ac8101c..ff341274354 100644 --- a/dojo/templates/dojo/engagement_pdf_report.html +++ b/dojo/templates/dojo/engagement_pdf_report.html @@ -158,7 +158,7 @@
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
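
The "| safe" added in these PDF templates disables Django's autoescaping for the disclaimer, so markup entered by an administrator renders as HTML instead of literal text. A small self-contained demonstration of the difference (illustration only, not part of the patch):

import django
from django.conf import settings

# Minimal standalone configuration, just enough to render a template string.
settings.configure(TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}])
django.setup()

from django.template import engines

template = engines["django"].from_string("{{ disclaimer }} / {{ disclaimer | safe }}")
# Prints: &lt;b&gt;Confidential&lt;/b&gt; / <b>Confidential</b>
print(template.render({"disclaimer": "<b>Confidential</b>"}))
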
diff --git a/dojo/templates/dojo/finding_pdf_report.html b/dojo/templates/dojo/finding_pdf_report.html index 6d376c15bcb..ea051d43d0e 100644 --- a/dojo/templates/dojo/finding_pdf_report.html +++ b/dojo/templates/dojo/finding_pdf_report.html @@ -57,7 +57,7 @@
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
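
The findings_list_snippet.html and form_fields.html hunks that follow render a form-level disclaimer whenever the form exposes one. The forms gain that attribute in the dojo/forms.py hunk earlier in this patch; a minimal self-contained sketch of the pattern (the form class here is hypothetical):

from django import forms

from dojo.utils import get_system_setting  # assumed import path

class ExampleNotesForm(forms.Form):
    # Hypothetical form, mirroring the forms.py hunks above.
    entry = forms.CharField(widget=forms.Textarea)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Walrus pattern from the patch: the attribute is only set when a
        # non-empty disclaimer is configured, so the template guard
        # {% if form.disclaimer %} stays falsy otherwise.
        if disclaimer := get_system_setting("disclaimer_notes"):
            self.disclaimer = disclaimer.strip()
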
diff --git a/dojo/templates/dojo/findings_list_snippet.html b/dojo/templates/dojo/findings_list_snippet.html index 51e85ab8d1b..c83ba1f22fa 100644 --- a/dojo/templates/dojo/findings_list_snippet.html +++ b/dojo/templates/dojo/findings_list_snippet.html @@ -253,6 +253,12 @@

{{ bulk_edit_form.media.js }} {% endcomment %} {{ bulk_edit_form.tags }} + {% if bulk_edit_form.disclaimer %} +
+
Disclaimer
+
{{ bulk_edit_form.disclaimer }}
+
+ {% endif %} diff --git a/dojo/templates/dojo/form_fields.html b/dojo/templates/dojo/form_fields.html index 98706ee46d3..6af19a96aa9 100644 --- a/dojo/templates/dojo/form_fields.html +++ b/dojo/templates/dojo/form_fields.html @@ -80,4 +80,11 @@

{% endif %}
-{% endfor %} \ No newline at end of file +{% endfor %} + +{% if form.disclaimer %} +
+
Disclaimer
+
{{ form.disclaimer }}
+
+{% endif %} diff --git a/dojo/templates/dojo/product_endpoint_pdf_report.html b/dojo/templates/dojo/product_endpoint_pdf_report.html index 614fa0f00f4..0a1cb6e5237 100644 --- a/dojo/templates/dojo/product_endpoint_pdf_report.html +++ b/dojo/templates/dojo/product_endpoint_pdf_report.html @@ -119,7 +119,7 @@
Finding Age
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
diff --git a/dojo/templates/dojo/product_pdf_report.html b/dojo/templates/dojo/product_pdf_report.html index b02b2609de6..25480502757 100644 --- a/dojo/templates/dojo/product_pdf_report.html +++ b/dojo/templates/dojo/product_pdf_report.html @@ -142,7 +142,7 @@
Finding Age
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
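
For orientation while reading the remaining template hunks: the dojo/models.py hunk earlier in this patch splits the old single disclaimer field into four System_Settings fields, one per channel. A sketch of the keys callers read after this change, assuming the usual get_system_setting helper:

from dojo.utils import get_system_setting  # assumed import path

# One setting per channel, replacing the old get_system_setting("disclaimer"):
notifications_text = get_system_setting("disclaimer_notifications")  # mail/Slack/Teams/webhook templates
reports_text = get_system_setting("disclaimer_reports")              # generated reports (max_length raised to 5000)
notes_text = get_system_setting("disclaimer_notes")                  # shown next to note input forms
reports_forced = get_system_setting("disclaimer_reports_forced")     # boolean admin override for reports
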
diff --git a/dojo/templates/dojo/product_type_pdf_report.html b/dojo/templates/dojo/product_type_pdf_report.html index 4b0fc48d9fc..ec550d1524b 100644 --- a/dojo/templates/dojo/product_type_pdf_report.html +++ b/dojo/templates/dojo/product_type_pdf_report.html @@ -114,7 +114,7 @@

{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
diff --git a/dojo/templates/dojo/test_pdf_report.html b/dojo/templates/dojo/test_pdf_report.html index e9ec79d6aa9..b1d1cd7b5fa 100644 --- a/dojo/templates/dojo/test_pdf_report.html +++ b/dojo/templates/dojo/test_pdf_report.html @@ -146,7 +146,7 @@
{% if include_disclaimer%}
Disclaimer
-

{{ disclaimer }}

+

{{ disclaimer | safe }}

{% endif %}
diff --git a/dojo/templates/dojo/view_test.html b/dojo/templates/dojo/view_test.html index a4e0390b91a..e551c4858ea 100644 --- a/dojo/templates/dojo/view_test.html +++ b/dojo/templates/dojo/view_test.html @@ -848,6 +848,12 @@

{{ bulk_edit_form.media.css }} {{ bulk_edit_form.media.js }} {{ bulk_edit_form.tags }} + {% if bulk_edit_form.disclaimer %} +
+
Disclaimer
+
{{ bulk_edit_form.disclaimer }}
+
+ {% endif %} {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/other.tpl b/dojo/templates/notifications/mail/other.tpl index 69c96362d2d..65e5d854c3b 100644 --- a/dojo/templates/notifications/mail/other.tpl +++ b/dojo/templates/notifications/mail/other.tpl @@ -31,11 +31,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/product_added.tpl b/dojo/templates/notifications/mail/product_added.tpl index ef8fa7faa6f..9e5ecded414 100644 --- a/dojo/templates/notifications/mail/product_added.tpl +++ b/dojo/templates/notifications/mail/product_added.tpl @@ -28,11 +28,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/product_type_added.tpl b/dojo/templates/notifications/mail/product_type_added.tpl index 72d7972f56a..a229882a346 100644 --- a/dojo/templates/notifications/mail/product_type_added.tpl +++ b/dojo/templates/notifications/mail/product_type_added.tpl @@ -28,11 +28,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/report_created.tpl b/dojo/templates/notifications/mail/report_created.tpl index efbb4b8fd61..0c898d276ec 100644 --- a/dojo/templates/notifications/mail/report_created.tpl +++ b/dojo/templates/notifications/mail/report_created.tpl @@ -25,11 +25,11 @@ {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/review_requested.tpl b/dojo/templates/notifications/mail/review_requested.tpl index 98bde4113ab..c3ef81b1a12 100644 --- a/dojo/templates/notifications/mail/review_requested.tpl +++ b/dojo/templates/notifications/mail/review_requested.tpl @@ -33,11 +33,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/risk_acceptance_expiration.tpl b/dojo/templates/notifications/mail/risk_acceptance_expiration.tpl index 7fc4916d1df..ce76a2d1b5b 100644 --- a/dojo/templates/notifications/mail/risk_acceptance_expiration.tpl +++ b/dojo/templates/notifications/mail/risk_acceptance_expiration.tpl @@ -52,11 +52,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/scan_added.tpl b/dojo/templates/notifications/mail/scan_added.tpl index 513b24818db..263585246e0 100644 --- a/dojo/templates/notifications/mail/scan_added.tpl +++ b/dojo/templates/notifications/mail/scan_added.tpl @@ -72,11 +72,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/sla_breach.tpl b/dojo/templates/notifications/mail/sla_breach.tpl index 692df2ef2e2..97c08762bae 100644 --- a/dojo/templates/notifications/mail/sla_breach.tpl +++ b/dojo/templates/notifications/mail/sla_breach.tpl @@ -45,11 +45,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/sla_breach_combined.tpl b/dojo/templates/notifications/mail/sla_breach_combined.tpl index 5b88a656e27..4fc8c8f801e 100644 --- a/dojo/templates/notifications/mail/sla_breach_combined.tpl +++ b/dojo/templates/notifications/mail/sla_breach_combined.tpl @@ -55,7 +55,7 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{{ system_settings.disclaimer }}

+ font-family: 'Cambria','times roman',serif">{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/test_added.tpl b/dojo/templates/notifications/mail/test_added.tpl index 285bb28faf7..3c235ea0762 100644 --- a/dojo/templates/notifications/mail/test_added.tpl +++ b/dojo/templates/notifications/mail/test_added.tpl @@ -30,11 +30,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/upcoming_engagement.tpl b/dojo/templates/notifications/mail/upcoming_engagement.tpl index ae6da9da7b2..858314449c0 100644 --- a/dojo/templates/notifications/mail/upcoming_engagement.tpl +++ b/dojo/templates/notifications/mail/upcoming_engagement.tpl @@ -28,11 +28,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/mail/user_mentioned.tpl b/dojo/templates/notifications/mail/user_mentioned.tpl index 651fd9df499..9601da3c9a5 100644 --- a/dojo/templates/notifications/mail/user_mentioned.tpl +++ b/dojo/templates/notifications/mail/user_mentioned.tpl @@ -31,11 +31,11 @@ {% url 'notifications' as notification_url %} {% trans "You can manage your notification settings here" %}: {{ notification_url|full_url }}

- {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %}
{% trans "Disclaimer" %}
-

{{ system_settings.disclaimer }}

+

{{ system_settings.disclaimer_notifications }}

{% endif %} {% endautoescape %} diff --git a/dojo/templates/notifications/msteams/engagement_added.tpl b/dojo/templates/notifications/msteams/engagement_added.tpl index 9d7c7ef5b37..747f8911079 100644 --- a/dojo/templates/notifications/msteams/engagement_added.tpl +++ b/dojo/templates/notifications/msteams/engagement_added.tpl @@ -21,10 +21,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} diff --git a/dojo/templates/notifications/msteams/other.tpl b/dojo/templates/notifications/msteams/other.tpl index ef0cbe4bd99..123390f9c58 100644 --- a/dojo/templates/notifications/msteams/other.tpl +++ b/dojo/templates/notifications/msteams/other.tpl @@ -11,10 +11,10 @@ "activityImage": "https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/master/dojo/static/dojo/img/chop.png", "text": "{% autoescape on %} {{ description }} {% endautoescape %}" } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/product_added.tpl b/dojo/templates/notifications/msteams/product_added.tpl index 31e5a71bf9a..d2def33cd0f 100644 --- a/dojo/templates/notifications/msteams/product_added.tpl +++ b/dojo/templates/notifications/msteams/product_added.tpl @@ -17,10 +17,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/product_type_added.tpl b/dojo/templates/notifications/msteams/product_type_added.tpl index 926ade5a2c7..b650f23d6c3 100644 --- a/dojo/templates/notifications/msteams/product_type_added.tpl +++ b/dojo/templates/notifications/msteams/product_type_added.tpl @@ -17,10 +17,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/report_created.tpl b/dojo/templates/notifications/msteams/report_created.tpl index 8a9b246ebf3..42f8398ed25 100644 --- a/dojo/templates/notifications/msteams/report_created.tpl +++ b/dojo/templates/notifications/msteams/report_created.tpl @@ -17,10 +17,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git 
a/dojo/templates/notifications/msteams/review_requested.tpl b/dojo/templates/notifications/msteams/review_requested.tpl index d1a38c49b0c..1ec637c900d 100644 --- a/dojo/templates/notifications/msteams/review_requested.tpl +++ b/dojo/templates/notifications/msteams/review_requested.tpl @@ -29,10 +29,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/risk_acceptance_expiration.tpl b/dojo/templates/notifications/msteams/risk_acceptance_expiration.tpl index babf9376e87..95e6279b5ff 100644 --- a/dojo/templates/notifications/msteams/risk_acceptance_expiration.tpl +++ b/dojo/templates/notifications/msteams/risk_acceptance_expiration.tpl @@ -35,10 +35,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/scan_added.tpl b/dojo/templates/notifications/msteams/scan_added.tpl index af32887e9a4..52a67fd76c8 100644 --- a/dojo/templates/notifications/msteams/scan_added.tpl +++ b/dojo/templates/notifications/msteams/scan_added.tpl @@ -26,10 +26,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/sla_breach.tpl b/dojo/templates/notifications/msteams/sla_breach.tpl index c74b7abea25..31ad6991bf0 100644 --- a/dojo/templates/notifications/msteams/sla_breach.tpl +++ b/dojo/templates/notifications/msteams/sla_breach.tpl @@ -34,10 +34,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/test_added.tpl b/dojo/templates/notifications/msteams/test_added.tpl index 8d5515138ea..7b22e5d1314 100644 --- a/dojo/templates/notifications/msteams/test_added.tpl +++ b/dojo/templates/notifications/msteams/test_added.tpl @@ -26,10 +26,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/upcoming_engagement.tpl b/dojo/templates/notifications/msteams/upcoming_engagement.tpl index 06cbbb2e1cc..9cb22d025d7 100644 --- a/dojo/templates/notifications/msteams/upcoming_engagement.tpl +++ 
b/dojo/templates/notifications/msteams/upcoming_engagement.tpl @@ -30,10 +30,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/msteams/user_mentioned.tpl b/dojo/templates/notifications/msteams/user_mentioned.tpl index 5c09d1c7894..fb27ea5ad1b 100644 --- a/dojo/templates/notifications/msteams/user_mentioned.tpl +++ b/dojo/templates/notifications/msteams/user_mentioned.tpl @@ -25,10 +25,10 @@ } ] } - {% if system_settings.disclaimer and system_settings.disclaimer.strip %} + {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} ,{ "activityTitle": "{% trans "Disclaimer" %}", - "text": "{{ system_settings.disclaimer }}" + "text": "{{ system_settings.disclaimer_notifications }}" } {% endif %} ], diff --git a/dojo/templates/notifications/slack/engagement_added.tpl b/dojo/templates/notifications/slack/engagement_added.tpl index 711fb2d7e77..212f0f5ee3c 100644 --- a/dojo/templates/notifications/slack/engagement_added.tpl +++ b/dojo/templates/notifications/slack/engagement_added.tpl @@ -3,8 +3,8 @@ {% blocktranslate trimmed with name=engagement.name eng_product=engagement.product eng_url=url|full_url %} The engagement "{{ name }}" has been created in the product "{{ eng_product }}". It can be viewed here: {{ eng_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/other.tpl b/dojo/templates/notifications/slack/other.tpl index 0bbe40eb8b2..f371af5263d 100644 --- a/dojo/templates/notifications/slack/other.tpl +++ b/dojo/templates/notifications/slack/other.tpl @@ -6,8 +6,8 @@ More information on this event can be found here: {{ event_url }} {% endblocktranslate %} {% endif %} -{% if system_settings.disclaimer|length %} +{% if system_settings.disclaimer_notifications|length %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/product_added.tpl b/dojo/templates/notifications/slack/product_added.tpl index ca57b54333a..6d1cef84455 100644 --- a/dojo/templates/notifications/slack/product_added.tpl +++ b/dojo/templates/notifications/slack/product_added.tpl @@ -3,8 +3,8 @@ {% blocktranslate trimmed with prod_url=url|full_url %} The new product "{{ title }}" has been added. 
It can be viewed here: {{ prod_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/product_type_added.tpl b/dojo/templates/notifications/slack/product_type_added.tpl index 914114ca437..8818a3f101a 100644 --- a/dojo/templates/notifications/slack/product_type_added.tpl +++ b/dojo/templates/notifications/slack/product_type_added.tpl @@ -3,8 +3,8 @@ {% blocktranslate trimmed with prod_url=url|full_url %} The new product type "{{ title }}" has been added. It can be viewed here: {{ prod_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/report_created.tpl b/dojo/templates/notifications/slack/report_created.tpl index 29b08597b1c..8c5ca4a9290 100644 --- a/dojo/templates/notifications/slack/report_created.tpl +++ b/dojo/templates/notifications/slack/report_created.tpl @@ -3,8 +3,8 @@ {% blocktranslate trimmed with name=report.name report_url=url|full_url %} Your report "{{ name }}" is ready. It can be downloaded here: {{ report_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/review_requested.tpl b/dojo/templates/notifications/slack/review_requested.tpl index 64a78af408f..395954e7745 100644 --- a/dojo/templates/notifications/slack/review_requested.tpl +++ b/dojo/templates/notifications/slack/review_requested.tpl @@ -15,7 +15,7 @@ {% trans "Full details of the finding can be reviewed at" %} {{ url|full_url }} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer:" %} - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/risk_acceptance_expiration.tpl b/dojo/templates/notifications/slack/risk_acceptance_expiration.tpl index e3751c7abff..d5222c81826 100644 --- a/dojo/templates/notifications/slack/risk_acceptance_expiration.tpl +++ b/dojo/templates/notifications/slack/risk_acceptance_expiration.tpl @@ -11,8 +11,8 @@ {% blocktranslate trimmed with risk_url=risk_acceptance_url|full_url %} Risk Acceptance can be viewed here: {{ risk_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/scan_added.tpl b/dojo/templates/notifications/slack/scan_added.tpl index aa72932e8fa..97488ee6c0b 100644 --- 
a/dojo/templates/notifications/slack/scan_added.tpl +++ b/dojo/templates/notifications/slack/scan_added.tpl @@ -8,8 +8,8 @@ They can be viewed here: {{ scan_url }} {% endblocktranslate %} {% endif %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/sla_breach.tpl b/dojo/templates/notifications/slack/sla_breach.tpl index fc6913e005d..0e9d4ce9e5b 100644 --- a/dojo/templates/notifications/slack/sla_breach.tpl +++ b/dojo/templates/notifications/slack/sla_breach.tpl @@ -6,8 +6,8 @@ Title: {{title}} Severity: {{severity}} You can find details here: {{ sla_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/test_added.tpl b/dojo/templates/notifications/slack/test_added.tpl index 38c9a9b752e..fddccc1e815 100644 --- a/dojo/templates/notifications/slack/test_added.tpl +++ b/dojo/templates/notifications/slack/test_added.tpl @@ -6,8 +6,8 @@ Title: {{title}} Type: {{ test_type }} You can find details here: {{ test_url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/upcoming_engagement.tpl b/dojo/templates/notifications/slack/upcoming_engagement.tpl index 59f64529da3..1b2e80df6da 100644 --- a/dojo/templates/notifications/slack/upcoming_engagement.tpl +++ b/dojo/templates/notifications/slack/upcoming_engagement.tpl @@ -1,8 +1,8 @@ {% load i18n %}{% blocktranslate trimmed with eng_product=engagement.product start=engagement.target_start %} The engagement "{{ eng_product }}" is starting on {{ start }}. 
{% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/slack/user_mentioned.tpl b/dojo/templates/notifications/slack/user_mentioned.tpl index 3d1cf778012..aba6c9aed6a 100644 --- a/dojo/templates/notifications/slack/user_mentioned.tpl +++ b/dojo/templates/notifications/slack/user_mentioned.tpl @@ -5,8 +5,8 @@ User {{ user }} jotted a note on {{ section }}: Full details of the note can be reviewed at {{ url }} {% endblocktranslate %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} {% trans "Disclaimer" %}: - {{ system_settings.disclaimer }} + {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/templates/notifications/webhooks/subtemplates/base.tpl b/dojo/templates/notifications/webhooks/subtemplates/base.tpl index 3b6e30da989..f8cb3e46bb8 100644 --- a/dojo/templates/notifications/webhooks/subtemplates/base.tpl +++ b/dojo/templates/notifications/webhooks/subtemplates/base.tpl @@ -9,6 +9,6 @@ url_ui: {{ url|full_url }} {% if url_api %} url_api: {{ url_api|full_url }} {% endif %} -{% if system_settings.disclaimer and system_settings.disclaimer.strip %} -disclaimer: {{ system_settings.disclaimer }} +{% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} +disclaimer: {{ system_settings.disclaimer_notifications }} {% endif %} From 9d773a07040ba0218b0da9456daa6b717e050ac4 Mon Sep 17 00:00:00 2001 From: OTaoufiq <37933905+oussama-taoufiq@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:41:06 -0500 Subject: [PATCH 58/99] Make links in the login page visually obvious (#11474) * Make links in the login page visually obvious "I forgot my password" and "I forgot my username" are links without underline, and it's not visually evident without color vision. * Clean up the PR from the unrelated files --------- Co-authored-by: oussama taoufiq --- dojo/static/dojo/css/dojo.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dojo/static/dojo/css/dojo.css b/dojo/static/dojo/css/dojo.css index 281c2420d7f..07ee1b97342 100644 --- a/dojo/static/dojo/css/dojo.css +++ b/dojo/static/dojo/css/dojo.css @@ -446,6 +446,11 @@ form ul { padding-left: 0px } +form ul li a#reset-password, +form ul li a#forgot-username { + color: rgb(51, 122, 183) +} + form ul#id_accepted_findings { list-style: none; margin-left: 0; From c2ed2518c36f32bf9a2c396010e81155e7b2d766 Mon Sep 17 00:00:00 2001 From: testaccount90009 <122134756+testaccount90009@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:25:09 -0800 Subject: [PATCH 59/99] Mend SCA Parser update (#11395) * Add unit test json files for Mend Platform - SAST Findings Code findings in API 3.0 / Platform for Mend, SAST respectively. * Update test_mend_parser.py * Fix newline eof * Update parser.py Remove locations / path from Description of SCA Platform output and instead implement locations in steps_to_reproduce. * Update test_mend_parser.py Updating value to a placeholder severityRating right now of 2.143. Still working on this and the cvssv3 assertion values.
* Update test_mend_parser.py * Refactoring this for SCA only and SAST will be an additional PR - need to restructure format slightly for SAST * Update parser.py * Update parser.py * Update parser.py * fix loc 399 to 3999 for locations + add steps_to_reproduce for SCA Platform similar to SCA Legacy so Findings can be structured similarly * Fix spacing, remove trailing comma * Fix space * Removing redundancy * Fix the join on locations for Platform SCA * Removing redundant component dependency type from description since it's in impact as well * Make Impact more readable * Update parser.py * Update format for unit test * Fix case on unit test * Remove duplicate join to fix steps_to_reproduce formatting * Fix join for locations and steps_to_reproduce * fix newline eof * fix comma, remove redundant locations reference * Update parser.py * Fix eof * Update parser.py * fix , * Update parser.py * Update parser.py * edit steps_to_reproduce locations found * Update parser.py * fix 500 internal server error bug, add else None to impact for edge case * fix typo * adding cneil suggestion for truncating locations * implement suggestion from Mend engineers * fix new line * Update parser.py * Update parser.py * fix indentation * fix finding_info * attempt to fix conditional logic for only getting ACTIVE findingInfo status * attempt to fix cve and title for new component logic * update unit test for new title check * fix whitespace before comma * add unit test case for legacy title * fix duplicate conditional - add into tree_node logic * add title logic back in * add OPEN in addition to ACTIVE * Update parser.py --- dojo/tools/mend/parser.py | 61 +++++++++++-------- ...-sca-platform-api3-multiple-findings.json} | 0 unittests/tools/test_mend_parser.py | 8 ++- 3 files changed, 42 insertions(+), 27 deletions(-) rename unittests/scans/mend/{mend-sca-platform-api3-eleven-findings.json => mend-sca-platform-api3-multiple-findings.json} (100%) diff --git a/dojo/tools/mend/parser.py b/dojo/tools/mend/parser.py index d397f3490e9..cc18bf3175e 100644 --- a/dojo/tools/mend/parser.py +++ b/dojo/tools/mend/parser.py @@ -39,6 +39,7 @@ def _build_common_output(node, lib_name=None): description = "No Description Available" cvss3_score = None mitigation = "N/A" + locations = [] if "component" in node: description = ( "**Vulnerability Description**: " @@ -56,18 +57,19 @@ def _build_common_output(node, lib_name=None): + "**Library Type**: " + node["component"].get("libraryType", "") + "\n" - + "**Location Found**: " + node["component"].get("path", "") + "\n" - + "**Direct or Transitive Dependency**: " + node["component"].get("dependencyType", "") + "\n" - ) lib_name = node["component"].get("name") component_name = node["component"].get("artifactId") component_version = node["component"].get("version") - impact = node["component"].get("dependencyType") + impact = ( + "**Direct or Transitive Vulnerability**: " + + node["component"].get("dependencyType", "") + + "\n" + ) cvss3_score = node["vulnerability"].get("score", None) + component_path = node["component"].get("path", None) + if component_path: + locations.append(component_path) if "topFix" in node: try: topfix_node = node.get("topFix") @@ -82,7 +84,6 @@ def _build_common_output(node, lib_name=None): ) except Exception: logger.exception("Error handling topFix node.") -
elif "library" in node: node.get("project") description = ( @@ -136,18 +137,6 @@ def _build_common_output(node, lib_name=None): ) cwe = 1035 # default OWASP a9 until the report actually has them - # comment out the below for now - working on adding this into the above conditional statements since format can be slightly different - # mitigation = "N/A" - # if "topFix" in node: - # try: - # topfix_node = node.get("topFix") - # mitigation = "**Resolution** ({}): {}\n".format( - # topfix_node.get("date"), - # topfix_node.get("fixResolution"), - # ) - # except Exception: - # logger.exception("Error handling topFix node.") - filepaths = [] if "sourceFiles" in node: try: @@ -159,7 +148,6 @@ def _build_common_output(node, lib_name=None): "Error handling local paths for vulnerability.", ) - locations = [] if "locations" in node: try: locations_node = node.get("locations", []) @@ -171,8 +159,31 @@ def _build_common_output(node, lib_name=None): logger.exception( "Error handling local paths for vulnerability.", ) + if locations: + # Join the locations into a single string + joined_locations = ", ".join(locations) + + # If the length exceeds 3999 characters, trim it + if len(joined_locations) > 3999: + # Iterate over the locations and trim until the total length is <= 3999 + total_length = 0 + truncated_locations = [] + + for loc in locations: + loc_length = len(loc) + # Check if adding this location will exceed the limit + if total_length + loc_length + len(truncated_locations) <= 3996: # 3999 - len("...") = 3996 + truncated_locations.append(loc) + total_length += loc_length + else: + # Stop if adding the next location will exceed the limit + break + + # Add ellipsis at the end to indicate truncation + locations = truncated_locations + locations.append("...") # Add the ellipsis to the end of the locations list - filepaths = locations or filepaths + filepaths = filepaths new_finding = Finding( title=title, @@ -188,7 +199,8 @@ def _build_common_output(node, lib_name=None): dynamic_finding=True, cvssv3=cvss3_vector, cvssv3_score=float(cvss3_score) if cvss3_score is not None else None, - impact=impact, + impact=impact if impact is not None else None, + steps_to_reproduce="**Locations Found**: " + ", ".join(locations) if locations is not None else None, ) if cve: new_finding.unsaved_vulnerability_ids = [cve] @@ -238,7 +250,8 @@ def _build_common_output(node, lib_name=None): tree_node = content["response"] if tree_node: for node in tree_node: - findings.append(_build_common_output(node)) + if node.get("findingInfo", {}).get("status") == "ACTIVE": + findings.append(_build_common_output(node)) def create_finding_key(f: Finding) -> str: # """Hashes the finding's description and title to retrieve a key for deduplication.""" diff --git a/unittests/scans/mend/mend-sca-platform-api3-eleven-findings.json b/unittests/scans/mend/mend-sca-platform-api3-multiple-findings.json similarity index 100% rename from unittests/scans/mend/mend-sca-platform-api3-eleven-findings.json rename to unittests/scans/mend/mend-sca-platform-api3-multiple-findings.json diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py index cd544d503c1..75bbd54bcbb 100644 --- a/unittests/tools/test_mend_parser.py +++ b/unittests/tools/test_mend_parser.py @@ -42,7 +42,8 @@ def test_parse_file_with_one_sca_vuln_finding(self): findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) finding = list(findings)[0] - 
self.assertEqual("D:\\MendRepo\\test-product\\test-project\\test-project-subcomponent\\path\\to\\the\\Java\\commons-codec-1.6_donotuse.jar", finding.file_path) + self.assertEqual("**Locations Found**: D:\\MendRepo\\test-product\\test-project\\test-project-subcomponent\\path\\to\\the\\Java\\commons-codec-1.6_donotuse.jar", finding.steps_to_reproduce) + self.assertEqual("WS-2019-0379 | commons-codec-1.6.jar", finding.title) def test_parse_file_with_no_vuln_has_no_findings_platform(self): with open("unittests/scans/mend/mend-sca-platform-api3-no-findings.json", encoding="utf-8") as testfile: @@ -60,9 +61,10 @@ def test_parse_file_with_one_vuln_has_one_findings_platform(self): self.assertEqual("CVE-2024-51744", finding.unsaved_vulnerability_ids[0]) self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:R/S:U/C:L/I:N/A:N", finding.cvssv3) self.assertEqual(3.1, finding.cvssv3_score) + self.assertEqual("CVE-2024-51744 | github.com/golang-JWT/jwt-v3.2.2+incompatible", finding.title) def test_parse_file_with_multiple_vuln_has_multiple_finding_platform(self): - with open("unittests/scans/mend/mend-sca-platform-api3-eleven-findings.json", encoding="utf-8") as testfile: + with open("unittests/scans/mend/mend-sca-platform-api3-multiple-findings.json", encoding="utf-8") as testfile: parser = MendParser() findings = parser.get_findings(testfile, Test()) - self.assertEqual(11, len(findings)) + self.assertEqual(5, len(findings)) From feabd7b4ac83f26fc4f714918e5c4440ea5bc660 Mon Sep 17 00:00:00 2001 From: Matt Stanchek Date: Thu, 23 Jan 2025 17:26:00 -0500 Subject: [PATCH 60/99] New HCL AppScan on Cloud SAST parser (#11375) * new HCL AppScan on Cloud SAST parser * Update dojo/tools/hcl_asoc_sast/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/tools/hcl_asoc_sast/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * refactoring for linter * Remove settings sha file * Fix indentions * Update dojo/tools/hcl_asoc_sast/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/tools/hcl_asoc_sast/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/tools/hcl_asoc_sast/parser.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py * Update dojo/tools/hcl_asoc_sast/parser.py --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .../parsers/file/hcl_asoc_sast.md | 8 + dojo/settings/settings.dist.py | 3 + dojo/tools/hcl_asoc_sast/__init__.py | 1 + dojo/tools/hcl_asoc_sast/parser.py | 151 + unittests/scans/hcl_asoc_sast/many_issues.xml | 6856 +++++++++++++++++ unittests/scans/hcl_asoc_sast/no_issues.xml | 630 ++ unittests/scans/hcl_asoc_sast/one_issue.xml | 704 ++ unittests/tools/test_hcl_asoc_sast_parser.py | 36 + 8 files changed, 8389 insertions(+) create mode 100644 docs/content/en/connecting_your_tools/parsers/file/hcl_asoc_sast.md create mode 100644 dojo/tools/hcl_asoc_sast/__init__.py create mode 100644 dojo/tools/hcl_asoc_sast/parser.py create mode 100644 
unittests/scans/hcl_asoc_sast/many_issues.xml create mode 100644 unittests/scans/hcl_asoc_sast/no_issues.xml create mode 100644 unittests/scans/hcl_asoc_sast/one_issue.xml create mode 100644 unittests/tools/test_hcl_asoc_sast_parser.py diff --git a/docs/content/en/connecting_your_tools/parsers/file/hcl_asoc_sast.md b/docs/content/en/connecting_your_tools/parsers/file/hcl_asoc_sast.md new file mode 100644 index 00000000000..da86383ee37 --- /dev/null +++ b/docs/content/en/connecting_your_tools/parsers/file/hcl_asoc_sast.md @@ -0,0 +1,8 @@ +--- +title: "HCL AppScan on Cloud SAST" +toc_hide: true +--- +HCL AppScan on Cloud can export results in PDF, XML, and CSV formats, but this parser only supports the import of XML generated by HCL AppScan on Cloud for SAST scans. + +### Sample Scan Data +Sample HCL AppScan on Cloud SAST scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/hcl_asoc_sast). diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 7d1513f13a6..c8320e0bff4 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1271,6 +1271,7 @@ def saml2_attrib_map_format(dict): "Humble Json Importer": ["title"], "MSDefender Parser": ["title", "description"], "HCLAppScan XML": ["title", "description"], + "HCL AppScan on Cloud SAST XML": ["title", "file_path", "line", "severity"], "KICS Scan": ["file_path", "line", "severity", "description", "title"], "MobSF Scan": ["title", "description", "severity"], "MobSF Scorecard Scan": ["title", "description", "severity"], @@ -1361,6 +1362,7 @@ def saml2_attrib_map_format(dict): "Wazuh": True, "Nuclei Scan": True, "Threagile risks report": True, + "HCL AppScan on Cloud SAST XML": True, "AWS Inspector2 Scan": True, } @@ -1520,6 +1522,7 @@ def saml2_attrib_map_format(dict): "Wazuh Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "MSDefender Parser": DEDUPE_ALGO_HASH_CODE, "HCLAppScan XML": DEDUPE_ALGO_HASH_CODE, + "HCL AppScan on Cloud SAST XML": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL_OR_HASH_CODE, "KICS Scan": DEDUPE_ALGO_HASH_CODE, "MobSF Scan": DEDUPE_ALGO_HASH_CODE, "MobSF Scorecard Scan": DEDUPE_ALGO_HASH_CODE, diff --git a/dojo/tools/hcl_asoc_sast/__init__.py b/dojo/tools/hcl_asoc_sast/__init__.py new file mode 100644 index 00000000000..c8c5fc2da7d --- /dev/null +++ b/dojo/tools/hcl_asoc_sast/__init__.py @@ -0,0 +1 @@ +__author__ = "xpert98" diff --git a/dojo/tools/hcl_asoc_sast/parser.py b/dojo/tools/hcl_asoc_sast/parser.py new file mode 100644 index 00000000000..538cd785838 --- /dev/null +++ b/dojo/tools/hcl_asoc_sast/parser.py @@ -0,0 +1,151 @@ +from xml.dom import NamespaceErr + +from defusedxml import ElementTree as ET + +from dojo.models import Finding + + +class HCLASoCSASTParser: + def get_scan_types(self): + return ["HCL AppScan on Cloud SAST XML"] + + def get_label_for_scan_types(self, scan_type): + return scan_type + + def get_description_for_scan_types(self, scan_type): + return "Import XML output of HCL AppScan on Cloud SAST" + + def xmltreehelper(self, input): + if input.text is None: + output = None + elif "\n" in input.text: + output = "" + for i in input: + output = output + " " + i.text + else: + output = " " + input.text + return output + + def get_findings(self, file, test): + findings = [] + tree = ET.parse(file) + root = tree.getroot() + if "xml-report" not in root.tag: + msg = "This doesn't seem to be a valid HCL ASoC SAST xml file."
+ raise NamespaceErr(msg) + report = root.find("issue-group") + if report is not None: + for finding in report: + title = "" + description = "" + for item in finding: + match item.tag: + case "severity": + output = self.xmltreehelper(item) + severity = "Info" if output is None else output.strip(" ").capitalize() + case "cwe": + cwe = int(self.xmltreehelper(item)) + case "issue-type": + title = self.xmltreehelper(item).strip() + description = description + "**Issue-Type:** " + title + "\n" + case "issue-type-name": + title = self.xmltreehelper(item).strip() + description = description + "**Issue-Type-Name:** " + title + "\n" + case "source-file": + location = self.xmltreehelper(item).strip() + description = description + "**Location:** " + location + "\n" + case "line": + line = int(self.xmltreehelper(item).strip()) + description = description + "**Line:** " + str(line) + "\n" + case "threat-class": + threatclass = self.xmltreehelper(item) + description = description + "**Threat-Class:** " + threatclass + "\n" + case "entity": + entity = self.xmltreehelper(item) + title += "_" + entity.strip() + description = description + "**Entity:** " + entity + "\n" + case "security-risks": + security_risks = self.xmltreehelper(item) + description = description + "**Security-Risks:** " + security_risks + "\n" + case "cause-id": + causeid = self.xmltreehelper(item) + title += "_" + causeid.strip() + description = description + "**Cause-Id:** " + causeid + "\n" + case "element": + element = self.xmltreehelper(item) + description = description + "**Element:** " + element + "\n" + case "element-type": + elementtype = self.xmltreehelper(item) + description = description + "**ElementType:** " + elementtype + "\n" + case "variant-group": + variantgroup = item.iter() + description = description + "**Call Trace:** " + "\n" + for vitem in variantgroup: + if vitem.tag == "issue-information": + issueinformation = vitem.iter() + for iitem in issueinformation: + if iitem.tag == "context": + description = description + self.xmltreehelper(iitem) + "\n" + + case "fix": + recommendations = "" + externalreferences = "" + issuetypename = "" + remediation = "" + fix = item.iter() + for fitem in fix: + if fitem.tag == "types": + type = fitem.iter() + for titem in type: + if titem.tag == "name": + issuetypename = self.xmltreehelper(titem) + if fitem.tag == "remediation": + remediation = self.xmltreehelper(fitem) + + articlegroup = root.find("article-group") + if articlegroup is not None: + for articles in articlegroup: + if articles.attrib["id"] == issuetypename.strip() and articles.attrib["api"] == remediation.strip(): + articledetails = articles.iter() + for aitem in articledetails: + if aitem.tag == "cause": + description = description + "**Cause:**" + "\n" + for causeitem in aitem: + if causeitem.attrib["type"] == "string" and causeitem.text is not None: + description = description + causeitem.text + "\n" + if aitem.tag == "recommendations": + for recitem in aitem: + if recitem.attrib["type"] == "string" and recitem.text is not None: + recommendations = recommendations + recitem.text + "\n" + elif recitem.attrib["type"] == "object": + codeblock = recitem.iter() + for codeitem in codeblock: + if codeitem.tag == "item" and codeitem.attrib["type"] == "string": + if codeitem.text is None: + recommendations = recommendations + "\n" + else: + recommendations = recommendations + self.xmltreehelper(codeitem) + "\n" + + if aitem.tag == "externalReferences": + ref = aitem.iter() + for ritem in ref: + if ritem.tag == "title": + 
externalreferences = externalreferences + self.xmltreehelper(ritem).strip() + "\n" + if ritem.tag == "url": + externalreferences = externalreferences + self.xmltreehelper(ritem).strip() + "\n" + + prepared_finding = Finding( + title=title, + description=description, + file_path=location, + line=line, + severity=severity, + cwe=cwe, + mitigation=recommendations, + references=externalreferences, + dynamic_finding=False, + static_finding=True, + ) + findings.append(prepared_finding) + return findings + return findings diff --git a/unittests/scans/hcl_asoc_sast/many_issues.xml b/unittests/scans/hcl_asoc_sast/many_issues.xml new file mode 100644 index 00000000000..0c9e3caa499 --- /dev/null +++ b/unittests/scans/hcl_asoc_sast/many_issues.xml @@ -0,0 +1,6856 @@ + + + + added + added to request: + Additional Data: + Advisories + Affected Products: + Vulnerable URLs + Concurrent Logins: + Application Data + Application Server: + AppScan Severity + Harmless + This request/response contains binary content, which is not included in generated reports. + Body + Failed Requests + Cause + Causes + Causes: + Id + Name + The following weak cipher suites are supported by the server: + Code + Comment + Comments + Cookie + Cookies + CVE: + CWE: + Detailed Summary + A detailed listing of the scan results, including all issue types found, all recommended remediation tasks, all vulnerable URLs, etc. This section is intended to provide a more detailed understanding of the security status of the application, as well as assist in scoping and prioritizing the work required to remedy issues found. + Tracked or session ID cookies: + Tracked or session ID parameters: + Difference: + Document Map + This report consists of the following sections: + Domain + .Net + JavaScript execution: + Entity + Entity: + Example + Summary + This section provides a high level view of the information gathered during the scan, using graphs or comparative numbers. It is intended to provide a general understanding of the security status of the application. + Expires + Filtered URLs + First Set + Fix + Fix: + Fix Recommendations + General + General Information + Header + High + High severity issues: + Host: + Index + Informational + Informational severity issues: + Introduction + Introduction and Objectives + General information about the scan, including the project name, purpose of the scan, etc. + Issue + Issues Sorted by Issue Type + Issues Sorted by URL + Issues detected across + Issue Type + Issue Types + Issue Types + J2EE + JavaScripts + Login Settings + Low + Low severity issues: + Malicious + manipulated from: + Medium + Medium severity issues: + Method + Name + New URLs + Report Produced on Tree node: + this is now the same as the one below - should be removed + Number of Issues + Objectives + AppScan performs real-time security assessments on web applications. These assessments aim to uncover any security issues in the application, explain the impact and risks associated with these issues, and provide guidance in planning and prioritizing remediation. The objective of this assignment was to perform controlled attack and penetration activities to assess the overall level of security of the application. 
+ of + Operating system: + Original Request + Original Requests and Responses: + Original Response + Parameter + Parameters + Path + PHP + Query + Raw Test Response: + Reason + Reasoning: + Login sequence: + References: + Regulations + Remaining URLs + Remediation Task + removed + removed from request: + Removed URLs + Comprehensive Security Report + AppScan Web Application Security Report + Requested URL + Request + Response + Risk + Risk: + Rules: + Scan started: + Scan file name: + Sections + sections of the regulation: + Violated Section + GDPR Articles + Section Violation by Issue + Secure + Detailed Security Issues by Sections + Security Risks + Security Risks: + Login method: + In-session detection: + In-session pattern: + Severity + Severity: + Unique issues detected across + SSL Version + Table of Contents + Test Description: + Test Login + Test policy: + Test Request: + Test Requests and Responses: + Test Response (first) + Test Response + Test Response (last) + Test Response (next-to-last) + Technical Description: + Test Type: + Threat + WASC Threat Classification + Threat Classification: + TOC + to: + Total security issues included in the report: + Total security issues: + total security issues + Type + Unwanted + URL + URL: + Valid Login + Value + Variant + Visited URLs + Vulnerable URLs + Web server: + Issue Types that this task fixes + Simulation of the pop-up that appears when this page is opened in a browser + Location + Intent Action: + Intent Class: + Intent Data: + Intent Extra: + Intent Package: + Payload + Issues: + Method Signature: + Issue Validation Parameters: + Thread: + Timestamp: + Trace: + Issue Information + This issue was detected by AppScan's Mobile Analyzer. + Call Stack: + Header: + XML: + File Name: + File Permission: + Synopsis: + Dump: + Manifest: + Request: + Method Information + Signature: + File: + Name: + Permissions: + Class + Function + Line + Created by: + Summary of security issues + Issues + Go to Table of Contents + Issue Types: + Application Version: + Scan Name: + First Variant: + Variants Found: + OWASP: + X-Force: + (Only the first one is displayed) + No security issues discovered in the scan + Scan status: + Note that the scan on which this report is based was not completed. + Success + Refer to the site for more details. + Sink + Source + OWASP Top 10 + File Path: + Reference: + Free Plan + Please Note: + This summary report was created with the Application Security Analyzer Free Plan. Once you purchase the full service you will have access to a complete report with detailed descriptions of the issues found and how to remediate them. + Activities: + Coverage + Activities + This report includes important security information about your mobile application. 
+ Fix Recommendations: + Component + Glossary + Privacy: + Symbols Found: + Mobile Application Report + Class Signature: + Defining Class + Controllable Object Fields: + Receivers: + Services: + Receivers + Services + Method Signature: + Issue Information: + Settings For Target: + Provider: + Sample Report + Login Mode: + Views: + Views + None + Automatic + Manual + Calling Line + Calling Method + Class + Classification + Critical + Date Created + Discovery Method + Last Updated + Package + Scans: + Severity Value + Status + API + Element + Scheme + Sink: + Source: + Trace + Source File + Access Complexity + Access Vector + Authentication + Availability Impact + Confidentiality Impact + CVE + CVSS + Description + Exploitability + Integrity Impact + Summary + Activities that were tested for security vulnerabilities, as defined in the app's manifest. + Issue Types that ASoC has tested your application for. + Receivers that were tested for security vulnerabilities, as defined in the app's manifest. + Services that were tested for security vulnerabilities, as defined in the app's manifest. + Titles of Views encountered when crawling the app. + Leaked Information: + Password: + User Name: + Mitigation: + Alternate Fix Suggestions + This method is a part of the application code and appears in each of the grouped issue's traces. You should begin investigating a possible fix in the implementation of the method. + This method is a third-party API, with a common caller in each of the grouped issue's traces. You should begin investigating a possible fix at the caller: + Replace/Repair Vulnerable OpenSource: + Please refer to the details of this issue for fix recommendations. + Business Impact: + Created: + Security Report for: + Regulation Report for: + Notes: + - Details + - Discussion + Contains: + {0} issues + (out of {0}) + - Audit Trail + Cause: + HCL Application Security on Cloud, Version + Directory: + Constant Value: + Found in: + Informational + Low + Medium + High + Critical + User Supplied Credit Card Number: + User Supplied Id: + User Supplied Input: + User Supplied Password: + User Supplied Phone Number: + User Supplied User Name: + - Fix Recommendation + Included for each issue separately. + Port: + Application Name: + Copyleft: + Copyright Risk: + Date: + Library Name: + License Name: + Open Source Report + Licenses + Linking: + Patent Risk: + Reference Type: + Reference URL: + Risk Level: + Libraries with high risk level: + Libraries with low risk level: + Libraries with medium risk level: + Libraries with unknown risk level: + Royalty Free: + Total Open Source Libraries: + AppScan on Cloud + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, regardless of whether the code is dynamically or statically linked. (example: GPL). Attribution and/or license terms may be required. + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, subject to an exception for software that dynamically links to the original code. These licenses include LGPL and GPL with Class Path Exception, as examples. Attribution and/or license terms may be required. + Anyone may use the code without restriction. 
+ Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required.
+ Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required.
+ Anyone who distributes a modification of the code may be required to make the source code for the modification publicly available. Attribution and/or license terms may also be required.
+ Anyone who distributes the code must provide certain notices as described in the license. These generally require providing attributions and/or license terms with the software.
+ Specific identified patent risks
+ Royalty free and no identified patent risks
+ No patents granted
+ Royalty free unless litigated
+ Report created at:
+ Report for scan:
+ Open source library name
+ Risk level
+ Security Report
+ Open Source Libraries
+ Unknown
+ Reference
+ In this section you’ll find more details about the fields and their values.
+ Disabled
+ Enabled
+ None
+ Automatic
+ Prompt
+ Recorded login
+ Unknown
+ (Modified)
+ Any
+ Unknown
+ Sample Trace
+ License Type
+ Scan Security Report
+ This report lists all the open source libraries found in your scan, and their associated open source Risk Levels.
+
+ Open Source Risk Levels are not the same as the Risk Levels in Security Reports, and not related to the vulnerabilities of specific issues.
+ You can see if any of the libraries have known vulnerabilities in Issue Management view.
+ Number Of Libraries
+ Report Date:
+ Scanned under Application:
+ Scan Start Date:
+ Total Open Source License Types:
+ Details
+ Threat Classification:
+ Fix Groups:
+ Implementation of {0}
+ Usage of {0} via {1}
+ Fix Group #{0}: {1}
+ This section groups {0} issues of type {1} with significant commonality in their traces.
+ This section groups {0} issues with significant commonality in their traces. The following issue types are included:
+ This section groups {0} issues of type {1} with a common opensource file.
+ This section groups {0} issues with a common opensource file. The following issue types are included:
+ These issues are grouped together to try to help you find a common fix that resolves them all.
+ These method calls are also common to the traces of the issues in this group. They represent other possible locations to investigate a fix.
+ All {0} issues in this report appear to be independent, lacking the commonality required in their traces to be grouped together. They all appear in this section.
+ This section lists the remaining {0} issues that could not be included in any other fix groups.
+ The following issue types are included:
+ Ungrouped
+ Fix Recommendation
+ Library Version:
+ API:
+ at line
+ Call
+ Caller:
+ Description:
+ Name:
+ Example Trace:
+ File
+ Lost Sink
+ Not a Validator
+ Sample Trace
+ Publish date:
+ Resolution:
+ Source and Sink
+ Tainted Arg
+ Taint Propagator
+ via
+ Virtual Lost Sink
+ Test Optimization:
+ Normal
+ Optimized
+ Issue ID:
+ Compliance Security Report
+ Undefined
+ Undefined
+ Title:
+ Report Date UTC:
+ Fix Group ID:
+ Method:
+ Query String:
+ URI:
+ Arguments:
+ Call Trace:
+ Object:
+ Return:
+ Stack:
+ Type:
+ By Fix Groups:
+ By Issue Types:
+ Fix-Groups
+ Library:
+ Location:
+ Status:
+ Common API Call:
+ Common Fix Point:
+ Common Open Source:
+ Common Fix Point:
+ OpenSource
+ API:
+ Location of fix:
+ Library name:
+ Location of fix:
+ Advisory:
+ Custom Advisory:
+ Hosts
+ Fast
+ Faster
+ Fastest
+ No Optimization
+ How to Fix:
+ Report Name:
+ Technology:
+ Scan Information
+ General Advisory:
+ Finding specific advisory:
+ Example:
+ Exploit Example:
+ (none)
+ Not applicable for this issue.
+ HTTP Only
+ JS Stack Trace
+ Same Site
+ False
+ True
+ (Mixed)
+ Articles
+ CWE
+ Exploit example
+ External references
+ Recommendations
+ Language:
+ How to Fix
+ See also issue-details 'Resolution' section below.
+ Mitigation
+ Important:
+ Note: The number of issues found exceeded the maximum that can be shown in a single set of results.
+ The scan results show {0} representative issues.
+ Personal Scan
+ Personal Scans are deleted after {0} days, unless promoted to the application within that time.
+ Additional Information:
+ Fixed
+ In Progress
+ New
+ Noise
+ Open
+ Passed
+ Reopened
+ Definitive
+ Scan Coverage Findings
+ Suspect
+ Cipher Suites:
+ ID
+ Fix recommendation
+ Default (Production)
+ Default (Staging)
+ Default
+ Body
+ Cookie
+ Global
+ Header
+ Header Name
+ Link
+ Other
+ Page
+ Parameter
+ Parameter Name
+ Query
+ Role
+ Source Line
+ Unspecified
+ Critical
+ High
+ Low
+ Medium
+ Unspecified
+ Report for application:
+ This report lists all the open source libraries found in your application, and their associated open source Risk Levels.
+ License Details
+ Library Name
+ Version
+ Undefined
+ Critical severity issues:
+ Copyleft applies on modifications as well as own code that uses the open-source software.
+ Non-copyleft license.
+ Copyleft applies only to modifications.
+ Undefined
+ Dynamic linking will not infect the linking code.
+ The licensing of the linking code will remain unaffected.
+ Undefined
+ Linking will infect the linking code.
+ Alpine
+ Arch Linux
+ Bower
+ Build Configuration File
+ Details available in CDNJS
+ Debian
+ .NET
+ Eclipse OSGI Bundle
+ Details available in GitHub repository
+ License information in host site
+ License File
+ Node package manager
+ NuGet Package
+ Other
+ POM file
+ Project Home Page
+ Python Package Index
+ Readme File
+ RPM
+ RubyGems
+ License assigned manually by a user in the organization
+ Undefined
+ High
+ Low
+ Medium
+ Undefined
+ Unknown
+ Royalty-free unless litigated.
+ No patents granted.
+ Royalty-free and no identified patent risks.
+ Undefined severity issues:
+ Last Found
+ CVSS Version
+ Total Items:
+ IAST call stack:
+ Undefined
+ - Comments
+ Method:
+ Both
+ Config
+ Hash
+ Dependency Root:
+ Source-file and Package-manager
+ Package-manager
+ Source-file
+ None
+
+
+
+
+ HCL
+ Application Security on Cloud
+ sample-php
+ Unspecified
+ Friday, October 18, 2024
+ FullReport
+ 83
+ False
+ 30
+ 20000
+ False
+ ASoC
+
+
+ 1
+ 1
+ 1
+ 1
+ 0
+ 1
+ 1
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+ Authentication Bypass
+
+
+ Configuration
+
+
+ Cross-Site Scripting
+
+
+ Injection
+
+
+ Missing Standardized Error Handling Mechanism
+
+
+ PathTraversal
+
+
+ PrivilegeEscalation
+
+
+ Reflected Cross Site Scripting
+
+
+ AppDOS
+
+
+ Logging
+
+
+ Privacy
+
+
+ SessionManagement.Cookies
+
+
+ Missing Encryption of Sensitive Data
+
+
+
+
+
+
+
+
+ AccessControl.Bypass
+
+ 1
+
+
+
+ Verify Session Data Stored in A Secure Directory
+ sample-php/config/php.ini:1309
+
+
+
+ session.save_path = "N;/path"
+
+
+
+
+
+
+
+
+ Verify Session Data Stored in A Secure Directory
+ High
+
+ Verify Session Data Stored in A Secure Directory
+
+
+
+ AccessControl.Bypass
+
+
+
+
+ High
+ 3
+ SAST
+
+ 288
+
+
+ catInsufficientAuthorization
+
+
+ privilegeEscalation
+
+
+ insecureWebAppConfiguration
+
+ Open
+ Friday, October 18, 2024
+ Friday, October 18, 2024
+ Authentication Bypass
+ sample-php/config/php.ini:1309
+ sample-php/config/php.ini
+ None
+ 1309
+ 0
+ 068355ff-748d-ef11-8473-000d3a0fc910
+ 418355ff-748d-ef11-8473-000d3a0fc910
+
+
+ 10/18/2024 17:18:37
+
+ IssueTypeName: → AccessControl.Bypass
+ Location: → sample-php/config/php.ini:1309
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + AccessControl.Bypass + + 1 + + + + Verify Session Data Stored in A Secure Directory + sample-php/config/php.ini:1325 + + + + session.save_path = "N;MODE;/path" + + + + + + + + + Verify Session Data Stored in A Secure Directory + High + + Verify Session Data Stored in A Secure Directory + + + + AccessControl.Bypass + + + + + High + 3 + SAST + + 288 + + + catInsufficientAuthorization + + + privilegeEscalation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Authentication Bypass + sample-php/config/php.ini:1325 + sample-php/config/php.ini + None + 1325 + 0 + 068355ff-748d-ef11-8473-000d3a0fc910 + 448355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AccessControl.Bypass
+ Location: → sample-php/config/php.ini:1325
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + Configuration + + 1 + + + + The disable_functions parameter is empty + sample-php/config/php.ini:312 + + + + disable_functions = + +; This directive allows you to disable certain classes. + + + + + + + + + The disable_functions parameter is empty + High + + The disable_functions parameter is empty + + + + Configuration + + + + + High + 3 + SAST + + 16 + + + catApplicationMisconfiguration + + + sensitiveInformation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Configuration + sample-php/config/php.ini:312 + sample-php/config/php.ini + None + 312 + 0 + 0f8355ff-748d-ef11-8473-000d3a0fc910 + 208355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Configuration
+ Location: → sample-php/config/php.ini:312
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditApplicationOwnerForm.php:50 + + + + <input type="text" class="form-control" id="ownerName" name="ownerName" value="<?php echo $cisData[0]['ownername']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditApplicationOwnerForm.php:50 + sample-php/src/adminEditApplicationOwnerForm.php + None + 50 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 5f8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditApplicationOwnerForm.php:50
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditApplicationOwnerForm.php:53 + + + + <input type="hidden" id="ownerId" name="ownerId" value="<?php echo $ownerId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditApplicationOwnerForm.php:53 + sample-php/src/adminEditApplicationOwnerForm.php + None + 53 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 628355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditApplicationOwnerForm.php:53
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditApplicationTypeForm.php:48 + + + + <input type="text" class="form-control" id="typeName" name="typeName" value="<?php echo $cisData[0]['typename']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditApplicationTypeForm.php:48 + sample-php/src/adminEditApplicationTypeForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 658355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditApplicationTypeForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditApplicationTypeForm.php:51 + + + + <input type="hidden" id="typeId" name="typeId" value="<?php echo $typeId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditApplicationTypeForm.php:51 + sample-php/src/adminEditApplicationTypeForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 688355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditApplicationTypeForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditBusinessUnitForm.php:48 + + + + <input type="text" class="form-control" id="businessUnitName" name="businessUnitName" value="<?php echo $cisData[0]['businessunitname']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditBusinessUnitForm.php:48 + sample-php/src/adminEditBusinessUnitForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 6b8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditBusinessUnitForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditBusinessUnitForm.php:51 + + + + <input type="hidden" id="buId" name="buId" value="<?php echo $buId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditBusinessUnitForm.php:51 + sample-php/src/adminEditBusinessUnitForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 6e8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditBusinessUnitForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditCodeLanguageForm.php:48 + + + + <input type="text" class="form-control" id="languageName" name="languageName" value="<?php echo $cisData[0]['languagename']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditCodeLanguageForm.php:48 + sample-php/src/adminEditCodeLanguageForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 718355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditCodeLanguageForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditCodeLanguageForm.php:51 + + + + <input type="hidden" id="languageId" name="languageId" value="<?php echo $languageId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditCodeLanguageForm.php:51 + sample-php/src/adminEditCodeLanguageForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 748355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditCodeLanguageForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditDataClassificationForm.php:48 + + + + <input type="text" class="form-control" id="dataClassificationName" name="dataClassificationName" value="<?php echo $cisData[0]['dataclassificationname']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditDataClassificationForm.php:48 + sample-php/src/adminEditDataClassificationForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 778355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditDataClassificationForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditDataClassificationForm.php:51 + + + + <input type="hidden" id="dataClassificationId" name="dataClassificationId" value="<?php echo $dataClassificationId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditDataClassificationForm.php:51 + sample-php/src/adminEditDataClassificationForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 7a8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditDataClassificationForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditDeploymentEnvironmentForm.php:48 + + + + <input type="text" class="form-control" id="deploymentEnvironmentName" name="deploymentEnvironmentName" value="<?php echo $cisData[0]['deploymentenvironmentname']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditDeploymentEnvironmentForm.php:48 + sample-php/src/adminEditDeploymentEnvironmentForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 7d8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditDeploymentEnvironmentForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditDeploymentEnvironmentForm.php:51 + + + + <input type="hidden" id="deploymentEnvironmentId" name="deploymentEnvironmentId" value="<?php echo $deploymentEnvironmentId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditDeploymentEnvironmentForm.php:51 + sample-php/src/adminEditDeploymentEnvironmentForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 808355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditDeploymentEnvironmentForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditExposureLevelForm.php:48 + + + + <input type="text" class="form-control" id="exposureLevelName" name="exposureLevelName" value="<?php echo $cisData[0]['exposurename']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditExposureLevelForm.php:48 + sample-php/src/adminEditExposureLevelForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 838355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditExposureLevelForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditExposureLevelForm.php:51 + + + + <input type="hidden" id="exposureId" name="exposureId" value="<?php echo $exposureId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditExposureLevelForm.php:51 + sample-php/src/adminEditExposureLevelForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 868355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditExposureLevelForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditLifecycleStageForm.php:48 + + + + <input type="text" class="form-control" id="lifecycleStageName" name="lifecycleStageName" value="<?php echo $cisData[0]['lifecyclestagename']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditLifecycleStageForm.php:48 + sample-php/src/adminEditLifecycleStageForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 8a8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditLifecycleStageForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditLifecycleStageForm.php:51 + + + + <input type="hidden" id="lifecycleStageId" name="lifecycleStageId" value="<?php echo $lifecycleStageId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditLifecycleStageForm.php:51 + sample-php/src/adminEditLifecycleStageForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 908355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditLifecycleStageForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditRiskLevelForm.php:48 + + + + <input type="text" class="form-control" id="riskLevelName" name="riskLevelName" value="<?php echo $cisData[0]['risklevelname']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditRiskLevelForm.php:48 + sample-php/src/adminEditRiskLevelForm.php + None + 48 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 938355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditRiskLevelForm.php:48
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditRiskLevelForm.php:51 + + + + <input type="hidden" id="riskLevelId" name="riskLevelId" value="<?php echo $riskLevelId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditRiskLevelForm.php:51 + sample-php/src/adminEditRiskLevelForm.php + None + 51 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 968355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditRiskLevelForm.php:51
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditUserForm.php:50 + + + + <input type="password" class="form-control" id="userPassword" name="userPassword"> + </div> + <div class="form-group"> + <label for="userFirstName">First Name</label> + <input type="text" class="form-control" id="userFirstName" name="userFirstName" value="<?php echo $userData[2]; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditUserForm.php:50 + sample-php/src/adminEditUserForm.php + None + 50 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 998355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditUserForm.php:50
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditUserForm.php:58 + + + + <input type="text" class="form-control" id="userLastName" name="userLastName" value="<?php echo $userData[3]; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditUserForm.php:58 + sample-php/src/adminEditUserForm.php + None + 58 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 9c8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditUserForm.php:58
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/adminEditUserForm.php:61 + + + + <input type="hidden" id="userId" name="userId" value="<?php echo $userId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/adminEditUserForm.php:61 + sample-php/src/adminEditUserForm.php + None + 61 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 9f8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/adminEditUserForm.php:61
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:85 + + + + <input type="hidden" id="appId" name="appId" value="<?php echo $applicationData[0]['id']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:85 + sample-php/src/editApplicationForm.php + None + 85 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 4f6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:85
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:90 + + + + <input type="text" class="form-control" id="commonName" name="commonName" value="<?php echo $applicationData[0]['commonname']; ?>" required> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:90 + sample-php/src/editApplicationForm.php + None + 90 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 526fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:90
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:96 + + + + <input type="hidden" id="currentPrimaryOwnerId" value="<?php echo $applicationData[0]['primaryownerid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:96 + sample-php/src/editApplicationForm.php + None + 96 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 556fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:96
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:102 + + + + <input type="text" class="form-control" id="secondaryOwners" name="secondaryOwners" value="<?php echo $applicationData[0]['secondaryowners']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:102 + sample-php/src/editApplicationForm.php + None + 102 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 586fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:102
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:117 + + + + <input type="text" class="form-control" id="codeRepoUrl" name="codeRepoUrl" value="<?php echo $applicationData[0]['coderepourl']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:117 + sample-php/src/editApplicationForm.php + None + 117 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 5b6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:117
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:121 + + + + <input type="text" class="form-control" id="binaryRepoUrl" name="binaryRepoUrl" value="<?php echo $applicationData[0]['binaryrepourl']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:121 + sample-php/src/editApplicationForm.php + None + 121 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 5e6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:121
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:126 + + + + <input type="hidden" id="currentPrimaryLanguageId" value="<?php echo $applicationData[0]['primarylanguageid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:126 + sample-php/src/editApplicationForm.php + None + 126 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 616fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:126
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:135 + + + + <input type="hidden" id="currentTypeId" value="<?php echo $applicationData[0]['typeid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:135 + sample-php/src/editApplicationForm.php + None + 135 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 646fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:135
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:141 + + + + <input type="hidden" id="currentBusinessUnitId" value="<?php echo $applicationData[0]['businessunitid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:141 + sample-php/src/editApplicationForm.php + None + 141 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 676fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:141
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:146 + + + + <input type="hidden" id="currentExposureId" value="<?php echo $applicationData[0]['exposureid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:146 + sample-php/src/editApplicationForm.php + None + 146 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 6a6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:146
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:151 + + + + <input type="text" class="form-control" id="numUsers" name="numUsers" value="<?php echo $applicationData[0]['numusers']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:151 + sample-php/src/editApplicationForm.php + None + 151 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 6d6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:151
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:155 + + + + <input type="hidden" id="currentDataClassificationId" value="<?php echo $applicationData[0]['dataclassificationid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:155 + sample-php/src/editApplicationForm.php + None + 155 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 706fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:155
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:160 + + + + <input type="hidden" id="currentDeploymentEnvironmentId" value="<?php echo $applicationData[0]['deploymentenvironmentid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:160 + sample-php/src/editApplicationForm.php + None + 160 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 736fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:160
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:165 + + + + <input type="text" class="form-control" id="deploymentEnvironmentUrl" name="deploymentEnvironmentUrl" value="<?php echo $applicationData[0]['deploymentenvironmenturl']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:165 + sample-php/src/editApplicationForm.php + None + 165 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 766fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:165
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:169 + + + + <input type="hidden" id="currentRiskLevelId" value="<?php echo $applicationData[0]['risklevelid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:169 + sample-php/src/editApplicationForm.php + None + 169 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 796fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:169
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:174 + + + + <input type="text" class="form-control" id="regulations" name="regulations" value="<?php echo $applicationData[0]['regulations']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:174 + sample-php/src/editApplicationForm.php + None + 174 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 7c6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:174
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:178 + + + + <input type="text" class="form-control" id="chatChannel" name="chatChannel" value="<?php echo $applicationData[0]['chatchannel']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:178 + sample-php/src/editApplicationForm.php + None + 178 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 7f6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:178
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:182 + + + + <input type="text" class="form-control" id="agileScrumBoardUrl" name="agileScrumBoardUrl" value="<?php echo $applicationData[0]['agilescrumboardurl']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:182 + sample-php/src/editApplicationForm.php + None + 182 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 826fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:182
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:186 + + + + <input type="text" class="form-control" id="buildServerUrl" name="buildServerUrl" value="<?php echo $applicationData[0]['buildserverurl']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:186 + sample-php/src/editApplicationForm.php + None + 186 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 856fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:186
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:190 + + + + <input type="text" class="form-control" id="age" name="age" value="<?php echo $applicationData[0]['age']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:190 + sample-php/src/editApplicationForm.php + None + 190 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 886fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:190
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:194 + + + + <input type="hidden" id="currentLifecycleStageId" value="<?php echo $applicationData[0]['lifecyclestageid']; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:194 + sample-php/src/editApplicationForm.php + None + 194 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 8b6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:194
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php:197 + + + + <input type="hidden" id="appId" name="appId" value="<?php echo $appId; ?>"> + + + + + + + + + Potential user controlled data within PHP converted to HTML + High + + Potential user controlled data within PHP converted to HTML + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/editApplicationForm.php:197 + sample-php/src/editApplicationForm.php + None + 197 + 0 + 0b8355ff-748d-ef11-8473-000d3a0fc910 + 8e6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/editApplicationForm.php:197
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php:220 + + + + $("#accessibility").append('<option value="'+Accessibility[i].guid+'">'+Accessibility[i].name+'</option>') + + + + + + + + + Potential XSS vulnerability detected in jQuery.append() method + High + + Potential XSS vulnerability detected in jQuery.append() method + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/manageApplicationIntegrations.php:220 + sample-php/src/manageApplicationIntegrations.php + None + 220 + 0 + 0e8355ff-748d-ef11-8473-000d3a0fc910 + 916fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting
+ Location: → sample-php/src/manageApplicationIntegrations.php:220
+ Severity: → High
+ Scanner: → AppScan Static Analyzer
+
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php:224 + + + + $("#businessRisk").append('<option value="'+BusinessRisk[i].guid+'">'+BusinessRisk[i].name+'</option>') + + + + + + + + + Potential XSS vulnerability detected in jQuery.append() method + High + + Potential XSS vulnerability detected in jQuery.append() method + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/manageApplicationIntegrations.php:224 + sample-php/src/manageApplicationIntegrations.php + None + 224 + 0 + 0e8355ff-748d-ef11-8473-000d3a0fc910 + 946fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting + Location: → sample-php/src/manageApplicationIntegrations.php:224 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php:227 + + + + $("#devPhase").append('<option value="'+DevPhase[i].guid+'">'+DevPhase[i].name+'</option>') + + + + + + + + + Potential XSS vulnerability detected in jQuery.append() method + High + + Potential XSS vulnerability detected in jQuery.append() method + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/manageApplicationIntegrations.php:227 + sample-php/src/manageApplicationIntegrations.php + None + 227 + 0 + 0e8355ff-748d-ef11-8473-000d3a0fc910 + 976fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting + Location: → sample-php/src/manageApplicationIntegrations.php:227 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php:230 + + + + $("#devStrategy").append('<option value="'+DevStrategy[i].guid+'">'+DevStrategy[i].name+'</option>') + + + + + + + + + Potential XSS vulnerability detected in jQuery.append() method + High + + Potential XSS vulnerability detected in jQuery.append() method + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/manageApplicationIntegrations.php:230 + sample-php/src/manageApplicationIntegrations.php + None + 230 + 0 + 0e8355ff-748d-ef11-8473-000d3a0fc910 + 9a6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting + Location: → sample-php/src/manageApplicationIntegrations.php:230 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting + + 1 + + + + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php:233 + + + + $("#issueTemplate").append('<option value="'+issueTemplates[i].id+'">'+issueTemplates[i].name+'</option>') + + + + + + + + + Potential XSS vulnerability detected in jQuery.append() method + High + + Potential XSS vulnerability detected in jQuery.append() method + + + + CrossSiteScripting + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + clientCodeExecution + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Cross-Site Scripting + sample-php/src/manageApplicationIntegrations.php:233 + sample-php/src/manageApplicationIntegrations.php + None + 233 + 0 + 0e8355ff-748d-ef11-8473-000d3a0fc910 + 9d6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting + Location: → sample-php/src/manageApplicationIntegrations.php:233 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Injection + + 1 + + + + The file_uploads directive is enabled + sample-php/config/php.ini:825 + + + + file_uploads = On + + + + + + + + + The file_uploads directive is enabled + High + + The file_uploads directive is enabled + + + + Injection + + + + + High + 3 + SAST + + 74 + + + catAbuseOfFunctionality + + + maliciousContent + + + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Injection + sample-php/config/php.ini:825 + sample-php/config/php.ini + None + 825 + 0 + 088355ff-748d-ef11-8473-000d3a0fc910 + 2f8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Injection + Location: → sample-php/config/php.ini:825 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Injection + + 1 + + + + The allow_url_fopen directive is enabled + sample-php/config/php.ini:845 + + + + allow_url_fopen = On + + + + + + + + + The allow_url_fopen directive is enabled + High + + The allow_url_fopen directive is enabled + + + + Injection + + + + + High + 3 + SAST + + 74 + + + catAbuseOfFunctionality + + + maliciousContent + + + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Injection + sample-php/config/php.ini:845 + sample-php/config/php.ini + None + 845 + 0 + 048355ff-748d-ef11-8473-000d3a0fc910 + 328355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Injection + Location: → sample-php/config/php.ini:845 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + ErrorHandling.Missing + + 1 + + + + The display_errors directive has been enabled + sample-php/config/php.ini:482 + + + + display_errors = On + + + + + + + + + The display_errors directive has been enabled + High + + The display_errors directive has been enabled + + + + ErrorHandling.Missing + + + + + High + 3 + SAST + + 544 + + + catAbuseOfFunctionality + + + WB_maskAnomalies + + + errorMessagesReturned + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Missing Standardized Error Handling Mechanism + sample-php/config/php.ini:482 + sample-php/config/php.ini + None + 482 + 0 + 0c8355ff-748d-ef11-8473-000d3a0fc910 + 298355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → ErrorHandling.Missing + Location: → sample-php/config/php.ini:482 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + PathTraversal + + 1 + + + + Potential path traversal through variable argument + sample-php/src/db.php:42 + + + + file_get_contents($dbPassFile) + + + + + + + + + Potential path traversal through variable argument + High + + Potential path traversal through variable argument + + + + PathTraversal + + + + + High + 3 + SAST + + 73 + + + catPathTraversal + + + dataResourceDownload + WB_anyFileAccess + + + dotDotNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + PathTraversal + sample-php/src/db.php:42 + sample-php/src/db.php + None + 42 + 0 + 028355ff-748d-ef11-8473-000d3a0fc910 + 4c6fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → PathTraversal + Location: → sample-php/src/db.php:42 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + PathTraversal + + 1 + + + + Potential path traversal through variable argument + sample-php/src/secret.php:42 + + + + file_get_contents($secretKeyFile) + + + + + + + + + Potential path traversal through variable argument + High + + Potential path traversal through variable argument + + + + PathTraversal + + + + + High + 3 + SAST + + 73 + + + catPathTraversal + + + dataResourceDownload + WB_anyFileAccess + + + dotDotNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + PathTraversal + sample-php/src/secret.php:42 + sample-php/src/secret.php + None + 42 + 0 + 028355ff-748d-ef11-8473-000d3a0fc910 + a06fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → PathTraversal + Location: → sample-php/src/secret.php:42 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + PathTraversal + + 1 + + + + Potential path traversal through variable argument + sample-php/src/secret.php:63 + + + + file_get_contents($ivSeedFile) + + + + + + + + + Potential path traversal through variable argument + High + + Potential path traversal through variable argument + + + + PathTraversal + + + + + High + 3 + SAST + + 73 + + + catPathTraversal + + + dataResourceDownload + WB_anyFileAccess + + + dotDotNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + PathTraversal + sample-php/src/secret.php:63 + sample-php/src/secret.php + None + 63 + 0 + 028355ff-748d-ef11-8473-000d3a0fc910 + a36fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → PathTraversal + Location: → sample-php/src/secret.php:63 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + PrivilegeEscalation + + 1 + + + + No non-root USER specified in Dockerfile configuration + sample-php/Dockerfile:1 + + + + FROM php:7.4-apache + + + + + + + + + No non-root USER specified in Dockerfile configuration + High + + No non-root USER specified in Dockerfile configuration + + + + PrivilegeEscalation + + + + + High + 3 + SAST + + 266 + + + catInsufficientAuthentication + + + privilegeEscalation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + PrivilegeEscalation + sample-php/Dockerfile:1 + sample-php/Dockerfile + None + 1 + 0 + 098355ff-748d-ef11-8473-000d3a0fc910 + 1a8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → PrivilegeEscalation + Location: → sample-php/Dockerfile:1 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminApplicationOwners.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminApplicationOwners.php:96 + sample-php/src/adminApplicationOwners.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 4d8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminApplicationOwners.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminApplicationTypes.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminApplicationTypes.php:96 + sample-php/src/adminApplicationTypes.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 508355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminApplicationTypes.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminBusinessUnits.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminBusinessUnits.php:96 + sample-php/src/adminBusinessUnits.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 538355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminBusinessUnits.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminCodeLanguages.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminCodeLanguages.php:96 + sample-php/src/adminCodeLanguages.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 568355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminCodeLanguages.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminDataClassifications.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminDataClassifications.php:96 + sample-php/src/adminDataClassifications.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 598355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminDataClassifications.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminDeploymentEnvironments.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminDeploymentEnvironments.php:96 + sample-php/src/adminDeploymentEnvironments.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 5c8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminDeploymentEnvironments.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminExposureLevels.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminExposureLevels.php:96 + sample-php/src/adminExposureLevels.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + a28355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminExposureLevels.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminLifecycleStages.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminLifecycleStages.php:96 + sample-php/src/adminLifecycleStages.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + a58355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminLifecycleStages.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminRiskLevels.php:96 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminRiskLevels.php:96 + sample-php/src/adminRiskLevels.php + None + 96 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + a88355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminRiskLevels.php:96 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/adminUsers.php:98 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/adminUsers.php:98 + sample-php/src/adminUsers.php + None + 98 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + ae8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/adminUsers.php:98 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/applications.php:97 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/applications.php:97 + sample-php/src/applications.php + None + 97 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 436fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/applications.php:97 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/applications.php:101 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/applications.php:101 + sample-php/src/applications.php + None + 101 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 466fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/applications.php:101 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + CrossSiteScripting.Reflected + + 1 + + + + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/applications.php:105 + + + + ).attr("href", url) + + + + + + + + + JQuery Potentially Setting An Attribute With Tainted Data + High + + JQuery Potentially Setting An Attribute With Tainted Data + + + + CrossSiteScripting.Reflected + + + + + High + 3 + SAST + + 79 + + + catCrossSiteScripting + + + userImpersonation + + + hazardousCharactersNotSanitized + incorrectDataType + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Reflected Cross Site Scripting + sample-php/src/applications.php:105 + sample-php/src/applications.php + None + 105 + 0 + 038355ff-748d-ef11-8473-000d3a0fc910 + 496fa805-758d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → CrossSiteScripting.Reflected + Location: → sample-php/src/applications.php:105 + Severity: → High + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AppDOS + + 1 + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + sample-php/config/php.ini:1108 + + + + odbc.max_persistent = -1 + + + + + + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + Medium + + The number of concurrent database connections (persistent or otherwise) has been set too high + + + + AppDOS + + + + + Medium + 2 + SAST + + 400 + + + catDenialOfService + + + denialOfService + + + boundsCheckingOnParamValues + inputLengthNotChecked + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + AppDOS + sample-php/config/php.ini:1108 + sample-php/config/php.ini + None + 1108 + 0 + 118355ff-748d-ef11-8473-000d3a0fc910 + 358355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AppDOS + Location: → sample-php/config/php.ini:1108 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AppDOS + + 1 + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + sample-php/config/php.ini:1112 + + + + odbc.max_links = -1 + + + + + + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + Medium + + The number of concurrent database connections (persistent or otherwise) has been set too high + + + + AppDOS + + + + + Medium + 2 + SAST + + 400 + + + catDenialOfService + + + denialOfService + + + boundsCheckingOnParamValues + inputLengthNotChecked + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + AppDOS + sample-php/config/php.ini:1112 + sample-php/config/php.ini + None + 1112 + 0 + 118355ff-748d-ef11-8473-000d3a0fc910 + 388355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AppDOS + Location: → sample-php/config/php.ini:1112 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AppDOS + + 1 + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + sample-php/config/php.ini:1129 + + + + mysqli.max_persistent = -1 + + + + + + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + Medium + + The number of concurrent database connections (persistent or otherwise) has been set too high + + + + AppDOS + + + + + Medium + 2 + SAST + + 400 + + + catDenialOfService + + + denialOfService + + + boundsCheckingOnParamValues + inputLengthNotChecked + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + AppDOS + sample-php/config/php.ini:1129 + sample-php/config/php.ini + None + 1129 + 0 + 118355ff-748d-ef11-8473-000d3a0fc910 + 3b8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AppDOS + Location: → sample-php/config/php.ini:1129 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AppDOS + + 1 + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + sample-php/config/php.ini:1141 + + + + mysqli.max_links = -1 + + + + + + + + + The number of concurrent database connections (persistent or otherwise) has been set too high + Medium + + The number of concurrent database connections (persistent or otherwise) has been set too high + + + + AppDOS + + + + + Medium + 2 + SAST + + 400 + + + catDenialOfService + + + denialOfService + + + boundsCheckingOnParamValues + inputLengthNotChecked + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + AppDOS + sample-php/config/php.ini:1141 + sample-php/config/php.ini + None + 1141 + 0 + 118355ff-748d-ef11-8473-000d3a0fc910 + 3e8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AppDOS + Location: → sample-php/config/php.ini:1141 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AccessControl.Bypass + + 1 + + + + The open_basedir directive has been set to an insecure value + sample-php/config/php.ini:302 + + + + open_basedir, if set, limits all file operations to the defined directory + + + + + + + + + The open_basedir directive has been set to an insecure value + Medium + + The open_basedir directive has been set to an insecure value + + + + AccessControl.Bypass + + + + + Medium + 2 + SAST + + 288 + + + catInsufficientAuthorization + + + privilegeEscalation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Authentication Bypass + sample-php/config/php.ini:302 + sample-php/config/php.ini + None + 302 + 0 + 0a8355ff-748d-ef11-8473-000d3a0fc910 + 1d8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AccessControl.Bypass + Location: → sample-php/config/php.ini:302 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + AccessControl.Bypass + + 1 + + + + The open_basedir directive has been set to an insecure value + sample-php/config/php.ini:338 + + + + open_basedir is set, the cache is disabled + + + + + + + + + The open_basedir directive has been set to an insecure value + Medium + + The open_basedir directive has been set to an insecure value + + + + AccessControl.Bypass + + + + + Medium + 2 + SAST + + 288 + + + catInsufficientAuthorization + + + privilegeEscalation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Authentication Bypass + sample-php/config/php.ini:338 + sample-php/config/php.ini + None + 338 + 0 + 0a8355ff-748d-ef11-8473-000d3a0fc910 + 238355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → AccessControl.Bypass + Location: → sample-php/config/php.ini:338 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Configuration + + 1 + + + + Insecure use of Base image version detected in Dockerfile + sample-php/Dockerfile:1 + + + + FROM php:7.4-apache + + + + + + + + + Insecure use of Base image version detected in Dockerfile + Medium + + Insecure use of Base image version detected in Dockerfile + + + + Configuration + + + + + Medium + 2 + SAST + + 16 + + + catApplicationMisconfiguration + + + sensitiveInformation + + + insecureWebAppConfiguration + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Configuration + sample-php/Dockerfile:1 + sample-php/Dockerfile + None + 1 + 0 + 058355ff-748d-ef11-8473-000d3a0fc910 + 178355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Configuration + Location: → sample-php/Dockerfile:1 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Logging + + 1 + + + + The ignore_repeated_errors directive has been disabled + sample-php/config/php.ini:511 + + + + ignore_repeated_errors = Off + + + + + + + + + The ignore_repeated_errors directive has been disabled + Medium + + The ignore_repeated_errors directive has been disabled + + + + Logging + + + + + Medium + 2 + SAST + + 778 + + + catInsufficientAuthentication + + + WB_maskAnomalies + + + boundsCheckingOnParamValues + incorrectDataType + inputLengthNotChecked + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Logging + sample-php/config/php.ini:511 + sample-php/config/php.ini + None + 511 + 0 + 0d8355ff-748d-ef11-8473-000d3a0fc910 + 2c8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Logging + Location: → sample-php/config/php.ini:511 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Privacy + + 1 + + + + The expose_php directive is enabled + sample-php/config/php.ini:379 + + + + expose_php = On + + + + + + + + + The expose_php directive is enabled + Medium + + The expose_php directive is enabled + + + + Privacy + + + + + Medium + 2 + SAST + + 359 + + + catInformationLeakage + + + sensitiveInformation + + + WB_InformationLeakage + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Privacy + sample-php/config/php.ini:379 + sample-php/config/php.ini + None + 379 + 0 + 108355ff-748d-ef11-8473-000d3a0fc910 + 268355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Privacy + Location: → sample-php/config/php.ini:379 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + SessionManagement.Cookies + + 1 + + + + The session.cookie_lifetime directive is set to zero + sample-php/config/php.ini:1365 + + + + session.cookie_lifetime = 0 + + + + + + + + + The session.cookie_lifetime directive is set to zero + Medium + + The session.cookie_lifetime directive is set to zero + + + + SessionManagement.Cookies + + + + + Medium + 2 + SAST + + 565 + + + catCredentialSessionPrediction + + + sessionCookieNotRAM + unsecureCookieInSSL + userImpersonation + + + insecureWebAppConfiguration + nonSecureCookiesSentOverSSL + sessionCookieNotRAM + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + SessionManagement.Cookies + sample-php/config/php.ini:1365 + sample-php/config/php.ini + None + 1365 + 0 + 078355ff-748d-ef11-8473-000d3a0fc910 + 478355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → SessionManagement.Cookies + Location: → sample-php/config/php.ini:1365 + Severity: → Medium + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Communications.Unencrypted + + 1 + + + + Open communications scheme detected + sample-php/src/about.php:44 + + + + "http://wenzhixin.net.cn/" + + + + + + + + + Open communications scheme detected + Low + + Open communications scheme detected + + + + Communications.Unencrypted + + + + + Low + 1 + SAST + + 311 + + + catInformationLeakage + + + sensitiveInformation + sensitiveNotOverSSL + + + GETParamOverSSL + insecureWebAppConfiguration + sensitiveDataNotSSL + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Missing Encryption of Sensitive Data + sample-php/src/about.php:44 + sample-php/src/about.php + None + 44 + 0 + 128355ff-748d-ef11-8473-000d3a0fc910 + 4a8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Communications.Unencrypted + Location: → sample-php/src/about.php:44 + Severity: → Low + Scanner: → AppScan Static Analyzer +
+
+
+
+ + + Communications.Unencrypted + + 1 + + + + Open communications scheme detected + sample-php/src/adminUsers.php:35 + + + + "http://<?php echo $_SERVER[' + + + + + + + + + Open communications scheme detected + Low + + Open communications scheme detected + + + + Communications.Unencrypted + + + + + Low + 1 + SAST + + 311 + + + catInformationLeakage + + + sensitiveInformation + sensitiveNotOverSSL + + + GETParamOverSSL + insecureWebAppConfiguration + sensitiveDataNotSSL + + Open + Friday, October 18, 2024 + Friday, October 18, 2024 + Missing Encryption of Sensitive Data + sample-php/src/adminUsers.php:35 + sample-php/src/adminUsers.php + None + 35 + 0 + 128355ff-748d-ef11-8473-000d3a0fc910 + ab8355ff-748d-ef11-8473-000d3a0fc910 + + + 10/18/2024 17:18:37 +
+ IssueTypeName: → Communications.Unencrypted + Location: → sample-php/src/adminUsers.php:35 + Severity: → Low + Scanner: → AppScan Static Analyzer +
+
+
+
+
+ + + 2024-10-18 17:18:37Z + The open_basedir directive has been set to an insecure value + sample-php/config/php.ini + AccessControl.Bypass + Authentication Bypass + + + 2024-10-18 17:18:37Z + Verify Session Data Stored in A Secure Directory + sample-php/config/php.ini + AccessControl.Bypass + Authentication Bypass + + + 2024-10-18 17:18:37Z + The number of concurrent database connections (persistent or otherwise) has been set too high + sample-php/config/php.ini + AppDOS + AppDOS + + + 2024-10-18 17:18:37Z + Open communications scheme detected + sample-php/src/about.php + Communications.Unencrypted + Missing Encryption of Sensitive Data + + + 2024-10-18 17:18:37Z + Insecure use of Base image version detected in Dockerfile + sample-php/Dockerfile + Configuration + Configuration + + + 2024-10-18 17:18:37Z + The disable_functions parameter is empty + sample-php/config/php.ini + Configuration + Configuration + + + 2024-10-18 17:18:37Z + Potential user controlled data within PHP converted to HTML + sample-php/src/editApplicationForm.php + CrossSiteScripting + Cross-Site Scripting + + + 2024-10-18 17:18:37Z + Potential XSS vulnerability detected in jQuery.append() method + sample-php/src/manageApplicationIntegrations.php + CrossSiteScripting + Cross-Site Scripting + + + 2024-10-18 17:18:37Z + JQuery Potentially Setting An Attribute With Tainted Data + sample-php/src/applications.php + CrossSiteScripting.Reflected + Reflected Cross Site Scripting + + + 2024-10-18 17:18:37Z + The display_errors directive has been enabled + sample-php/config/php.ini + ErrorHandling.Missing + Missing Standardized Error Handling Mechanism + + + 2024-10-18 17:18:37Z + The allow_url_fopen directive is enabled + sample-php/config/php.ini + Injection + Injection + + + 2024-10-18 17:18:37Z + The file_uploads directive is enabled + sample-php/config/php.ini + Injection + Injection + + + 2024-10-18 17:18:37Z + The ignore_repeated_errors directive has been disabled + sample-php/config/php.ini + Logging + Logging + + + 2024-10-18 17:18:37Z + Potential path traversal through variable argument + sample-php/src/db.php + PathTraversal + PathTraversal + + + 2024-10-18 17:18:37Z + The expose_php directive is enabled + sample-php/config/php.ini + Privacy + Privacy + + + 2024-10-18 17:18:37Z + No non-root USER specified in Dockerfile configuration + sample-php/Dockerfile + PrivilegeEscalation + PrivilegeEscalation + + + 2024-10-18 17:18:37Z + The session.cookie_lifetime directive is set to zero + sample-php/config/php.ini + SessionManagement.Cookies + SessionManagement.Cookies + + +
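The fixture closes above with a per-issue-type roll-up (scan timestamp, a representative finding text, file, issue type id, and display name), one row for each type that appears in the individual entries. Again as a hedged illustration only (not code from this patch), such a roll-up can be rebuilt from the normalized records of the earlier sketch:

# Hypothetical sketch, not part of this patch: group normalized issue
# records by issue type, keeping one representative file per type and a
# count, the way the report's closing summary table does.
def summarize(issues):
    summary = {}
    for issue in sorted(issues, key=lambda i: i["title"]):
        entry = summary.setdefault(issue["title"], {"file": issue["file_path"], "count": 0})
        entry["count"] += 1
    return summary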
diff --git a/unittests/scans/hcl_asoc_sast/no_issues.xml b/unittests/scans/hcl_asoc_sast/no_issues.xml new file mode 100644 index 00000000000..57801a47ddf --- /dev/null +++ b/unittests/scans/hcl_asoc_sast/no_issues.xml @@ -0,0 +1,630 @@ + + + + added + added to request: + Additional Data: + Advisories + Affected Products: + Vulnerable URLs + Concurrent Logins: + Application Data + Application Server: + AppScan Severity + Harmless + This request/response contains binary content, which is not included in generated reports. + Body + Failed Requests + Cause + Causes + Causes: + Id + Name + The following weak cipher suites are supported by the server: + Code + Comment + Comments + Cookie + Cookies + CVE: + CWE: + Detailed Summary + A detailed listing of the scan results, including all issue types found, all recommended remediation tasks, all vulnerable URLs, etc. This section is intended to provide a more detailed understanding of the security status of the application, as well as assist in scoping and prioritizing the work required to remedy issues found. + Tracked or session ID cookies: + Tracked or session ID parameters: + Difference: + Document Map + This report consists of the following sections: + Domain + .Net + JavaScript execution: + Entity + Entity: + Example + Summary + This section provides a high level view of the information gathered during the scan, using graphs or comparative numbers. It is intended to provide a general understanding of the security status of the application. + Expires + Filtered URLs + First Set + Fix + Fix: + Fix Recommendations + General + General Information + Header + High + High severity issues: + Host: + Index + Informational + Informational severity issues: + Introduction + Introduction and Objectives + General information about the scan, including the project name, purpose of the scan, etc. + Issue + Issues Sorted by Issue Type + Issues Sorted by URL + Issues detected across + Issue Type + Issue Types + Issue Types + J2EE + JavaScripts + Login Settings + Low + Low severity issues: + Malicious + manipulated from: + Medium + Medium severity issues: + Method + Name + New URLs + Report Produced on Tree node: + this is now the same as the one below - should be removed + Number of Issues + Objectives + AppScan performs real-time security assessments on web applications. These assessments aim to uncover any security issues in the application, explain the impact and risks associated with these issues, and provide guidance in planning and prioritizing remediation. The objective of this assignment was to perform controlled attack and penetration activities to assess the overall level of security of the application. 
+ of + Operating system: + Original Request + Original Requests and Responses: + Original Response + Parameter + Parameters + Path + PHP + Query + Raw Test Response: + Reason + Reasoning: + Login sequence: + References: + Regulations + Remaining URLs + Remediation Task + removed + removed from request: + Removed URLs + Comprehensive Security Report + AppScan Web Application Security Report + Requested URL + Request + Response + Risk + Risk: + Rules: + Scan started: + Scan file name: + Sections + sections of the regulation: + Violated Section + GDPR Articles + Section Violation by Issue + Secure + Detailed Security Issues by Sections + Security Risks + Security Risks: + Login method: + In-session detection: + In-session pattern: + Severity + Severity: + Unique issues detected across + SSL Version + Table of Contents + Test Description: + Test Login + Test policy: + Test Request: + Test Requests and Responses: + Test Response (first) + Test Response + Test Response (last) + Test Response (next-to-last) + Technical Description: + Test Type: + Threat + WASC Threat Classification + Threat Classification: + TOC + to: + Total security issues included in the report: + Total security issues: + total security issues + Type + Unwanted + URL + URL: + Valid Login + Value + Variant + Visited URLs + Vulnerable URLs + Web server: + Issue Types that this task fixes + Simulation of the pop-up that appears when this page is opened in a browser + Location + Intent Action: + Intent Class: + Intent Data: + Intent Extra: + Intent Package: + Payload + Issues: + Method Signature: + Issue Validation Parameters: + Thread: + Timestamp: + Trace: + Issue Information + This issue was detected by AppScan's Mobile Analyzer. + Call Stack: + Header: + XML: + File Name: + File Permission: + Synopsis: + Dump: + Manifest: + Request: + Method Information + Signature: + File: + Name: + Permissions: + Class + Function + Line + Created by: + Summary of security issues + Issues + Go to Table of Contents + Issue Types: + Application Version: + Scan Name: + First Variant: + Variants Found: + OWASP: + X-Force: + (Only the first one is displayed) + No security issues discovered in the scan + Scan status: + Note that the scan on which this report is based was not completed. + Success + Refer to the site for more details. + Sink + Source + OWASP Top 10 + File Path: + Reference: + Free Plan + Please Note: + This summary report was created with the Application Security Analyzer Free Plan. Once you purchase the full service you will have access to a complete report with detailed descriptions of the issues found and how to remediate them. + Activities: + Coverage + Activities + This report includes important security information about your mobile application. 
+ Fix Recommendations: + Component + Glossary + Privacy: + Symbols Found: + Mobile Application Report + Class Signature: + Defining Class + Controllable Object Fields: + Receivers: + Services: + Receivers + Services + Method Signature: + Issue Information: + Settings For Target: + Provider: + Sample Report + Login Mode: + Views: + Views + None + Automatic + Manual + Calling Line + Calling Method + Class + Classification + Critical + Date Created + Discovery Method + Last Updated + Package + Scans: + Severity Value + Status + API + Element + Scheme + Sink: + Source: + Trace + Source File + Access Complexity + Access Vector + Authentication + Availability Impact + Confidentiality Impact + CVE + CVSS + Description + Exploitability + Integrity Impact + Summary + Activities that were tested for security vulnerabilities, as defined in the app's manifest. + Issue Types that ASoC has tested your application for. + Receivers that were tested for security vulnerabilities, as defined in the app's manifest. + Services that were tested for security vulnerabilities, as defined in the app's manifest. + Titles of Views encountered when crawling the app. + Leaked Information: + Password: + User Name: + Mitigation: + Alternate Fix Suggestions + This method is a part of the application code and appears in each of the grouped issue's traces. You should begin investigating a possible fix in the implementation of the method. + This method is a third-party API, with a common caller in each of the grouped issue's traces. You should begin investigating a possible fix at the caller: + Replace/Repair Vulnerable OpenSource: + Please refer to the details of this issue for fix recommendations. + Business Impact: + Created: + Security Report for: + Regulation Report for: + Notes: + - Details + - Discussion + Contains: + {0} issues + (out of {0}) + - Audit Trail + Cause: + HCL Application Security on Cloud, Version + Directory: + Constant Value: + Found in: + Informational + Low + Medium + High + Critical + User Supplied Credit Card Number: + User Supplied Id: + User Supplied Input: + User Supplied Password: + User Supplied Phone Number: + User Supplied User Name: + - Fix Recommendation + Included for each issue separately. + Port: + Application Name: + Copyleft: + Copyright Risk: + Date: + Library Name: + License Name: + Open Source Report + Licenses + Linking: + Patent Risk: + Reference Type: + Reference URL: + Risk Level: + Libraries with high risk level: + Libraries with low risk level: + Libraries with medium risk level: + Libraries with unknown risk level: + Royalty Free: + Total Open Source Libraries: + AppScan on Cloud + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, regardless of whether the code is dynamically or statically linked. (example: GPL). Attribution and/or license terms may be required. + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, subject to an exception for software that dynamically links to the original code. These licenses include LGPL and GPL with Class Path Exception, as examples. Attribution and/or license terms may be required. + Anyone may use the code without restriction. 
+ Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required. + Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required. + Anyone who distributes a modification of the code may be required to make the source code for the modification publicly available. Attribution and/or license terms may also be required. + Anyone who distributes the code must provide certain notices as described in the license. These generally require providing attributions and/or license terms with the software. + Specific identified patent risks + Royalty free and no identified patent risks + No patents granted + Royalty free unless litigated + Report created at: + Report for scan: + Open source library name + Risk level + Security Report + Open Source Libraries + Unknown + Reference + In this section you’ll find more details about the fields and their values. + Disabled + Enabled + None + Automatic + Prompt + Recorded login + Unknown + (Modified) + Any + Unknown + Sample Trace + License Type + Scan Security Report + This report lists all the open source libraries found in your scan, and their associated open source Risk Levels. + + Open Source Risk Levels are not the same as the Risk Levels in Security Reports, and not related to the vulnerabilities of specific issues. + You can see if any of the libraries have known vulnerabilities in Issue Management view. + Number Of Libraries + Report Date: + Scanned under Application: + Scan Start Date: + Total Open Source License Types: + Details + Threat Classification: + Fix Groups: + Implementation of {0} + Usage of {0} via {1} + Fix Group #{0}: {1} + This section groups {0} issues of type {1} with significant commonality in the their traces. + This section groups {0} issues with significant commonality in their traces. The following issue types are included: + This section groups {0} issues of type {1} with a common opensource file. + This section groups {0} issues with a common opensource file. The following issue types are included: + These issues are grouped together to try to help you find a common fix that resolves them all. + These method calls are also common to the traces of the issues in this group. They represent other possible These method calls are also common to the traces of the issues in this group. They represent other possible locations to investigate a fix. + All {0} issues in this report appear to be independent, lacking the commonality required in their traces to be grouped together. They all appear in this section. + This section lists the remaining {0} issues that could not be included in any other fix groups. 
+ The following issue types are included: + Ungrouped + Fix Recommendation + Library Version: + API: + at line + Call + Caller: + Description: + Name: + Example Trace: + File + Lost Sink + Not a Validator + Sample Trace + Publish date: + Resolution: + Source and Sink + Tainted Arg + Taint Propagator + via + Virtual Lost Sink + Test Optimization: + Normal + Optimized + Issue ID: + Compliance Security Report + Undefined + Undefined + Title: + Report Date UTC: + Fix Group ID: + Method: + Query String: + URI: + Arguments: + Call Trace: + Object: + Return: + Stack: + Type: + By Fix Groups: + By Issue Types: + Fix-Groups + Library: + Location: + Status: + Common API Call: + Common Fix Point: + Common Open Source: + Common Fix Point: + OpenSource + API: + Location of fix: + Library name: + Location of fix: + Advisory: + Custom Advisory: + Hosts + Fast + Faster + Fastest + No Optimization + How to Fix: + Report Name: + Technology: + Scan Information + General Advisory: + Finding specific advisory: + Example: + Exploit Example: + (none) + Not applicable for this issue. + HTTP Only + JS Stack Trace + Same Site + False + True + (Mixed) + Articles + CWE + Exploit example + External references + Recommendations + Language: + How to Fix + See also issue-details 'Resolution' section below. + Mitigation + Important: + Note: The number of issues found exceeded the maximum that can be shown in a single set of results. +The scan results show {0} representitive issues. + Personal Scan + Personal Scans are deleted after {0} days, unless promoted to the application within that time. + Additional Information: + Fixed + In Progress + New + Noise + Open + Passed + Reopened + Definitive + Scan Coverage Findings + Suspect + Cipher Suites: + ID + Fix recommendation + Default (Production) + Default (Staging) + Default + Body + Cookie + Global + Header + Header Name + Link + Other + Page + Parameter + Parameter Name + Query + Role + Source Line + Unspecified + Critical + High + Low + Medium + Unspecified + Report for application: + This report lists all the open source libraries found in your application, and their associated open source Risk Levels. + License Details + Library Name + Version + Undefined + Critical severity issues: + Copyleft applies on modifications as well as own code that uses the open-source software. + Non-copyleft license. + Copyleft applies only to modifications. + Undefined + Dynamic linking will not infect the linking code. + The licensing of the linking code will remain unaffected. + Undefined + Linking will infect the code linking code. + Alpine + Arch Linux + Bower + Build Configuration File + Details available in CDNJS + Debian + .NET + Eclipse OSGI Bundle + Details available in GitHub repository + License information in host site + License File + Node package manager + NuGet Package + Other + POM file + Project Home Page + Python Package Index + Readme File + RPM + RubyGems + License assigned manually by a user in the organization + Undefined + High + Low + Medium + Undefined + Unknown + Royalty-free unless litigated. + No patents granted. + Royalty-free and no identified patent risks. 
+ Undefined severity issues: + Last Found + CVSS Version + Total Items: + IAST call stack: + Undefined + - Comments + Method: + Both + Config + Hash + Dependency Root: + Source-file and Package-manager + Package-manager + Source-file + None + + + + + HCL + Application Security on Cloud + python-sample + Unspecified + Friday, October 18, 2024 + FullReport + 0 + False + 30 + 20000 + False + ASoC + + + 1 + 1 + 1 + 1 + 0 + 1 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + + \ No newline at end of file diff --git a/unittests/scans/hcl_asoc_sast/one_issue.xml b/unittests/scans/hcl_asoc_sast/one_issue.xml new file mode 100644 index 00000000000..237341c42da --- /dev/null +++ b/unittests/scans/hcl_asoc_sast/one_issue.xml @@ -0,0 +1,704 @@ + + + + added + added to request: + Additional Data: + Advisories + Affected Products: + Vulnerable URLs + Concurrent Logins: + Application Data + Application Server: + AppScan Severity + Harmless + This request/response contains binary content, which is not included in generated reports. + Body + Failed Requests + Cause + Causes + Causes: + Id + Name + The following weak cipher suites are supported by the server: + Code + Comment + Comments + Cookie + Cookies + CVE: + CWE: + Detailed Summary + A detailed listing of the scan results, including all issue types found, all recommended remediation tasks, all vulnerable URLs, etc. This section is intended to provide a more detailed understanding of the security status of the application, as well as assist in scoping and prioritizing the work required to remedy issues found. + Tracked or session ID cookies: + Tracked or session ID parameters: + Difference: + Document Map + This report consists of the following sections: + Domain + .Net + JavaScript execution: + Entity + Entity: + Example + Summary + This section provides a high level view of the information gathered during the scan, using graphs or comparative numbers. It is intended to provide a general understanding of the security status of the application. + Expires + Filtered URLs + First Set + Fix + Fix: + Fix Recommendations + General + General Information + Header + High + High severity issues: + Host: + Index + Informational + Informational severity issues: + Introduction + Introduction and Objectives + General information about the scan, including the project name, purpose of the scan, etc. + Issue + Issues Sorted by Issue Type + Issues Sorted by URL + Issues detected across + Issue Type + Issue Types + Issue Types + J2EE + JavaScripts + Login Settings + Low + Low severity issues: + Malicious + manipulated from: + Medium + Medium severity issues: + Method + Name + New URLs + Report Produced on Tree node: + this is now the same as the one below - should be removed + Number of Issues + Objectives + AppScan performs real-time security assessments on web applications. These assessments aim to uncover any security issues in the application, explain the impact and risks associated with these issues, and provide guidance in planning and prioritizing remediation. The objective of this assignment was to perform controlled attack and penetration activities to assess the overall level of security of the application. 
+ of + Operating system: + Original Request + Original Requests and Responses: + Original Response + Parameter + Parameters + Path + PHP + Query + Raw Test Response: + Reason + Reasoning: + Login sequence: + References: + Regulations + Remaining URLs + Remediation Task + removed + removed from request: + Removed URLs + Comprehensive Security Report + AppScan Web Application Security Report + Requested URL + Request + Response + Risk + Risk: + Rules: + Scan started: + Scan file name: + Sections + sections of the regulation: + Violated Section + GDPR Articles + Section Violation by Issue + Secure + Detailed Security Issues by Sections + Security Risks + Security Risks: + Login method: + In-session detection: + In-session pattern: + Severity + Severity: + Unique issues detected across + SSL Version + Table of Contents + Test Description: + Test Login + Test policy: + Test Request: + Test Requests and Responses: + Test Response (first) + Test Response + Test Response (last) + Test Response (next-to-last) + Technical Description: + Test Type: + Threat + WASC Threat Classification + Threat Classification: + TOC + to: + Total security issues included in the report: + Total security issues: + total security issues + Type + Unwanted + URL + URL: + Valid Login + Value + Variant + Visited URLs + Vulnerable URLs + Web server: + Issue Types that this task fixes + Simulation of the pop-up that appears when this page is opened in a browser + Location + Intent Action: + Intent Class: + Intent Data: + Intent Extra: + Intent Package: + Payload + Issues: + Method Signature: + Issue Validation Parameters: + Thread: + Timestamp: + Trace: + Issue Information + This issue was detected by AppScan's Mobile Analyzer. + Call Stack: + Header: + XML: + File Name: + File Permission: + Synopsis: + Dump: + Manifest: + Request: + Method Information + Signature: + File: + Name: + Permissions: + Class + Function + Line + Created by: + Summary of security issues + Issues + Go to Table of Contents + Issue Types: + Application Version: + Scan Name: + First Variant: + Variants Found: + OWASP: + X-Force: + (Only the first one is displayed) + No security issues discovered in the scan + Scan status: + Note that the scan on which this report is based was not completed. + Success + Refer to the site for more details. + Sink + Source + OWASP Top 10 + File Path: + Reference: + Free Plan + Please Note: + This summary report was created with the Application Security Analyzer Free Plan. Once you purchase the full service you will have access to a complete report with detailed descriptions of the issues found and how to remediate them. + Activities: + Coverage + Activities + This report includes important security information about your mobile application. 
+ Fix Recommendations: + Component + Glossary + Privacy: + Symbols Found: + Mobile Application Report + Class Signature: + Defining Class + Controllable Object Fields: + Receivers: + Services: + Receivers + Services + Method Signature: + Issue Information: + Settings For Target: + Provider: + Sample Report + Login Mode: + Views: + Views + None + Automatic + Manual + Calling Line + Calling Method + Class + Classification + Critical + Date Created + Discovery Method + Last Updated + Package + Scans: + Severity Value + Status + API + Element + Scheme + Sink: + Source: + Trace + Source File + Access Complexity + Access Vector + Authentication + Availability Impact + Confidentiality Impact + CVE + CVSS + Description + Exploitability + Integrity Impact + Summary + Activities that were tested for security vulnerabilities, as defined in the app's manifest. + Issue Types that ASoC has tested your application for. + Receivers that were tested for security vulnerabilities, as defined in the app's manifest. + Services that were tested for security vulnerabilities, as defined in the app's manifest. + Titles of Views encountered when crawling the app. + Leaked Information: + Password: + User Name: + Mitigation: + Alternate Fix Suggestions + This method is a part of the application code and appears in each of the grouped issue's traces. You should begin investigating a possible fix in the implementation of the method. + This method is a third-party API, with a common caller in each of the grouped issue's traces. You should begin investigating a possible fix at the caller: + Replace/Repair Vulnerable OpenSource: + Please refer to the details of this issue for fix recommendations. + Business Impact: + Created: + Security Report for: + Regulation Report for: + Notes: + - Details + - Discussion + Contains: + {0} issues + (out of {0}) + - Audit Trail + Cause: + HCL Application Security on Cloud, Version + Directory: + Constant Value: + Found in: + Informational + Low + Medium + High + Critical + User Supplied Credit Card Number: + User Supplied Id: + User Supplied Input: + User Supplied Password: + User Supplied Phone Number: + User Supplied User Name: + - Fix Recommendation + Included for each issue separately. + Port: + Application Name: + Copyleft: + Copyright Risk: + Date: + Library Name: + License Name: + Open Source Report + Licenses + Linking: + Patent Risk: + Reference Type: + Reference URL: + Risk Level: + Libraries with high risk level: + Libraries with low risk level: + Libraries with medium risk level: + Libraries with unknown risk level: + Royalty Free: + Total Open Source Libraries: + AppScan on Cloud + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, regardless of whether the code is dynamically or statically linked. (example: GPL). Attribution and/or license terms may be required. + Anyone who distributes a modification of the code or a product that is based on or contains part of the code may be required to make publicly available the source code for the product or modification, subject to an exception for software that dynamically links to the original code. These licenses include LGPL and GPL with Class Path Exception, as examples. Attribution and/or license terms may be required. + Anyone may use the code without restriction. 
+ Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required. + Anyone who develops a product that is based on or contains part of the code, or who modifies the code, may be required to make publicly available the source code for that product or modification if s/he (a) distributes the software or (b) enables others to use the software via hosted or web services. (example: Affero). Attribution and/or license terms may be required. + Anyone who distributes a modification of the code may be required to make the source code for the modification publicly available. Attribution and/or license terms may also be required. + Anyone who distributes the code must provide certain notices as described in the license. These generally require providing attributions and/or license terms with the software. + Specific identified patent risks + Royalty free and no identified patent risks + No patents granted + Royalty free unless litigated + Report created at: + Report for scan: + Open source library name + Risk level + Security Report + Open Source Libraries + Unknown + Reference + In this section you’ll find more details about the fields and their values. + Disabled + Enabled + None + Automatic + Prompt + Recorded login + Unknown + (Modified) + Any + Unknown + Sample Trace + License Type + Scan Security Report + This report lists all the open source libraries found in your scan, and their associated open source Risk Levels. + + Open Source Risk Levels are not the same as the Risk Levels in Security Reports, and not related to the vulnerabilities of specific issues. + You can see if any of the libraries have known vulnerabilities in Issue Management view. + Number Of Libraries + Report Date: + Scanned under Application: + Scan Start Date: + Total Open Source License Types: + Details + Threat Classification: + Fix Groups: + Implementation of {0} + Usage of {0} via {1} + Fix Group #{0}: {1} + This section groups {0} issues of type {1} with significant commonality in the their traces. + This section groups {0} issues with significant commonality in their traces. The following issue types are included: + This section groups {0} issues of type {1} with a common opensource file. + This section groups {0} issues with a common opensource file. The following issue types are included: + These issues are grouped together to try to help you find a common fix that resolves them all. + These method calls are also common to the traces of the issues in this group. They represent other possible These method calls are also common to the traces of the issues in this group. They represent other possible locations to investigate a fix. + All {0} issues in this report appear to be independent, lacking the commonality required in their traces to be grouped together. They all appear in this section. + This section lists the remaining {0} issues that could not be included in any other fix groups. 
+ The following issue types are included: + Ungrouped + Fix Recommendation + Library Version: + API: + at line + Call + Caller: + Description: + Name: + Example Trace: + File + Lost Sink + Not a Validator + Sample Trace + Publish date: + Resolution: + Source and Sink + Tainted Arg + Taint Propagator + via + Virtual Lost Sink + Test Optimization: + Normal + Optimized + Issue ID: + Compliance Security Report + Undefined + Undefined + Title: + Report Date UTC: + Fix Group ID: + Method: + Query String: + URI: + Arguments: + Call Trace: + Object: + Return: + Stack: + Type: + By Fix Groups: + By Issue Types: + Fix-Groups + Library: + Location: + Status: + Common API Call: + Common Fix Point: + Common Open Source: + Common Fix Point: + OpenSource + API: + Location of fix: + Library name: + Location of fix: + Advisory: + Custom Advisory: + Hosts + Fast + Faster + Fastest + No Optimization + How to Fix: + Report Name: + Technology: + Scan Information + General Advisory: + Finding specific advisory: + Example: + Exploit Example: + (none) + Not applicable for this issue. + HTTP Only + JS Stack Trace + Same Site + False + True + (Mixed) + Articles + CWE + Exploit example + External references + Recommendations + Language: + How to Fix + See also issue-details 'Resolution' section below. + Mitigation + Important: + Note: The number of issues found exceeded the maximum that can be shown in a single set of results. +The scan results show {0} representitive issues. + Personal Scan + Personal Scans are deleted after {0} days, unless promoted to the application within that time. + Additional Information: + Fixed + In Progress + New + Noise + Open + Passed + Reopened + Definitive + Scan Coverage Findings + Suspect + Cipher Suites: + ID + Fix recommendation + Default (Production) + Default (Staging) + Default + Body + Cookie + Global + Header + Header Name + Link + Other + Page + Parameter + Parameter Name + Query + Role + Source Line + Unspecified + Critical + High + Low + Medium + Unspecified + Report for application: + This report lists all the open source libraries found in your application, and their associated open source Risk Levels. + License Details + Library Name + Version + Undefined + Critical severity issues: + Copyleft applies on modifications as well as own code that uses the open-source software. + Non-copyleft license. + Copyleft applies only to modifications. + Undefined + Dynamic linking will not infect the linking code. + The licensing of the linking code will remain unaffected. + Undefined + Linking will infect the code linking code. + Alpine + Arch Linux + Bower + Build Configuration File + Details available in CDNJS + Debian + .NET + Eclipse OSGI Bundle + Details available in GitHub repository + License information in host site + License File + Node package manager + NuGet Package + Other + POM file + Project Home Page + Python Package Index + Readme File + RPM + RubyGems + License assigned manually by a user in the organization + Undefined + High + Low + Medium + Undefined + Unknown + Royalty-free unless litigated. + No patents granted. + Royalty-free and no identified patent risks. 
+ Undefined severity issues: + Last Found + CVSS Version + Total Items: + IAST call stack: + Undefined + - Comments + Method: + Both + Config + Hash + Dependency Root: + Source-file and Package-manager + Package-manager + Source-file + None + + + + + HCL + Application Security on Cloud + sample-web + Unspecified + Wednesday, October 16, 2024 + FullReport + 1 + False + 30 + 20000 + False + ASoC + + + 1 + 1 + 1 + 1 + 1 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + PrivilegeEscalation + + + + + + + + + PrivilegeEscalation + + 1 + + + + No non-root USER specified in Dockerfile configuration + sample-web\Dockerfile:1 + + + + FROM public.ecr.aws/docker/library/python:latest + + + + + + + + + No non-root USER specified in Dockerfile configuration + High + + No non-root USER specified in Dockerfile configuration + + + + PrivilegeEscalation + + + + + High + 3 + SAST + + 266 + + + catInsufficientAuthentication + + + privilegeEscalation + + + insecureWebAppConfiguration + + Open + Wednesday, October 16, 2024 + Wednesday, October 16, 2024 + PrivilegeEscalation + sample-web\Dockerfile:1 + sample-web\Dockerfile + None + 1 + 0 + 522deb60-ea8b-ef11-8473-000d3a0fc910 + 5a2deb60-ea8b-ef11-8473-000d3a0fc910 + + + + + 2024-10-16 18:13:44Z + No non-root USER specified in Dockerfile configuration + sample-web\Dockerfile + PrivilegeEscalation + PrivilegeEscalation + + + diff --git a/unittests/tools/test_hcl_asoc_sast_parser.py b/unittests/tools/test_hcl_asoc_sast_parser.py new file mode 100644 index 00000000000..d9adbde8c24 --- /dev/null +++ b/unittests/tools/test_hcl_asoc_sast_parser.py @@ -0,0 +1,36 @@ +from dojo.tools.hcl_asoc_sast.parser import HCLASoCSASTParser +from unittests.dojo_test_case import DojoTestCase + + +class TestHCLASoCSASTParser(DojoTestCase): + + def test_no_findings(self): + my_file_handle = open("unittests/scans/hcl_asoc_sast/no_issues.xml", encoding="utf-8") + parser = HCLASoCSASTParser() + findings = parser.get_findings(my_file_handle, None) + my_file_handle.close() + self.assertEqual(0, len(findings)) + + def test_one_finding(self): + my_file_handle = open("unittests/scans/hcl_asoc_sast/one_issue.xml", encoding="utf-8") + parser = HCLASoCSASTParser() + findings = parser.get_findings(my_file_handle, None) + my_file_handle.close() + self.assertEqual(1, len(findings)) + self.assertEqual(findings[0].title, "PrivilegeEscalation") + self.assertEqual(findings[0].severity, "High") + self.assertEqual(findings[0].cwe, 266) + + def test_many_findings(self): + my_file_handle = open("unittests/scans/hcl_asoc_sast/many_issues.xml", encoding="utf-8") + parser = HCLASoCSASTParser() + findings = parser.get_findings(my_file_handle, None) + my_file_handle.close() + self.assertEqual(83, len(findings)) + self.assertEqual(findings[0].title, "Authentication Bypass") + self.assertEqual(findings[2].title, "Configuration") + self.assertEqual(findings[0].severity, "High") + self.assertEqual(findings[9].severity, "High") + self.assertEqual(findings[9].file_path, "sample-php/src/adminEditCodeLanguageForm.php") + self.assertEqual(findings[5].line, 48) + self.assertEqual(findings[9].cwe, 79) From 5e79c703f6aa15d2f5a6d5d2679e047391769722 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 25 Jan 2025 12:25:32 -0600 Subject: [PATCH 61/99] chore(deps): update docker/build-push-action action from v6.12.0 to v6.13.0 (.github/workflows/release-x-manual-docker-containers.yml) (#11636) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- 
.github/workflows/build-docker-images-for-testing.yml | 2 +- .github/workflows/release-x-manual-docker-containers.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml index 7e33d6fc87f..8157fdf5d0b 100644 --- a/.github/workflows/build-docker-images-for-testing.yml +++ b/.github/workflows/build-docker-images-for-testing.yml @@ -35,7 +35,7 @@ jobs: - name: Build id: docker_build - uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 timeout-minutes: 10 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false diff --git a/.github/workflows/release-x-manual-docker-containers.yml b/.github/workflows/release-x-manual-docker-containers.yml index 40af8e4c617..051d383d1e7 100644 --- a/.github/workflows/release-x-manual-docker-containers.yml +++ b/.github/workflows/release-x-manual-docker-containers.yml @@ -51,7 +51,7 @@ jobs: - name: Build and push images with debian if: ${{ matrix.os == 'debian' }} - uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} @@ -64,7 +64,7 @@ jobs: - name: Build and push images with alpine if: ${{ matrix.os == 'alpine' }} - uses: docker/build-push-action@67a2d409c0a876cbe6b11854e3e25193efe4e62d # v6.12.0 + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 env: DOCKER_BUILD_CHECKS_ANNOTATIONS: false REPO_ORG: ${{ env.repoorg }} From 6204101e43621086fe9d355523054a8718171cc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Jan 2025 12:26:23 -0600 Subject: [PATCH 62/99] Bump boto3 from 1.36.4 to 1.36.5 (#11635) Bumps [boto3](https://github.com/boto/boto3) from 1.36.4 to 1.36.5. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.4...1.36.5) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4bddc071da4..a80d04aba78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.4 # Required for Celery Broker AWS (SQS) support +boto3==1.36.5 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 67ede99890ff0477838d53a8e5705b80b7a321dc Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 27 Jan 2025 16:07:05 +0000 Subject: [PATCH 63/99] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/package.json b/components/package.json index 5237c9c33af..086741e6f2d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.3", + "version": "2.43.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index cc30debc0e3..b6548659b3d 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.3" +appVersion: "2.43.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.170 +version: 1.6.171-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 0e216665f28ba9cafec9cb60b089cdd675ae5e96 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 27 Jan 2025 16:07:12 +0000 Subject: [PATCH 64/99] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 5237c9c33af..086741e6f2d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.3", + "version": "2.43.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 077a3604cd5..3a2e4a630a2 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.42.3" +__version__ = "2.43.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index cc30debc0e3..b6548659b3d 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.3" +appVersion: "2.43.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.170 +version: 1.6.171-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 571d9780d9ebbcc070122c56911d1d51674995f9 Mon Sep 17 00:00:00 2001 From: Ross Esposito Date: Mon, 27 Jan 2025 11:01:57 -0600 Subject: [PATCH 65/99] remove bad field --- dojo/fixtures/defect_dojo_sample_data.json | 1 - 1 file changed, 1 deletion(-) diff --git a/dojo/fixtures/defect_dojo_sample_data.json b/dojo/fixtures/defect_dojo_sample_data.json index 41c855f2630..641b86ba9e1 100644 --- a/dojo/fixtures/defect_dojo_sample_data.json +++ b/dojo/fixtures/defect_dojo_sample_data.json @@ -16886,7 +16886,6 @@ "enable_notify_sla_exponential_backoff": false, "allow_anonymous_survey_repsonse": false, "credentials": "", - "disclaimer": "", "risk_acceptance_form_default_days": 180, "risk_acceptance_notify_before_expiration": 10, "enable_credentials": true, From cf0a129472289f3c3ca003bdefb3fbcafb10a38e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:25:07 -0600 Subject: [PATCH 66/99] chore(deps): update actions/setup-node action from v4.1.0 to v4.2.0 (.github/workflows/gh-pages.yml) (#11657) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/gh-pages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 888cd7eb3e4..3e1576e511d 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -19,7 +19,7 @@ jobs: extended: true - name: Setup Node - uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 + uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0 with: node-version: '22.5.1' From 761141e56f683fa1a90951ca056fcb8b45a0707e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:25:33 -0600 Subject: [PATCH 67/99] chore(deps): update mccutchen/go-httpbin docker tag from v2.15.0 to v2.16.0 (docker-compose.override.unit_tests_cicd.yml) (#11658) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose.override.dev.yml | 2 +- docker-compose.override.unit_tests.yml | 2 +- docker-compose.override.unit_tests_cicd.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose.override.dev.yml b/docker-compose.override.dev.yml index 581dd627900..0e68fbbffce 100644 --- a/docker-compose.override.dev.yml +++ b/docker-compose.override.dev.yml @@ -54,4 +54,4 @@ services: protocol: tcp mode: host "webhook.endpoint": - image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a + image: mccutchen/go-httpbin:v2.16.0@sha256:2b02b8844eab42d432d9c4bbd96a20d7ff348292097eeee4546e79252f72c70e diff --git a/docker-compose.override.unit_tests.yml b/docker-compose.override.unit_tests.yml index baf50d51e60..45b60e30bd2 
100644 --- a/docker-compose.override.unit_tests.yml +++ b/docker-compose.override.unit_tests.yml @@ -52,7 +52,7 @@ services: image: busybox:1.37.0-musl entrypoint: ['echo', 'skipping', 'redis'] "webhook.endpoint": - image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a + image: mccutchen/go-httpbin:v2.16.0@sha256:2b02b8844eab42d432d9c4bbd96a20d7ff348292097eeee4546e79252f72c70e volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} diff --git a/docker-compose.override.unit_tests_cicd.yml b/docker-compose.override.unit_tests_cicd.yml index 1ca70557d41..bd19580c7ff 100644 --- a/docker-compose.override.unit_tests_cicd.yml +++ b/docker-compose.override.unit_tests_cicd.yml @@ -51,7 +51,7 @@ services: image: busybox:1.37.0-musl entrypoint: ['echo', 'skipping', 'redis'] "webhook.endpoint": - image: mccutchen/go-httpbin:v2.15.0@sha256:24528cf5229d0b70065ac27e6c9e4d96f5452a84a3ce4433e56573c18d96827a + image: mccutchen/go-httpbin:v2.16.0@sha256:2b02b8844eab42d432d9c4bbd96a20d7ff348292097eeee4546e79252f72c70e volumes: defectdojo_postgres_unit_tests: {} defectdojo_media_unit_tests: {} From 86a5ee5b7911d75d2db25037513d10d1523788ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:25:58 -0600 Subject: [PATCH 68/99] Bump openapitools/openapi-generator-cli from v7.10.0 to v7.11.0 (#11662) Bumps openapitools/openapi-generator-cli from v7.10.0 to v7.11.0. --- updated-dependencies: - dependency-name: openapitools/openapi-generator-cli dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.integration-tests-debian | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 28c77fc9765..624b9ef7909 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -1,7 +1,7 @@ # code: language=Dockerfile -FROM openapitools/openapi-generator-cli:v7.10.0@sha256:f2054a5a7908ad81017d0f0839514ba5eab06ae628914ff71554d46fac1bcf7a AS openapitools +FROM openapitools/openapi-generator-cli:v7.11.0@sha256:a9e7091ac8808c6835cf8ec88252bca603f1f889ef1456b63d8add5781feeca7 AS openapitools FROM python:3.11.9-slim-bookworm@sha256:8c1036ec919826052306dfb5286e4753ffd9d5f6c24fbc352a5399c3b405b57e AS build WORKDIR /app RUN \ From 064f32580b4a5f2b3a2a345024545e8190b3d817 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:26:24 -0600 Subject: [PATCH 69/99] Bump boto3 from 1.36.5 to 1.36.6 (#11663) Bumps [boto3](https://github.com/boto/boto3) from 1.36.5 to 1.36.6. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.5...1.36.6) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a80d04aba78..ceabec1bc54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.5 # Required for Celery Broker AWS (SQS) support +boto3==1.36.6 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 1096aad968724b259214bad3f5ba9659a42793da Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 16:27:24 -0600 Subject: [PATCH 70/99] chore(deps): update actions/checkout action from v3 to v4 (.github/workflows/update-sample-data.yml) (#11671) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/update-sample-data.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-sample-data.yml b/.github/workflows/update-sample-data.yml index 810f8bb8cf5..6c20a90066b 100644 --- a/.github/workflows/update-sample-data.yml +++ b/.github/workflows/update-sample-data.yml @@ -16,7 +16,7 @@ jobs: steps: # Checkout the repository - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.ref_name || 'dev'}} From b51ac9d441e4c57c38466814835d9570d69fb104 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Tue, 28 Jan 2025 17:01:19 +0100 Subject: [PATCH 71/99] RustyHog: improve description and file_path (#11433) * RustyHog: improve description and file_path * upgrade notes * Update docs/content/en/open_source/upgrading/2.42.md Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --------- Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- docs/content/en/open_source/upgrading/2.42.md | 14 +++++++++- docs/content/en/open_source/upgrading/2.43.md | 14 +++++++++- dojo/tools/rusty_hog/parser.py | 26 +++++++++++++------ unittests/tools/test_rusty_hog_parser.py | 3 +++ 4 files changed, 47 insertions(+), 10 deletions(-) diff --git a/docs/content/en/open_source/upgrading/2.42.md b/docs/content/en/open_source/upgrading/2.42.md index 918ffdb901c..c3656494480 100644 --- a/docs/content/en/open_source/upgrading/2.42.md +++ b/docs/content/en/open_source/upgrading/2.42.md @@ -4,4 +4,16 @@ toc_hide: true weight: -20241202 description: No special instructions. --- -There are no special instructions for upgrading to 2.42.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.42.0) for the contents of the release. + +**Hash Code changes** +A few parsers have been updated to populate more fields. Some of these fields are part of the hash code calculation. To recalculate the hash codes, please execute the following commands: + + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Horusec Scan' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Qualys Hacker Guardian Scan' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Red Hat Satellite' --hash_code_only"` + +This command has various command line arguments to tweak its behaviour, for example to trigger a run of the deduplication process.
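+
+For example, a follow-up run that only performs deduplication for one of these parsers could look like this (a sketch; it assumes the dedupe command also exposes a `--dedupe_only` flag, so check the available options first):
+
+ `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Horusec Scan' --dedupe_only"`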
+See [dedupe.py](https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/management/commands/dedupe.py) for more information. + +Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.42.0) for the contents of the release. + diff --git a/docs/content/en/open_source/upgrading/2.43.md b/docs/content/en/open_source/upgrading/2.43.md index 596b2a0080d..4b5efcecf2f 100644 --- a/docs/content/en/open_source/upgrading/2.43.md +++ b/docs/content/en/open_source/upgrading/2.43.md @@ -7,4 +7,16 @@ description: Disclaimer field renamed/split. [Pull request #10902](https://github.com/DefectDojo/django-DefectDojo/pull/10902) introduced different kinds of disclaimers within the DefectDojo instance. The original content of the disclaimer was copied to all new fields where it had been used until now (so this change does not require any action on the user's side). However, if users were managing the original disclaimer via API (endpoint `/api/v2/system_settings/1/`, field `disclaimer`), be aware that the fields are now called `disclaimer_notifications` and `disclaimer_reports` (plus there is one additional, previously unused field called `disclaimer_notes`). -But there are no other special instructions for upgrading to 2.43.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release. +**Hash Code changes** +The Rusty Hog parser has been [updated](https://github.com/DefectDojo/django-DefectDojo/pull/11433) to populate more fields. Some of these fields are part of the hash code calculation. To recalculate the hash codes and deduplicate existing Rusty Hog findings, please execute the following commands: + + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Essex Hog Scan (Rusty Hog Scan)' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Essex Hog Scan (Choctaw Hog)' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Essex Hog Scan (Duroc Hog)' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Essex Hog Scan (Gottingen Hog)' --hash_code_only"` + `docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser 'Essex Hog Scan (Essex Hog)' --hash_code_only"` + +This command has various command line arguments to tweak its behaviour, for example to trigger a run of the deduplication process. +See [dedupe.py](https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/management/commands/dedupe.py) for more information. + +Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release.
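+
+If you want to script the five commands above, a small shell loop keeps things readable (a sketch; the parser names are copied verbatim from the list above, and the quoting assumes bash):
+
+    for p in 'Essex Hog Scan (Rusty Hog Scan)' 'Essex Hog Scan (Choctaw Hog)' 'Essex Hog Scan (Duroc Hog)' 'Essex Hog Scan (Gottingen Hog)' 'Essex Hog Scan (Essex Hog)'; do
+        docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --parser '$p' --hash_code_only"
+    done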
\ No newline at end of file diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py index a4582106f0d..2faced43ee8 100644 --- a/dojo/tools/rusty_hog/parser.py +++ b/dojo/tools/rusty_hog/parser.py @@ -76,12 +76,17 @@ def __getitem(self, vulnerabilities, scanner): found_secret_string = "" cwe = 200 for vulnerability in vulnerabilities: + description = "" + if vulnerability.get("reason") is not None: + description += "\n**Reason:** {}".format( + vulnerability.get("reason"), + ) if scanner == "Rusty Hog": break if scanner == "Choctaw Hog": """Choctaw Hog""" - found_secret_string = vulnerability.get("stringsFound") - description = f"**This string was found:** {found_secret_string}" + found_secret_string = str(vulnerability.get("stringsFound") or "") + description += f"**This string was found:** {found_secret_string}" if vulnerability.get("commit") is not None: description += "\n**Commit message:** {}".format( vulnerability.get("commit"), @@ -116,8 +121,8 @@ def __getitem(self, vulnerabilities, scanner): ) elif scanner == "Duroc Hog": """Duroc Hog""" - found_secret_string = vulnerability.get("stringsFound") - description = f"**This string was found:** {found_secret_string}" + found_secret_string = str(vulnerability.get("stringsFound") or "") + description += f"**This string was found:** {found_secret_string}" if vulnerability.get("path") is not None: description += "\n**Path of Issue:** {}".format( vulnerability.get("path"), @@ -132,8 +137,8 @@ def __getitem(self, vulnerabilities, scanner): ) elif scanner == "Gottingen Hog": """Gottingen Hog""" - found_secret_string = vulnerability.get("stringsFound") - description = f"**This string was found:** {found_secret_string}" + found_secret_string = str(vulnerability.get("stringsFound") or "") + description += f"**This string was found:** {found_secret_string}" if vulnerability.get("issue_id") is not None: description += "\n**JIRA Issue ID:** {}".format( vulnerability.get("issue_id"), @@ -147,8 +152,8 @@ def __getitem(self, vulnerabilities, scanner): vulnerability.get("url"), vulnerability.get("url"), ) elif scanner == "Essex Hog": - found_secret_string = vulnerability.get("stringsFound") - description = f"**This string was found:** {found_secret_string}" + found_secret_string = str(vulnerability.get("stringsFound") or "") + description += f"**This string was found:** {found_secret_string}" if vulnerability.get("page_id") is not None: description += "\n**Confluence URL:** [{}]({})".format( vulnerability.get("url"), vulnerability.get("url"), @@ -179,10 +184,15 @@ def __getitem(self, vulnerabilities, scanner): vulnerability.get("issue_id"), vulnerability.get("location"), ) + if not file_path: + file_path = vulnerability.get("url") elif scanner == "Essex Hog": title = "{} found in Confluence Page ID {}".format( vulnerability.get("reason"), vulnerability.get("page_id"), ) + if not file_path: + file_path = vulnerability.get("url") + # create the finding object finding = Finding( title=title, diff --git a/unittests/tools/test_rusty_hog_parser.py b/unittests/tools/test_rusty_hog_parser.py index 3d7df04ea0f..ff2420d00ed 100644 --- a/unittests/tools/test_rusty_hog_parser.py +++ b/unittests/tools/test_rusty_hog_parser.py @@ -110,6 +110,9 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog(self): parser = RustyhogParser() findings = parser.get_items(testfile, "Essex Hog", Test()) self.assertEqual(3, len(findings)) + self.assertEqual("https://confluence.com/pages/viewpage.action?pageId=12345", findings[0].file_path) + 
self.assertEqual("['-----BEGIN EC PRIVATE KEY-----']", findings[0].payload) + self.assertEqual("**Reason:** SSH (EC) private key", findings[0].description[:32]) def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog_content(self): with open("unittests/scans/rusty_hog/essexhog_many_vulns.json", encoding="utf-8") as testfile: From 3f40c65a50918c4127980a4edfcfd57b253acc67 Mon Sep 17 00:00:00 2001 From: manuelsommer <47991713+manuel-sommer@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:03:20 +0100 Subject: [PATCH 72/99] :tada: advance NoseyParker to support version 0.22.0 (#11565) * :tada: advance NoseyParker to support version 0.22.0 * add unittest file * :hammer: restructure * fix version 0.22.0 * ruff * update docs * update docs * cover another usecase * added comment --- .../parsers/file/noseyparker.md | 3 +- dojo/tools/noseyparker/parser.py | 174 ++++++++++++------ .../noseyparker/noseyparker_0_22_0.jsonl | 26 +++ ...oseyparker_0_22_0_without_githistory.jsonl | 6 + unittests/tools/test_noseyparker_parser.py | 20 ++ 5 files changed, 169 insertions(+), 60 deletions(-) create mode 100644 unittests/scans/noseyparker/noseyparker_0_22_0.jsonl create mode 100644 unittests/scans/noseyparker/noseyparker_0_22_0_without_githistory.jsonl diff --git a/docs/content/en/connecting_your_tools/parsers/file/noseyparker.md b/docs/content/en/connecting_your_tools/parsers/file/noseyparker.md index fc08cbf03b5..69abfb99b00 100644 --- a/docs/content/en/connecting_your_tools/parsers/file/noseyparker.md +++ b/docs/content/en/connecting_your_tools/parsers/file/noseyparker.md @@ -6,8 +6,7 @@ Input Type: - This parser takes JSON Lines Output from Nosey Parker: https://github.com/praetorian-inc/noseyparkerSupports -Supports version 0.16.0: -https://github.com/praetorian-inc/noseyparker/releases/tag/v0.16.0 +Supports versions [0.16.0](https://github.com/praetorian-inc/noseyparker/releases/tag/v0.16.0) and [0.22.0](https://github.com/praetorian-inc/noseyparker/releases/tag/v0.22.0) Things to note about the Nosey Parker Parser: - diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py index 65b475a4900..05fcdab49d3 100644 --- a/dojo/tools/noseyparker/parser.py +++ b/dojo/tools/noseyparker/parser.py @@ -17,85 +17,143 @@ def get_label_for_scan_types(self, scan_type): def get_description_for_scan_types(self, scan_type): return "Nosey Parker report file can be imported in JSON Lines format (option --jsonl). " \ - "Supports v0.16.0 of https://github.com/praetorian-inc/noseyparker" + "Supports v0.16.0 and v0.22.0 of https://github.com/praetorian-inc/noseyparker" def get_findings(self, file, test): """ Returns findings from jsonlines file and uses filter to skip findings and determine severity """ - dupes = {} - + self.dupes = {} # Turn JSONL file into DataFrame if file is None: return None if file.name.lower().endswith(".jsonl"): # Process JSON lines into Dict data = [json.loads(line) for line in file] - # Check for empty file if len(data[0]) == 0: return [] - # Parse through each secret in each JSON line for line in data: # Set rule to the current secret type (e.g. 
AWS S3 Bucket) - try: - rule_name = line["rule_name"] - secret = line["match_content"] - except Exception: + if line.get("rule_name") is not None and line.get("match_content") is not None: + self.version_0_16_0(line, test) + elif line.get("rule_name") is not None and line.get("finding_id") is not None: + self.version_0_22_0(line, test) + else: msg = "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0" raise ValueError(msg) - - # Set Finding details - for match in line["matches"]: - # The following path is to account for the variability in the JSON lines output - num_elements = len(match["provenance"]) - 1 - json_path = match["provenance"][num_elements] - - title = f"Secret(s) Found in Repository with Commit ID {json_path['commit_provenance']['commit_metadata']['commit_id']}" - filepath = json_path["commit_provenance"]["blob_path"] - line_num = match["location"]["source_span"]["start"]["line"] - description = f"Secret found of type: {rule_name} \n" \ - f"SECRET starts with: '{secret[:3]}' \n" \ - f"Committer Name: {json_path['commit_provenance']['commit_metadata']['committer_name']} \n" \ - f"Committer Email: {json_path['commit_provenance']['commit_metadata']['committer_email']} \n" \ - f"Commit ID: {json_path['commit_provenance']['commit_metadata']['commit_id']} \n" \ - f"Location: {filepath} line #{line_num} \n" \ - f"Line #{line_num} \n" - - # Internal de-duplication - key = hashlib.md5((filepath + "|" + secret + "|" + str(line_num)).encode("utf-8")).hexdigest() - - # If secret already exists with the same filepath/secret/linenum - if key in dupes: - finding = dupes[key] - finding.nb_occurences += 1 - dupes[key] = finding - else: - dupes[key] = True - # Create Finding object - finding = Finding( - test=test, - cwe=798, - title=title, - description=description, - severity="High", - mitigation="Reset the account/token and remove from source code. Store secrets/tokens/passwords in secret managers or secure vaults.", - date=datetime.today().strftime("%Y-%m-%d"), - verified=False, - active=True, - is_mitigated=False, - file_path=filepath, - line=line_num, - static_finding=True, - nb_occurences=1, - dynamic_finding=False, - - ) - dupes[key] = finding else: msg = "JSON lines format not recognized (.jsonl file extension). 
Make sure to use Nosey Parker v0.16.0" raise ValueError(msg) - return list(dupes.values()) + return list(self.dupes.values()) + + def version_0_16_0(self, line, test): + rule_name = line["rule_name"] + secret = line["match_content"] + for match in line["matches"]: + # The following path is to account for the variability in the JSON lines output + num_elements = len(match["provenance"]) - 1 + json_path = match["provenance"][num_elements] + + title = f"Secret(s) Found in Repository with Commit ID {json_path['commit_provenance']['commit_metadata']['commit_id']}" + filepath = json_path["commit_provenance"]["blob_path"] + line_num = match["location"]["source_span"]["start"]["line"] + description = f"Secret found of type: {rule_name} \n" \ + f"SECRET starts with: '{secret[:3]}' \n" \ + f"Committer Name: {json_path['commit_provenance']['commit_metadata']['committer_name']} \n" \ + f"Committer Email: {json_path['commit_provenance']['commit_metadata']['committer_email']} \n" \ + f"Commit ID: {json_path['commit_provenance']['commit_metadata']['commit_id']} \n" \ + f"Location: {filepath} line #{line_num} \n" \ + f"Line #{line_num} \n" + + # Internal de-duplication + key = hashlib.md5((filepath + "|" + secret + "|" + str(line_num)).encode("utf-8")).hexdigest() + + # If secret already exists with the same filepath/secret/linenum + if key in self.dupes: + finding = self.dupes[key] + finding.nb_occurences += 1 + self.dupes[key] = finding + else: + self.dupes[key] = True + # Create Finding object + finding = Finding( + test=test, + cwe=798, + title=title, + description=description, + severity="High", + mitigation="Reset the account/token and remove from source code. Store secrets/tokens/passwords in secret managers or secure vaults.", + date=datetime.today().strftime("%Y-%m-%d"), + verified=False, + active=True, + is_mitigated=False, + file_path=filepath, + line=line_num, + static_finding=True, + nb_occurences=1, + dynamic_finding=False, + + ) + self.dupes[key] = finding + + def version_0_22_0(self, line, test): + rule_name = line["rule_name"] + rule_text_id = line["rule_text_id"] + for match in line["matches"]: + # The following path is to account for the variability in the JSON lines output + num_elements = len(match["provenance"]) - 1 + json_path = match["provenance"][num_elements] + line_num = match["location"]["source_span"]["start"]["line"] + # scanned with git history + if json_path.get("first_commit"): + title = f"Secret(s) Found in Repository with Commit ID {json_path['first_commit']['commit_metadata']['commit_id']}" + filepath = json_path["first_commit"]["blob_path"] + description = f"Secret found of type: {rule_name} \n" \ + f"SECRET starts with: '{rule_text_id[:3]}' \n" \ + f"Committer Name: {json_path['first_commit']['commit_metadata']['committer_name']} \n" \ + f"Committer Email: {json_path['first_commit']['commit_metadata']['committer_email']} \n" \ + f"Commit ID: {json_path['first_commit']['commit_metadata']['commit_id']} \n" \ + f"Location: {filepath} line #{line_num} \n" \ + f"Line #{line_num} \n" + # scanned without git history + else: + title = "Secret(s) Found in Repository" + filepath = json_path["path"] + description = f"Secret found of type: {rule_name} \n" \ + f"SECRET starts with: '{rule_text_id[:3]}' \n" \ + f"Location: {filepath} line #{line_num} \n" \ + f"Line #{line_num} \n" + + # Internal de-duplication + key = hashlib.md5((filepath + "|" + rule_text_id + "|" + str(line_num)).encode("utf-8")).hexdigest() + + # If secret already exists with the same filepath/secret/linenum +
if key in self.dupes: + finding = self.dupes[key] + finding.nb_occurences += 1 + self.dupes[key] = finding + else: + self.dupes[key] = True + # Create Finding object + finding = Finding( + test=test, + cwe=798, + title=title, + description=description, + severity="High", + mitigation="Reset the account/token and remove from source code. Store secrets/tokens/passwords in secret managers or secure vaults.", + date=datetime.today().strftime("%Y-%m-%d"), + verified=False, + active=True, + is_mitigated=False, + file_path=filepath, + line=line_num, + static_finding=True, + nb_occurences=1, + dynamic_finding=False, + ) + self.dupes[key] = finding diff --git a/unittests/scans/noseyparker/noseyparker_0_22_0.jsonl b/unittests/scans/noseyparker/noseyparker_0_22_0.jsonl new file mode 100644 index 00000000000..9021abc22dd --- /dev/null +++ b/unittests/scans/noseyparker/noseyparker_0_22_0.jsonl @@ -0,0 +1,26 @@ +{"finding_id":"84d871b9aec92b82245f7b687d7bf64048d3c68e","rule_name":"AWS API Credentials","rule_text_id":"np.aws.6","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","groups":["QUtJQVNQMlRQSEpTNFhVVTNFUEo=","Q1Uwb0t0NEd0MWxIRHRKam5STGZkQlVaV2FkbVlJSGV2cS9UeVV6Lw=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"43d6429098e4d3acdf6938dfc159172c4dd99220","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1678401216 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1678401216 +0100","message":"Update docs for okteto challenge 15\n"},"blob_path":"src/main/resources/explanations/challenge15.adoc"}}],"blob_metadata":{"id":"1590fc5b5b352fd538a6372946b04120bc2b5649","num_bytes":1741,"mime_essence":null,"charset":null},"blob_id":"1590fc5b5b352fd538a6372946b04120bc2b5649","location":{"offset_span":{"start":1346,"end":1430},"source_span":{"start":{"line":25,"column":19},"end":{"line":27,"column":0}}},"groups":["QUtJQVNQMlRQSEpTNFhVVTNFUEo=","Q1Uwb0t0NEd0MWxIRHRKam5STGZkQlVaV2FkbVlJSGV2cS9UeVV6Lw=="],"snippet":{"before":"s-east-2\noutput=json\n\n#https://canarytokens.org/manage?token=cs07k832u9t1u4npowbvsw4mb&auth=7f75f2b2a4207c91fbc1ea59f7a495eb\n\naws_access_key_id=AKIASP2TPHJS6R72AFU2aws_secret_access_key=tpRLTDr0/PTZtUkS1rCUeWzQvknekDIpe4U3cxbv\n\n[default]\naws_access_key_id=","matching":"AKIASP2TPHJS4XUU3EPJ\naws_secret_access_key=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\n","after":"region=us-east-2\noutput=json\n\n#https://canarytokens.org/manage?token=n0cnd92mavmv1m61tjmyj9of5&auth=6519be82ef910868529091527c3edb3f\n\naws_access_key_id=AKIASP2TPHJS4XUU3EPJaws_secret_access_key=CU0oKt4Gt1lHDtJjnRLfdBUZWadmYIHevq/TyUz/\n\nhttps://wrongsecrets"},"structural_id":"f7532bb5152f116bf4d4ca4230255303b75433c6","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","rule_text_id":"np.aws.6","rule_name":"AWS API Credentials","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"52f981212f10fdbcffe43a3271a69ff152322ab6","rule_name":"AWS API 
Credentials","rule_text_id":"np.aws.6","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","groups":["QUtJQVNQMlRQSEpTNlI3MkFGVTI=","dHBSTFREcjAvUFRadFVrUzFyQ1VlV3pRdmtuZWtESXBlNFUzY3hidg=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"43d6429098e4d3acdf6938dfc159172c4dd99220","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1678401216 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1678401216 +0100","message":"Update docs for okteto challenge 15\n"},"blob_path":"src/main/resources/explanations/challenge15.adoc"}}],"blob_metadata":{"id":"1590fc5b5b352fd538a6372946b04120bc2b5649","num_bytes":1741,"mime_essence":null,"charset":null},"blob_id":"1590fc5b5b352fd538a6372946b04120bc2b5649","location":{"offset_span":{"start":998,"end":1082},"source_span":{"start":{"line":15,"column":19},"end":{"line":17,"column":0}}},"groups":["QUtJQVNQMlRQSEpTNlI3MkFGVTI=","dHBSTFREcjAvUFRadFVrUzFyQ1VlV3pRdmtuZWtESXBlNFUzY3hidg=="],"snippet":{"before":"tokens! Though you can do `aws sts get-caller-identity` with them. When you use them, some of your data (IP/agent) is being logged.]?!\nGo to https://wrongsecrets.herokuapp.com/stats[stats] when you tried them to find out more!\n\n[default]\naws_access_key_id=","matching":"AKIASP2TPHJS6R72AFU2\naws_secret_access_key=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\n","after":"region=us-east-2\noutput=json\n\n#https://canarytokens.org/manage?token=cs07k832u9t1u4npowbvsw4mb&auth=7f75f2b2a4207c91fbc1ea59f7a495eb\n\naws_access_key_id=AKIASP2TPHJS6R72AFU2aws_secret_access_key=tpRLTDr0/PTZtUkS1rCUeWzQvknekDIpe4U3cxbv\n\n[default]\naws_access"},"structural_id":"7c651b1c48c52e4d85c21b5eb4b4ded419da8f59","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","rule_text_id":"np.aws.6","rule_name":"AWS API Credentials","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"a8950151f2e808e7ad19f417415718e2b4bd94e1","rule_name":"AWS API Credentials","rule_text_id":"np.aws.6","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","groups":["QUtJQVlWUDRDSVBQSkNKT1BKV0w=","OEl5U1VlRWhMRE5kMkFlR0VlQklVVEJ3NzZQRmlUQjR0U1c5dWZIRg=="],"num_matches":2,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"47d1e638af75ff2ecbc19ca70cf2aaccaa0bcd6b","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1647241216 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1647241216 +0100","message":"WIP @ Challenge 15\n"},"blob_path":"src/main/java/org/owasp/wrongsecrets/challenges/docker/Challenge15.java"}}],"blob_metadata":{"id":"23f671fd1604266bbba2eedf651652b4baec6560","num_bytes":3335,"mime_essence":"application/octet-stream","charset":null},"blob_id":"23f671fd1604266bbba2eedf651652b4baec6560","location":{"offset_span":{"start":1423,"end":1516},"source_span":{"start":{"line":55,"column":28},"end":{"line":57,"column":0}}},"groups":["QUtJQVlWUDRDSVBQSkNKT1BKV0w=","OEl5U1VlRWhMRE5kMkFlR0VlQklVVEJ3NzZQRmlUQjR0U1c5dWZIRg=="],"snippet":{"before":"\n * [Arcane]\n * aws_access_key_id = AKIAYVP4CIPPEMEC27B2\n * aws_secret_access_key = 
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\n * output = json\n * region = us-east-2\n *\n * Arcane debug:\n * [default]\n * aws_access_key_id = ","matching":"AKIAYVP4CIPPJCJOPJWL\n * aws_secret_access_key = 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\n","after":" * output = json\n * region = us-east-2\n *\n * wrongsecrets debug:\n * [default]\n * aws_access_key_id = AKIAYVP4CIPPCXOWVNMW\n * aws_secret_access_key = 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\n * output = json\n * region = u"},"structural_id":"f25b5d7a3de7cce6af433b392625e7acd99c0355","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","rule_text_id":"np.aws.6","rule_name":"AWS API Credentials","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"2c2c0b1231212321f19ee02a392f7193524c1195","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1647240427 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1647240427 +0100","message":"First setup of challenge 15, will remove raw credentials later\n"},"blob_path":"src/main/java/org/owasp/wrongsecrets/challenges/docker/Challenge15.java"}}],"blob_metadata":{"id":"5b8bd6808428853791516927badf127418e64bc8","num_bytes":1536,"mime_essence":"application/octet-stream","charset":null},"blob_id":"5b8bd6808428853791516927badf127418e64bc8","location":{"offset_span":{"start":1168,"end":1261},"source_span":{"start":{"line":47,"column":28},"end":{"line":49,"column":0}}},"groups":["QUtJQVlWUDRDSVBQSkNKT1BKV0w=","OEl5U1VlRWhMRE5kMkFlR0VlQklVVEJ3NzZQRmlUQjR0U1c5dWZIRg=="],"snippet":{"before":"\n * [Arcane]\n * aws_access_key_id = AKIAYVP4CIPPEMEC27B2\n * aws_secret_access_key = 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\n * output = json\n * region = us-east-2\n *\n * Arcane debug:\n * [default]\n * aws_access_key_id = ","matching":"AKIAYVP4CIPPJCJOPJWL\n * aws_secret_access_key = 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\n","after":" * output = json\n * region = us-east-2\n *\n * wrongsecrets debug:\n * [default]\n * aws_access_key_id = AKIAYVP4CIPPCXOWVNMW\n * aws_secret_access_key = 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\n * output = json\n * region = u"},"structural_id":"ed247789cc7d4aa713c26844a3373555f664bb0b","rule_structural_id":"39d60c56d8a84ca6ab5999de8fea93657e3cae99","rule_text_id":"np.aws.6","rule_name":"AWS API Credentials","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"1c82eb8796a72495efce9ecf618b84fba10ffc84","rule_name":"Base64-PEM-Encoded Private 
Key","rule_text_id":"np.pem.2","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmMzZFhVMnRwWlU5bk5rSmhkRWxHVFVKMFZsZHpaVzluVFd3MFVraFlOblJKUTFsUU0wRXdWelJKU2pocmMxZzJDbXh2V0VWeE15czNOV3hUWjBVMVJ6UkdTMk5YWVVWNVV6ZGpjR0V5Y0VSVU5UTjVSbU4xTlRkdFQwdFJWa2gyT0c1VmN6VnJNM2hYY1VGWE5pczNNMEVLT1RabE1IWnRlbmxLYkc5T1JVa3dPVTFwUzBac00zZHNTelZMTjJWQ1JsRnBiMnRSYjNZeFNsRnFPVVo0VkdGSU1EQlNNR3BxSzAwM0wzaFdTV0ZJS3dweGFtVTNOMHhhU2pCcWRESkRWWGRLVkc5NFVUTXlSRVl3VkRRNE1IaDRjRE5QTTNaWU9FUkNUbmhaUzNNMUt6Vk5RekV4TWtweFp6QnJZbXBUYVZNMUNpOHZaWEYzU1hGd1R6VXpaVXhETjJGMVVWbHRUVTVEYjFaTVduTm1hVzF0ZURGd1RYTjRiRWh1VEVKeGNWTkdiVzVzZVZCb09WcHZlVmRTWkROUWRIY0tVV1UzVjIwNVptcHJaU3RYTlc1eWFIYzNNMjV1TVhGS056TjBRWFU1WkZReVQybEZjV1pRTmpNM01YZFRkSHBaVXpCR2EwbGxWV2xWWkVSeGRXRlhVQW9yUjFNM1VrUlFhbk5MTVhaTUszbG1XbEJNYkROWVNrdHZLMFl6UlVKcmIyZGpiM05MZHpOdWVFaHNjRUkwVFd0VFJuSkViMmRKUVVrMFJVOVZTelIzQ2toc1JqZE1jU3REZDA0eGFYRkNkbFp2YldGT1NqVldURlZKVlRWamNtcFZMMUk1U1hkSU56Tk5OSFpYU0ZaV2QzaFFOV0ZxVW01cE9HMXdlV3BaUzIwS2VHd3JSMXBSTDI1bmJFMUxUVzlTY0c5dVNEVnFMMFJOTms4MVdURTVkVmcyT1VsbVRFRlNhRGhST0dOMFIwcG1PRzA0Wkd0NWVsaEhLMmRTWWxvNGNncEtkVk0wVFVWNmJtcFNhMkpyYUdodEwyZzFUVGR4WkM5RmRUSm5ibWxUT1c5eVkzVlVNR0UzTlVkMFJtbHNTbE15YTNwbVRsZGFVamhOY0VGSlFXNXFDbWh1YlhJeWNEVTJNVEJCUkVORU1uSnFSbkJUUlV0UFN6bFBOVUY1YWxSUWVsWnNaRFJ3WlhZNU0ybGpjVzFQVHprMGJEQnhjVXBIY21NNFEwRjNSVUVLUVZGTFEwRm5SVUZ2T0U1VFVFTTNRbGRNYkd0dlJVTjVLMjlHU0RaRlRFUktla0ZVZFdkYU5tVnlSVEo1ZEdGVFpXUm5SQ3QzVUhWWmNXMUdjV3hwVkFveU0xUkpXbTQxYW1oVmFpOHpNMDVGTmxFck1rbHpRbVpwUWk5S2REaFNaUzlvUlhKa1NURnFTR05UUm5FNWQxQk1WemR2YVVsamFITmhjMUpXVXpBeUNrdHJNSGRNUm1JemFIaG5OMWR6VEhaQlltZGtiRUV4UzNaNEx6VlFlRUY1V25NMldXeFljRE55TlZOemFISXJkSFp2TnpKc1YybHhTRXMwVmxVNGJrSUtTaTlpU3pKSFFtUkdlVk5YYVc1RWRFcExZM0JZZG14alFXZFZTMlF5TlU1cldWZDRWRzVYY2treE5VTnpiREZ3UW14S1oyTjRLemR2YlVzclMxcHVkUXBwTmtkNlltNXRRMFJqVjBwU1FtaHpSU3R2TkhaM1VqbEZNVzlSVm1KdGJsZzNaVzFMYjJSeFdua3hTRUY2ZW04MlIyNVBlakp4V0Zjd2ExQjFjMjh6Q2pJdlZIVTVia2R6WlhKSlN6UjVVV2RyUW10UmN6UlRZbkZCTW5vMVNHRnZPU3RLVEZVMGRFdExXVVF2WkRkMk1IQTFTa0ZKUkdSU1lsQTNVMWhSU2pBS2RURjJOWGxxVm1sU2JFNDJiVkpuZVd0Tk5rODVTSGN3YWtWWFF5dE5SRzV6Tm04eFMySnhiM1p5TDNOWk4zVk5SVVpyVkVaeFdFY3lUbmRZTm1FMVVBbzJTbmRIV1U1RFNYSk5hWHBqYUVZd2RVNTBTVUZJUzFScE1tOU9jak0wY1ZaUWIwWnJWV3N2VmxJNFYzUjFOSGM1Y3pCWlNsaDFaM3BsV2xBclJ6TjZDbTVGZUhSV05qUkNhWFJ3THk5MWExcDZOVWMzWmtvMlFqRkZTV1pDTTJWek1uZFliVTV1TVdWbk9YVjZjWEpoTWxCMVNHRnBjWGxoYTB4ak1YTktURm9LZG01Rk5VMU9XRTF1WTNaalVHUkdRbGRqT0V3d1JGWkhRMjlyZFcxQ1dqSTFabWRQUjNGcFVrRlBiMHRFZDAxVWFtMWxNR1ZzTkdRdlFVNXlabFJTZFFvMVRWZHJNRXRTU1VscUt6ZEdjeXRTUldkYVZUVnlRazl1VWpORWNuVllSeXRXWTI1MUswbzBSbk5yWjJOSmFqRldhMFZEWjJkRlFrRk5SMHRrYlhBeUNuRnRlVVJDVW1GYWVuRnNORXRRVlZGMFVGVmlTMDVDT1Vad1YycHdZVXhrVUdaVE1VRjFOMEl2WjJoYU56UndhamgxZEhsalNrZFpkMWt5ZFdsb2NURUtTRzlFY2tOaFExZE9jbmhDZHpKWGQwTTRPSGwyU1dsbGRtRkhhRXBSVG5KNFJuRmFiWEJIWkdzNFRpdFJNeTlsYW5scFpWQjJTMVJZWTB0RU9TdFlXUXBOZGtacFpWaHpRbWhsVW5sa1QzYzFZM00zT1hjck1YazJOVGRLT1VScGVVdzRaVlpHVkU0emFFWlBUQzlUV1hCamEzaGtiRVZOZDJGbVlrOXVXRTlsQ25wQmJFdEJlR1ppUldKRlNsZzJUR3hIY0RkcWQydE5abVJvV1d4ak9Ya3pWSE5WVGpsU1REbDBaVkZsVUZSS1N6Wk1VRzVvYldOSlZEVjBTSGxsWVZVS2NIQlJRWFZyZEZZdlkxUmhNa3N5VUZSRVkyVk1LMFpvY1dacVFVdzFObE5GY0d0U2QwSXhSV296Y1RrdkwyVTRaa2xCZVZkdVUwTnZWM0F4UjNGb1dRcFpRMEUxZFdoa1MzVjRXbGhVS3poRFoyZEZRa0ZQZWt4dGFqTnpTSFpRU21GcFVqZDNibmhuYTAxWU9WZzBiMGd6UmpNNFIydEVORkZwVUhNNWJsYzJDblJDTVN0RGQycEtaekZHTkVSc1QwaEVVVTh2Y1RnM2QwSXJZa0ZMUlVwTWJGZGFiR1Z4VlhaNlpFTnlTalJMVTNJeWFHZDNUbWh3Wnl0Mk5IZGhVVlVLWm5kUGVXY3JVbk5xUmpac1FXUlFiRlpCU0hNMldsW
XJhQzgwZW5wMFJ6WlZlbXRwUjAxcU9HbDJSMk5uSzNCRE4wMTVOVTVHVjBOUVJtVlZWSGd2TmdwWlNuRlRkV2xQY1cxeU5qQm9XVTVaY25KYVpUTlVaRGhXVlZsWWFtWjNZVmxwYjFWVWFVMDVjREZSTVRrNU9FSlBObmhDTlhveVNXMUhOV1EyUzFCMUNuQlhkR2hHVEZaTFZWYzJhWGwxUW0wd1IyOVhlazVKVFVZdldIZG1Xa3R1TWt0bWFtUldRVUpZVXpWRFRtOXlabUpoWjBGa05tdFBTR3Q2V1RGSE55c0thRkp3UTBWdVVIUkRiRVF6Y2k5SFl6VnZSemM1ZEM5QmRDOVFOVTVVUzA5UWRHZzNLMkpJZDI5RFJVTm5aMFZDUVVzeGJIWkZiMDlJZDI5NFMwaElhZ3BSTUVOaFNYcG1UWHBDVGpsVVkyWmtRMnR5ZFROM2RGaGFTbUpHVW1Wa1ltSndiRlV5YTFGSE5FUkZTbmx6YW01TWVVbGpObWxoWW1SelZGSjZNRXBOQ2pCeFZTOHhhWGhaTWxoMU9WaFRVMW81UWxCSVMwUTBNRmw1Y3pSNWVqaEVRM04yZGxvNVNUaE9PRFpSVjBveFJWbHdRbEJaT1RWT1NWQmpZVE4yVUZJS2JsZ3ZZU3M1TlZsaFowRkNXVE5TZEhKaFVVdEdRVUp1ZUVZM2RuUndjbmg2VjBJNE16QlZPV2hQV0dWUlYwMW9SVkUzUzJSck5rMVRkMnhZWjJkWWVBcG5OWGN4V21KWGRuaFlXSFJaVEVvMmExaFhkVUZIZDFNek1FOHhRMWxyUjFCa2QzWkxNbnBRY1VWVWJXWlpaalUzYW1sVmVrNU5Sa0V5YXl0aVlXNXdDbFpRTnpGbk5FOWpRM2hEYlhaM1kxTldVa3BGVUZkWE5GRlhiR0ZsWlVNckswUXhhakZPZWtSNGJHVnJlRll6TVZjeVJHaHZaM2xLWjFSWVpVRnNaWG9LTkVsS1dHNDBhME5uWjBWQ1FVcDFjR1JIYmtWalRYZzVNbGRNUjFsTFJqSnpXRlZYUkU1dmJVNU1NSEl6ZFRWaGVFMHZkRVpGT1hOUWNqaHpabTlDTWdwU0wyTnJXSFpoVm5wMk5YQXpOekpZYlZCdlpEQTViRkUyY0U0emJteHZPSGhMVjNBdllqQmxVa2RVZGxKYVdpdE9jRlJITkVSVVIzSkpRak4wTjBKUUNtdDVPR05rWTJ0bU9XdDFWblpETDJ0RUt6UlNha3RJWlZKS2RFbFRUbUpITDFkbE4wcG9Vek5xWjJNcmMwdExUbXBNUm1RNE1qTnZTRGt3T1VjeFVXRUtPRTFNZHpGT2RqRlViRlJuYnpkbE16WnBOamRwVlVSNU4zaHJUMXBDWkZOcWF6aFVZM3BEVFVaUVp6SkZjR050TVd3eWJrdHBiWEF4WlVKNU0zcFJWZ29yVGxseUsyZFlkVlZKSzA1RldGQTNUWG93Wm5KTmVtUjJaRlZRT1RaNGVWZFNhbmgxWmpCemVERTRjbVl6TkVsdkt6Wk5helJ0Y21zd1FXaHplRTh4Q2k5YVVXZFlkVGhNTXpWUWJGSnVUSE56TkVOVmEwWXpPVzFHTm1wclVUbHBVMVZGUTJkblJVRkRlVlJQUkRsV2RXcGFUMEZTUldFMVowdG1TMlJzY3lzS04yczVUMUJTVWtaMVkybHVSRTExVTFkV1oyTXZRM2hSTDI4clJ6Z3JUMU53YlVnNU1sbFFTM1ZKVWtSVEx6WktiWEIyY1dVeVdGcEpOMFo1YkM4ell3cEpOM2N5V1dka1JWTTVPVkl2VGpOcE9VaDJRMFZ1ZGtGeFNUSXZjalJpU0ZaMFNVbGhSMk14VG01dGNsaDZhemxvYkRsRGVsZFZPRGM1Y1hCMGJIQjJDalpSVGtsek9WZGxWa2hsU21nMGRrYzJTV3g2Tm01aU5VeGFkRU5xYTBaWFFWZEljV05SV1RNd04xUm5TWGN4ZURBek0wUlhNa3QzTVVGRFVWaFhXbU1LUVM4d01saEpZV05VV21GSE0ySk5iVko0TTFVNGJHZHVTVlpzUlhOcWJWcFpaVlZtU1RKbmVUQXlZblprYWtaamQzUkxWVmR5UTFSdmFFUXhZbFI1VXdwcWQwcFFla1pYU0ZFNU9XaHlVa3h6WTBsRVpHTmtaVk5zU0RnNVIwOXZNbTVOTlhKelZuUmhSbXByYjNZNGJHWkNTakpoTlZWRVJWRXpRMlExUVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./k8s/main.key"},{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ae88235b6d82a71ff07c095841aafc08837f32be","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1720167712 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1720167712 +0200","message":"step 1, part 1: fix for sealed secret 
controler\n"},"blob_path":"k8s/main.key"}}],"blob_metadata":{"id":"f5407810e2991e97e14756fe20d0263020eb6981","num_bytes":7091,"mime_essence":null,"charset":null},"blob_id":"f5407810e2991e97e14756fe20d0263020eb6981","location":{"offset_span":{"start":2374,"end":6707},"source_span":{"start":{"line":6,"column":14},"end":{"line":7,"column":0}}},"groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmMzZFhVMnRwWlU5bk5rSmhkRWxHVFVKMFZsZHpaVzluVFd3MFVraFlOblJKUTFsUU0wRXdWelJKU2pocmMxZzJDbXh2V0VWeE15czNOV3hUWjBVMVJ6UkdTMk5YWVVWNVV6ZGpjR0V5Y0VSVU5UTjVSbU4xTlRkdFQwdFJWa2gyT0c1VmN6VnJNM2hYY1VGWE5pczNNMEVLT1RabE1IWnRlbmxLYkc5T1JVa3dPVTFwUzBac00zZHNTelZMTjJWQ1JsRnBiMnRSYjNZeFNsRnFPVVo0VkdGSU1EQlNNR3BxSzAwM0wzaFdTV0ZJS3dweGFtVTNOMHhhU2pCcWRESkRWWGRLVkc5NFVUTXlSRVl3VkRRNE1IaDRjRE5QTTNaWU9FUkNUbmhaUzNNMUt6Vk5RekV4TWtweFp6QnJZbXBUYVZNMUNpOHZaWEYzU1hGd1R6VXpaVXhETjJGMVVWbHRUVTVEYjFaTVduTm1hVzF0ZURGd1RYTjRiRWh1VEVKeGNWTkdiVzVzZVZCb09WcHZlVmRTWkROUWRIY0tVV1UzVjIwNVptcHJaU3RYTlc1eWFIYzNNMjV1TVhGS056TjBRWFU1WkZReVQybEZjV1pRTmpNM01YZFRkSHBaVXpCR2EwbGxWV2xWWkVSeGRXRlhVQW9yUjFNM1VrUlFhbk5MTVhaTUszbG1XbEJNYkROWVNrdHZLMFl6UlVKcmIyZGpiM05MZHpOdWVFaHNjRUkwVFd0VFJuSkViMmRKUVVrMFJVOVZTelIzQ2toc1JqZE1jU3REZDA0eGFYRkNkbFp2YldGT1NqVldURlZKVlRWamNtcFZMMUk1U1hkSU56Tk5OSFpYU0ZaV2QzaFFOV0ZxVW01cE9HMXdlV3BaUzIwS2VHd3JSMXBSTDI1bmJFMUxUVzlTY0c5dVNEVnFMMFJOTms4MVdURTVkVmcyT1VsbVRFRlNhRGhST0dOMFIwcG1PRzA0Wkd0NWVsaEhLMmRTWWxvNGNncEtkVk0wVFVWNmJtcFNhMkpyYUdodEwyZzFUVGR4WkM5RmRUSm5ibWxUT1c5eVkzVlVNR0UzTlVkMFJtbHNTbE15YTNwbVRsZGFVamhOY0VGSlFXNXFDbWh1YlhJeWNEVTJNVEJCUkVORU1uSnFSbkJUUlV0UFN6bFBOVUY1YWxSUWVsWnNaRFJ3WlhZNU0ybGpjVzFQVHprMGJEQnhjVXBIY21NNFEwRjNSVUVLUVZGTFEwRm5SVUZ2T0U1VFVFTTNRbGRNYkd0dlJVTjVLMjlHU0RaRlRFUktla0ZVZFdkYU5tVnlSVEo1ZEdGVFpXUm5SQ3QzVUhWWmNXMUdjV3hwVkFveU0xUkpXbTQxYW1oVmFpOHpNMDVGTmxFck1rbHpRbVpwUWk5S2REaFNaUzlvUlhKa1NURnFTR05UUm5FNWQxQk1WemR2YVVsamFITmhjMUpXVXpBeUNrdHJNSGRNUm1JemFIaG5OMWR6VEhaQlltZGtiRUV4UzNaNEx6VlFlRUY1V25NMldXeFljRE55TlZOemFISXJkSFp2TnpKc1YybHhTRXMwVmxVNGJrSUtTaTlpU3pKSFFtUkdlVk5YYVc1RWRFcExZM0JZZG14alFXZFZTMlF5TlU1cldWZDRWRzVYY2treE5VTnpiREZ3UW14S1oyTjRLemR2YlVzclMxcHVkUXBwTmtkNlltNXRRMFJqVjBwU1FtaHpSU3R2TkhaM1VqbEZNVzlSVm1KdGJsZzNaVzFMYjJSeFdua3hTRUY2ZW04MlIyNVBlakp4V0Zjd2ExQjFjMjh6Q2pJdlZIVTVia2R6WlhKSlN6UjVVV2RyUW10UmN6UlRZbkZCTW5vMVNHRnZPU3RLVEZVMGRFdExXVVF2WkRkMk1IQTFTa0ZKUkdSU1lsQTNVMWhSU2pBS2RURjJOWGxxVm1sU2JFNDJiVkpuZVd0Tk5rODVTSGN3YWtWWFF5dE5SRzV6Tm04eFMySnhiM1p5TDNOWk4zVk5SVVpyVkVaeFdFY3lUbmRZTm1FMVVBbzJTbmRIV1U1RFNYSk5hWHBqYUVZd2RVNTBTVUZJUzFScE1tOU9jak0wY1ZaUWIwWnJWV3N2VmxJNFYzUjFOSGM1Y3pCWlNsaDFaM3BsV2xBclJ6TjZDbTVGZUhSV05qUkNhWFJ3THk5MWExcDZOVWMzWmtvMlFqRkZTV1pDTTJWek1uZFliVTV1TVdWbk9YVjZjWEpoTWxCMVNHRnBjWGxoYTB4ak1YTktURm9LZG01Rk5VMU9XRTF1WTNaalVHUkdRbGRqT0V3d1JGWkhRMjlyZFcxQ1dqSTFabWRQUjNGcFVrRlBiMHRFZDAxVWFtMWxNR1ZzTkdRdlFVNXlabFJTZFFvMVRWZHJNRXRTU1VscUt6ZEdjeXRTUldkYVZUVnlRazl1VWpORWNuVllSeXRXWTI1MUswbzBSbk5yWjJOSmFqRldhMFZEWjJkRlFrRk5SMHRrYlhBeUNuRnRlVVJDVW1GYWVuRnNORXRRVlZGMFVGVmlTMDVDT1Vad1YycHdZVXhrVUdaVE1VRjFOMEl2WjJoYU56UndhamgxZEhsalNrZFpkMWt5ZFdsb2NURUtTRzlFY2tOaFExZE9jbmhDZHpKWGQwTTRPSGwyU1dsbGRtRkhhRXBSVG5KNFJuRmFiWEJIWkdzNFRpdFJNeTlsYW5scFpWQjJTMVJZWTB0RU9TdFlXUXBOZGtacFpWaHpRbWhsVW5sa1QzYzFZM00zT1hjck1YazJOVGRLT1VScGVVdzRaVlpHVkU0emFFWlBUQzlUV1hCamEzaGtiRVZOZDJGbVlrOXVXRTlsQ25wQmJFdEJlR1ppUldKRlNsZzJUR3hIY0RkcWQydE5abVJvV1d4ak9Ya3pWSE5WVGpsU1REbDBaVkZsVUZSS1N6Wk1VRzVvYldOSlZEVjBTSGxsWVZVS2NIQlJRWFZyZEZZdlkxUmhNa3N5VUZSRVkyVk1LMFpvY1dacVFVdzFObE5GY0d0U2QwSXhSV296Y1RrdkwyVTRaa2xCZVZkdVUwTnZWM0F4UjNGb1dRcFpRMEUxZFdoa1MzVjRXbGhVS
3poRFoyZEZRa0ZQZWt4dGFqTnpTSFpRU21GcFVqZDNibmhuYTAxWU9WZzBiMGd6UmpNNFIydEVORkZwVUhNNWJsYzJDblJDTVN0RGQycEtaekZHTkVSc1QwaEVVVTh2Y1RnM2QwSXJZa0ZMUlVwTWJGZGFiR1Z4VlhaNlpFTnlTalJMVTNJeWFHZDNUbWh3Wnl0Mk5IZGhVVlVLWm5kUGVXY3JVbk5xUmpac1FXUlFiRlpCU0hNMldsWXJhQzgwZW5wMFJ6WlZlbXRwUjAxcU9HbDJSMk5uSzNCRE4wMTVOVTVHVjBOUVJtVlZWSGd2TmdwWlNuRlRkV2xQY1cxeU5qQm9XVTVaY25KYVpUTlVaRGhXVlZsWWFtWjNZVmxwYjFWVWFVMDVjREZSTVRrNU9FSlBObmhDTlhveVNXMUhOV1EyUzFCMUNuQlhkR2hHVEZaTFZWYzJhWGwxUW0wd1IyOVhlazVKVFVZdldIZG1Xa3R1TWt0bWFtUldRVUpZVXpWRFRtOXlabUpoWjBGa05tdFBTR3Q2V1RGSE55c0thRkp3UTBWdVVIUkRiRVF6Y2k5SFl6VnZSemM1ZEM5QmRDOVFOVTVVUzA5UWRHZzNLMkpJZDI5RFJVTm5aMFZDUVVzeGJIWkZiMDlJZDI5NFMwaElhZ3BSTUVOaFNYcG1UWHBDVGpsVVkyWmtRMnR5ZFROM2RGaGFTbUpHVW1Wa1ltSndiRlV5YTFGSE5FUkZTbmx6YW01TWVVbGpObWxoWW1SelZGSjZNRXBOQ2pCeFZTOHhhWGhaTWxoMU9WaFRVMW81UWxCSVMwUTBNRmw1Y3pSNWVqaEVRM04yZGxvNVNUaE9PRFpSVjBveFJWbHdRbEJaT1RWT1NWQmpZVE4yVUZJS2JsZ3ZZU3M1TlZsaFowRkNXVE5TZEhKaFVVdEdRVUp1ZUVZM2RuUndjbmg2VjBJNE16QlZPV2hQV0dWUlYwMW9SVkUzUzJSck5rMVRkMnhZWjJkWWVBcG5OWGN4V21KWGRuaFlXSFJaVEVvMmExaFhkVUZIZDFNek1FOHhRMWxyUjFCa2QzWkxNbnBRY1VWVWJXWlpaalUzYW1sVmVrNU5Sa0V5YXl0aVlXNXdDbFpRTnpGbk5FOWpRM2hEYlhaM1kxTldVa3BGVUZkWE5GRlhiR0ZsWlVNckswUXhhakZPZWtSNGJHVnJlRll6TVZjeVJHaHZaM2xLWjFSWVpVRnNaWG9LTkVsS1dHNDBhME5uWjBWQ1FVcDFjR1JIYmtWalRYZzVNbGRNUjFsTFJqSnpXRlZYUkU1dmJVNU1NSEl6ZFRWaGVFMHZkRVpGT1hOUWNqaHpabTlDTWdwU0wyTnJXSFpoVm5wMk5YQXpOekpZYlZCdlpEQTViRkUyY0U0emJteHZPSGhMVjNBdllqQmxVa2RVZGxKYVdpdE9jRlJITkVSVVIzSkpRak4wTjBKUUNtdDVPR05rWTJ0bU9XdDFWblpETDJ0RUt6UlNha3RJWlZKS2RFbFRUbUpITDFkbE4wcG9Vek5xWjJNcmMwdExUbXBNUm1RNE1qTnZTRGt3T1VjeFVXRUtPRTFNZHpGT2RqRlViRlJuYnpkbE16WnBOamRwVlVSNU4zaHJUMXBDWkZOcWF6aFVZM3BEVFVaUVp6SkZjR050TVd3eWJrdHBiWEF4WlVKNU0zcFJWZ29yVGxseUsyZFlkVlZKSzA1RldGQTNUWG93Wm5KTmVtUjJaRlZRT1RaNGVWZFNhbmgxWmpCemVERTRjbVl6TkVsdkt6Wk5helJ0Y21zd1FXaHplRTh4Q2k5YVVXZFlkVGhNTXpWUWJGSnVUSE56TkVOVmEwWXpPVzFHTm1wclVUbHBVMVZGUTJkblJVRkRlVlJQUkRsV2RXcGFUMEZTUldFMVowdG1TMlJzY3lzS04yczVUMUJTVWtaMVkybHVSRTExVTFkV1oyTXZRM2hSTDI4clJ6Z3JUMU53YlVnNU1sbFFTM1ZKVWtSVEx6WktiWEIyY1dVeVdGcEpOMFo1YkM4ell3cEpOM2N5V1dka1JWTTVPVkl2VGpOcE9VaDJRMFZ1ZGtGeFNUSXZjalJpU0ZaMFNVbGhSMk14VG01dGNsaDZhemxvYkRsRGVsZFZPRGM1Y1hCMGJIQjJDalpSVGtsek9WZGxWa2hsU21nMGRrYzJTV3g2Tm01aU5VeGFkRU5xYTBaWFFWZEljV05SV1RNd04xUm5TWGN4ZURBek0wUlhNa3QzTVVGRFVWaFhXbU1LUVM4d01saEpZV05VV21GSE0ySk5iVko0TTFVNGJHZHVTVlpzUlhOcWJWcFpaVlZtU1RKbmVUQXlZblprYWtaamQzUkxWVmR5UTFSdmFFUXhZbFI1VXdwcWQwcFFla1pYU0ZFNU9XaHlVa3h6WTBsRVpHTmtaVk5zU0RnNVIwOXZNbTVOTlhKelZuUmhSbXByYjNZNGJHWkNTakpoTlZWRVJWRXpRMlExUVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"snippet":{"before":"lIVUx3WURSTmJyNjVZb2RhRU9WRE1ITW5ZUWptMWlNUElLM3QyMkdmcEErCktyRENHMUVlZnZkUTU0eGhreUtZVjZOZko0R0gyOVJicXNkeVMyS1hDUXJINDlqV1h3RHpRZ09iK1BUcW9nNCsKNzVVemhsQ2pYT3FPRFFaY1JjWXVyZTJjK2Z1elU2cVJ1L1o5SlJZM0MyOD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n tls.key: 
","matching":"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBc3dXU2tpZU9nNkJhdElGTUJ0VldzZW9nTWw0UkhYNnRJQ1lQM0EwVzRJSjhrc1g2CmxvWEVxMys3NWxTZ0U1RzRGS2NXYUV5UzdjcGEycERUNTN5RmN1NTdtT0tRVkh2OG5VczVrM3hXcUFXNis3M0EKOTZlMHZtenlKbG9ORUkwOU1pS0ZsM3dsSzVLN2VCRlFpb2tRb3YxSlFqOUZ4VGFIMDBSMGpqK003L3hWSWFIKwpxamU3N0xaSjBqdDJDVXdKVG94UTMyREYwVDQ4MHh4cDNPM3ZYOERCTnhZS3M1KzVNQzExMkpxZzBrYmpTaVM1Ci8vZXF3SXFwTzUzZUxDN2F1UVltTU5Db1ZMWnNmaW1teDFwTXN4bEhuTEJxcVNGbW5seVBoOVpveVdSZDNQdHcKUWU3V205ZmprZStXNW5yaHc3M25uMXFKNzN0QXU5ZFQyT2lFcWZQNjM3MXdTdHpZUzBGa0llVWlVZERxdWFXUAorR1M3UkRQanNLMXZMK3lmWlBMbDNYSktvK0YzRUJrb2djb3NLdzNueEhscEI0TWtTRnJEb2dJQUk0RU9VSzR3CkhsRjdMcStDd04xaXFCdlZvbWFOSjVWTFVJVTVjcmpVL1I5SXdINzNNNHZXSFZWd3hQNWFqUm5pOG1weWpZS20KeGwrR1pRL25nbE1LTW9ScG9uSDVqL0RNNk81WTE5dVg2OUlmTEFSaDhROGN0R0pmOG04ZGt5elhHK2dSYlo4cgpKdVM0TUV6bmpSa2JraGhtL2g1TTdxZC9FdTJnbmlTOW9yY3VUMGE3NUd0RmlsSlMya3pmTldaUjhNcEFJQW5qCmhubXIycDU2MTBBRENEMnJqRnBTRUtPSzlPNUF5alRQelZsZDRwZXY5M2ljcW1PTzk0bDBxcUpHcmM4Q0F3RUEKQVFLQ0FnRUFvOE5TUEM3QldMbGtvRUN5K29GSDZFTERKekFUdWdaNmVyRTJ5dGFTZWRnRCt3UHVZcW1GcWxpVAoyM1RJWm41amhVai8zM05FNlErMklzQmZpQi9KdDhSZS9oRXJkSTFqSGNTRnE5d1BMVzdvaUljaHNhc1JWUzAyCktrMHdMRmIzaHhnN1dzTHZBYmdkbEExS3Z4LzVQeEF5WnM2WWxYcDNyNVNzaHIrdHZvNzJsV2lxSEs0VlU4bkIKSi9iSzJHQmRGeVNXaW5EdEpLY3BYdmxjQWdVS2QyNU5rWVd4VG5XckkxNUNzbDFwQmxKZ2N4KzdvbUsrS1pudQppNkd6Ym5tQ0RjV0pSQmhzRStvNHZ3UjlFMW9RVmJtblg3ZW1Lb2RxWnkxSEF6em82R25PejJxWFcwa1B1c28zCjIvVHU5bkdzZXJJSzR5UWdrQmtRczRTYnFBMno1SGFvOStKTFU0dEtLWUQvZDd2MHA1SkFJRGRSYlA3U1hRSjAKdTF2NXlqVmlSbE42bVJneWtNNk85SHcwakVXQytNRG5zNm8xS2Jxb3ZyL3NZN3VNRUZrVEZxWEcyTndYNmE1UAo2SndHWU5DSXJNaXpjaEYwdU50SUFIS1RpMm9OcjM0cVZQb0ZrVWsvVlI4V3R1NHc5czBZSlh1Z3plWlArRzN6Cm5FeHRWNjRCaXRwLy91a1p6NUc3Zko2QjFFSWZCM2VzMndYbU5uMWVnOXV6cXJhMlB1SGFpcXlha0xjMXNKTFoKdm5FNU1OWE1uY3ZjUGRGQldjOEwwRFZHQ29rdW1CWjI1ZmdPR3FpUkFPb0tEd01Uam1lMGVsNGQvQU5yZlRSdQo1TVdrMEtSSUlqKzdGcytSRWdaVTVyQk9uUjNEcnVYRytWY251K0o0RnNrZ2NJajFWa0VDZ2dFQkFNR0tkbXAyCnFteURCUmFaenFsNEtQVVF0UFViS05COUZwV2pwYUxkUGZTMUF1N0IvZ2haNzRwajh1dHljSkdZd1kydWlocTEKSG9EckNhQ1dOcnhCdzJXd0M4OHl2SWlldmFHaEpRTnJ4RnFabXBHZGs4TitRMy9lanlpZVB2S1RYY0tEOStYWQpNdkZpZVhzQmhlUnlkT3c1Y3M3OXcrMXk2NTdKOURpeUw4ZVZGVE4zaEZPTC9TWXBja3hkbEVNd2FmYk9uWE9lCnpBbEtBeGZiRWJFSlg2TGxHcDdqd2tNZmRoWWxjOXkzVHNVTjlSTDl0ZVFlUFRKSzZMUG5obWNJVDV0SHllYVUKcHBRQXVrdFYvY1RhMksyUFREY2VMK0ZocWZqQUw1NlNFcGtSd0IxRWozcTkvL2U4ZklBeVduU0NvV3AxR3FoWQpZQ0E1dWhkS3V4WlhUKzhDZ2dFQkFPekxtajNzSHZQSmFpUjd3bnhna01YOVg0b0gzRjM4R2tENFFpUHM5blc2CnRCMStDd2pKZzFGNERsT0hEUU8vcTg3d0IrYkFLRUpMbFdabGVxVXZ6ZENySjRLU3IyaGd3TmhwZyt2NHdhUVUKZndPeWcrUnNqRjZsQWRQbFZBSHM2WlYraC80enp0RzZVemtpR01qOGl2R2NnK3BDN015NU5GV0NQRmVVVHgvNgpZSnFTdWlPcW1yNjBoWU5ZcnJaZTNUZDhWVVlYamZ3YVlpb1VUaU05cDFRMTk5OEJPNnhCNXoySW1HNWQ2S1B1CnBXdGhGTFZLVVc2aXl1Qm0wR29Xek5JTUYvWHdmWktuMktmamRWQUJYUzVDTm9yZmJhZ0FkNmtPSGt6WTFHNysKaFJwQ0VuUHRDbEQzci9HYzVvRzc5dC9BdC9QNU5US09QdGg3K2JId29DRUNnZ0VCQUsxbHZFb09Id294S0hIagpRMENhSXpmTXpCTjlUY2ZkQ2tydTN3dFhaSmJGUmVkYmJwbFUya1FHNERFSnlzam5MeUljNmlhYmRzVFJ6MEpNCjBxVS8xaXhZMlh1OVhTU1o5QlBIS0Q0MFl5czR5ejhEQ3N2dlo5SThOODZRV0oxRVlwQlBZOTVOSVBjYTN2UFIKblgvYSs5NVlhZ0FCWTNSdHJhUUtGQUJueEY3dnRwcnh6V0I4MzBVOWhPWGVRV01oRVE3S2RrNk1Td2xYZ2dYeApnNXcxWmJXdnhYWHRZTEo2a1hXdUFHd1MzME8xQ1lrR1Bkd3ZLMnpQcUVUbWZZZjU3amlVek5NRkEyaytiYW5wClZQNzFnNE9jQ3hDbXZ3Y1NWUkpFUFdXNFFXbGFlZUMrK0QxajFOekR4bGVreFYzMVcyRGhvZ3lKZ1RYZUFsZXoKNElKWG40a0NnZ0VCQUp1cGRHbkVjTXg5MldMR1lLRjJzWFVXRE5vbU5MMHIzdTVheE0vdEZFOXNQcjhzZm9CMgpSL2NrWHZhVnp2NXAzNzJYbVBvZDA5bFE2cE4zbmxvOHhLV3AvYjBlUkdUdlJaWitOcFRHNERUR3JJQjN0N0JQCmt5OGNkY2tmOWt1VnZDL2tEKzRSaktIZ
VJKdElTTmJHL1dlN0poUzNqZ2Mrc0tLTmpMRmQ4MjNvSDkwOUcxUWEKOE1MdzFOdjFUbFRnbzdlMzZpNjdpVUR5N3hrT1pCZFNqazhUY3pDTUZQZzJFcGNtMWwybktpbXAxZUJ5M3pRVgorTllyK2dYdVVJK05FWFA3TXowZnJNemR2ZFVQOTZ4eVdSanh1ZjBzeDE4cmYzNElvKzZNazRtcmswQWhzeE8xCi9aUWdYdThMMzVQbFJuTHNzNENVa0YzOW1GNmprUTlpU1VFQ2dnRUFDeVRPRDlWdWpaT0FSRWE1Z0tmS2RscysKN2s5T1BSUkZ1Y2luRE11U1dWZ2MvQ3hRL28rRzgrT1NwbUg5MllQS3VJUkRTLzZKbXB2cWUyWFpJN0Z5bC8zYwpJN3cyWWdkRVM5OVIvTjNpOUh2Q0VudkFxSTIvcjRiSFZ0SUlhR2MxTm5tclh6azlobDlDeldVODc5cXB0bHB2CjZRTklzOVdlVkhlSmg0dkc2SWx6Nm5iNUxadENqa0ZXQVdIcWNRWTMwN1RnSXcxeDAzM0RXMkt3MUFDUVhXWmMKQS8wMlhJYWNUWmFHM2JNbVJ4M1U4bGduSVZsRXNqbVpZZVVmSTJneTAyYnZkakZjd3RLVVdyQ1RvaEQxYlR5Uwpqd0pQekZXSFE5OWhyUkxzY0lEZGNkZVNsSDg5R09vMm5NNXJzVnRhRmprb3Y4bGZCSjJhNVVERVEzQ2Q1QT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n","after":" kind: Secret\n metadata:\n creationTimestamp: \"2024-07-05T07:48:29Z\"\n generateName: sealed-secrets-key\n labels:\n sealedsecrets.bitnami.com/sealed-secrets-key: active\n name: sealed-secrets-keydp7k2\n namespace: kube-system\n resourceVe"},"structural_id":"1d846c2374cc26fbc45343177e2839083bb9c878","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","rule_text_id":"np.pem.2","rule_name":"Base64-PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"d7247b31c4416f723e6a0be17096ec5d87f66fa2","rule_name":"Base64-PEM-Encoded Private Key","rule_text_id":"np.pem.2","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmVtUXZNVXhTTkVvMFIycHpjbVowU1V0U2NqVldOV1VyZHpFMU1Tc3ZkRVZTU2xvMVRYUllZMlpLTlhVd2NTdERDbm80VHpCcE9XUkVPV2d3Y21VeFdURnJVMmhLWVVVd1NHeEJUamsyTURFNWFWaGpXbVp5YURNemFFbGxNMGhoU2xoRFVEZGxZVmhUU0RRelN6Uk9MMWNLWW5kclJUWkVjRVp0Y1U5T2VGQnhWVWxwYW1OUmNsSTFXRzFWYUhoaFVtNHZVbGcxUTI1NFRXeEtTV1prY21aeVpuUnhWMjFIWVZGNk1YWldhVWhJWndwWU5sTjVkSEZQTjBrd2RGcG1ZemRLVGtWclZVUXdObVJTTTJkTmJrWmpiRFUxVTNWUWMycGxWakkyTUZwQlVYb3lkR1JDTWxkd2F5OWlUR1EyYldkUENsaERRbXhRVWxKb2EyMHJhRk5aWlRSVVNYbGhZVWQwUlVSVlZUQnBkRkV3VlZnNVZrVnhaM1kyYWxaeVlsaElla2hVTVZSU2QxRjZPRlZCYmtoV1UxY0tlV3BaWTJSeVpqUnNhVkJIZFVSSVdsUTVaa1JvVlVwaEwyaHNUbEZuWkZWeVExcFpVMmxGU1ZONFVVOVVWVlZwUjBocmVHSjRVSE15VVVodVZ6ZFNWUXBQVEdSVlZVNHdVVnBwVG5sb2FFSnpWMHN5Vm1sdk1uSmhWRVkwZG5GdFRXd3pNRTlIVEhocVRXbzVaMFV2VjA5U1ZXMHlUSGRVUVVGR1pXWllUSFIyQ2xjNU1rNVdNRzFhTUdWNGRVOW9ibUV2WW1kWVNXMUNPR1JZZG5RNWVtcDRUV0U0YUVWV1VYZHFaemxIVW14NVUzRlNWR3BQV2xGdGVraElUbU5hUzNnS1dteDNjbGhwV1ZGS1NFWkRSbEpOZFd0a1dqUXZiVTltY2pCSVRVWXlMMlF2UlU5VldYaHdUVXRoYzAxRWN6aHBjWGwyWjNsYUsxaHNORlppZWtKV2NBcEtlbWxrT0dONU5rMUNOMDlGZVdkNmRrdDZZMVZUWkhOUFQzQjVlREppYW14QlkzWXplRE5pTXpSWU1IUnJkMkpEY1hsd1UxcFNZVlZYT0RkeFlXZE9DbmhwU2l0c2RqY3pWREJCWkRKUVpGZFFVa2RDYXk5clRWWXpaSFI0V1dnelFrc3daVk5CVkM5UlV6ZFZWalJpWjNOcFRYSnZja2xGTjFOalEwRjNSVUVLUVZGTFEwRm5SVUY0ZDI5TldIZGtXVzk1WVhWR1NFMUtlUzkyZUV4NlpHUlVhMDVKVEVWek9FMUdaVTloTlVOS056Z3lORGxYWVUxUlRsSkVMMHROZEFwaFNXWkNTSGxuTURrNVkwMVRXazlZYWxGc2JrNTRVV1pHT0ZWWE1rRllLMnhoZGs1UVNGazFlazlTV1RNM05YRnFUMnR2VTFGeUwzQlVRVU5HTkdOdENpOW9LemhGT0VoNllqWmplbVJEV2tvMVVFa3ZiVmhoWlhSMlowVlpOV1pUTjJoNmNqUkljRzFoY1d4dmVWbFVNelJvU1RKUGQyeHNNVkJLUm1WblEwa0tSRGxLYzJabk5YSlNiWEZWZHpCMlRrVlhha2RSUjNsa1JDc3dWMGhWZEZWamVFWnlkeTl4ZDBzMVlreGhhV3BNTlU0clIxZ3pZMnN3ZG5KaVFtaGtjZ3BWWjJ0ek1WRnpja3R2T0ZKU1dIcFlUMVZGT0ZWRVFtcFBTblY2YjNselNqQkNVeTltVUhWaVFWSjZiVWsxVjNwd2FqTlJNSHBIVWpZMFVYWk9jbEZqQ2pKUlRsUjJOR054VVRKQmJDOTJUWFJTT1dOell6TmlhemRPTjFKdldGZHNTMmwzZG5aM1kzWlJhVU5KV0dRMVZTdDRSRU5PYUVnMFIxQkdUa1ozVmtRS1RIbFNkVmxpU1ZneFVrNUxlRl
JsUkROMVRIcDNkVkZ1UzNVdlprSmtjR281ZUdrMU1FSTNhMVl3ZWpGNGNGWXZNa0ZQVEZOSlpqZ3pWVzlyUlhwYVZncDBVSEYzYjNFeVJsRnZTMlFyUzBGRlJuSnJVWEpwTjA5TU0wbG1SazFCVVVaa1ZFRmhhemd5TTJaeVExVktWemx3ZVZCd2FsWjFXazlTTTIxd1YyaFdDbVI0VUhSSGNsbDNMM0ZYYTJ4SFJXeFVjRWcxUmtrdlRrcEtSR05PUlhodVMyVmxNbkZGZUV3dmNYcEVSM3B3UTNGaGFua3hWMGRuUVZCalJtdEdiM0lLVkdkRVF6VlJOM3A1UW1jNFRIUk5PR0ZHYzJocGNtWXJla1YzVkRreFlTdFZRaXRXUzBNeGNUWnJZV2hJUjFSS1dWVjVUMVpDU1Zsak1IQlBVSGhHYkFwTU5HcG9XbkpaVEdsclNrMUZiMjU0YVhsRGVtSmlNRGs0VHpFclNDdEtNVUl3UVhrellsWkhVRlZoYUdWRlRqVk5lbXREWjJkRlFrRlBhWGx4Vlc4eENtTktSMXBVS3pKTVNYZ3piVkVyVkVKemFUSm9NV3hhYUhWcldtRXpTbXRuVTFSaVprcEpLMmxLUkVkVFRYUmtPVXQyVlc1VkswcEpSRXRrUmtSV1JUa0tjbXBNZVZRNVJuTjRRVk42ZW5Od1JFaFVlREl6WVU5UWJsazBaWGxZY1RCTk1tVnFhRnAyUmk4MlUwZHlOUzhyYWtWdGRsaEJiRzVCVEd4cVRYQkRSUXBUWVZKdGIxaHVTMEZ0VW01bFdHeE9VbU0zWVhKS1YxSmFRa2RTZFhwTFVUaFllV3Q0TlhRNGJ5OVFSamgzUlZKMGFXa3lTQzg1UlM5WVNHdEhUQ3RTQ2tSdVdrVlhNV2h5YVhkaVRHWldkRVpSVVU1SWJqZDFja1FyYlcxWFVtbExWRmhGTHpOTlZESkNZMlJXVFRWSUsxTlNMemhHTkhkVFpFMXJSVlZzWW1vS2NsWllhR1ZXZG1GWFZGQk5Ra0ZFY1VsUmQzVlVWVU54ZDA1U1NtMVRPRXd3UVZaVVVXRktXak5sUjFCcGRFNHJiM0F6U3k5SmFWVnJNMUZPZUROdlpRcFZNVE01WmxoQ09XbE5PWFI1TTAxRFoyZEZRa0ZQU2pseVEybGFMMFZtYjBaUFYweHJTRUZrUVVNNFZubGtWRGxhZEVwTWExUXpkVVJzVEZsWmJrdERDazV3UzNoaFEzTjFhR2RNTVd0a1NqZ3plRmN4YWpCa1kwMUtXamd4VlUwNVYyOUNZamRNUTJoQ1NGaGxXRTlPY21oclVrdzJjazFhTjBKRk9HNHJaVGtLVVRsSmRYcGpOWEUwUVdkc2NFZElZVVYwVmtOWlRqSlFkSFZDVG1abFkxbG9ZWGxRVW1RMWRsQnljalIxVEZGa2FuUjJXblkxVWl0Q2J6ZFpPVEZOZEFwNWMxUjBSa3RvUmxvd2RXdGtXVUZXTVdORU5IY3pUM1Z3TVZOQmRDdG9NMEl4Ym5wSVoycEhiREo1ZVNzclFWSXdWMjlDWVZCRVJtZzBVekZLVWpaMENsTlRSR012Ukd0M1VqRnBjVmxyYlZGcmMxTllZVzVGVW5WSlpIWnFSRU5VVDFadWJ6Uk1kUzlVUzBOT2RGUnhURUozZG5BelptUk5PR2RGSzNCR1lrOEtSa3Q2UkRCd1pXUlRObXBpV2psbFdsUmtlSGRHV1VnNFprbDVhMlZVVlhreWRIQTNhRlZFWW10dU1FTm5aMFZDUVV4V1EyaHNRa0k1Umk5WFJuTmhid3AyVlVGTVNVSlFVRTVsV1VwNlNsbGFTbWx4Y20xSE4waEZXVVJ0Y2s5dlpHMVRabk5DUTB0TVdUWjZNMW8wV1RaTVdFVlhablJ6Y25WelowRnlSMEZ5Q2pSNmVuVmxNRkpsY0hsV01GTm5NbWRVTVVKdVpIWmFXRGRFY25ONU5sTmtlRkppTlhVeFFtSjZhR1ZRZFRWbGRsaHZZMHRYUm1KMVZsSmlNSGQzTDBZS1dtSjFhMUp4UmxGeFprbEtjMHR2U2xOUVVFOXRkekF5YW05V1ZFeG9SbGROTjJkMVJqWTFZbWcxVDBzeWNqTlViR05FYW5SemRIbFRUblZxVURaNFR3cHBWWFJRY3pKUmJsQkJPRGNyV21OTlJubGpaalYzVFZGeFIxcDRTWEF4YVV4MmIyOTVhbFZOTmxKVk4wZ3pVME0wSzIwMWNtaElNRFZCTm1wNFp6ZG5DblJPT1N0TGFHY3dVMGN6WlcweGNXMUtMMHN4UzJSbVZqQmpTakJMWjI4elEyNXJjbWRUTldneFNGbHdjV3BGVWpKNFNuQnpWbkpWZVVsbVRXSTJkbmtLWmxkR2JUSjZWVU5uWjBWQ1FVNUxTRXR3TTJFNVMzZExVMFpLVDJSdldXZEVkVzB4WjFKVFoweDRSazhyVFZSMVNWSmFWVEpNTTBReEswcHVWVmxNUXdwRVowNHlURWxRVWxkeVVXUlFaaXRCTjBKRlYwaEZORk5ITm5WM2VITlJkMEkyZEdka1JYUlplVlZrTW05alV6RldZamt6T0U1T1EwWkhZWE0xZEZReUNtUjVVbUpwVURoRU5pOXRZbG81Y2t4SFduaEtjVGQ0UTBOUk9ISmFha054YVRBM1ZXNTVkV1JTWjNWS2VtaEVSRlJIYXk5MVJGaDZTRTFaVERobFYzY0tSR0ZKT0ZSd1VYVjBOMEpYUXpabVpVMTBMMVJtYURsNFpFd3phSGhKY3pOMll6RnRWMk5aWjBOa05WWlVVM2g0UkRsVlJscEhiWFJtV1c4d2JqWmhkd292T0VOc1RVSk9aR1JYSzFSd00yUlBiRFEwWXpJNWVsUklURFE0VURWdFJUWnFPV0pQV21kbVdIWTVkRGxqZEVsR1RFOW1aa3hpVEdaSFFsTTNZeTlOQ21kSFYwY3ZTRVpzY1ZGQk9Hd3lSbkZTYjA5ak5YRllVMnBoWjBWd1VYQjVhbmxyUTJkblJVRldObTVvWm1aRGMwcE5URmxpYUdKbGJIVk9NMGg1TjJrS1lYRjNkR0p3TVZwd1ExZzRXV3R2TUVrM1ZXRllVRnBaU1Vkc1YxRllWRFZsUWpCdWJIQlRXVlZMVVcxak9HOXBVa1ZvTjNoRWIwNXlSRnBCY21WeGVBcFJaREJVVG1sdVdrZFZaR3Q1VXpKTFdUUk1ibGsyWTB0aldYUmFUamh0UkM5cFFtTTJOV2RtU0dvNU1saFJUV0pSZUZrMWRUQXdWMFZKT1VOUmVGUXpDalpoVlVwS2REQm1abGRqU2pscloydHlVR0pFVTJ4UUwxaDJVbEJ0Y1dKRVVEZEVObkV6TUhBdk5VdzNZbXBrWnpKT1ZIWTRTMWxzTWpsV1p6SnRTSGtLT0VsemEybEVVRFZKUjJwUE5Fd3dOMmhVYTNSNE1sSTVTV0pQYUdNellrbDZiVW9yUTA0MFVGVlVhMFY2WW5kbVNVTjRTVkJOTlRsMVYyTlNTbk5ZY
kFwalMwMXJWVXRWV1RGR1MzWTJSQ3QzTURsc1UwWlBRMHRWSzNRMmFsSmxWV0ptVnk5eGF6VkNiMVpvTW05RFNXeHhZMGxYYWtaRlJYQlBOMlZTVVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"num_matches":2,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"2fc4236dfb6df4549e96cbf1b9e6bb15940f25da","committer_name":"GitHub","committer_email":"noreply@github.com","committer_timestamp":"1719145094 +0000","author_name":"pre-commit-ci-lite[bot]","author_email":"117423508+pre-commit-ci-lite[bot]@users.noreply.github.com","author_timestamp":"1719145094 +0000","message":"[pre-commit.ci lite] apply automatic fixes"},"blob_path":"private.pem"}}],"blob_metadata":{"id":"1bee033606a82c8b855f955063fc1b13a4201c1c","num_bytes":4333,"mime_essence":"application/x-x509-ca-cert","charset":null},"blob_id":"1bee033606a82c8b855f955063fc1b13a4201c1c","location":{"offset_span":{"start":0,"end":4333},"source_span":{"start":{"line":1,"column":1},"end":{"line":2,"column":0}}},"groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmVtUXZNVXhTTkVvMFIycHpjbVowU1V0U2NqVldOV1VyZHpFMU1Tc3ZkRVZTU2xvMVRYUllZMlpLTlhVd2NTdERDbm80VHpCcE9XUkVPV2d3Y21VeFdURnJVMmhLWVVVd1NHeEJUamsyTURFNWFWaGpXbVp5YURNemFFbGxNMGhoU2xoRFVEZGxZVmhUU0RRelN6Uk9MMWNLWW5kclJUWkVjRVp0Y1U5T2VGQnhWVWxwYW1OUmNsSTFXRzFWYUhoaFVtNHZVbGcxUTI1NFRXeEtTV1prY21aeVpuUnhWMjFIWVZGNk1YWldhVWhJWndwWU5sTjVkSEZQTjBrd2RGcG1ZemRLVGtWclZVUXdObVJTTTJkTmJrWmpiRFUxVTNWUWMycGxWakkyTUZwQlVYb3lkR1JDTWxkd2F5OWlUR1EyYldkUENsaERRbXhRVWxKb2EyMHJhRk5aWlRSVVNYbGhZVWQwUlVSVlZUQnBkRkV3VlZnNVZrVnhaM1kyYWxaeVlsaElla2hVTVZSU2QxRjZPRlZCYmtoV1UxY0tlV3BaWTJSeVpqUnNhVkJIZFVSSVdsUTVaa1JvVlVwaEwyaHNUbEZuWkZWeVExcFpVMmxGU1ZONFVVOVVWVlZwUjBocmVHSjRVSE15VVVodVZ6ZFNWUXBQVEdSVlZVNHdVVnBwVG5sb2FFSnpWMHN5Vm1sdk1uSmhWRVkwZG5GdFRXd3pNRTlIVEhocVRXbzVaMFV2VjA5U1ZXMHlUSGRVUVVGR1pXWllUSFIyQ2xjNU1rNVdNRzFhTUdWNGRVOW9ibUV2WW1kWVNXMUNPR1JZZG5RNWVtcDRUV0U0YUVWV1VYZHFaemxIVW14NVUzRlNWR3BQV2xGdGVraElUbU5hUzNnS1dteDNjbGhwV1ZGS1NFWkRSbEpOZFd0a1dqUXZiVTltY2pCSVRVWXlMMlF2UlU5VldYaHdUVXRoYzAxRWN6aHBjWGwyWjNsYUsxaHNORlppZWtKV2NBcEtlbWxrT0dONU5rMUNOMDlGZVdkNmRrdDZZMVZUWkhOUFQzQjVlREppYW14QlkzWXplRE5pTXpSWU1IUnJkMkpEY1hsd1UxcFNZVlZYT0RkeFlXZE9DbmhwU2l0c2RqY3pWREJCWkRKUVpGZFFVa2RDYXk5clRWWXpaSFI0V1dnelFrc3daVk5CVkM5UlV6ZFZWalJpWjNOcFRYSnZja2xGTjFOalEwRjNSVUVLUVZGTFEwRm5SVUY0ZDI5TldIZGtXVzk1WVhWR1NFMUtlUzkyZUV4NlpHUlVhMDVKVEVWek9FMUdaVTloTlVOS056Z3lORGxYWVUxUlRsSkVMMHROZEFwaFNXWkNTSGxuTURrNVkwMVRXazlZYWxGc2JrNTRVV1pHT0ZWWE1rRllLMnhoZGs1UVNGazFlazlTV1RNM05YRnFUMnR2VTFGeUwzQlVRVU5HTkdOdENpOW9LemhGT0VoNllqWmplbVJEV2tvMVVFa3ZiVmhoWlhSMlowVlpOV1pUTjJoNmNqUkljRzFoY1d4dmVWbFVNelJvU1RKUGQyeHNNVkJLUm1WblEwa0tSRGxLYzJabk5YSlNiWEZWZHpCMlRrVlhha2RSUjNsa1JDc3dWMGhWZEZWamVFWnlkeTl4ZDBzMVlreGhhV3BNTlU0clIxZ3pZMnN3ZG5KaVFtaGtjZ3BWWjJ0ek1WRnpja3R2T0ZKU1dIcFlUMVZGT0ZWRVFtcFBTblY2YjNselNqQkNVeTltVUhWaVFWSjZiVWsxVjNwd2FqTlJNSHBIVWpZMFVYWk9jbEZqQ2pKUlRsUjJOR054VVRKQmJDOTJUWFJTT1dOell6TmlhemRPTjFKdldGZHNTMmwzZG5aM1kzWlJhVU5KV0dRMVZTdDRSRU5PYUVnMFIxQkdUa1ozVmtRS1RIbFNkVmxpU1ZneFVrNUxlRlJsUkROMVRIcDNkVkZ1UzNVdlprSmtjR281ZUdrMU1FSTNhMVl3ZWpGNGNGWXZNa0ZQVEZOSlpqZ3pWVzlyUlhwYVZncDBVSEYzYjNFeVJsRnZTMlFyUzBGRlJuSnJVWEpwTjA5TU0wbG1SazFCVVVaa1ZFRmhhemd5TTJaeVExVktWemx3ZVZCd2FsWjFXazlTTTIxd1YyaFdDbVI0VUhSSGNsbDNMM0ZYYTJ4SFJXeFVjRWcxUmtrdlRrcEtSR05PUlhodVMyVmxNbkZGZUV3dmNYcEVSM3B3UTNGaGFua3hWMGRuUVZCalJtdEdiM0lLVkdkRVF6VlJOM3A1UW1jNFRIUk5PR0ZHYzJocGNtWXJla1YzVkRreFlTdFZRaXRXUzBNeGNUWnJZV2hJUjFSS1dWV
jVUMVpDU1Zsak1IQlBVSGhHYkFwTU5HcG9XbkpaVEdsclNrMUZiMjU0YVhsRGVtSmlNRGs0VHpFclNDdEtNVUl3UVhrellsWkhVRlZoYUdWRlRqVk5lbXREWjJkRlFrRlBhWGx4Vlc4eENtTktSMXBVS3pKTVNYZ3piVkVyVkVKemFUSm9NV3hhYUhWcldtRXpTbXRuVTFSaVprcEpLMmxLUkVkVFRYUmtPVXQyVlc1VkswcEpSRXRrUmtSV1JUa0tjbXBNZVZRNVJuTjRRVk42ZW5Od1JFaFVlREl6WVU5UWJsazBaWGxZY1RCTk1tVnFhRnAyUmk4MlUwZHlOUzhyYWtWdGRsaEJiRzVCVEd4cVRYQkRSUXBUWVZKdGIxaHVTMEZ0VW01bFdHeE9VbU0zWVhKS1YxSmFRa2RTZFhwTFVUaFllV3Q0TlhRNGJ5OVFSamgzUlZKMGFXa3lTQzg1UlM5WVNHdEhUQ3RTQ2tSdVdrVlhNV2h5YVhkaVRHWldkRVpSVVU1SWJqZDFja1FyYlcxWFVtbExWRmhGTHpOTlZESkNZMlJXVFRWSUsxTlNMemhHTkhkVFpFMXJSVlZzWW1vS2NsWllhR1ZXZG1GWFZGQk5Ra0ZFY1VsUmQzVlVWVU54ZDA1U1NtMVRPRXd3UVZaVVVXRktXak5sUjFCcGRFNHJiM0F6U3k5SmFWVnJNMUZPZUROdlpRcFZNVE01WmxoQ09XbE5PWFI1TTAxRFoyZEZRa0ZQU2pseVEybGFMMFZtYjBaUFYweHJTRUZrUVVNNFZubGtWRGxhZEVwTWExUXpkVVJzVEZsWmJrdERDazV3UzNoaFEzTjFhR2RNTVd0a1NqZ3plRmN4YWpCa1kwMUtXamd4VlUwNVYyOUNZamRNUTJoQ1NGaGxXRTlPY21oclVrdzJjazFhTjBKRk9HNHJaVGtLVVRsSmRYcGpOWEUwUVdkc2NFZElZVVYwVmtOWlRqSlFkSFZDVG1abFkxbG9ZWGxRVW1RMWRsQnljalIxVEZGa2FuUjJXblkxVWl0Q2J6ZFpPVEZOZEFwNWMxUjBSa3RvUmxvd2RXdGtXVUZXTVdORU5IY3pUM1Z3TVZOQmRDdG9NMEl4Ym5wSVoycEhiREo1ZVNzclFWSXdWMjlDWVZCRVJtZzBVekZLVWpaMENsTlRSR012Ukd0M1VqRnBjVmxyYlZGcmMxTllZVzVGVW5WSlpIWnFSRU5VVDFadWJ6Uk1kUzlVUzBOT2RGUnhURUozZG5BelptUk5PR2RGSzNCR1lrOEtSa3Q2UkRCd1pXUlRObXBpV2psbFdsUmtlSGRHV1VnNFprbDVhMlZVVlhreWRIQTNhRlZFWW10dU1FTm5aMFZDUVV4V1EyaHNRa0k1Umk5WFJuTmhid3AyVlVGTVNVSlFVRTVsV1VwNlNsbGFTbWx4Y20xSE4waEZXVVJ0Y2s5dlpHMVRabk5DUTB0TVdUWjZNMW8wV1RaTVdFVlhablJ6Y25WelowRnlSMEZ5Q2pSNmVuVmxNRkpsY0hsV01GTm5NbWRVTVVKdVpIWmFXRGRFY25ONU5sTmtlRkppTlhVeFFtSjZhR1ZRZFRWbGRsaHZZMHRYUm1KMVZsSmlNSGQzTDBZS1dtSjFhMUp4UmxGeFprbEtjMHR2U2xOUVVFOXRkekF5YW05V1ZFeG9SbGROTjJkMVJqWTFZbWcxVDBzeWNqTlViR05FYW5SemRIbFRUblZxVURaNFR3cHBWWFJRY3pKUmJsQkJPRGNyV21OTlJubGpaalYzVFZGeFIxcDRTWEF4YVV4MmIyOTVhbFZOTmxKVk4wZ3pVME0wSzIwMWNtaElNRFZCTm1wNFp6ZG5DblJPT1N0TGFHY3dVMGN6WlcweGNXMUtMMHN4UzJSbVZqQmpTakJMWjI4elEyNXJjbWRUTldneFNGbHdjV3BGVWpKNFNuQnpWbkpWZVVsbVRXSTJkbmtLWmxkR2JUSjZWVU5uWjBWQ1FVNUxTRXR3TTJFNVMzZExVMFpLVDJSdldXZEVkVzB4WjFKVFoweDRSazhyVFZSMVNWSmFWVEpNTTBReEswcHVWVmxNUXdwRVowNHlURWxRVWxkeVVXUlFaaXRCTjBKRlYwaEZORk5ITm5WM2VITlJkMEkyZEdka1JYUlplVlZrTW05alV6RldZamt6T0U1T1EwWkhZWE0xZEZReUNtUjVVbUpwVURoRU5pOXRZbG81Y2t4SFduaEtjVGQ0UTBOUk9ISmFha054YVRBM1ZXNTVkV1JTWjNWS2VtaEVSRlJIYXk5MVJGaDZTRTFaVERobFYzY0tSR0ZKT0ZSd1VYVjBOMEpYUXpabVpVMTBMMVJtYURsNFpFd3phSGhKY3pOMll6RnRWMk5aWjBOa05WWlVVM2g0UkRsVlJscEhiWFJtV1c4d2JqWmhkd292T0VOc1RVSk9aR1JYSzFSd00yUlBiRFEwWXpJNWVsUklURFE0VURWdFJUWnFPV0pQV21kbVdIWTVkRGxqZEVsR1RFOW1aa3hpVEdaSFFsTTNZeTlOQ21kSFYwY3ZTRVpzY1ZGQk9Hd3lSbkZTYjA5ak5YRllVMnBoWjBWd1VYQjVhbmxyUTJkblJVRldObTVvWm1aRGMwcE5URmxpYUdKbGJIVk9NMGg1TjJrS1lYRjNkR0p3TVZwd1ExZzRXV3R2TUVrM1ZXRllVRnBaU1Vkc1YxRllWRFZsUWpCdWJIQlRXVlZMVVcxak9HOXBVa1ZvTjNoRWIwNXlSRnBCY21WeGVBcFJaREJVVG1sdVdrZFZaR3Q1VXpKTFdUUk1ibGsyWTB0aldYUmFUamh0UkM5cFFtTTJOV2RtU0dvNU1saFJUV0pSZUZrMWRUQXdWMFZKT1VOUmVGUXpDalpoVlVwS2REQm1abGRqU2pscloydHlVR0pFVTJ4UUwxaDJVbEJ0Y1dKRVVEZEVObkV6TUhBdk5VdzNZbXBrWnpKT1ZIWTRTMWxzTWpsV1p6SnRTSGtLT0VsemEybEVVRFZKUjJwUE5Fd3dOMmhVYTNSNE1sSTVTV0pQYUdNellrbDZiVW9yUTA0MFVGVlVhMFY2WW5kbVNVTjRTVkJOTlRsMVYyTlNTbk5ZYkFwalMwMXJWVXRWV1RGR1MzWTJSQ3QzTURsc1UwWlBRMHRWSzNRMmFsSmxWV0ptVnk5eGF6VkNiMVpvTW05RFNXeHhZMGxYYWtaRlJYQlBOMlZTVVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"snippet":{"before":"","matching":"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBemQvMUxSNEo0R2pzcmZ0SUtScjVWNWUrdzE1MSsvdEVSSlo1TXRYY2ZKNXUwcStDCno4TzBpOWREOWgwcmUxWTFrU2hKYUUwSGxBTjk2MDE5aVhjWmZyaDMzaEllM0hhSlhDUD
dlYVhTSDQzSzROL1cKYndrRTZEcEZtcU9OeFBxVUlpamNRclI1WG1VaHhhUm4vUlg1Q254TWxKSWZkcmZyZnRxV21HYVF6MXZWaUhIZwpYNlN5dHFPN0kwdFpmYzdKTkVrVUQwNmRSM2dNbkZjbDU1U3VQc2plVjI2MFpBUXoydGRCMldway9iTGQ2bWdPClhDQmxQUlJoa20raFNZZTRUSXlhYUd0RURVVTBpdFEwVVg5VkVxZ3Y2alZyYlhIekhUMVRSd1F6OFVBbkhWU1cKeWpZY2RyZjRsaVBHdURIWlQ5ZkRoVUphL2hsTlFnZFVyQ1pZU2lFSVN4UU9UVVVpR0hreGJ4UHMyUUhuVzdSVQpPTGRVVU4wUVppTnloaEJzV0syVmlvMnJhVEY0dnFtTWwzME9HTHhqTWo5Z0UvV09SVW0yTHdUQUFGZWZYTHR2Clc5Mk5WMG1aMGV4dU9obmEvYmdYSW1COGRYdnQ5emp4TWE4aEVWUXdqZzlHUmx5U3FSVGpPWlFtekhITmNaS3gKWmx3clhpWVFKSEZDRlJNdWtkWjQvbU9mcjBITUYyL2QvRU9VWXhwTUthc01EczhpcXl2Z3laK1hsNFZiekJWcApKemlkOGN5Nk1CN09FeWd6dkt6Y1VTZHNPT3B5eDJiamxBY3YzeDNiMzRYMHRrd2JDcXlwU1pSYVVXODdxYWdOCnhpSitsdjczVDBBZDJQZFdQUkdCay9rTVYzZHR4WWgzQkswZVNBVC9RUzdVVjRiZ3NpTXJvcklFN1NjQ0F3RUEKQVFLQ0FnRUF4d29NWHdkWW95YXVGSE1KeS92eEx6ZGRUa05JTEVzOE1GZU9hNUNKNzgyNDlXYU1RTlJEL0tNdAphSWZCSHlnMDk5Y01TWk9YalFsbk54UWZGOFVXMkFYK2xhdk5QSFk1ek9SWTM3NXFqT2tvU1FyL3BUQUNGNGNtCi9oKzhFOEh6YjZjemRDWko1UEkvbVhhZXR2Z0VZNWZTN2h6cjRIcG1hcWxveVlUMzRoSTJPd2xsMVBKRmVnQ0kKRDlKc2ZnNXJSbXFVdzB2TkVXakdRR3lkRCswV0hVdFVjeEZydy9xd0s1YkxhaWpMNU4rR1gzY2swdnJiQmhkcgpVZ2tzMVFzcktvOFJSWHpYT1VFOFVEQmpPSnV6b3lzSjBCUy9mUHViQVJ6bUk1V3pwajNRMHpHUjY0UXZOclFjCjJRTlR2NGNxUTJBbC92TXRSOWNzYzNiazdON1JvWFdsS2l3dnZ3Y3ZRaUNJWGQ1VSt4RENOaEg0R1BGTkZ3VkQKTHlSdVliSVgxUk5LeFRlRDN1THp3dVFuS3UvZkJkcGo5eGk1MEI3a1YwejF4cFYvMkFPTFNJZjgzVW9rRXpaVgp0UHF3b3EyRlFvS2QrS0FFRnJrUXJpN09MM0lmRk1BUUZkVEFhazgyM2ZyQ1VKVzlweVBwalZ1Wk9SM21wV2hWCmR4UHRHcll3L3FXa2xHRWxUcEg1RkkvTkpKRGNORXhuS2VlMnFFeEwvcXpER3pwQ3FhankxV0dnQVBjRmtGb3IKVGdEQzVRN3p5Qmc4THRNOGFGc2hpcmYrekV3VDkxYStVQitWS0MxcTZrYWhIR1RKWVV5T1ZCSVljMHBPUHhGbApMNGpoWnJZTGlrSk1Fb254aXlDemJiMDk4TzErSCtKMUIwQXkzYlZHUFVhaGVFTjVNemtDZ2dFQkFPaXlxVW8xCmNKR1pUKzJMSXgzbVErVEJzaTJoMWxaaHVrWmEzSmtnU1RiZkpJK2lKREdTTXRkOUt2VW5VK0pJREtkRkRWRTkKcmpMeVQ5RnN4QVN6enNwREhUeDIzYU9Qblk0ZXlYcTBNMmVqaFp2Ri82U0dyNS8rakVtdlhBbG5BTGxqTXBDRQpTYVJtb1huS0FtUm5lWGxOUmM3YXJKV1JaQkdSdXpLUThYeWt4NXQ4by9QRjh3RVJ0aWkySC85RS9YSGtHTCtSCkRuWkVXMWhyaXdiTGZWdEZRUU5Ibjd1ckQrbW1XUmlLVFhFLzNNVDJCY2RWTTVIK1NSLzhGNHdTZE1rRVVsYmoKclZYaGVWdmFXVFBNQkFEcUlRd3VUVUNxd05SSm1TOEwwQVZUUWFKWjNlR1BpdE4rb3AzSy9JaVVrM1FOeDNvZQpVMTM5ZlhCOWlNOXR5M01DZ2dFQkFPSjlyQ2laL0Vmb0ZPV0xrSEFkQUM4VnlkVDladEpMa1QzdURsTFlZbktDCk5wS3hhQ3N1aGdMMWtkSjgzeFcxajBkY01KWjgxVU05V29CYjdMQ2hCSFhlWE9OcmhrUkw2ck1aN0JFOG4rZTkKUTlJdXpjNXE0QWdscEdIYUV0VkNZTjJQdHVCTmZlY1loYXlQUmQ1dlBycjR1TFFkanR2WnY1UitCbzdZOTFNdAp5c1R0RktoRlowdWtkWUFWMWNENHczT3VwMVNBdCtoM0IxbnpIZ2pHbDJ5eSsrQVIwV29CYVBERmg0UzFKUjZ0ClNTRGMvRGt3UjFpcVlrbVFrc1NYYW5FUnVJZHZqRENUT1ZubzRMdS9US0NOdFRxTEJ3dnAzZmRNOGdFK3BGYk8KRkt6RDBwZWRTNmpiWjllWlRkeHdGWUg4Zkl5a2VUVXkydHA3aFVEYmtuMENnZ0VCQUxWQ2hsQkI5Ri9XRnNhbwp2VUFMSUJQUE5lWUp6SllaSmlxcm1HN0hFWURtck9vZG1TZnNCQ0tMWTZ6M1o0WTZMWEVXZnRzcnVzZ0FyR0FyCjR6enVlMFJlcHlWMFNnMmdUMUJuZHZaWDdEcnN5NlNkeFJiNXUxQmJ6aGVQdTVldlhvY0tXRmJ1VlJiMHd3L0YKWmJ1a1JxRlFxZklKc0tvSlNQUE9tdzAyam9WVExoRldNN2d1RjY1Ymg1T0sycjNUbGNEanRzdHlTTnVqUDZ4TwppVXRQczJRblBBODcrWmNNRnljZjV3TVFxR1p4SXAxaUx2b295alVNNlJVN0gzU0M0K201cmhIMDVBNmp4ZzdnCnROOStLaGcwU0czZW0xcW1KL0sxS2RmVjBjSjBLZ28zQ25rcmdTNWgxSFlwcWpFUjJ4SnBzVnJVeUlmTWI2dnkKZldGbTJ6VUNnZ0VCQU5LSEtwM2E5S3dLU0ZKT2RvWWdEdW0xZ1JTZ0x4Rk8rTVR1SVJaVTJMM0QxK0puVVlMQwpEZ04yTElQUldyUWRQZitBN0JFV0hFNFNHNnV3eHNRd0I2dGdkRXRZeVVkMm9jUzFWYjkzOE5OQ0ZHYXM1dFQyCmR5UmJpUDhENi9tYlo5ckxHWnhKcTd4Q0NROHJaakNxaTA3VW55dWRSZ3VKemhERFRHay91RFh6SE1ZTDhlV3cKRGFJOFRwUXV0N0JXQzZmZU10L1RmaDl4ZEwzaHhJczN2YzFtV2NZZ0NkNVZUU3h4RDlVRlpHbXRmWW8wbjZhdwovOENsTUJOZGRXK1RwM2RPbDQ0YzI5elRITDQ4UDVtRTZqOWJPWmdmWHY5dDljdElGTE9mZ
kxiTGZHQlM3Yy9NCmdHV0cvSEZscVFBOGwyRnFSb09jNXFYU2phZ0VwUXB5anlrQ2dnRUFWNm5oZmZDc0pNTFliaGJlbHVOM0h5N2kKYXF3dGJwMVpwQ1g4WWtvMEk3VWFYUFpZSUdsV1FYVDVlQjBubHBTWVVLUW1jOG9pUkVoN3hEb05yRFpBcmVxeApRZDBUTmluWkdVZGt5UzJLWTRMblk2Y0tjWXRaTjhtRC9pQmM2NWdmSGo5MlhRTWJReFk1dTAwV0VJOUNReFQzCjZhVUpKdDBmZldjSjlrZ2tyUGJEU2xQL1h2UlBtcWJEUDdENnEzMHAvNUw3YmpkZzJOVHY4S1lsMjlWZzJtSHkKOElza2lEUDVJR2pPNEwwN2hUa3R4MlI5SWJPaGMzYkl6bUorQ040UFVUa0V6YndmSUN4SVBNNTl1V2NSSnNYbApjS01rVUtVWTFGS3Y2RCt3MDlsU0ZPQ0tVK3Q2alJlVWJmVy9xazVCb1ZoMm9DSWxxY0lXakZFRXBPN2VSUT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n","after":""},"structural_id":"f86197cd3407d2a2a83d2b0df6e30148805d1f08","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","rule_text_id":"np.pem.2","rule_name":"Base64-PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"a5600f242eceb1073c316b8aef79623663797c3c","committer_name":"Shubham-Patel07","committer_email":"shubham.patel.btech2020@sitpune.edu.in","committer_timestamp":"1719045275 +0530","author_name":"Shubham-Patel07","author_email":"shubham.patel.btech2020@sitpune.edu.in","author_timestamp":"1719045275 +0530","message":"Fix: Deleted unnecessary files and fixed issue\n"},"blob_path":"private.pem"}}],"blob_metadata":{"id":"2859736cb09feae89bd924156c045e22719d0c8e","num_bytes":4332,"mime_essence":"application/x-x509-ca-cert","charset":null},"blob_id":"2859736cb09feae89bd924156c045e22719d0c8e","location":{"offset_span":{"start":0,"end":4332},"source_span":{"start":{"line":1,"column":1},"end":{"line":1,"column":4332}}},"groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmVtUXZNVXhTTkVvMFIycHpjbVowU1V0U2NqVldOV1VyZHpFMU1Tc3ZkRVZTU2xvMVRYUllZMlpLTlhVd2NTdERDbm80VHpCcE9XUkVPV2d3Y21VeFdURnJVMmhLWVVVd1NHeEJUamsyTURFNWFWaGpXbVp5YURNemFFbGxNMGhoU2xoRFVEZGxZVmhUU0RRelN6Uk9MMWNLWW5kclJUWkVjRVp0Y1U5T2VGQnhWVWxwYW1OUmNsSTFXRzFWYUhoaFVtNHZVbGcxUTI1NFRXeEtTV1prY21aeVpuUnhWMjFIWVZGNk1YWldhVWhJWndwWU5sTjVkSEZQTjBrd2RGcG1ZemRLVGtWclZVUXdObVJTTTJkTmJrWmpiRFUxVTNWUWMycGxWakkyTUZwQlVYb3lkR1JDTWxkd2F5OWlUR1EyYldkUENsaERRbXhRVWxKb2EyMHJhRk5aWlRSVVNYbGhZVWQwUlVSVlZUQnBkRkV3VlZnNVZrVnhaM1kyYWxaeVlsaElla2hVTVZSU2QxRjZPRlZCYmtoV1UxY0tlV3BaWTJSeVpqUnNhVkJIZFVSSVdsUTVaa1JvVlVwaEwyaHNUbEZuWkZWeVExcFpVMmxGU1ZONFVVOVVWVlZwUjBocmVHSjRVSE15VVVodVZ6ZFNWUXBQVEdSVlZVNHdVVnBwVG5sb2FFSnpWMHN5Vm1sdk1uSmhWRVkwZG5GdFRXd3pNRTlIVEhocVRXbzVaMFV2VjA5U1ZXMHlUSGRVUVVGR1pXWllUSFIyQ2xjNU1rNVdNRzFhTUdWNGRVOW9ibUV2WW1kWVNXMUNPR1JZZG5RNWVtcDRUV0U0YUVWV1VYZHFaemxIVW14NVUzRlNWR3BQV2xGdGVraElUbU5hUzNnS1dteDNjbGhwV1ZGS1NFWkRSbEpOZFd0a1dqUXZiVTltY2pCSVRVWXlMMlF2UlU5VldYaHdUVXRoYzAxRWN6aHBjWGwyWjNsYUsxaHNORlppZWtKV2NBcEtlbWxrT0dONU5rMUNOMDlGZVdkNmRrdDZZMVZUWkhOUFQzQjVlREppYW14QlkzWXplRE5pTXpSWU1IUnJkMkpEY1hsd1UxcFNZVlZYT0RkeFlXZE9DbmhwU2l0c2RqY3pWREJCWkRKUVpGZFFVa2RDYXk5clRWWXpaSFI0V1dnelFrc3daVk5CVkM5UlV6ZFZWalJpWjNOcFRYSnZja2xGTjFOalEwRjNSVUVLUVZGTFEwRm5SVUY0ZDI5TldIZGtXVzk1WVhWR1NFMUtlUzkyZUV4NlpHUlVhMDVKVEVWek9FMUdaVTloTlVOS056Z3lORGxYWVUxUlRsSkVMMHROZEFwaFNXWkNTSGxuTURrNVkwMVRXazlZYWxGc2JrNTRVV1pHT0ZWWE1rRllLMnhoZGs1UVNGazFlazlTV1RNM05YRnFUMnR2VTFGeUwzQlVRVU5HTkdOdENpOW9LemhGT0VoNllqWmplbVJEV2tvMVVFa3ZiVmhoWlhSMlowVlpOV1pUTjJoNmNqUkljRzFoY1d4dmVWbFVNelJvU1RKUGQyeHNNVkJLUm1WblEwa0tSRGxLYzJabk5YSlNiWEZWZHpCMlRrVlhha2RSUjNsa1JDc3dWMGhWZEZWamVFWnlkeTl4ZDBzMVlreGhhV3BNTlU0clIxZ3pZMnN3ZG5KaVFtaGtjZ3BWWjJ0ek1WRnpja3R2T0ZKU1dIcFlUMVZGT0ZWRVFtcFBTblY2YjNselNqQkNVeTltVUhWaVFWSjZiVWsxVjN
wd2FqTlJNSHBIVWpZMFVYWk9jbEZqQ2pKUlRsUjJOR054VVRKQmJDOTJUWFJTT1dOell6TmlhemRPTjFKdldGZHNTMmwzZG5aM1kzWlJhVU5KV0dRMVZTdDRSRU5PYUVnMFIxQkdUa1ozVmtRS1RIbFNkVmxpU1ZneFVrNUxlRlJsUkROMVRIcDNkVkZ1UzNVdlprSmtjR281ZUdrMU1FSTNhMVl3ZWpGNGNGWXZNa0ZQVEZOSlpqZ3pWVzlyUlhwYVZncDBVSEYzYjNFeVJsRnZTMlFyUzBGRlJuSnJVWEpwTjA5TU0wbG1SazFCVVVaa1ZFRmhhemd5TTJaeVExVktWemx3ZVZCd2FsWjFXazlTTTIxd1YyaFdDbVI0VUhSSGNsbDNMM0ZYYTJ4SFJXeFVjRWcxUmtrdlRrcEtSR05PUlhodVMyVmxNbkZGZUV3dmNYcEVSM3B3UTNGaGFua3hWMGRuUVZCalJtdEdiM0lLVkdkRVF6VlJOM3A1UW1jNFRIUk5PR0ZHYzJocGNtWXJla1YzVkRreFlTdFZRaXRXUzBNeGNUWnJZV2hJUjFSS1dWVjVUMVpDU1Zsak1IQlBVSGhHYkFwTU5HcG9XbkpaVEdsclNrMUZiMjU0YVhsRGVtSmlNRGs0VHpFclNDdEtNVUl3UVhrellsWkhVRlZoYUdWRlRqVk5lbXREWjJkRlFrRlBhWGx4Vlc4eENtTktSMXBVS3pKTVNYZ3piVkVyVkVKemFUSm9NV3hhYUhWcldtRXpTbXRuVTFSaVprcEpLMmxLUkVkVFRYUmtPVXQyVlc1VkswcEpSRXRrUmtSV1JUa0tjbXBNZVZRNVJuTjRRVk42ZW5Od1JFaFVlREl6WVU5UWJsazBaWGxZY1RCTk1tVnFhRnAyUmk4MlUwZHlOUzhyYWtWdGRsaEJiRzVCVEd4cVRYQkRSUXBUWVZKdGIxaHVTMEZ0VW01bFdHeE9VbU0zWVhKS1YxSmFRa2RTZFhwTFVUaFllV3Q0TlhRNGJ5OVFSamgzUlZKMGFXa3lTQzg1UlM5WVNHdEhUQ3RTQ2tSdVdrVlhNV2h5YVhkaVRHWldkRVpSVVU1SWJqZDFja1FyYlcxWFVtbExWRmhGTHpOTlZESkNZMlJXVFRWSUsxTlNMemhHTkhkVFpFMXJSVlZzWW1vS2NsWllhR1ZXZG1GWFZGQk5Ra0ZFY1VsUmQzVlVWVU54ZDA1U1NtMVRPRXd3UVZaVVVXRktXak5sUjFCcGRFNHJiM0F6U3k5SmFWVnJNMUZPZUROdlpRcFZNVE01WmxoQ09XbE5PWFI1TTAxRFoyZEZRa0ZQU2pseVEybGFMMFZtYjBaUFYweHJTRUZrUVVNNFZubGtWRGxhZEVwTWExUXpkVVJzVEZsWmJrdERDazV3UzNoaFEzTjFhR2RNTVd0a1NqZ3plRmN4YWpCa1kwMUtXamd4VlUwNVYyOUNZamRNUTJoQ1NGaGxXRTlPY21oclVrdzJjazFhTjBKRk9HNHJaVGtLVVRsSmRYcGpOWEUwUVdkc2NFZElZVVYwVmtOWlRqSlFkSFZDVG1abFkxbG9ZWGxRVW1RMWRsQnljalIxVEZGa2FuUjJXblkxVWl0Q2J6ZFpPVEZOZEFwNWMxUjBSa3RvUmxvd2RXdGtXVUZXTVdORU5IY3pUM1Z3TVZOQmRDdG9NMEl4Ym5wSVoycEhiREo1ZVNzclFWSXdWMjlDWVZCRVJtZzBVekZLVWpaMENsTlRSR012Ukd0M1VqRnBjVmxyYlZGcmMxTllZVzVGVW5WSlpIWnFSRU5VVDFadWJ6Uk1kUzlVUzBOT2RGUnhURUozZG5BelptUk5PR2RGSzNCR1lrOEtSa3Q2UkRCd1pXUlRObXBpV2psbFdsUmtlSGRHV1VnNFprbDVhMlZVVlhreWRIQTNhRlZFWW10dU1FTm5aMFZDUVV4V1EyaHNRa0k1Umk5WFJuTmhid3AyVlVGTVNVSlFVRTVsV1VwNlNsbGFTbWx4Y20xSE4waEZXVVJ0Y2s5dlpHMVRabk5DUTB0TVdUWjZNMW8wV1RaTVdFVlhablJ6Y25WelowRnlSMEZ5Q2pSNmVuVmxNRkpsY0hsV01GTm5NbWRVTVVKdVpIWmFXRGRFY25ONU5sTmtlRkppTlhVeFFtSjZhR1ZRZFRWbGRsaHZZMHRYUm1KMVZsSmlNSGQzTDBZS1dtSjFhMUp4UmxGeFprbEtjMHR2U2xOUVVFOXRkekF5YW05V1ZFeG9SbGROTjJkMVJqWTFZbWcxVDBzeWNqTlViR05FYW5SemRIbFRUblZxVURaNFR3cHBWWFJRY3pKUmJsQkJPRGNyV21OTlJubGpaalYzVFZGeFIxcDRTWEF4YVV4MmIyOTVhbFZOTmxKVk4wZ3pVME0wSzIwMWNtaElNRFZCTm1wNFp6ZG5DblJPT1N0TGFHY3dVMGN6WlcweGNXMUtMMHN4UzJSbVZqQmpTakJMWjI4elEyNXJjbWRUTldneFNGbHdjV3BGVWpKNFNuQnpWbkpWZVVsbVRXSTJkbmtLWmxkR2JUSjZWVU5uWjBWQ1FVNUxTRXR3TTJFNVMzZExVMFpLVDJSdldXZEVkVzB4WjFKVFoweDRSazhyVFZSMVNWSmFWVEpNTTBReEswcHVWVmxNUXdwRVowNHlURWxRVWxkeVVXUlFaaXRCTjBKRlYwaEZORk5ITm5WM2VITlJkMEkyZEdka1JYUlplVlZrTW05alV6RldZamt6T0U1T1EwWkhZWE0xZEZReUNtUjVVbUpwVURoRU5pOXRZbG81Y2t4SFduaEtjVGQ0UTBOUk9ISmFha054YVRBM1ZXNTVkV1JTWjNWS2VtaEVSRlJIYXk5MVJGaDZTRTFaVERobFYzY0tSR0ZKT0ZSd1VYVjBOMEpYUXpabVpVMTBMMVJtYURsNFpFd3phSGhKY3pOMll6RnRWMk5aWjBOa05WWlVVM2g0UkRsVlJscEhiWFJtV1c4d2JqWmhkd292T0VOc1RVSk9aR1JYSzFSd00yUlBiRFEwWXpJNWVsUklURFE0VURWdFJUWnFPV0pQV21kbVdIWTVkRGxqZEVsR1RFOW1aa3hpVEdaSFFsTTNZeTlOQ21kSFYwY3ZTRVpzY1ZGQk9Hd3lSbkZTYjA5ak5YRllVMnBoWjBWd1VYQjVhbmxyUTJkblJVRldObTVvWm1aRGMwcE5URmxpYUdKbGJIVk9NMGg1TjJrS1lYRjNkR0p3TVZwd1ExZzRXV3R2TUVrM1ZXRllVRnBaU1Vkc1YxRllWRFZsUWpCdWJIQlRXVlZMVVcxak9HOXBVa1ZvTjNoRWIwNXlSRnBCY21WeGVBcFJaREJVVG1sdVdrZFZaR3Q1VXpKTFdUUk1ibGsyWTB0aldYUmFUamh0UkM5cFFtTTJOV2RtU0dvNU1saFJUV0pSZUZrMWRUQXdWMFZKT1VOUmVGUXpDalpoVlVwS2REQm1abGRqU2pscloydHlVR0pFVTJ4UUwxaDJVbEJ0Y1dKRV
VEZEVObkV6TUhBdk5VdzNZbXBrWnpKT1ZIWTRTMWxzTWpsV1p6SnRTSGtLT0VsemEybEVVRFZKUjJwUE5Fd3dOMmhVYTNSNE1sSTVTV0pQYUdNellrbDZiVW9yUTA0MFVGVlVhMFY2WW5kbVNVTjRTVkJOTlRsMVYyTlNTbk5ZYkFwalMwMXJWVXRWV1RGR1MzWTJSQ3QzTURsc1UwWlBRMHRWSzNRMmFsSmxWV0ptVnk5eGF6VkNiMVpvTW05RFNXeHhZMGxYYWtaRlJYQlBOMlZTVVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"snippet":{"before":"","matching":"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBemQvMUxSNEo0R2pzcmZ0SUtScjVWNWUrdzE1MSsvdEVSSlo1TXRYY2ZKNXUwcStDCno4TzBpOWREOWgwcmUxWTFrU2hKYUUwSGxBTjk2MDE5aVhjWmZyaDMzaEllM0hhSlhDUDdlYVhTSDQzSzROL1cKYndrRTZEcEZtcU9OeFBxVUlpamNRclI1WG1VaHhhUm4vUlg1Q254TWxKSWZkcmZyZnRxV21HYVF6MXZWaUhIZwpYNlN5dHFPN0kwdFpmYzdKTkVrVUQwNmRSM2dNbkZjbDU1U3VQc2plVjI2MFpBUXoydGRCMldway9iTGQ2bWdPClhDQmxQUlJoa20raFNZZTRUSXlhYUd0RURVVTBpdFEwVVg5VkVxZ3Y2alZyYlhIekhUMVRSd1F6OFVBbkhWU1cKeWpZY2RyZjRsaVBHdURIWlQ5ZkRoVUphL2hsTlFnZFVyQ1pZU2lFSVN4UU9UVVVpR0hreGJ4UHMyUUhuVzdSVQpPTGRVVU4wUVppTnloaEJzV0syVmlvMnJhVEY0dnFtTWwzME9HTHhqTWo5Z0UvV09SVW0yTHdUQUFGZWZYTHR2Clc5Mk5WMG1aMGV4dU9obmEvYmdYSW1COGRYdnQ5emp4TWE4aEVWUXdqZzlHUmx5U3FSVGpPWlFtekhITmNaS3gKWmx3clhpWVFKSEZDRlJNdWtkWjQvbU9mcjBITUYyL2QvRU9VWXhwTUthc01EczhpcXl2Z3laK1hsNFZiekJWcApKemlkOGN5Nk1CN09FeWd6dkt6Y1VTZHNPT3B5eDJiamxBY3YzeDNiMzRYMHRrd2JDcXlwU1pSYVVXODdxYWdOCnhpSitsdjczVDBBZDJQZFdQUkdCay9rTVYzZHR4WWgzQkswZVNBVC9RUzdVVjRiZ3NpTXJvcklFN1NjQ0F3RUEKQVFLQ0FnRUF4d29NWHdkWW95YXVGSE1KeS92eEx6ZGRUa05JTEVzOE1GZU9hNUNKNzgyNDlXYU1RTlJEL0tNdAphSWZCSHlnMDk5Y01TWk9YalFsbk54UWZGOFVXMkFYK2xhdk5QSFk1ek9SWTM3NXFqT2tvU1FyL3BUQUNGNGNtCi9oKzhFOEh6YjZjemRDWko1UEkvbVhhZXR2Z0VZNWZTN2h6cjRIcG1hcWxveVlUMzRoSTJPd2xsMVBKRmVnQ0kKRDlKc2ZnNXJSbXFVdzB2TkVXakdRR3lkRCswV0hVdFVjeEZydy9xd0s1YkxhaWpMNU4rR1gzY2swdnJiQmhkcgpVZ2tzMVFzcktvOFJSWHpYT1VFOFVEQmpPSnV6b3lzSjBCUy9mUHViQVJ6bUk1V3pwajNRMHpHUjY0UXZOclFjCjJRTlR2NGNxUTJBbC92TXRSOWNzYzNiazdON1JvWFdsS2l3dnZ3Y3ZRaUNJWGQ1VSt4RENOaEg0R1BGTkZ3VkQKTHlSdVliSVgxUk5LeFRlRDN1THp3dVFuS3UvZkJkcGo5eGk1MEI3a1YwejF4cFYvMkFPTFNJZjgzVW9rRXpaVgp0UHF3b3EyRlFvS2QrS0FFRnJrUXJpN09MM0lmRk1BUUZkVEFhazgyM2ZyQ1VKVzlweVBwalZ1Wk9SM21wV2hWCmR4UHRHcll3L3FXa2xHRWxUcEg1RkkvTkpKRGNORXhuS2VlMnFFeEwvcXpER3pwQ3FhankxV0dnQVBjRmtGb3IKVGdEQzVRN3p5Qmc4THRNOGFGc2hpcmYrekV3VDkxYStVQitWS0MxcTZrYWhIR1RKWVV5T1ZCSVljMHBPUHhGbApMNGpoWnJZTGlrSk1Fb254aXlDemJiMDk4TzErSCtKMUIwQXkzYlZHUFVhaGVFTjVNemtDZ2dFQkFPaXlxVW8xCmNKR1pUKzJMSXgzbVErVEJzaTJoMWxaaHVrWmEzSmtnU1RiZkpJK2lKREdTTXRkOUt2VW5VK0pJREtkRkRWRTkKcmpMeVQ5RnN4QVN6enNwREhUeDIzYU9Qblk0ZXlYcTBNMmVqaFp2Ri82U0dyNS8rakVtdlhBbG5BTGxqTXBDRQpTYVJtb1huS0FtUm5lWGxOUmM3YXJKV1JaQkdSdXpLUThYeWt4NXQ4by9QRjh3RVJ0aWkySC85RS9YSGtHTCtSCkRuWkVXMWhyaXdiTGZWdEZRUU5Ibjd1ckQrbW1XUmlLVFhFLzNNVDJCY2RWTTVIK1NSLzhGNHdTZE1rRVVsYmoKclZYaGVWdmFXVFBNQkFEcUlRd3VUVUNxd05SSm1TOEwwQVZUUWFKWjNlR1BpdE4rb3AzSy9JaVVrM1FOeDNvZQpVMTM5ZlhCOWlNOXR5M01DZ2dFQkFPSjlyQ2laL0Vmb0ZPV0xrSEFkQUM4VnlkVDladEpMa1QzdURsTFlZbktDCk5wS3hhQ3N1aGdMMWtkSjgzeFcxajBkY01KWjgxVU05V29CYjdMQ2hCSFhlWE9OcmhrUkw2ck1aN0JFOG4rZTkKUTlJdXpjNXE0QWdscEdIYUV0VkNZTjJQdHVCTmZlY1loYXlQUmQ1dlBycjR1TFFkanR2WnY1UitCbzdZOTFNdAp5c1R0RktoRlowdWtkWUFWMWNENHczT3VwMVNBdCtoM0IxbnpIZ2pHbDJ5eSsrQVIwV29CYVBERmg0UzFKUjZ0ClNTRGMvRGt3UjFpcVlrbVFrc1NYYW5FUnVJZHZqRENUT1ZubzRMdS9US0NOdFRxTEJ3dnAzZmRNOGdFK3BGYk8KRkt6RDBwZWRTNmpiWjllWlRkeHdGWUg4Zkl5a2VUVXkydHA3aFVEYmtuMENnZ0VCQUxWQ2hsQkI5Ri9XRnNhbwp2VUFMSUJQUE5lWUp6SllaSmlxcm1HN0hFWURtck9vZG1TZnNCQ0tMWTZ6M1o0WTZMWEVXZnRzcnVzZ0FyR0FyCjR6enVlMFJlcHlWMFNnMmdUMUJuZHZaWDdEcnN5NlNkeFJiNXUxQmJ6aGVQdTVldlhvY0tXRmJ1VlJiMHd3L0YKWmJ1a1JxRlFxZklKc0tvSlNQUE9tdzAyam9WVExoRldNN2d1RjY1Ymg1T0sycjNUbGNEanRzdHlTTnVqUDZ4TwppVXRQczJ
RblBBODcrWmNNRnljZjV3TVFxR1p4SXAxaUx2b295alVNNlJVN0gzU0M0K201cmhIMDVBNmp4ZzdnCnROOStLaGcwU0czZW0xcW1KL0sxS2RmVjBjSjBLZ28zQ25rcmdTNWgxSFlwcWpFUjJ4SnBzVnJVeUlmTWI2dnkKZldGbTJ6VUNnZ0VCQU5LSEtwM2E5S3dLU0ZKT2RvWWdEdW0xZ1JTZ0x4Rk8rTVR1SVJaVTJMM0QxK0puVVlMQwpEZ04yTElQUldyUWRQZitBN0JFV0hFNFNHNnV3eHNRd0I2dGdkRXRZeVVkMm9jUzFWYjkzOE5OQ0ZHYXM1dFQyCmR5UmJpUDhENi9tYlo5ckxHWnhKcTd4Q0NROHJaakNxaTA3VW55dWRSZ3VKemhERFRHay91RFh6SE1ZTDhlV3cKRGFJOFRwUXV0N0JXQzZmZU10L1RmaDl4ZEwzaHhJczN2YzFtV2NZZ0NkNVZUU3h4RDlVRlpHbXRmWW8wbjZhdwovOENsTUJOZGRXK1RwM2RPbDQ0YzI5elRITDQ4UDVtRTZqOWJPWmdmWHY5dDljdElGTE9mZkxiTGZHQlM3Yy9NCmdHV0cvSEZscVFBOGwyRnFSb09jNXFYU2phZ0VwUXB5anlrQ2dnRUFWNm5oZmZDc0pNTFliaGJlbHVOM0h5N2kKYXF3dGJwMVpwQ1g4WWtvMEk3VWFYUFpZSUdsV1FYVDVlQjBubHBTWVVLUW1jOG9pUkVoN3hEb05yRFpBcmVxeApRZDBUTmluWkdVZGt5UzJLWTRMblk2Y0tjWXRaTjhtRC9pQmM2NWdmSGo5MlhRTWJReFk1dTAwV0VJOUNReFQzCjZhVUpKdDBmZldjSjlrZ2tyUGJEU2xQL1h2UlBtcWJEUDdENnEzMHAvNUw3YmpkZzJOVHY4S1lsMjlWZzJtSHkKOElza2lEUDVJR2pPNEwwN2hUa3R4MlI5SWJPaGMzYkl6bUorQ040UFVUa0V6YndmSUN4SVBNNTl1V2NSSnNYbApjS01rVUtVWTFGS3Y2RCt3MDlsU0ZPQ0tVK3Q2alJlVWJmVy9xazVCb1ZoMm9DSWxxY0lXakZFRXBPN2VSUT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==","after":""},"structural_id":"6966b2fcd1edb6d42c0da7c85e1aa22ffae24485","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","rule_text_id":"np.pem.2","rule_name":"Base64-PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"3d014260f6750ff458a0b02343c9645f82f21348","rule_name":"Generic API Key","rule_text_id":"np.generic.2","rule_structural_id":"ac1028729d342a4d0cf282377532d882a48795c6","groups":["Q1Uwb0t0NEd0MWxIRHRKam5STGZkQlVaV2FkbVlJSGV2cS9UeVV6"],"num_matches":2,"num_redundant_matches":1,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"43d6429098e4d3acdf6938dfc159172c4dd99220","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1678401216 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1678401216 +0100","message":"Update docs for okteto challenge 15\n"},"blob_path":"src/main/resources/explanations/challenge15.adoc"}}],"blob_metadata":{"id":"1590fc5b5b352fd538a6372946b04120bc2b5649","num_bytes":1741,"mime_essence":null,"charset":null},"blob_id":"1590fc5b5b352fd538a6372946b04120bc2b5649","location":{"offset_span":{"start":1613,"end":1663},"source_span":{"start":{"line":32,"column":50},"end":{"line":32,"column":99}}},"groups":["Q1Uwb0t0NEd0MWxIRHRKam5STGZkQlVaV2FkbVlJSGV2cS9UeVV6"],"snippet":{"before":"S4XUU3EPJ\naws_secret_access_key=CU0oKt4Gt1lHDtJjnRLfdBUZWadmYIHevq/TyUz/\nregion=us-east-2\noutput=json\n\n#https://canarytokens.org/manage?token=n0cnd92mavmv1m61tjmyj9of5&auth=6519be82ef910868529091527c3edb3f\n\naws_access_key_id=AKIASP2TPHJS4XUU3EPJaws_secret_","matching":"access_key=CU0oKt4Gt1lHDtJjnRLfdBUZWadmYIHevq/TyUz","after":"/\n\nhttps://wrongsecrets-commjoen.cloud.okteto.net/canaries/tokencallbackdebug\n"},"structural_id":"c56d61ee93a4f093108a146dd1181ec411b1df18","rule_structural_id":"ac1028729d342a4d0cf282377532d882a48795c6","rule_text_id":"np.generic.2","rule_name":"Generic API Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"15b6e7abd2ef12bd9bac8666e9a45c9cd5339baf","rule_name":"Generic API 
Key","rule_text_id":"np.generic.2","rule_structural_id":"ac1028729d342a4d0cf282377532d882a48795c6","groups":["QUtJQVNQMlRQSEpTNFhVVTNFUEphd3Nfc2VjcmV0X2FjY2Vzc19rZXk="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"43d6429098e4d3acdf6938dfc159172c4dd99220","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1678401216 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1678401216 +0100","message":"Update docs for okteto challenge 15\n"},"blob_path":"src/main/resources/explanations/challenge15.adoc"}}],"blob_metadata":{"id":"1590fc5b5b352fd538a6372946b04120bc2b5649","num_bytes":1741,"mime_essence":null,"charset":null},"blob_id":"1590fc5b5b352fd538a6372946b04120bc2b5649","location":{"offset_span":{"start":1568,"end":1623},"source_span":{"start":{"line":32,"column":5},"end":{"line":32,"column":59}}},"groups":["QUtJQVNQMlRQSEpTNFhVVTNFUEphd3Nfc2VjcmV0X2FjY2Vzc19rZXk="],"snippet":{"before":"cxbv\n\n[default]\naws_access_key_id=AKIASP2TPHJS4XUU3EPJ\naws_secret_access_key=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\nregion=us-east-2\noutput=json\n\n#https://canarytokens.org/manage?token=n0cnd92mavmv1m61tjmyj9of5&auth=6519be82ef910868529091527c3edb3f\n\naws_","matching":"access_key_id=AKIASP2TPHJS4XUU3EPJaws_secret_access_key","after":"=CU0oKt4Gt1lHDtJjnRLfdBUZWadmYIHevq/TyUz/\n\nhttps://wrongsecrets-commjoen.cloud.okteto.net/canaries/tokencallbackdebug\n"},"structural_id":"f09e74268374789bf07f8f0c085491b4eabc3966","rule_structural_id":"ac1028729d342a4d0cf282377532d882a48795c6","rule_text_id":"np.generic.2","rule_name":"Generic API Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"a7cdf564d70c7e2a928e273a9d3ee4f2adff9dcf","rule_name":"Generic Password","rule_text_id":"np.generic.5","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","groups":["MjZJYWdFVk9nM2RzUTlBd0FTNXlVOGdwaXQ0aU9JWU9SOHNVdU9nWVZwVT0="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"76bc2a8ed945682d3fc6fc9b730bc923a82691ac","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1705822443 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1705494983 +0100","message":"Update POM file with new version: 1.8.2d\n"},"blob_path":".github/scripts/.bash_history"}}],"blob_metadata":{"id":"35fae43b9edec27151e60bf2a41949f91351774f","num_bytes":14740,"mime_essence":null,"charset":null},"blob_id":"35fae43b9edec27151e60bf2a41949f91351774f","location":{"offset_span":{"start":7201,"end":7256},"source_span":{"start":{"line":350,"column":12},"end":{"line":350,"column":66}}},"groups":["MjZJYWdFVk9nM2RzUTlBd0FTNXlVOGdwaXQ0aU9JWU9SOHNVdU9nWVZwVT0="],"snippet":{"before":"_bin.deb\nsudo apt install libc6-i386\nsudo apt --fix-broken install\nsudo dpkg -i jdk-18_linux-x64_bin.deb\njava --version\nmvn --no-transfer-progress test\ngit status\nrm -rf jdk-18_linux-x64_bin.deb\ngit rebase -i main\ngit rebase -i master\ngit stash\nexport temp","matching":"Password=\"26IagEVOg3dsQ9AwAS5yU8gpit4iOIYOR8sUuOgYVpU=\"","after":"\nmvn run tempPassword\nk6\nnpx k6\nnpm remove @types/k6\nnpm run 
tests:automatic\nnpm run test:automatic\nk6\nnpm run\nnpm start\nnpm install nodemon --save\nnpm start\nnpm install\ngit status\ngit checkout main\ngit status\ngit stash\ngit log\ngit checkout 0a34816be1f1149"},"structural_id":"2e0e745fcdf8fef423dd27b0791630e7258252c0","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","rule_text_id":"np.generic.5","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"7d967ba79a438184cc227b02cb91b59067144e09","rule_name":"Generic Password","rule_text_id":"np.generic.6","rule_structural_id":"623947cab165b1d6c786a051107b4555e41719d5","groups":["dEpiUWpDTT0="],"num_matches":2,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"812d4185f6d835730265679f2748e330568d2460","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1685336295 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1685336295 +0200","message":"Changed to node v18, updated configs, updated pre-commit, susppressed eslint on index.js\n"},"blob_path":"js/index.js"}}],"blob_metadata":{"id":"4a03e02f3b71094bd9ea9a2220f151bc3b0591f6","num_bytes":152,"mime_essence":"text/javascript","charset":null},"blob_id":"4a03e02f3b71094bd9ea9a2220f151bc3b0591f6","location":{"offset_span":{"start":67,"end":88},"source_span":{"start":{"line":2,"column":9},"end":{"line":2,"column":29}}},"groups":["dEpiUWpDTT0="],"snippet":{"before":"function secret () { // eslint-disable-line no-unused-vars\n const ","matching":"password = 'tJbQjCM='","after":" + 9 + 'SnCq' + 6 + 'LBU=' + 2 + 'h5GD' + 7\n return password\n}\n"},"structural_id":"221de10d50ba8bb10943168e9536aa4ac93f4261","rule_structural_id":"623947cab165b1d6c786a051107b4555e41719d5","rule_text_id":"np.generic.6","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"e2947f2b69948c8916b744144ebb02d36f8bd58b","committer_name":"GitHub","committer_email":"noreply@github.com","committer_timestamp":"1685256766 +0000","author_name":"pre-commit-ci-lite[bot]","author_email":"117423508+pre-commit-ci-lite[bot]@users.noreply.github.com","author_timestamp":"1685256766 +0000","message":"[pre-commit.ci lite] apply automatic fixes"},"blob_path":"js/index.js"}}],"blob_metadata":{"id":"a88dd5bb2a766074134a814379630b4f7a13ca25","num_bytes":114,"mime_essence":"text/javascript","charset":null},"blob_id":"a88dd5bb2a766074134a814379630b4f7a13ca25","location":{"offset_span":{"start":29,"end":50},"source_span":{"start":{"line":2,"column":9},"end":{"line":2,"column":29}}},"groups":["dEpiUWpDTT0="],"snippet":{"before":"function secret () {\n const ","matching":"password = 'tJbQjCM='","after":" + 9 + 'SnCq' + 6 + 'LBU=' + 2 + 'h5GD' + 7\n return password\n}\n"},"structural_id":"59bee622a86afc205e14a98339e43590554bc97f","rule_structural_id":"623947cab165b1d6c786a051107b4555e41719d5","rule_text_id":"np.generic.6","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"1d56ba5ba541032ca531a3b07fbefd3d3a58df32","rule_name":"Generic 
Secret","rule_text_id":"np.generic.1","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"num_matches":3,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./src/main/resources/explanations/challenge25.adoc"},{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"3fd81b9bcc4532eaf62aeb35a9f99231a90fd81c","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1664348148 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1664348148 +0200","message":"fixes for challenges\n"},"blob_path":"src/main/resources/explanations/challenge25.adoc"}}],"blob_metadata":{"id":"12c6ab9a78813e5cf5aa45b505ad858217861a92","num_bytes":270,"mime_essence":null,"charset":null},"blob_id":"12c6ab9a78813e5cf5aa45b505ad858217861a92","location":{"offset_span":{"start":173,"end":241},"source_span":{"start":{"line":5,"column":56},"end":{"line":5,"column":123}}},"groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"snippet":{"before":"=== Secrets in smart contracts part 1\n\nOn public blockchains, everything that is written on-chain is world-readable.\n\nIn this challenge, you need to read the variable named ","matching":"secret from the contract `0x8b72f7cbAD50620c46219ad676Ad9d3a5A273587","after":"` on the Goerli EVM Testnet.\n"},"structural_id":"c290822f8c5e37199afe269b4367bb37d5aeed04","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"b266f31da8df2a2cb7bd90011c2ae1f2700fc9e9","committer_name":"Chris Elbring","committer_email":"chris.elbring@gmail.com","committer_timestamp":"1664240979 -0500","author_name":"Chris Elbring","author_email":"chris.elbring@gmail.com","author_timestamp":"1664240979 -0500","message":"Challenge25\n"},"blob_path":"src/main/resources/explanations/challenge25.adoc"}}],"blob_metadata":{"id":"8d42eb52d9050870e70a5c635c1e0c58bbd12729","num_bytes":458,"mime_essence":null,"charset":null},"blob_id":"8d42eb52d9050870e70a5c635c1e0c58bbd12729","location":{"offset_span":{"start":362,"end":429},"source_span":{"start":{"line":5,"column":56},"end":{"line":5,"column":122}}},"groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"snippet":{"before":"-readable. So there are various mistakes you can find on https://twitter.com/d_feldman/status/1558309810801631233?s=20&t=z98ii6IPJEZq10cnsGAhpQ[Twitter]. 
What if our developers made the same mistake?\n\nIn this challenge, you need to read the variable named ","matching":"secret from the contract 0x8b72f7cbAD50620c46219ad676Ad9d3a5A273587","after":" on the goerli evm testnet.\n\n"},"structural_id":"7ff4b8b2c8aca984725c443d7e12697994d15bea","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ec89e8a69cd6fdfa55c02fd35bea29056213a652","committer_name":"Chris Elbring","committer_email":"chris.elbring@gmail.com","committer_timestamp":"1664300830 -0500","author_name":"Chris Elbring","author_email":"chris.elbring@gmail.com","author_timestamp":"1664300830 -0500","message":"Challenge25-6\n"},"blob_path":"src/main/resources/explanations/challenge25.adoc"}}],"blob_metadata":{"id":"deaed2195e9fae8a1098ae159d3075ee59921577","num_bytes":269,"mime_essence":null,"charset":null},"blob_id":"deaed2195e9fae8a1098ae159d3075ee59921577","location":{"offset_span":{"start":173,"end":240},"source_span":{"start":{"line":5,"column":56},"end":{"line":5,"column":122}}},"groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"snippet":{"before":"=== Secrets in smart contracts part 1\n\nOn public blockchains, everything that is written on-chain is world-readable.\n\nIn this challenge, you need to read the variable named ","matching":"secret from the contract 0x8b72f7cbAD50620c46219ad676Ad9d3a5A273587","after":" on the goerli evm testnet.\n\n"},"structural_id":"6280d23c964b17d622706498d962712e7c1b93f6","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"796ae549d04f3e3895387e0ba6e890a3f7e8cf36","rule_name":"Generic Secret","rule_text_id":"np.generic.1","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","groups":["aFJacU9FQjBWMGtVNkpoRVhkbThVSDMyVkRBYkFiZFJ4ZzVSTXBv"],"num_matches":6,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"30e36acf62d104ff600b72aedeb14e7b668c1ea1","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1644228456 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1644227390 +0100","message":"Updated comments and code in 13\n\nAdded test for challenge 13\n\nUpdated README to include challenge 13\n\nUpdate readme\n"},"blob_path":"src/test/java/org/owasp/wrongsecrets/challenges/docker/Challenge13Test.java"}}],"blob_metadata":{"id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","num_bytes":1683,"mime_essence":"application/octet-stream","charset":null},"blob_id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","location":{"offset_span":{"start":663,"end":712},"source_span":{"start":{"line":23,"column":69},"end":{"line":23,"column":117}}},"groups":["aFJacU9FQjBWMGtVNkpoRVhkbThVSDMyVkRBYkFiZFJ4ZzVSTXBv"],"snippet":{"before":"arsets;\nimport java.util.Base64;\n\n@ExtendWith(MockitoExtension.class)\nclass Challenge13Test {\n\n @Mock\n private ScoreCard scoreCard;\n\n @Test\n void spoilerShouldRevealAnswer() {\n var challenge = new Challenge13(scoreCard, \"This is not the ","matching":"secret\", 
\"hRZqOEB0V0kU6JhEXdm8UH32VDAbAbdRxg5RMpo","after":"/fA8caUCvJhs=\");\n\n Assertions.assertThat(challenge.spoiler()).isEqualTo(new Spoiler(Base64.getEncoder().encodeToString(\"This is our first key as github secret\".getBytes(StandardCharsets.UTF_8))));\n }\n\n @Test\n void rightAnswerShouldSolveChal"},"structural_id":"9dcdcb8e4717a1f122c57eb4232b01d88dca39a5","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"30e36acf62d104ff600b72aedeb14e7b668c1ea1","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1644228456 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1644227390 +0100","message":"Updated comments and code in 13\n\nAdded test for challenge 13\n\nUpdated README to include challenge 13\n\nUpdate readme\n"},"blob_path":"src/test/java/org/owasp/wrongsecrets/challenges/docker/Challenge13Test.java"}}],"blob_metadata":{"id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","num_bytes":1683,"mime_essence":"application/octet-stream","charset":null},"blob_id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","location":{"offset_span":{"start":1046,"end":1095},"source_span":{"start":{"line":30,"column":69},"end":{"line":30,"column":117}}},"groups":["aFJacU9FQjBWMGtVNkpoRVhkbThVSDMyVkRBYkFiZFJ4ZzVSTXBv"],"snippet":{"before":"(new Spoiler(Base64.getEncoder().encodeToString(\"This is our first key as github secret\".getBytes(StandardCharsets.UTF_8))));\n }\n\n @Test\n void rightAnswerShouldSolveChallenge() {\n var challenge = new Challenge13(scoreCard, \"This is not the ","matching":"secret\", \"hRZqOEB0V0kU6JhEXdm8UH32VDAbAbdRxg5RMpo","after":"/fA8caUCvJhs=\");\n\n Assertions.assertThat(challenge.solved(Base64.getEncoder().encodeToString(\"This is our first key as github secret\".getBytes(StandardCharsets.UTF_8)))).isTrue();\n Mockito.verify(scoreCard).completeChallenge(challenge);\n }"},"structural_id":"929d26e0314bf73ed4b938fbb7f08951a60fdb89","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"30e36acf62d104ff600b72aedeb14e7b668c1ea1","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1644228456 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1644227390 +0100","message":"Updated comments and code in 13\n\nAdded test for challenge 13\n\nUpdated README to include challenge 13\n\nUpdate readme\n"},"blob_path":"src/test/java/org/owasp/wrongsecrets/challenges/docker/Challenge13Test.java"}}],"blob_metadata":{"id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","num_bytes":1683,"mime_essence":"application/octet-stream","charset":null},"blob_id":"01f2e1f50867c4d2200edac60290aadaef82e0d7","location":{"offset_span":{"start":1483,"end":1532},"source_span":{"start":{"line":38,"column":69},"end":{"line":38,"column":117}}},"groups":["aFJacU9FQjBWMGtVNkpoRVhkbThVSDMyVkRBYkFiZFJ4ZzVSTXBv"],"snippet":{"before":" secret\".getBytes(StandardCharsets.UTF_8)))).isTrue();\n Mockito.verify(scoreCard).completeChallenge(challenge);\n }\n\n @Test\n void 
incorrectAnswerShouldNotSolveChallenge() {\n var challenge = new Challenge13(scoreCard, \"This is not the ","matching":"secret\", \"hRZqOEB0V0kU6JhEXdm8UH32VDAbAbdRxg5RMpo","after":"/fA8caUCvJhs=\");\n\n Assertions.assertThat(challenge.solved(\"wrong answer\")).isFalse();\n Mockito.verifyNoInteractions(scoreCard);\n }\n\n}\n"},"structural_id":"63c61a042d7112b91910ccf66fd4ceb15706b4d2","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"025edf89aabf5730eba20f56a2bdd3152d7da1c1","rule_name":"Generic Username and Password","rule_text_id":"np.generic.3","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","groups":["c3RhdGljLXVzZXI=","JChvcGVuc3NsIHJhbmQgLWJhc2U2NCAxNik="],"num_matches":5,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"f72cdca5ddc696ddd03b0c1926379d6d618eadd5","committer_name":"GitHub","committer_email":"noreply@github.com","committer_timestamp":"1604486227 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1604486227 +0100","message":"Update k8s-vault-minkube-start.sh\n\nCo-authored-by: Ben de Haan <53901866+bendehaan@users.noreply.github.com>"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"611c57d0dd9f26eba2cb3689ecc8544e61c2e9b9","num_bytes":3828,"mime_essence":"application/x-sh","charset":null},"blob_id":"611c57d0dd9f26eba2cb3689ecc8544e61c2e9b9","location":{"offset_span":{"start":2565,"end":2625},"source_span":{"start":{"line":70,"column":59},"end":{"line":70,"column":118}}},"groups":["c3RhdGljLXVzZXI=","JChvcGVuc3NsIHJhbmQgLWJhc2U2NCAxNik="],"snippet":{"before":"ken)\n\necho \"Logging in\"\nkubectl exec vault-0 -- vault login $ROOTTOKEN \n\necho \"Enabling kv-v2 kubernetes\"\nkubectl exec vault-0 -- vault secrets enable -path=secret kv-v2\n\necho \"Putting a secret in\"\nkubectl exec vault-0 -- vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"$(openssl rand -base64 16)\"","after":"\n\necho \"Enable k8s auth\"\nkubectl exec vault-0 -- vault auth enable kubernetes\n\necho \"Writing k8s auth config\" \n#TODO: below should be executed on he host only, so pick it up from the pod!\nkubectl exec vault-0 -- bash -c 'vault write auth/kubernetes/config "},"structural_id":"adcc890b7f6162534d2cca50dd84530060e68a17","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and Password","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"cbd13f218be083ca063e1bc03da7f4ef2b34fd2c","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1604524065 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1604524065 +0100","message":"enable proper port 
forwarding\n"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"653a441f774d3251f466b37599f576f2e659df9d","num_bytes":3903,"mime_essence":"application/x-sh","charset":null},"blob_id":"653a441f774d3251f466b37599f576f2e659df9d","location":{"offset_span":{"start":2565,"end":2625},"source_span":{"start":{"line":70,"column":59},"end":{"line":70,"column":118}}},"groups":["c3RhdGljLXVzZXI=","JChvcGVuc3NsIHJhbmQgLWJhc2U2NCAxNik="],"snippet":{"before":"ken)\n\necho \"Logging in\"\nkubectl exec vault-0 -- vault login $ROOTTOKEN \n\necho \"Enabling kv-v2 kubernetes\"\nkubectl exec vault-0 -- vault secrets enable -path=secret kv-v2\n\necho \"Putting a secret in\"\nkubectl exec vault-0 -- vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"$(openssl rand -base64 16)\"","after":"\n\necho \"Enable k8s auth\"\nkubectl exec vault-0 -- vault auth enable kubernetes\n\necho \"Writing k8s auth config\" \n\nkubectl exec vault-0 -- /bin/sh -c 'vault write auth/kubernetes/config \\\n token_reviewer_jwt=\"$(cat /var/run/secrets/kubernetes.io/servic"},"structural_id":"a1cd108348201e0f920b0b966d6843653831054b","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and Password","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"39056f14229af2f3880ba85193f933059ac4828d","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1604430603 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1604430603 +0100","message":"update\n"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"7da1220ff8fd5ce55a3e6adfe82605910c4efb0e","num_bytes":3682,"mime_essence":"application/x-sh","charset":null},"blob_id":"7da1220ff8fd5ce55a3e6adfe82605910c4efb0e","location":{"offset_span":{"start":2430,"end":2490},"source_span":{"start":{"line":70,"column":59},"end":{"line":70,"column":118}}},"groups":["c3RhdGljLXVzZXI=","JChvcGVuc3NsIHJhbmQgLWJhc2U2NCAxNik="],"snippet":{"before":"ken)\n\necho \"Logging in\"\nkubectl exec vault-0 -- vault login $ROOTTOKEN \n\necho \"Enabling kv-v2 kubernetes\"\nkubectl exec vault-0 -- vault secrets enable -path=secret kv-v2\n\necho \"Putting a secret in\"\nkubectl exec vault-0 -- vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"$(openssl rand -base64 16)\"","after":"\n\necho \"Enable k8s auth\"\nkubectl exec vault-0 -- vault auth enable kubernetes\n\necho \"Writing k8s auth config\" \n#TODO: below should be executed on he host only, so pick it up from the pod!\nkubectl exec vault-0 -- vault write auth/kubernetes/config \\\n "},"structural_id":"8f41699c92acc6b1a85fbf4b0fee76b6afa52c9c","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"9521950b90ee23af6a5c95f9094f7f53f4dfe693","rule_name":"Generic Username and 
Password","rule_text_id":"np.generic.3","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","groups":["c3RhdGljLXVzZXI=","c3RhdGljLXBhc3N3b3Jk"],"num_matches":10,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"d6b18f36bc470399463ae34b7d2bd8a3ba6c6126","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1604393608 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1604393608 +0100","message":"Updated script (working till k8s\n"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"00751cca0c9da1639ce36e69c86647d3afcb21a1","num_bytes":3162,"mime_essence":"application/x-sh","charset":null},"blob_id":"00751cca0c9da1639ce36e69c86647d3afcb21a1","location":{"offset_span":{"start":2947,"end":2996},"source_span":{"start":{"line":83,"column":39},"end":{"line":83,"column":87}}},"groups":["c3RhdGljLXVzZXI=","c3RhdGljLXBhc3N3b3Jk"],"snippet":{"before":" for webapp\"\nkubectl exec vault-0 -- vault write auth/kubernetes/role/webapp \\\n bound_service_account_names=vault \\\n bound_service_account_namespaces=default \\\n policies=webapp \\\n ttl=24h \\\n && vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"static-password\"","after":"\n\n#kubectl apply -f k8s/secret-challenge-deployment.yml\n#kubectl expose deployment secret-challenge --type=LoadBalancer --port=8080\n#minikube service secret-challenge"},"structural_id":"3ea18eb858a2bebbccc79478bd392daba876f73e","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and Password","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"255fb6def8a2546d29fb673fb55e94c0a9a4ee00","committer_name":"Jeroen Willemsen","committer_email":"jwillemsen@xebia.com","committer_timestamp":"1604295194 +0100","author_name":"Jeroen Willemsen","author_email":"jwillemsen@xebia.com","author_timestamp":"1604295194 +0100","message":"Processed feedback\n"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"39d7129d565c361b36b28d3f08ac7429b149be5e","num_bytes":2419,"mime_essence":"application/x-sh","charset":null},"blob_id":"39d7129d565c361b36b28d3f08ac7429b149be5e","location":{"offset_span":{"start":2204,"end":2253},"source_span":{"start":{"line":63,"column":108},"end":{"line":63,"column":156}}},"groups":["c3RhdGljLXVzZXI=","c3RhdGljLXBhc3N3b3Jk"],"snippet":{"before":"mes=vault \\\n bound_service_account_namespaces=default \\\n policies=webapp \\\n ttl=24h\ncat cluster-keys.json | jq -r \".root_token\"\nkubectl exec vault-0 login && vault secrets enable -path=secret kv-v2 && vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"static-password\"","after":"\n\n#kubectl apply -f k8s/secret-challenge-deployment.yml\n#kubectl expose deployment secret-challenge --type=LoadBalancer --port=8080\n#minikube service secret-challenge"},"structural_id":"e768dc2f2936f6b49980a9be7bb3791497df896d","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and 
Password","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"f72cdca5ddc696ddd03b0c1926379d6d618eadd5","committer_name":"GitHub","committer_email":"noreply@github.com","committer_timestamp":"1604486227 +0100","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1604486227 +0100","message":"Update k8s-vault-minkube-start.sh\n\nCo-authored-by: Ben de Haan <53901866+bendehaan@users.noreply.github.com>"},"blob_path":"k8s-vault-minkube-start.sh"}}],"blob_metadata":{"id":"611c57d0dd9f26eba2cb3689ecc8544e61c2e9b9","num_bytes":3828,"mime_essence":"application/x-sh","charset":null},"blob_id":"611c57d0dd9f26eba2cb3689ecc8544e61c2e9b9","location":{"offset_span":{"start":3560,"end":3609},"source_span":{"start":{"line":95,"column":39},"end":{"line":95,"column":87}}},"groups":["c3RhdGljLXVzZXI=","c3RhdGljLXBhc3N3b3Jk"],"snippet":{"before":" for webapp\"\nkubectl exec vault-0 -- vault write auth/kubernetes/role/webapp \\\n bound_service_account_names=vault \\\n bound_service_account_namespaces=default \\\n policies=webapp \\\n ttl=24h \\\n && vault kv put secret/webapp/config ","matching":"username=\"static-user\" password=\"static-password\"","after":"\n\nkubectl apply -f k8s/secret-challenge-deployment.yml\nkubectl expose deployment secret-challenge --type=LoadBalancer --port=8080\nkubectl port-forward secret-challenge 8080:8080 \n#or \n#minikube service secret-challenge\n"},"structural_id":"522f63916db353188ddab9414096210673e21085","rule_structural_id":"8cca3a4a548b00450bae08c40f5f33f06173b21f","rule_text_id":"np.generic.3","rule_name":"Generic Username and Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"6bd2fc96f6ca817526063128466d97d3ac22d631","rule_name":"GitHub Personal Access Token","rule_text_id":"np.github.1","rule_structural_id":"f6c4fca24a1c7f275d51d2718a1585ca6e4ae664","groups":["Z2hwX2cwR0FReXZQcGpiNFREcEZTUGZBckU2OE11U2JxUzRGR1ZFNA=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ade903e02bf5e9832a21f0f38ff1136ab4b497c5","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1653281696 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1650227203 +0200","message":"Added github access token for #201\n"},"blob_path":"secretscache/token"}}],"blob_metadata":{"id":"217e905d55d11baa4403d26e174cf98fa5168496","num_bytes":41,"mime_essence":null,"charset":null},"blob_id":"217e905d55d11baa4403d26e174cf98fa5168496","location":{"offset_span":{"start":0,"end":40},"source_span":{"start":{"line":1,"column":1},"end":{"line":1,"column":40}}},"groups":["Z2hwX2cwR0FReXZQcGpiNFREcEZTUGZBckU2OE11U2JxUzRGR1ZFNA=="],"snippet":{"before":"","matching":"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","after":"\n"},"structural_id":"aad91b86c86aa4d87732dcf1c41585f1214ff9a6","rule_structural_id":"f6c4fca24a1c7f275d51d2718a1585ca6e4ae664","rule_text_id":"np.github.1","rule_name":"GitHub Personal Access Token","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"90ab285686cd020122f3fe710a2a1b77e9c6e0b3","rule_name":"GitHub Personal Access Token (fine-grained 
permissions)","rule_text_id":"np.github.7","rule_structural_id":"8a8de711c438d48576508a096971b13135fb73cb","groups":["Z2l0aHViX3BhdF8xMUFDTDRTNFEwQ0xiY1ZySXowZ2ROX1NzWGdMRlpwNXVsdGN4NkNBdkJaQTlmeHNNNHpxRHVUZVYxbkFHTFpUeGI0NkE0Wkk2QnRwOVdFeDR2"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ca8ed47675496c48a8d801a5b4fe538312e22b7f","committer_name":"Rodolfo Cabral Neves","committer_email":"roddas360@gmail.com","committer_timestamp":"1696519709 -0300","author_name":"Rodolfo Cabral Neves","author_email":"roddas360@gmail.com","author_timestamp":"1696519709 -0300","message":" feat #984 : Add the main script\n"},"blob_path":"scripts/sort_contibutors/main.py"}}],"blob_metadata":{"id":"ea7626f56a7a81abf9769dae451666044eae5f62","num_bytes":1374,"mime_essence":"text/plain","charset":null},"blob_id":"ea7626f56a7a81abf9769dae451666044eae5f62","location":{"offset_span":{"start":1141,"end":1234},"source_span":{"start":{"line":38,"column":10},"end":{"line":38,"column":102}}},"groups":["Z2l0aHViX3BhdF8xMUFDTDRTNFEwQ0xiY1ZySXowZ2ROX1NzWGdMRlpwNXVsdGN4NkNBdkJaQTlmeHNNNHpxRHVUZVYxbkFHTFpUeGI0NkE0Wkk2QnRwOVdFeDR2"],"snippet":{"before":"st:\n\theaders = {'X-GitHub-Api-Version':'2022-11-28','Accept':'application/vnd.github+json','Authorization':'Bearer ' + user_token}\n\tr = requests.get('https://api.github.com/repos/OWASP/'+project+'/contributors',headers=headers)\n\treturn r.json()\n\t\ntoken = '","matching":"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","after":"'\nproject = 'wrongsecrets'\n\ncontributors_list = parse_contributor_list(get_contibutor_list(project,token))\nprint_list(contributors_list[1])\n"},"structural_id":"f8fa7e7f2f788acae3af96fe4e459091e6b95a8c","rule_structural_id":"8a8de711c438d48576508a096971b13135fb73cb","rule_text_id":"np.github.7","rule_name":"GitHub Personal Access Token (fine-grained permissions)","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"430a3b39efb937edef71eb3cf18e76532bea7fd5","rule_name":"GitLab Personal Access Token","rule_text_id":"np.gitlab.2","rule_structural_id":"c9d156209ee37a65c39b8845464831ca8936ff79","groups":["Z2xwYXQtNVB5N3loa0toR3JUVzZzd0tBeV8="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ff2179e667e7a8cbbf994ffcf7b431ea53eb5414","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1655622003 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1655622003 +0200","message":"Added segment.io admin token #201\n"},"blob_path":"secretscache/gitlab/revoked_access_tokens.txt"}}],"blob_metadata":{"id":"b02b4a67f92ced4fcf47d5784f72017b26697cc1","num_bytes":90,"mime_essence":"text/plain","charset":null},"blob_id":"b02b4a67f92ced4fcf47d5784f72017b26697cc1","location":{"offset_span":{"start":5,"end":31},"source_span":{"start":{"line":1,"column":6},"end":{"line":1,"column":31}}},"groups":["Z2xwYXQtNVB5N3loa0toR3JUVzZzd0tBeV8="],"snippet":{"before":"PAT: ","matching":"glpat-5Py7yhkKhGrTW6swKAy_","after":"\nCalendar: RBKUnE1hzuWMTxAcxD9J\nMail: 
RG2UFyvxu84w8TTMbKRZ\n"},"structural_id":"6d98bf44886dfdcdb71392262fd6806e3461d0d0","rule_structural_id":"c9d156209ee37a65c39b8845464831ca8936ff79","rule_text_id":"np.gitlab.2","rule_name":"GitLab Personal Access Token","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"0267af8fb4a4efa5a061cfd920a4903ac27be4a8","rule_name":"Google API Key","rule_text_id":"np.google.5","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","groups":["QUl6YVN5QlNwSHZ0OGwxZjlxbHBwSnFRVzI4MHZHYWNYZ3dObnJr"],"num_matches":4,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"5dfc72f20ef5c048f1bc9afa1d94c7c0bf2df36c","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1692169868 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1692169868 +0200","message":"Add docs\n"},"blob_path":"src/main/java/org/owasp/wrongsecrets/challenges/docker/Challenge36.java"}}],"blob_metadata":{"id":"b65081351e1ec71d426addd36c55f723a695c893","num_bytes":1631,"mime_essence":"application/octet-stream","charset":null},"blob_id":"b65081351e1ec71d426addd36c55f723a695c893","location":{"offset_span":{"start":1581,"end":1621},"source_span":{"start":{"line":65,"column":13},"end":{"line":65,"column":52}}},"groups":["QUl6YVN5QlNwSHZ0OGwxZjlxbHBwSnFRVzI4MHZHYWNYZ3dObnJr"],"snippet":{"before":"lineHosted() {\n return false;\n }\n\n @Override\n public List supportedRuntimeEnvironments() {\n return List.of(RuntimeEnvironment.Environment.DOCKER);\n }\n\n private String getKey() {\n //google api key\n return \"","matching":"AIzaSyBSpHvt8l1f9qlppJqQW280vGacXgwNnrk\"","after":";\n }\n\n\n}\n"},"structural_id":"04d00ff2f88309a0f6cc04167a7698581a814b2a","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","rule_text_id":"np.google.5","rule_name":"Google API Key","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"92740436df98d0d100f4c6457a9fb58199958fbc","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1692161491 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1692161491 +0200","message":"Add experimental key\n"},"blob_path":"src/main/java/org/owasp/wrongsecrets/challenges/docker/Challenge36.java"}}],"blob_metadata":{"id":"bf79098769f91bc9e5d63e9c81953e429a07b031","num_bytes":1745,"mime_essence":"application/octet-stream","charset":null},"blob_id":"bf79098769f91bc9e5d63e9c81953e429a07b031","location":{"offset_span":{"start":1695,"end":1735},"source_span":{"start":{"line":67,"column":13},"end":{"line":67,"column":52}}},"groups":["QUl6YVN5QlNwSHZ0OGwxZjlxbHBwSnFRVzI4MHZHYWNYZ3dObnJr"],"snippet":{"before":"lineHosted() {\n return false;\n }\n\n @Override\n public List supportedRuntimeEnvironments() {\n return List.of(RuntimeEnvironment.Environment.DOCKER);\n }\n\n private String getKey() {\n //google api key\n return \"","matching":"AIzaSyBSpHvt8l1f9qlppJqQW280vGacXgwNnrk\"","after":";\n }\n\n\n}\n"},"structural_id":"5e86afe4c8673bc81f55bee3422f692fcc0fc5ac","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","rule_text_id":"np.google.5","rule_name":"Google API 
Key","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"8ccc3f982bcfc89bc629a6b30bc3b2a9e883359e","committer_name":"GitHub","committer_email":"noreply@github.com","committer_timestamp":"1692170466 +0000","author_name":"pre-commit-ci-lite[bot]","author_email":"117423508+pre-commit-ci-lite[bot]@users.noreply.github.com","author_timestamp":"1692170466 +0000","message":"[pre-commit.ci lite] apply automatic fixes"},"blob_path":"src/main/java/org/owasp/wrongsecrets/challenges/docker/Challenge36.java"}}],"blob_metadata":{"id":"e350ade649fb48c8615108b1dd66baad62a527aa","num_bytes":1624,"mime_essence":"application/octet-stream","charset":null},"blob_id":"e350ade649fb48c8615108b1dd66baad62a527aa","location":{"offset_span":{"start":1576,"end":1616},"source_span":{"start":{"line":63,"column":13},"end":{"line":63,"column":52}}},"groups":["QUl6YVN5QlNwSHZ0OGwxZjlxbHBwSnFRVzI4MHZHYWNYZ3dObnJr"],"snippet":{"before":"nlineHosted() {\n return false;\n }\n\n @Override\n public List supportedRuntimeEnvironments() {\n return List.of(RuntimeEnvironment.Environment.DOCKER);\n }\n\n private String getKey() {\n // google api key\n return \"","matching":"AIzaSyBSpHvt8l1f9qlppJqQW280vGacXgwNnrk\"","after":";\n }\n}\n"},"structural_id":"1356f42c72da0485d20b872dcfc4a59a04349ba0","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","rule_text_id":"np.google.5","rule_name":"Google API Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"58c620bd85b8287ec6b1bb9baef7d0c9ae9e0172","rule_name":"Google API Key","rule_text_id":"np.google.5","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","groups":["QUl6YVN5QmdRM29ZV3FmR3VGNloya0ZIYkl2dzBHTDJHY3l2QU9v"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"da6446dd45adf849b72dfa118bb1f9adf8608432","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1655529712 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1655529712 +0200","message":"Made the secrets cache easier to overview in #201\n"},"blob_path":"secretscache/firebase/google-services.json"}}],"blob_metadata":{"id":"a1ebecd777886802cf0bdf167743adc17889dd80","num_bytes":1006,"mime_essence":"application/json","charset":null},"blob_id":"a1ebecd777886802cf0bdf167743adc17889dd80","location":{"offset_span":{"start":617,"end":657},"source_span":{"start":{"line":23,"column":27},"end":{"line":23,"column":66}}},"groups":["QUl6YVN5QmdRM29ZV3FmR3VGNloya0ZIYkl2dzBHTDJHY3l2QU9v"],"snippet":{"before":"s\"\n }\n },\n \"oauth_client\": [\n {\n \"client_id\": \"368606766077-9h639dm2huccj5bmb9gd6aapuimcgihf.apps.googleusercontent.com\",\n \"client_type\": 3\n }\n ],\n \"api_key\": [\n {\n \"current_key\": \"","matching":"AIzaSyBgQ3oYWqfGuF6Z2kFHbIvw0GL2GcyvAOo\"","after":"\n }\n ],\n \"services\": {\n \"appinvite_service\": {\n \"other_platform_oauth_client\": [\n {\n \"client_id\": \"368606766077-9h639dm2huccj5bmb9gd6aapuimcgihf.apps.googleusercontent.com\",\n \"client_typ"},"structural_id":"d5726bc9dbaa3e8c272948ccd05c1e6ff50c56df","rule_structural_id":"2574397bc0dd4560b0f59cbe1018bc33016e819b","rule_text_id":"np.google.5","rule_name":"Google API Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} 
+{"finding_id":"60c5f47b58e4414a214a79d4d004619b91a56eb9","rule_name":"HTTP Basic Authentication","rule_text_id":"np.http.1","rule_structural_id":"83c76359a051a0490f1d57406bccfdd289cb113d","groups":["UVd4aFpHUnBianB2Y0dWdUlITmxjMkZ0WlE9PQ=="],"num_matches":2,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ae013ddca2804e0059e91fa693cc12a7f9e09d02","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1653721229 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1653721229 +0200","message":"Merge branch 'master' into experiment-bed\n"},"blob_path":"secretscache/curl.sh"}}],"blob_metadata":{"id":"03643e2808d897e4238f7994a5a7da37c7fbc6d5","num_bytes":129,"mime_essence":"application/x-sh","charset":null},"blob_id":"03643e2808d897e4238f7994a5a7da37c7fbc6d5","location":{"offset_span":{"start":21,"end":71},"source_span":{"start":{"line":3,"column":9},"end":{"line":3,"column":58}}},"groups":["UVd4aFpHUnBianB2Y0dWdUlITmxjMkZ0WlE9PQ=="],"snippet":{"before":"#!/bin/bash\n\ncurl -H ","matching":"Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== ","after":"https://en.wikipedia.org/wiki/Basic_access_authentication\n"},"structural_id":"464c7549cfcd3a05199ff3e7094b968eb39cebfe","rule_structural_id":"83c76359a051a0490f1d57406bccfdd289cb113d","rule_text_id":"np.http.1","rule_name":"HTTP Basic Authentication","score":null,"comment":null,"status":null,"redundant_to":[]},{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"f62c9fd5d5ac5c0add01cb3f02055404d19da7e9","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1693794249 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1693794249 +0200","message":"silly setup for getting challenge to work\n"},"blob_path":".github/workflows/dast-zap-test.yml"}}],"blob_metadata":{"id":"e50dcdfb4b5d6af494a1f91de0e67458a0de0990","num_bytes":1041,"mime_essence":"text/x-yaml","charset":null},"blob_id":"e50dcdfb4b5d6af494a1f91de0e67458a0de0990","location":{"offset_span":{"start":766,"end":816},"source_span":{"start":{"line":29,"column":35},"end":{"line":29,"column":84}}},"groups":["UVd4aFpHUnBianB2Y0dWdUlITmxjMkZ0WlE9PQ=="],"snippet":{"before":".skip=true -Dexec.skip\n - name: Start wrongsecrets\n run: nohup ./mvnw spring-boot:run -Dspring-boot.run.profiles=without-vault &\n - name: ZAP Scan\n uses: zaproxy/action-baseline@v0.9.0\n env:\n ZAP_AUTH_HEADER_VALUE: \"","matching":"Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==\"","after":"\n with:\n allow_issue_writing: false\n docker_name: \"owasp/zap2docker-stable\"\n target: \"http://localhost:8080\"\n rules_file_name: config/zap/rule-config.tsv\n fail_action: true\n"},"structural_id":"1b767368fbca584eb01aba2c152c44179d2121f1","rule_structural_id":"83c76359a051a0490f1d57406bccfdd289cb113d","rule_text_id":"np.http.1","rule_name":"HTTP Basic Authentication","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"c753b17c895df529327b5a0823b9413455bc8b7d","rule_name":"Hardcoded Gradle 
Credentials","rule_text_id":"np.gradle.1","rule_structural_id":"a12f90a50f965526bfcf34016b914665483c389d","groups":["YWFkbWlu","bm90YXBhc3N3b3Jk"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"3f7c9d3531e919508587da3a0855e755b37b79b6","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1653721717 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1653721717 +0200","message":"add a few mor secrets for #201\n"},"blob_path":"secretscache/gradle/build.gradle"}}],"blob_metadata":{"id":"698d1a63b99b948f85bb9b09455759bb40d1c207","num_bytes":173,"mime_essence":null,"charset":null},"blob_id":"698d1a63b99b948f85bb9b09455759bb40d1c207","location":{"offset_span":{"start":75,"end":154},"source_span":{"start":{"line":4,"column":9},"end":{"line":6,"column":35}}},"groups":["YWFkbWlu","bm90YXBhc3N3b3Jk"],"snippet":{"before":"repositories {\n maven {\n url \"http://repo.mycompany.com\"\n ","matching":"credentials {\n username \"aadmin\"\n password \"notapassword\"","after":"\n }\n }\n}\n"},"structural_id":"7d76ab48ae1eb825470a018cd3a71d3493e2ad12","rule_structural_id":"a12f90a50f965526bfcf34016b914665483c389d","rule_text_id":"np.gradle.1","rule_name":"Hardcoded Gradle Credentials","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"b33ada155b6fb43aa0593b81634eaf94dd93b189","rule_name":"JSON Web Token (base64url-encoded)","rule_text_id":"np.jwt.1","rule_structural_id":"6e2b42f8571e4534c13a22f26a39c78b0596edb7","groups":["ZXlKaGJHY2lPaUpGVXpJMU5pSXNJbXRwWkNJNkluVnBhV3huTW05dmVIaGpOSGhzTm10bVlYWjFjbWh2TkRWMUlpd2lkSGx3SWpvaVNsZFVJbjAuZXlJeGNHRnpjM2R2Y21RdVkyOXRMMkYxZFdsa0lqb2lUMHMyVERkV016UlJUa2RSTjFCYVZsazNRbGxJUWxaTU5WRWlMQ0l4Y0dGemMzZHZjbVF1WTI5dEwzUnZhMlZ1SWpvaVZURjRjR2hpWVRkMmFUWnROSEJIU0RaUk5uSkpRVjlxUW1vd2RqRkljellpTENJeGNHRnpjM2R2Y21RdVkyOXRMMlowY3lJNld5SjJZWFZzZEdGalkyVnpjeUpkTENJeGNHRnpjM2R2Y21RdVkyOXRMM1owY3lJNlczc2lkU0k2SW5KeU1uQmtjVzVsZDJZeWRIaHFZalZ3TWpkclpYbHplR0p4SWl3aVlTSTZORGg5WFN3aVlYVmtJanBiSW1OdmJTNHhjR0Z6YzNkdmNtUXVZMjl1Ym1WamRDSmRMQ0p6ZFdJaU9pSkxSVll5UVROTVFqWkdSemRXVEV4TU5rcFlWRXBhU0VaUVdTSXNJbVY0Y0NJNk1UWTFPVEV6T1RFNU9Td2lhV0YwSWpveE5qVTJORGMyT1RBM0xDSnBjM01pT2lKamIyMHVNWEJoYzNOM2IzSmtMbUkxSWl3aWFuUnBJam9pYVhBMGFIUmxibWx0Ympka1lXOXVZV0p6YVdGcWNUSXpaMkVpZlEuZTZMRjFyd1lqRGZOQmhlLXRNbl9pRUtQMkwyWVhlVWVfQ1IxaFBJbUJqVkFsZFRqLWhadmdYR3I1dmNLYnBVX2lMai1SVW9oTUdYVnJiVTAyWXpPWWc="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"ce6451a2b7218680f16ba4983c39be6f12c08e18","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1656477757 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1656477757 +0200","message":"Added 1password for 
#201\n"},"blob_path":"secretscache/1password/accesstoken.txt"}}],"blob_metadata":{"id":"f249e323f168bdaac0e5f57b67fb55715ff6fc1c","num_bytes":665,"mime_essence":"text/plain","charset":null},"blob_id":"f249e323f168bdaac0e5f57b67fb55715ff6fc1c","location":{"offset_span":{"start":0,"end":665},"source_span":{"start":{"line":1,"column":1},"end":{"line":1,"column":665}}},"groups":["ZXlKaGJHY2lPaUpGVXpJMU5pSXNJbXRwWkNJNkluVnBhV3huTW05dmVIaGpOSGhzTm10bVlYWjFjbWh2TkRWMUlpd2lkSGx3SWpvaVNsZFVJbjAuZXlJeGNHRnpjM2R2Y21RdVkyOXRMMkYxZFdsa0lqb2lUMHMyVERkV016UlJUa2RSTjFCYVZsazNRbGxJUWxaTU5WRWlMQ0l4Y0dGemMzZHZjbVF1WTI5dEwzUnZhMlZ1SWpvaVZURjRjR2hpWVRkMmFUWnROSEJIU0RaUk5uSkpRVjlxUW1vd2RqRkljellpTENJeGNHRnpjM2R2Y21RdVkyOXRMMlowY3lJNld5SjJZWFZzZEdGalkyVnpjeUpkTENJeGNHRnpjM2R2Y21RdVkyOXRMM1owY3lJNlczc2lkU0k2SW5KeU1uQmtjVzVsZDJZeWRIaHFZalZ3TWpkclpYbHplR0p4SWl3aVlTSTZORGg5WFN3aVlYVmtJanBiSW1OdmJTNHhjR0Z6YzNkdmNtUXVZMjl1Ym1WamRDSmRMQ0p6ZFdJaU9pSkxSVll5UVROTVFqWkdSemRXVEV4TU5rcFlWRXBhU0VaUVdTSXNJbVY0Y0NJNk1UWTFPVEV6T1RFNU9Td2lhV0YwSWpveE5qVTJORGMyT1RBM0xDSnBjM01pT2lKamIyMHVNWEJoYzNOM2IzSmtMbUkxSWl3aWFuUnBJam9pYVhBMGFIUmxibWx0Ympka1lXOXVZV0p6YVdGcWNUSXpaMkVpZlEuZTZMRjFyd1lqRGZOQmhlLXRNbl9pRUtQMkwyWVhlVWVfQ1IxaFBJbUJqVkFsZFRqLWhadmdYR3I1dmNLYnBVX2lMai1SVW9oTUdYVnJiVTAyWXpPWWc="],"snippet":{"before":"","matching":"eyJhbGciOiJFUzI1NiIsImtpZCI6InVpaWxnMm9veHhjNHhsNmtmYXZ1cmhvNDV1IiwidHlwIjoiSldUIn0.eyIxcGFzc3dvcmQuY29tL2F1dWlkIjoiT0s2TDdWMzRRTkdRN1BaVlk3QllIQlZMNVEiLCIxcGFzc3dvcmQuY29tL3Rva2VuIjoiVTF4cGhiYTd2aTZtNHBHSDZRNnJJQV9qQmowdjFIczYiLCIxcGFzc3dvcmQuY29tL2Z0cyI6WyJ2YXVsdGFjY2VzcyJdLCIxcGFzc3dvcmQuY29tL3Z0cyI6W3sidSI6InJyMnBkcW5ld2YydHhqYjVwMjdrZXlzeGJxIiwiYSI6NDh9XSwiYXVkIjpbImNvbS4xcGFzc3dvcmQuY29ubmVjdCJdLCJzdWIiOiJLRVYyQTNMQjZGRzdWTExMNkpYVEpaSEZQWSIsImV4cCI6MTY1OTEzOTE5OSwiaWF0IjoxNjU2NDc2OTA3LCJpc3MiOiJjb20uMXBhc3N3b3JkLmI1IiwianRpIjoiaXA0aHRlbmltbjdkYW9uYWJzaWFqcTIzZ2EifQ.e6LF1rwYjDfNBhe-tMn_iEKP2L2YXeUe_CR1hPImBjVAldTj-hZvgXGr5vcKbpU_iLj-RUohMGXVrbU02YzOYg","after":""},"structural_id":"105a5b29dfb3d60b4e941133cc464f9c0d8293b3","rule_structural_id":"6e2b42f8571e4534c13a22f26a39c78b0596edb7","rule_text_id":"np.jwt.1","rule_name":"JSON Web Token (base64url-encoded)","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"1f0747f7e41976c37054e342011438fac87498aa","rule_name":"JSON Web Token (base64url-encoded)","rule_text_id":"np.jwt.1","rule_structural_id":"6e2b42f8571e4534c13a22f26a39c78b0596edb7","groups":["ZXlKaGJHY2lPaUpTVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnpkV0lpT2lJeE1qTTBOVFkzT0Rrd0lpd2libUZ0WlNJNklrcHZhRzRnUkc5bElpd2lZV1J0YVc0aU9uUnlkV1VzSW1saGRDSTZNVFV4TmpJek9UQXlNbjAuTkhWYVllMjZNYnRPWWhTS2tvS1lkRlZvbWc0aThaSmQ4Xy1SVThWTmJmdGM0VFNNYjRiWFAzbDNZbE5XQUN3eVhQR2ZmejVhWEhjNmx0eTFZMnQ0U1dScUd0ZXJhZ3NWZFp1ZkRuNUJsbkpsOXBkUl9rZFZGVXNyYTJyV0tFb2ZrWmVJQzR5V3l0RTU4c01JaWh2bzlIMVNjbW1Wd0JjUVA2WEVUcVlkMGFTSHAxZ09hOVJkVVBEdm9YUTVvcXlnVHFWdHhhRHI2d1VGS3JLSXRnQk16V0lkTlo2eTdPOUUwRGhFUFRiRTlyZkJvNktURnNIQVpuTWc0azY4Q0RwMndvWUlhWGJtWVRXY3Ziekl1SE83XzM3R1Q3OVhkSXdrbTk1UUo3aFlDOVJpd3JWN21lc2JZNFBBYWhFUkphd250aG8wbXk5NDJYaGVWTG1Hd0xNQmtR"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"b3a4110481b3eaf973b33bf12829b6ae3e85e8b6","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1655623794 +0200","author_name":"Jeroen 
Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1655623794 +0200","message":"added JWT with keys #201\n"},"blob_path":"secretscache/jwt/jwt.txt"}}],"blob_metadata":{"id":"65ba47f9f492716d579e88b89a7afaa692e34e39","num_bytes":471,"mime_essence":"text/plain","charset":null},"blob_id":"65ba47f9f492716d579e88b89a7afaa692e34e39","location":{"offset_span":{"start":0,"end":471},"source_span":{"start":{"line":1,"column":1},"end":{"line":1,"column":471}}},"groups":["ZXlKaGJHY2lPaUpTVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SnpkV0lpT2lJeE1qTTBOVFkzT0Rrd0lpd2libUZ0WlNJNklrcHZhRzRnUkc5bElpd2lZV1J0YVc0aU9uUnlkV1VzSW1saGRDSTZNVFV4TmpJek9UQXlNbjAuTkhWYVllMjZNYnRPWWhTS2tvS1lkRlZvbWc0aThaSmQ4Xy1SVThWTmJmdGM0VFNNYjRiWFAzbDNZbE5XQUN3eVhQR2ZmejVhWEhjNmx0eTFZMnQ0U1dScUd0ZXJhZ3NWZFp1ZkRuNUJsbkpsOXBkUl9rZFZGVXNyYTJyV0tFb2ZrWmVJQzR5V3l0RTU4c01JaWh2bzlIMVNjbW1Wd0JjUVA2WEVUcVlkMGFTSHAxZ09hOVJkVVBEdm9YUTVvcXlnVHFWdHhhRHI2d1VGS3JLSXRnQk16V0lkTlo2eTdPOUUwRGhFUFRiRTlyZkJvNktURnNIQVpuTWc0azY4Q0RwMndvWUlhWGJtWVRXY3Ziekl1SE83XzM3R1Q3OVhkSXdrbTk1UUo3aFlDOVJpd3JWN21lc2JZNFBBYWhFUkphd250aG8wbXk5NDJYaGVWTG1Hd0xNQmtR"],"snippet":{"before":"","matching":"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6MTUxNjIzOTAyMn0.NHVaYe26MbtOYhSKkoKYdFVomg4i8ZJd8_-RU8VNbftc4TSMb4bXP3l3YlNWACwyXPGffz5aXHc6lty1Y2t4SWRqGteragsVdZufDn5BlnJl9pdR_kdVFUsra2rWKEofkZeIC4yWytE58sMIihvo9H1ScmmVwBcQP6XETqYd0aSHp1gOa9RdUPDvoXQ5oqygTqVtxaDr6wUFKrKItgBMzWIdNZ6y7O9E0DhEPTbE9rfBo6KTFsHAZnMg4k68CDp2woYIaXbmYTWcvbzIuHO7_37GT79XdIwkm95QJ7hYC9RiwrV7mesbY4PAahERJawntho0my942XheVLmGwLMBkQ","after":""},"structural_id":"a95366399dc670d4b155061de1ec1ecb3d2efd83","rule_structural_id":"6e2b42f8571e4534c13a22f26a39c78b0596edb7","rule_text_id":"np.jwt.1","rule_name":"JSON Web Token (base64url-encoded)","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"3c83ac99b70dbd5190f5ef0efab27dec109b7aa3","rule_name":"PEM-Encoded Private Key","rule_text_id":"np.pem.1","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","groups":["TUhjQ0FRRUVJRVF5VDlQMkUyUVNoeEZSWUMwV1FNekl5RXBST2hLVjg4MGdSNFA5YTUwQW9Bb0dDQ3FHU000OQpBd0VIb1VRRFFnQUUrQVU1OWowd2libGw3RHc2VTVPcmM2N3d0cHJQS3JEcWFOaUJXUjRiR3B2bVo2bFF6L2ZYCktpaGpneWZSU0lhTFA5VEZiR3NMUjFTa3crYWpKVTdIRlE9PQo="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"546451ada89cfebfdbdfce7c1bfcb8f5931ee745","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1653281695 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1650226987 +0200","message":"Added RSA/ECC keys for #201\n"},"blob_path":"secretscache/keys/eccprivate-key.pem"}}],"blob_metadata":{"id":"91278c54477f5a251e10e4e7c500f05862c6ca7f","num_bytes":227,"mime_essence":"application/x-x509-ca-cert","charset":null},"blob_id":"91278c54477f5a251e10e4e7c500f05862c6ca7f","location":{"offset_span":{"start":0,"end":226},"source_span":{"start":{"line":1,"column":1},"end":{"line":5,"column":28}}},"groups":["TUhjQ0FRRUVJRVF5VDlQMkUyUVNoeEZSWUMwV1FNekl5RXBST2hLVjg4MGdSNFA5YTUwQW9Bb0dDQ3FHU000OQpBd0VIb1VRRFFnQUUrQVU1OWowd2libGw3RHc2VTVPcmM2N3d0cHJQS3JEcWFOaUJXUjRiR3B2bVo2bFF6L2ZYCktpaGpneWZSU0lhTFA5VEZiR3NMUjFTa3crYWpKVTdIRlE9PQo="],"snippet":{"before":"","matching":"-----BEGIN EC PRIVATE 
KEY-----\nMHcCAQEEIEQyT9P2E2QShxFRYC0WQMzIyEpROhKV880gR4P9a50AoAoGCCqGSM49\nAwEHoUQDQgAE+AU59j0wibll7Dw6U5Orc67wtprPKrDqaNiBWR4bGpvmZ6lQz/fX\nKihjgyfRSIaLP9TFbGsLR1Skw+ajJU7HFQ==\n-----END EC PRIVATE KEY-----","after":"\n"},"structural_id":"0ec652afaeb5dfa032abe8a58df816323c5622d8","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","rule_text_id":"np.pem.1","rule_name":"PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"3e348cf2520dd9b40880ca270e2914ab930d9d00","rule_name":"PEM-Encoded Private Key","rule_text_id":"np.pem.1","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","groups":["TUlJRXZ3SUJBREFOQmdrcWhraUc5dzBCQVFFRkFBU0NCS2t3Z2dTbEFnRUFBb0lCQVFDN1ZKVFV0OVVzOGNLagpNekVmWXlqaVdBNFI0L00yYlMxR0I0dDdOWHA5OEMzU0M2ZFZNdkR1aWN0R2V1clQ4ak5idkpaSHRDU3VZRXZ1Ck5Nb1NmbTc2b3FGdkFwOEd5MGl6NXN4alptU25YeUNkUEVvdkdoTGEwVnpNYVE4cytDTE95UzU2WXlDRkdlSloKcWd0eko2R1IzZXFvWVNXOWI5VU12a0JwWk9EU2N0V1NOR2ozUDdqUkZETzVWb1R3Q1FBV2JGbk9qRGZINVVsZwpwMlBLU1FuU0pQM0FKTFFORk5lN2JyMVhicmhWLy9lTyt0NTFtSXBHU0RDVXYzRTBEREZjV0RUSDljWERUVGxSClpWRWlSMkJ3cFpPT2tFL1owL0JWbmhaWUw3MW9aVjM0YktmV2pRSXQ2Vi9pc1NNYWhkc0FBU0FDcDRaVEd0d2kKVnVOZDl0eWJBZ01CQUFFQ2dnRUJBS1RtamFTNnRrSzhCbFBYQ2xUUTJ2cHovTjZ1eERlUzM1bVhwcWFzcXNrVgpsYUFpZGdnL3NXcXBqWERiWHI5M290SU1MbFdzTStYMENxTURnU1hLZWpMUzJqeDRHRGpJMVpUWGcrKzBBTUo4CnNKNzRwV3pWRE9mbUNFUS83d1hzMytjYm5YaEtyaU84WjAzNnE5MlFjMStOODdTSTM4bmtHYTBBQkg5Q044M0gKbVFxdDRmQjdVZEh6dUlSZS9tZTJQR2hJcTVaQnpqNmgzQnBvUEd6RVAreDNsOVltSzh0LzFjTjBwcUkrZFF3WQpkZ2ZHamFja0x1LzJxSDgwTUNGN0l5UWFzZVpVT0p5S3JDTHRTRC9JaXh2L2h6REVVUGZPQ2pGRGdUcHpmM2N3CnRhOCtvRTR3SENvMWlJMS80VGxQa3dtWHg0cVNYdG13NGFRUHo3SURRdkVDZ1lFQThLTlRoQ08yZ3NDMkk5UFEKRE0vOEN3ME85ODNXQ0RZK29pKzdKUGlOQUp3djVEWUJxRVpCMVFZZGowNllEMTZYbEMvSEFaTXNNa3UxbmEyVApOMGRyaXdlblFRV3pvZXYzZzJTN2dSRG9TL0ZDSlNJM2pKK2tqZ3RhQTdRbXpsZ2sxVHhPRE4rRzFIOTFIVzd0CjBsN1ZuTDI3SVd5WW8ycVJSSzNqenhxVWlQVUNnWUVBeDBvUXMycmVCUUdNVlpuQXBEMWplcTduNE12TkxjUHYKdDhiL2VVOWlVdjZZNE1qMFN1by9BVThsWVpYbTh1YmJxQWx3ejJWU1Z1bkQydE9wbEh5TVVydEN0T2JBZlZEVQpBaENuZEthQTlnQXBnZmIzeHcxSUtidVExdTRJRjFGSmwzVnR1bWZRbi8vTGlIMUIzclhoY2R5bzMvdkl0dEVrCjQ4UmFrVUtDbFU4Q2dZRUF6VjdXM0NPT2xERGNRZDkzNURkdEtCRlJBUFJQQWxzcFFVbnpNaTVlU0hNRC9JU0wKRFk1SWlRSGJJSDgzRDRidlhxMFg3cVFvU0JTTlA3RHZ2M0hZdXFNaGYwRGFlZ3JsQnVKbGxGVlZxOXFQVlJuSwp4dDFJbDJIZ3hPQnZiaE9UKzlpbjFCekErWUo5OVV6Qzg1TzBRejA2QStDbXRIRXk0YVoya2o1aEhqRUNnWUVBCm1OUzQrQThGa3NzOEpzMVJpZUsyTG5pQnhNZ21ZbWwzcGZWTEtHbnptbmc3SDIrY3dQTGhQSXpJdXd5dFh5d2gKMmJ6YnNZRWZZeDNFb0VWZ01FcFBob2FyUW5ZUHVrckpPNGd3RTJvNVRlNlQ1bUpTWkdsUUpRajlxNFpCMkRmegpldDZJTnNLMG9HOFhWR1hTcFF2UWgzUlVZZWtDWlFrQkJGY3BxV3BiSUVzQ2dZQW5NM0RRZjNGSm9TblhhTWhyClZCSW92aWM1bDB4RmtFSHNrQWpGVGV2Tzg2RnN6MUMyYVNlUktTcUdGb09RMHRtSnpCRXMxUjZLcW5ISW5pY0QKVFFyS2hBcmdMWFg0djNDZGRqZlRSSmtGV0RiRS9Da3ZLWk5PcmNmMW5oYUdDUHNwUkpqMktVa2oxRmhsOUNuYwpkbi9Sc1lFT05id1FTaklmTVBrdnhGKzhIUT09Cg=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"b3a4110481b3eaf973b33bf12829b6ae3e85e8b6","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1655623794 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1655623794 +0200","message":"added JWT with keys 
#201\n"},"blob_path":"secretscache/jwt/privatekey.txt"}}],"blob_metadata":{"id":"3314ab6e7a5c175778131071bf423b2e6cecd847","num_bytes":1707,"mime_essence":"text/plain","charset":null},"blob_id":"3314ab6e7a5c175778131071bf423b2e6cecd847","location":{"offset_span":{"start":0,"end":1707},"source_span":{"start":{"line":1,"column":1},"end":{"line":28,"column":25}}},"groups":["TUlJRXZ3SUJBREFOQmdrcWhraUc5dzBCQVFFRkFBU0NCS2t3Z2dTbEFnRUFBb0lCQVFDN1ZKVFV0OVVzOGNLagpNekVmWXlqaVdBNFI0L00yYlMxR0I0dDdOWHA5OEMzU0M2ZFZNdkR1aWN0R2V1clQ4ak5idkpaSHRDU3VZRXZ1Ck5Nb1NmbTc2b3FGdkFwOEd5MGl6NXN4alptU25YeUNkUEVvdkdoTGEwVnpNYVE4cytDTE95UzU2WXlDRkdlSloKcWd0eko2R1IzZXFvWVNXOWI5VU12a0JwWk9EU2N0V1NOR2ozUDdqUkZETzVWb1R3Q1FBV2JGbk9qRGZINVVsZwpwMlBLU1FuU0pQM0FKTFFORk5lN2JyMVhicmhWLy9lTyt0NTFtSXBHU0RDVXYzRTBEREZjV0RUSDljWERUVGxSClpWRWlSMkJ3cFpPT2tFL1owL0JWbmhaWUw3MW9aVjM0YktmV2pRSXQ2Vi9pc1NNYWhkc0FBU0FDcDRaVEd0d2kKVnVOZDl0eWJBZ01CQUFFQ2dnRUJBS1RtamFTNnRrSzhCbFBYQ2xUUTJ2cHovTjZ1eERlUzM1bVhwcWFzcXNrVgpsYUFpZGdnL3NXcXBqWERiWHI5M290SU1MbFdzTStYMENxTURnU1hLZWpMUzJqeDRHRGpJMVpUWGcrKzBBTUo4CnNKNzRwV3pWRE9mbUNFUS83d1hzMytjYm5YaEtyaU84WjAzNnE5MlFjMStOODdTSTM4bmtHYTBBQkg5Q044M0gKbVFxdDRmQjdVZEh6dUlSZS9tZTJQR2hJcTVaQnpqNmgzQnBvUEd6RVAreDNsOVltSzh0LzFjTjBwcUkrZFF3WQpkZ2ZHamFja0x1LzJxSDgwTUNGN0l5UWFzZVpVT0p5S3JDTHRTRC9JaXh2L2h6REVVUGZPQ2pGRGdUcHpmM2N3CnRhOCtvRTR3SENvMWlJMS80VGxQa3dtWHg0cVNYdG13NGFRUHo3SURRdkVDZ1lFQThLTlRoQ08yZ3NDMkk5UFEKRE0vOEN3ME85ODNXQ0RZK29pKzdKUGlOQUp3djVEWUJxRVpCMVFZZGowNllEMTZYbEMvSEFaTXNNa3UxbmEyVApOMGRyaXdlblFRV3pvZXYzZzJTN2dSRG9TL0ZDSlNJM2pKK2tqZ3RhQTdRbXpsZ2sxVHhPRE4rRzFIOTFIVzd0CjBsN1ZuTDI3SVd5WW8ycVJSSzNqenhxVWlQVUNnWUVBeDBvUXMycmVCUUdNVlpuQXBEMWplcTduNE12TkxjUHYKdDhiL2VVOWlVdjZZNE1qMFN1by9BVThsWVpYbTh1YmJxQWx3ejJWU1Z1bkQydE9wbEh5TVVydEN0T2JBZlZEVQpBaENuZEthQTlnQXBnZmIzeHcxSUtidVExdTRJRjFGSmwzVnR1bWZRbi8vTGlIMUIzclhoY2R5bzMvdkl0dEVrCjQ4UmFrVUtDbFU4Q2dZRUF6VjdXM0NPT2xERGNRZDkzNURkdEtCRlJBUFJQQWxzcFFVbnpNaTVlU0hNRC9JU0wKRFk1SWlRSGJJSDgzRDRidlhxMFg3cVFvU0JTTlA3RHZ2M0hZdXFNaGYwRGFlZ3JsQnVKbGxGVlZxOXFQVlJuSwp4dDFJbDJIZ3hPQnZiaE9UKzlpbjFCekErWUo5OVV6Qzg1TzBRejA2QStDbXRIRXk0YVoya2o1aEhqRUNnWUVBCm1OUzQrQThGa3NzOEpzMVJpZUsyTG5pQnhNZ21ZbWwzcGZWTEtHbnptbmc3SDIrY3dQTGhQSXpJdXd5dFh5d2gKMmJ6YnNZRWZZeDNFb0VWZ01FcFBob2FyUW5ZUHVrckpPNGd3RTJvNVRlNlQ1bUpTWkdsUUpRajlxNFpCMkRmegpldDZJTnNLMG9HOFhWR1hTcFF2UWgzUlVZZWtDWlFrQkJGY3BxV3BiSUVzQ2dZQW5NM0RRZjNGSm9TblhhTWhyClZCSW92aWM1bDB4RmtFSHNrQWpGVGV2Tzg2RnN6MUMyYVNlUktTcUdGb09RMHRtSnpCRXMxUjZLcW5ISW5pY0QKVFFyS2hBcmdMWFg0djNDZGRqZlRSSmtGV0RiRS9Da3ZLWk5PcmNmMW5oYUdDUHNwUkpqMktVa2oxRmhsOUNuYwpkbi9Sc1lFT05id1FTaklmTVBrdnhGKzhIUT09Cg=="],"snippet":{"before":"","matching":"-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC7VJTUt9Us8cKj\nMzEfYyjiWA4R4/M2bS1GB4t7NXp98C3SC6dVMvDuictGeurT8jNbvJZHtCSuYEvu\nNMoSfm76oqFvAp8Gy0iz5sxjZmSnXyCdPEovGhLa0VzMaQ8s+CLOyS56YyCFGeJZ\nqgtzJ6GR3eqoYSW9b9UMvkBpZODSctWSNGj3P7jRFDO5VoTwCQAWbFnOjDfH5Ulg\np2PKSQnSJP3AJLQNFNe7br1XbrhV//eO+t51mIpGSDCUv3E0DDFcWDTH9cXDTTlR\nZVEiR2BwpZOOkE/Z0/BVnhZYL71oZV34bKfWjQIt6V/isSMahdsAASACp4ZTGtwi\nVuNd9tybAgMBAAECggEBAKTmjaS6tkK8BlPXClTQ2vpz/N6uxDeS35mXpqasqskV\nlaAidgg/sWqpjXDbXr93otIMLlWsM+X0CqMDgSXKejLS2jx4GDjI1ZTXg++0AMJ8\nsJ74pWzVDOfmCEQ/7wXs3+cbnXhKriO8Z036q92Qc1+N87SI38nkGa0ABH9CN83H\nmQqt4fB7UdHzuIRe/me2PGhIq5ZBzj6h3BpoPGzEP+x3l9YmK8t/1cN0pqI+dQwY\ndgfGjackLu/2qH80MCF7IyQaseZUOJyKrCLtSD/Iixv/hzDEUPfOCjFDgTpzf3cw\nta8+oE4wHCo1iI1/4TlPkwmXx4qSXtmw4aQPz7IDQvECgYEA8KNThCO2gsC2I9PQ\nDM/8Cw0O983WCDY+oi+7JPiNAJwv5DYBqEZB1QYdj06YD16XlC/HAZMsMku1na2T\nN0driwenQQWzoev3g2S7gRDoS/FCJSI3jJ+kjgtaA7Qmzlgk1TxODN+G1H91HW7t\n0l7VnL27IWyYo2qRRK3jzxqUiPUCgYEAx0oQs2reBQGMVZnApD1jeq7n4MvNLcPv\nt8b/eU9iUv6Y4Mj0Suo/AU8lYZXm8ubbqAlwz2VSVunD2tOplHyMUrtCtObAfVDU\nAhCndKaA9gApgfb3xw1IKbuQ1u4IF1FJl3VtumfQn//LiH1B3rXhcdyo3/vIttEk\n48RakUKClU8CgYEAzV7W3COOlDDcQd935DdtKBFRAPRPAlspQUnzMi5eSHMD/ISL\nDY5IiQHbIH83D4bvXq0X7qQoSBSNP7Dvv3HYuqMhf0DaegrlBuJllFVVq9qPVRnK\nxt1Il2HgxOBvbhOT+9in1BzA+YJ99UzC85O0Qz06A+CmtHEy4aZ2kj5hHjECgYEA\nmNS4+A8Fkss8Js1RieK2LniBxMgmYml3pfVLKGnzmng7H2+cwPLhPIzIuwytXywh\n2bzbsYEfYx3EoEVgMEpPhoarQnYPukrJO4gwE2o5Te6T5mJSZGlQJQj9q4ZB2Dfz\net6INsK0oG8XVGXSpQvQh3RUYekCZQkBBFcpqWpbIEsCgYAnM3DQf3FJoSnXaMhr\nVBIovic5l0xFkEHskAjFTevO86Fsz1C2aSeRKSqGFoOQ0tmJzBEs1R6KqnHInicD\nTQrKhArgLXX4v3CddjfTRJkFWDbE/CkvKZNOrcf1nhaGCPspRJj2KUkj1Fhl9Cnc\ndn/RsYEONbwQSjIfMPkvxF+8HQ==\n-----END PRIVATE KEY-----","after":""},"structural_id":"8ad7b3b3581e72dbb9ddedbbe3a714d02c990c03","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","rule_text_id":"np.pem.1","rule_name":"PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"e25450d6863684f5ee94aa8de0f38e57c0a6df1e","rule_name":"PEM-Encoded Private Key","rule_text_id":"np.pem.1","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","groups":["bEZnRVlseDV0eFlKS3dZQkJBSGFSdzhCQVFkQWVPV0dTNERNWkFlR1k4WG9oOXZLeUJxK212WTdzT0RwVHIzbgprOHRFaFpVQUFQOUVSYlV3dEVNb2JFemtQZXVlb3dWaWlrK2xIdll4clZ2NmVJck91Y29VZ1JDbHRCVm1ZV2xzCmRYSmxRR1JsZEdWamRHbHZiaTVqYjIySWxnUVRGZ29BUGhZaEJJZEt3ZUtTK2drTHdrVGdCZUhtajhFSzdQTE4KQlFKaVhIbTNBaHNEQlFrRHdtY0FCUXNKQ0FjREJSVUtDUWdMQlJZQ0F3RUFBaDRGQWhlQUFBb0pFT0htajhFSwo3UExORHZjQS9pcW13VnNjVlJWYmVMaHlhSVhwMHBpT0pENG0yQUttNGFhcXJSbE1DOUlDQVA5Vmd1NVZCQ2UrCmVxMEFOVE5vWnVKZXNaUWQ4S2k0Q20zYklsTjlXUi9tRHB4ZEJHSmNlYmNTQ2lzR0FRUUJsMVVCQlFFQkIwREcKaXQyV3hIOFZTdjRhR2pMTG90eFZ3VS9pZkNjRWQ4cityS3NnUzlsU09nTUJDQWNBQVA5Y0NNeGZUMGtrVGxKUQovQ3JVUFFzM3JJMlZ1eXFaUWR1MmJxZmovVUlSSUE4NWlIZ0VHQllLQUNBV0lRU0hTc0hpa3ZvSkM4SkU0QVhoCjVvL0JDdXp5elFVQ1lseDV0d0liREFBS0NSRGg1by9CQ3V6eXpSYVdBUDRvaVZ1ckxFc2FXZHlnL2lCVDRPaDgKZy9iWWFLUkFzTi9ibmFzcXo2RW5md0Q5RS8wOFRnM205YlFiWThhNWZHNnU5MlN3M1dneVNCdjFSWE5rVXkvRgpZZ1E9Cj1oZkhOCg=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"6337e8e528efb6940c7bd4523d6c7663546bb473","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1655623003 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1655623003 
+0200","message":"add armored gpg keys\n"},"blob_path":"secretscache/gpg/private_key.asc"}}],"blob_metadata":{"id":"cdb2ed5b92e8cd4eaab8154fe5c10481337e9cb7","num_bytes":736,"mime_essence":"application/pgp-signature","charset":null},"blob_id":"cdb2ed5b92e8cd4eaab8154fe5c10481337e9cb7","location":{"offset_span":{"start":0,"end":735},"source_span":{"start":{"line":1,"column":1},"end":{"line":15,"column":35}}},"groups":["bEZnRVlseDV0eFlKS3dZQkJBSGFSdzhCQVFkQWVPV0dTNERNWkFlR1k4WG9oOXZLeUJxK212WTdzT0RwVHIzbgprOHRFaFpVQUFQOUVSYlV3dEVNb2JFemtQZXVlb3dWaWlrK2xIdll4clZ2NmVJck91Y29VZ1JDbHRCVm1ZV2xzCmRYSmxRR1JsZEdWamRHbHZiaTVqYjIySWxnUVRGZ29BUGhZaEJJZEt3ZUtTK2drTHdrVGdCZUhtajhFSzdQTE4KQlFKaVhIbTNBaHNEQlFrRHdtY0FCUXNKQ0FjREJSVUtDUWdMQlJZQ0F3RUFBaDRGQWhlQUFBb0pFT0htajhFSwo3UExORHZjQS9pcW13VnNjVlJWYmVMaHlhSVhwMHBpT0pENG0yQUttNGFhcXJSbE1DOUlDQVA5Vmd1NVZCQ2UrCmVxMEFOVE5vWnVKZXNaUWQ4S2k0Q20zYklsTjlXUi9tRHB4ZEJHSmNlYmNTQ2lzR0FRUUJsMVVCQlFFQkIwREcKaXQyV3hIOFZTdjRhR2pMTG90eFZ3VS9pZkNjRWQ4cityS3NnUzlsU09nTUJDQWNBQVA5Y0NNeGZUMGtrVGxKUQovQ3JVUFFzM3JJMlZ1eXFaUWR1MmJxZmovVUlSSUE4NWlIZ0VHQllLQUNBV0lRU0hTc0hpa3ZvSkM4SkU0QVhoCjVvL0JDdXp5elFVQ1lseDV0d0liREFBS0NSRGg1by9CQ3V6eXpSYVdBUDRvaVZ1ckxFc2FXZHlnL2lCVDRPaDgKZy9iWWFLUkFzTi9ibmFzcXo2RW5md0Q5RS8wOFRnM205YlFiWThhNWZHNnU5MlN3M1dneVNCdjFSWE5rVXkvRgpZZ1E9Cj1oZkhOCg=="],"snippet":{"before":"","matching":"-----BEGIN PGP PRIVATE KEY BLOCK-----\n\nlFgEYlx5txYJKwYBBAHaRw8BAQdAeOWGS4DMZAeGY8Xoh9vKyBq+mvY7sODpTr3n\nk8tEhZUAAP9ERbUwtEMobEzkPeueowViik+lHvYxrVv6eIrOucoUgRCltBVmYWls\ndXJlQGRldGVjdGlvbi5jb22IlgQTFgoAPhYhBIdKweKS+gkLwkTgBeHmj8EK7PLN\nBQJiXHm3AhsDBQkDwmcABQsJCAcDBRUKCQgLBRYCAwEAAh4FAheAAAoJEOHmj8EK\n7PLNDvcA/iqmwVscVRVbeLhyaIXp0piOJD4m2AKm4aaqrRlMC9ICAP9Vgu5VBCe+\neq0ANTNoZuJesZQd8Ki4Cm3bIlN9WR/mDpxdBGJcebcSCisGAQQBl1UBBQEBB0DG\nit2WxH8VSv4aGjLLotxVwU/ifCcEd8r+rKsgS9lSOgMBCAcAAP9cCMxfT0kkTlJQ\n/CrUPQs3rI2VuyqZQdu2bqfj/UIRIA85iHgEGBYKACAWIQSHSsHikvoJC8JE4AXh\n5o/BCuzyzQUCYlx5twIbDAAKCRDh5o/BCuzyzRaWAP4oiVurLEsaWdyg/iBT4Oh8\ng/bYaKRAsN/bnasqz6EnfwD9E/08Tg3m9bQbY8a5fG6u92Sw3WgySBv1RXNkUy/F\nYgQ=\n=hfHN\n-----END PGP PRIVATE KEY BLOCK-----","after":"\n"},"structural_id":"47d1535adc8f74a21d4cebb4ebd56dbd36a56f3a","rule_structural_id":"046a96dd5272aa399275afb853a54884611769c0","rule_text_id":"np.pem.1","rule_name":"PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"61a76f28932780cb03402457c97a21e76d2bc2ec","rule_name":"Slack Webhook","rule_text_id":"np.slack.3","rule_structural_id":"05ed2105125034f509474d7ee7689a11e1fdded7","groups":["aHR0cHM6Ly9ob29rcy5zbGFjay5jb20vc2VydmljZXMvVDA0VDQwTkhYL0IwM0JUNkQwVUQ4LzZoUG9ObHhsU2xESEJSTUpvOWR3UUNtVQ=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"git_repo","repo_path":"./.git","first_commit":{"commit_metadata":{"commit_id":"9fe0e6522f6c54f4fc1b9e93111de1e34dd9cb4f","committer_name":"Jeroen Willemsen","committer_email":"jeroenwillemsen2001@gmail.com","committer_timestamp":"1653281698 +0200","author_name":"Jeroen Willemsen","author_email":"jeroenwillemsen2001@gmail.com","author_timestamp":"1650228902 +0200","message":"Added active slack callback url for 
#201\n"},"blob_path":"secretscache/slack/callback.url"}}],"blob_metadata":{"id":"c30b3360a03c2cc3d8e47bb9afa897ccf3a2f8e4","num_bytes":80,"mime_essence":null,"charset":null},"blob_id":"c30b3360a03c2cc3d8e47bb9afa897ccf3a2f8e4","location":{"offset_span":{"start":0,"end":79},"source_span":{"start":{"line":1,"column":1},"end":{"line":1,"column":79}}},"groups":["aHR0cHM6Ly9ob29rcy5zbGFjay5jb20vc2VydmljZXMvVDA0VDQwTkhYL0IwM0JUNkQwVUQ4LzZoUG9ObHhsU2xESEJSTUpvOWR3UUNtVQ=="],"snippet":{"before":"","matching":"https://hooks.slack.com/services/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","after":"\n"},"structural_id":"2a3aaac9bbd49760849128169ddffd0c881ac53b","rule_structural_id":"05ed2105125034f509474d7ee7689a11e1fdded7","rule_text_id":"np.slack.3","rule_name":"Slack Webhook","score":null,"comment":null,"status":null,"redundant_to":[]}]} diff --git a/unittests/scans/noseyparker/noseyparker_0_22_0_without_githistory.jsonl b/unittests/scans/noseyparker/noseyparker_0_22_0_without_githistory.jsonl new file mode 100644 index 00000000000..3bea238c81f --- /dev/null +++ b/unittests/scans/noseyparker/noseyparker_0_22_0_without_githistory.jsonl @@ -0,0 +1,6 @@ +{"finding_id":"1c82eb8796a72495efce9ecf618b84fba10ffc84","rule_name":"Base64-PEM-Encoded Private Key","rule_text_id":"np.pem.2","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmMzZFhVMnRwWlU5bk5rSmhkRWxHVFVKMFZsZHpaVzluVFd3MFVraFlOblJKUTFsUU0wRXdWelJKU2pocmMxZzJDbXh2V0VWeE15czNOV3hUWjBVMVJ6UkdTMk5YWVVWNVV6ZGpjR0V5Y0VSVU5UTjVSbU4xTlRkdFQwdFJWa2gyT0c1VmN6VnJNM2hYY1VGWE5pczNNMEVLT1RabE1IWnRlbmxLYkc5T1JVa3dPVTFwUzBac00zZHNTelZMTjJWQ1JsRnBiMnRSYjNZeFNsRnFPVVo0VkdGSU1EQlNNR3BxSzAwM0wzaFdTV0ZJS3dweGFtVTNOMHhhU2pCcWRESkRWWGRLVkc5NFVUTXlSRVl3VkRRNE1IaDRjRE5QTTNaWU9FUkNUbmhaUzNNMUt6Vk5RekV4TWtweFp6QnJZbXBUYVZNMUNpOHZaWEYzU1hGd1R6VXpaVXhETjJGMVVWbHRUVTVEYjFaTVduTm1hVzF0ZURGd1RYTjRiRWh1VEVKeGNWTkdiVzVzZVZCb09WcHZlVmRTWkROUWRIY0tVV1UzVjIwNVptcHJaU3RYTlc1eWFIYzNNMjV1TVhGS056TjBRWFU1WkZReVQybEZjV1pRTmpNM01YZFRkSHBaVXpCR2EwbGxWV2xWWkVSeGRXRlhVQW9yUjFNM1VrUlFhbk5MTVhaTUszbG1XbEJNYkROWVNrdHZLMFl6UlVKcmIyZGpiM05MZHpOdWVFaHNjRUkwVFd0VFJuSkViMmRKUVVrMFJVOVZTelIzQ2toc1JqZE1jU3REZDA0eGFYRkNkbFp2YldGT1NqVldURlZKVlRWamNtcFZMMUk1U1hkSU56Tk5OSFpYU0ZaV2QzaFFOV0ZxVW01cE9HMXdlV3BaUzIwS2VHd3JSMXBSTDI1bmJFMUxUVzlTY0c5dVNEVnFMMFJOTms4MVdURTVkVmcyT1VsbVRFRlNhRGhST0dOMFIwcG1PRzA0Wkd0NWVsaEhLMmRTWWxvNGNncEtkVk0wVFVWNmJtcFNhMkpyYUdodEwyZzFUVGR4WkM5RmRUSm5ibWxUT1c5eVkzVlVNR0UzTlVkMFJtbHNTbE15YTNwbVRsZGFVamhOY0VGSlFXNXFDbWh1YlhJeWNEVTJNVEJCUkVORU1uSnFSbkJUUlV0UFN6bFBOVUY1YWxSUWVsWnNaRFJ3WlhZNU0ybGpjVzFQVHprMGJEQnhjVXBIY21NNFEwRjNSVUVLUVZGTFEwRm5SVUZ2T0U1VFVFTTNRbGRNYkd0dlJVTjVLMjlHU0RaRlRFUktla0ZVZFdkYU5tVnlSVEo1ZEdGVFpXUm5SQ3QzVUhWWmNXMUdjV3hwVkFveU0xUkpXbTQxYW1oVmFpOHpNMDVGTmxFck1rbHpRbVpwUWk5S2REaFNaUzlvUlhKa1NURnFTR05UUm5FNWQxQk1WemR2YVVsamFITmhjMUpXVXpBeUNrdHJNSGRNUm1JemFIaG5OMWR6VEhaQlltZGtiRUV4UzNaNEx6VlFlRUY1V25NMldXeFljRE55TlZOemFISXJkSFp2TnpKc1YybHhTRXMwVmxVNGJrSUtTaTlpU3pKSFFtUkdlVk5YYVc1RWRFcExZM0JZZG14alFXZFZTMlF5TlU1cldWZDRWRzVYY2treE5VTnpiREZ3UW14S1oyTjRLemR2YlVzclMxcHVkUXBwTmtkNlltNXRRMFJqVjBwU1FtaHpSU3R2TkhaM1VqbEZNVzlSVm1KdGJsZzNaVzFMYjJSeFdua3hTRUY2ZW04MlIyNVBlakp4V0Zjd2ExQjFjMjh6Q2pJdlZIVTVia2R6WlhKSlN6UjVVV2RyUW10UmN6UlRZbkZCTW5vMVNHRnZPU3RLVEZVMGRFdExXVVF2WkRkMk1IQTFTa0ZKUkdSU1lsQTNVMWhSU2pBS2RURjJOWGxxVm1sU2JFNDJiVkpuZVd0Tk5rODVTSGN3YWtWWFF5dE5SRzV6Tm04eFMySnhiM1p5TDNOWk4zVk5SVVpyVkVaeFdFY3lUbmRZTm1FMVVBbzJTbmRIV1U1RFNYSk5hWHBqYUVZd2RVNTBTVUZJUzFScE1tOU9j
ak0wY1ZaUWIwWnJWV3N2VmxJNFYzUjFOSGM1Y3pCWlNsaDFaM3BsV2xBclJ6TjZDbTVGZUhSV05qUkNhWFJ3THk5MWExcDZOVWMzWmtvMlFqRkZTV1pDTTJWek1uZFliVTV1TVdWbk9YVjZjWEpoTWxCMVNHRnBjWGxoYTB4ak1YTktURm9LZG01Rk5VMU9XRTF1WTNaalVHUkdRbGRqT0V3d1JGWkhRMjlyZFcxQ1dqSTFabWRQUjNGcFVrRlBiMHRFZDAxVWFtMWxNR1ZzTkdRdlFVNXlabFJTZFFvMVRWZHJNRXRTU1VscUt6ZEdjeXRTUldkYVZUVnlRazl1VWpORWNuVllSeXRXWTI1MUswbzBSbk5yWjJOSmFqRldhMFZEWjJkRlFrRk5SMHRrYlhBeUNuRnRlVVJDVW1GYWVuRnNORXRRVlZGMFVGVmlTMDVDT1Vad1YycHdZVXhrVUdaVE1VRjFOMEl2WjJoYU56UndhamgxZEhsalNrZFpkMWt5ZFdsb2NURUtTRzlFY2tOaFExZE9jbmhDZHpKWGQwTTRPSGwyU1dsbGRtRkhhRXBSVG5KNFJuRmFiWEJIWkdzNFRpdFJNeTlsYW5scFpWQjJTMVJZWTB0RU9TdFlXUXBOZGtacFpWaHpRbWhsVW5sa1QzYzFZM00zT1hjck1YazJOVGRLT1VScGVVdzRaVlpHVkU0emFFWlBUQzlUV1hCamEzaGtiRVZOZDJGbVlrOXVXRTlsQ25wQmJFdEJlR1ppUldKRlNsZzJUR3hIY0RkcWQydE5abVJvV1d4ak9Ya3pWSE5WVGpsU1REbDBaVkZsVUZSS1N6Wk1VRzVvYldOSlZEVjBTSGxsWVZVS2NIQlJRWFZyZEZZdlkxUmhNa3N5VUZSRVkyVk1LMFpvY1dacVFVdzFObE5GY0d0U2QwSXhSV296Y1RrdkwyVTRaa2xCZVZkdVUwTnZWM0F4UjNGb1dRcFpRMEUxZFdoa1MzVjRXbGhVS3poRFoyZEZRa0ZQZWt4dGFqTnpTSFpRU21GcFVqZDNibmhuYTAxWU9WZzBiMGd6UmpNNFIydEVORkZwVUhNNWJsYzJDblJDTVN0RGQycEtaekZHTkVSc1QwaEVVVTh2Y1RnM2QwSXJZa0ZMUlVwTWJGZGFiR1Z4VlhaNlpFTnlTalJMVTNJeWFHZDNUbWh3Wnl0Mk5IZGhVVlVLWm5kUGVXY3JVbk5xUmpac1FXUlFiRlpCU0hNMldsWXJhQzgwZW5wMFJ6WlZlbXRwUjAxcU9HbDJSMk5uSzNCRE4wMTVOVTVHVjBOUVJtVlZWSGd2TmdwWlNuRlRkV2xQY1cxeU5qQm9XVTVaY25KYVpUTlVaRGhXVlZsWWFtWjNZVmxwYjFWVWFVMDVjREZSTVRrNU9FSlBObmhDTlhveVNXMUhOV1EyUzFCMUNuQlhkR2hHVEZaTFZWYzJhWGwxUW0wd1IyOVhlazVKVFVZdldIZG1Xa3R1TWt0bWFtUldRVUpZVXpWRFRtOXlabUpoWjBGa05tdFBTR3Q2V1RGSE55c0thRkp3UTBWdVVIUkRiRVF6Y2k5SFl6VnZSemM1ZEM5QmRDOVFOVTVVUzA5UWRHZzNLMkpJZDI5RFJVTm5aMFZDUVVzeGJIWkZiMDlJZDI5NFMwaElhZ3BSTUVOaFNYcG1UWHBDVGpsVVkyWmtRMnR5ZFROM2RGaGFTbUpHVW1Wa1ltSndiRlV5YTFGSE5FUkZTbmx6YW01TWVVbGpObWxoWW1SelZGSjZNRXBOQ2pCeFZTOHhhWGhaTWxoMU9WaFRVMW81UWxCSVMwUTBNRmw1Y3pSNWVqaEVRM04yZGxvNVNUaE9PRFpSVjBveFJWbHdRbEJaT1RWT1NWQmpZVE4yVUZJS2JsZ3ZZU3M1TlZsaFowRkNXVE5TZEhKaFVVdEdRVUp1ZUVZM2RuUndjbmg2VjBJNE16QlZPV2hQV0dWUlYwMW9SVkUzUzJSck5rMVRkMnhZWjJkWWVBcG5OWGN4V21KWGRuaFlXSFJaVEVvMmExaFhkVUZIZDFNek1FOHhRMWxyUjFCa2QzWkxNbnBRY1VWVWJXWlpaalUzYW1sVmVrNU5Sa0V5YXl0aVlXNXdDbFpRTnpGbk5FOWpRM2hEYlhaM1kxTldVa3BGVUZkWE5GRlhiR0ZsWlVNckswUXhhakZPZWtSNGJHVnJlRll6TVZjeVJHaHZaM2xLWjFSWVpVRnNaWG9LTkVsS1dHNDBhME5uWjBWQ1FVcDFjR1JIYmtWalRYZzVNbGRNUjFsTFJqSnpXRlZYUkU1dmJVNU1NSEl6ZFRWaGVFMHZkRVpGT1hOUWNqaHpabTlDTWdwU0wyTnJXSFpoVm5wMk5YQXpOekpZYlZCdlpEQTViRkUyY0U0emJteHZPSGhMVjNBdllqQmxVa2RVZGxKYVdpdE9jRlJITkVSVVIzSkpRak4wTjBKUUNtdDVPR05rWTJ0bU9XdDFWblpETDJ0RUt6UlNha3RJWlZKS2RFbFRUbUpITDFkbE4wcG9Vek5xWjJNcmMwdExUbXBNUm1RNE1qTnZTRGt3T1VjeFVXRUtPRTFNZHpGT2RqRlViRlJuYnpkbE16WnBOamRwVlVSNU4zaHJUMXBDWkZOcWF6aFVZM3BEVFVaUVp6SkZjR050TVd3eWJrdHBiWEF4WlVKNU0zcFJWZ29yVGxseUsyZFlkVlZKSzA1RldGQTNUWG93Wm5KTmVtUjJaRlZRT1RaNGVWZFNhbmgxWmpCemVERTRjbVl6TkVsdkt6Wk5helJ0Y21zd1FXaHplRTh4Q2k5YVVXZFlkVGhNTXpWUWJGSnVUSE56TkVOVmEwWXpPVzFHTm1wclVUbHBVMVZGUTJkblJVRkRlVlJQUkRsV2RXcGFUMEZTUldFMVowdG1TMlJzY3lzS04yczVUMUJTVWtaMVkybHVSRTExVTFkV1oyTXZRM2hSTDI4clJ6Z3JUMU53YlVnNU1sbFFTM1ZKVWtSVEx6WktiWEIyY1dVeVdGcEpOMFo1YkM4ell3cEpOM2N5V1dka1JWTTVPVkl2VGpOcE9VaDJRMFZ1ZGtGeFNUSXZjalJpU0ZaMFNVbGhSMk14VG01dGNsaDZhemxvYkRsRGVsZFZPRGM1Y1hCMGJIQjJDalpSVGtsek9WZGxWa2hsU21nMGRrYzJTV3g2Tm01aU5VeGFkRU5xYTBaWFFWZEljV05SV1RNd04xUm5TWGN4ZURBek0wUlhNa3QzTVVGRFVWaFhXbU1LUVM4d01saEpZV05VV21GSE0ySk5iVko0TTFVNGJHZHVTVlpzUlhOcWJWcFpaVlZtU1RKbmVUQXlZblprYWtaamQzUkxWVmR5UTFSdmFFUXhZbFI1VXdwcWQwcFFla1pYU0ZFNU9XaHlVa3h6WTBsRVpHTmtaVk5zU0RnNVIwOXZNbTVOTlhKelZuUmhSbXByYjNZNGJHWkNTakpoTlZWRVJWRXpRMlExUVQwOUNpMHRMUzB0UlU1RUlGSlRRU0J
RVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./k8s/main.key"}],"blob_metadata":{"id":"f5407810e2991e97e14756fe20d0263020eb6981","num_bytes":7091,"mime_essence":null,"charset":null},"blob_id":"f5407810e2991e97e14756fe20d0263020eb6981","location":{"offset_span":{"start":2374,"end":6707},"source_span":{"start":{"line":6,"column":14},"end":{"line":7,"column":0}}},"groups":["TFMwdExTMUNSVWRKVGlCU1UwRWdVRkpKVmtGVVJTQkxSVmt0TFMwdExRcE5TVWxLUzJkSlFrRkJTME5CWjBWQmMzZFhVMnRwWlU5bk5rSmhkRWxHVFVKMFZsZHpaVzluVFd3MFVraFlOblJKUTFsUU0wRXdWelJKU2pocmMxZzJDbXh2V0VWeE15czNOV3hUWjBVMVJ6UkdTMk5YWVVWNVV6ZGpjR0V5Y0VSVU5UTjVSbU4xTlRkdFQwdFJWa2gyT0c1VmN6VnJNM2hYY1VGWE5pczNNMEVLT1RabE1IWnRlbmxLYkc5T1JVa3dPVTFwUzBac00zZHNTelZMTjJWQ1JsRnBiMnRSYjNZeFNsRnFPVVo0VkdGSU1EQlNNR3BxSzAwM0wzaFdTV0ZJS3dweGFtVTNOMHhhU2pCcWRESkRWWGRLVkc5NFVUTXlSRVl3VkRRNE1IaDRjRE5QTTNaWU9FUkNUbmhaUzNNMUt6Vk5RekV4TWtweFp6QnJZbXBUYVZNMUNpOHZaWEYzU1hGd1R6VXpaVXhETjJGMVVWbHRUVTVEYjFaTVduTm1hVzF0ZURGd1RYTjRiRWh1VEVKeGNWTkdiVzVzZVZCb09WcHZlVmRTWkROUWRIY0tVV1UzVjIwNVptcHJaU3RYTlc1eWFIYzNNMjV1TVhGS056TjBRWFU1WkZReVQybEZjV1pRTmpNM01YZFRkSHBaVXpCR2EwbGxWV2xWWkVSeGRXRlhVQW9yUjFNM1VrUlFhbk5MTVhaTUszbG1XbEJNYkROWVNrdHZLMFl6UlVKcmIyZGpiM05MZHpOdWVFaHNjRUkwVFd0VFJuSkViMmRKUVVrMFJVOVZTelIzQ2toc1JqZE1jU3REZDA0eGFYRkNkbFp2YldGT1NqVldURlZKVlRWamNtcFZMMUk1U1hkSU56Tk5OSFpYU0ZaV2QzaFFOV0ZxVW01cE9HMXdlV3BaUzIwS2VHd3JSMXBSTDI1bmJFMUxUVzlTY0c5dVNEVnFMMFJOTms4MVdURTVkVmcyT1VsbVRFRlNhRGhST0dOMFIwcG1PRzA0Wkd0NWVsaEhLMmRTWWxvNGNncEtkVk0wVFVWNmJtcFNhMkpyYUdodEwyZzFUVGR4WkM5RmRUSm5ibWxUT1c5eVkzVlVNR0UzTlVkMFJtbHNTbE15YTNwbVRsZGFVamhOY0VGSlFXNXFDbWh1YlhJeWNEVTJNVEJCUkVORU1uSnFSbkJUUlV0UFN6bFBOVUY1YWxSUWVsWnNaRFJ3WlhZNU0ybGpjVzFQVHprMGJEQnhjVXBIY21NNFEwRjNSVUVLUVZGTFEwRm5SVUZ2T0U1VFVFTTNRbGRNYkd0dlJVTjVLMjlHU0RaRlRFUktla0ZVZFdkYU5tVnlSVEo1ZEdGVFpXUm5SQ3QzVUhWWmNXMUdjV3hwVkFveU0xUkpXbTQxYW1oVmFpOHpNMDVGTmxFck1rbHpRbVpwUWk5S2REaFNaUzlvUlhKa1NURnFTR05UUm5FNWQxQk1WemR2YVVsamFITmhjMUpXVXpBeUNrdHJNSGRNUm1JemFIaG5OMWR6VEhaQlltZGtiRUV4UzNaNEx6VlFlRUY1V25NMldXeFljRE55TlZOemFISXJkSFp2TnpKc1YybHhTRXMwVmxVNGJrSUtTaTlpU3pKSFFtUkdlVk5YYVc1RWRFcExZM0JZZG14alFXZFZTMlF5TlU1cldWZDRWRzVYY2treE5VTnpiREZ3UW14S1oyTjRLemR2YlVzclMxcHVkUXBwTmtkNlltNXRRMFJqVjBwU1FtaHpSU3R2TkhaM1VqbEZNVzlSVm1KdGJsZzNaVzFMYjJSeFdua3hTRUY2ZW04MlIyNVBlakp4V0Zjd2ExQjFjMjh6Q2pJdlZIVTVia2R6WlhKSlN6UjVVV2RyUW10UmN6UlRZbkZCTW5vMVNHRnZPU3RLVEZVMGRFdExXVVF2WkRkMk1IQTFTa0ZKUkdSU1lsQTNVMWhSU2pBS2RURjJOWGxxVm1sU2JFNDJiVkpuZVd0Tk5rODVTSGN3YWtWWFF5dE5SRzV6Tm04eFMySnhiM1p5TDNOWk4zVk5SVVpyVkVaeFdFY3lUbmRZTm1FMVVBbzJTbmRIV1U1RFNYSk5hWHBqYUVZd2RVNTBTVUZJUzFScE1tOU9jak0wY1ZaUWIwWnJWV3N2VmxJNFYzUjFOSGM1Y3pCWlNsaDFaM3BsV2xBclJ6TjZDbTVGZUhSV05qUkNhWFJ3THk5MWExcDZOVWMzWmtvMlFqRkZTV1pDTTJWek1uZFliVTV1TVdWbk9YVjZjWEpoTWxCMVNHRnBjWGxoYTB4ak1YTktURm9LZG01Rk5VMU9XRTF1WTNaalVHUkdRbGRqT0V3d1JGWkhRMjlyZFcxQ1dqSTFabWRQUjNGcFVrRlBiMHRFZDAxVWFtMWxNR1ZzTkdRdlFVNXlabFJTZFFvMVRWZHJNRXRTU1VscUt6ZEdjeXRTUldkYVZUVnlRazl1VWpORWNuVllSeXRXWTI1MUswbzBSbk5yWjJOSmFqRldhMFZEWjJkRlFrRk5SMHRrYlhBeUNuRnRlVVJDVW1GYWVuRnNORXRRVlZGMFVGVmlTMDVDT1Vad1YycHdZVXhrVUdaVE1VRjFOMEl2WjJoYU56UndhamgxZEhsalNrZFpkMWt5ZFdsb2NURUtTRzlFY2tOaFExZE9jbmhDZHpKWGQwTTRPSGwyU1dsbGRtRkhhRXBSVG5KNFJuRmFiWEJIWkdzNFRpdFJNeTlsYW5scFpWQjJTMVJZWTB0RU9TdFlXUXBOZGtacFpWaHpRbWhsVW5sa1QzYzFZM00zT1hjck1YazJOVGRLT1VScGVVdzRaVlpHVkU0emFFWlBUQzlUV1hCamEzaGtiRVZOZDJGbVlrOXVXRTlsQ25wQmJFdEJlR1ppUldKRlNsZzJUR3hIY0RkcWQydE5abVJvV1d4ak9Ya3pWSE5WVGpsU1REbDBaVkZsVUZSS1N6Wk1VRzVvYldOSlZEVjBTS
GxsWVZVS2NIQlJRWFZyZEZZdlkxUmhNa3N5VUZSRVkyVk1LMFpvY1dacVFVdzFObE5GY0d0U2QwSXhSV296Y1RrdkwyVTRaa2xCZVZkdVUwTnZWM0F4UjNGb1dRcFpRMEUxZFdoa1MzVjRXbGhVS3poRFoyZEZRa0ZQZWt4dGFqTnpTSFpRU21GcFVqZDNibmhuYTAxWU9WZzBiMGd6UmpNNFIydEVORkZwVUhNNWJsYzJDblJDTVN0RGQycEtaekZHTkVSc1QwaEVVVTh2Y1RnM2QwSXJZa0ZMUlVwTWJGZGFiR1Z4VlhaNlpFTnlTalJMVTNJeWFHZDNUbWh3Wnl0Mk5IZGhVVlVLWm5kUGVXY3JVbk5xUmpac1FXUlFiRlpCU0hNMldsWXJhQzgwZW5wMFJ6WlZlbXRwUjAxcU9HbDJSMk5uSzNCRE4wMTVOVTVHVjBOUVJtVlZWSGd2TmdwWlNuRlRkV2xQY1cxeU5qQm9XVTVaY25KYVpUTlVaRGhXVlZsWWFtWjNZVmxwYjFWVWFVMDVjREZSTVRrNU9FSlBObmhDTlhveVNXMUhOV1EyUzFCMUNuQlhkR2hHVEZaTFZWYzJhWGwxUW0wd1IyOVhlazVKVFVZdldIZG1Xa3R1TWt0bWFtUldRVUpZVXpWRFRtOXlabUpoWjBGa05tdFBTR3Q2V1RGSE55c0thRkp3UTBWdVVIUkRiRVF6Y2k5SFl6VnZSemM1ZEM5QmRDOVFOVTVVUzA5UWRHZzNLMkpJZDI5RFJVTm5aMFZDUVVzeGJIWkZiMDlJZDI5NFMwaElhZ3BSTUVOaFNYcG1UWHBDVGpsVVkyWmtRMnR5ZFROM2RGaGFTbUpHVW1Wa1ltSndiRlV5YTFGSE5FUkZTbmx6YW01TWVVbGpObWxoWW1SelZGSjZNRXBOQ2pCeFZTOHhhWGhaTWxoMU9WaFRVMW81UWxCSVMwUTBNRmw1Y3pSNWVqaEVRM04yZGxvNVNUaE9PRFpSVjBveFJWbHdRbEJaT1RWT1NWQmpZVE4yVUZJS2JsZ3ZZU3M1TlZsaFowRkNXVE5TZEhKaFVVdEdRVUp1ZUVZM2RuUndjbmg2VjBJNE16QlZPV2hQV0dWUlYwMW9SVkUzUzJSck5rMVRkMnhZWjJkWWVBcG5OWGN4V21KWGRuaFlXSFJaVEVvMmExaFhkVUZIZDFNek1FOHhRMWxyUjFCa2QzWkxNbnBRY1VWVWJXWlpaalUzYW1sVmVrNU5Sa0V5YXl0aVlXNXdDbFpRTnpGbk5FOWpRM2hEYlhaM1kxTldVa3BGVUZkWE5GRlhiR0ZsWlVNckswUXhhakZPZWtSNGJHVnJlRll6TVZjeVJHaHZaM2xLWjFSWVpVRnNaWG9LTkVsS1dHNDBhME5uWjBWQ1FVcDFjR1JIYmtWalRYZzVNbGRNUjFsTFJqSnpXRlZYUkU1dmJVNU1NSEl6ZFRWaGVFMHZkRVpGT1hOUWNqaHpabTlDTWdwU0wyTnJXSFpoVm5wMk5YQXpOekpZYlZCdlpEQTViRkUyY0U0emJteHZPSGhMVjNBdllqQmxVa2RVZGxKYVdpdE9jRlJITkVSVVIzSkpRak4wTjBKUUNtdDVPR05rWTJ0bU9XdDFWblpETDJ0RUt6UlNha3RJWlZKS2RFbFRUbUpITDFkbE4wcG9Vek5xWjJNcmMwdExUbXBNUm1RNE1qTnZTRGt3T1VjeFVXRUtPRTFNZHpGT2RqRlViRlJuYnpkbE16WnBOamRwVlVSNU4zaHJUMXBDWkZOcWF6aFVZM3BEVFVaUVp6SkZjR050TVd3eWJrdHBiWEF4WlVKNU0zcFJWZ29yVGxseUsyZFlkVlZKSzA1RldGQTNUWG93Wm5KTmVtUjJaRlZRT1RaNGVWZFNhbmgxWmpCemVERTRjbVl6TkVsdkt6Wk5helJ0Y21zd1FXaHplRTh4Q2k5YVVXZFlkVGhNTXpWUWJGSnVUSE56TkVOVmEwWXpPVzFHTm1wclVUbHBVMVZGUTJkblJVRkRlVlJQUkRsV2RXcGFUMEZTUldFMVowdG1TMlJzY3lzS04yczVUMUJTVWtaMVkybHVSRTExVTFkV1oyTXZRM2hSTDI4clJ6Z3JUMU53YlVnNU1sbFFTM1ZKVWtSVEx6WktiWEIyY1dVeVdGcEpOMFo1YkM4ell3cEpOM2N5V1dka1JWTTVPVkl2VGpOcE9VaDJRMFZ1ZGtGeFNUSXZjalJpU0ZaMFNVbGhSMk14VG01dGNsaDZhemxvYkRsRGVsZFZPRGM1Y1hCMGJIQjJDalpSVGtsek9WZGxWa2hsU21nMGRrYzJTV3g2Tm01aU5VeGFkRU5xYTBaWFFWZEljV05SV1RNd04xUm5TWGN4ZURBek0wUlhNa3QzTVVGRFVWaFhXbU1LUVM4d01saEpZV05VV21GSE0ySk5iVko0TTFVNGJHZHVTVlpzUlhOcWJWcFpaVlZtU1RKbmVUQXlZblprYWtaamQzUkxWVmR5UTFSdmFFUXhZbFI1VXdwcWQwcFFla1pYU0ZFNU9XaHlVa3h6WTBsRVpHTmtaVk5zU0RnNVIwOXZNbTVOTlhKelZuUmhSbXByYjNZNGJHWkNTakpoTlZWRVJWRXpRMlExUVQwOUNpMHRMUzB0UlU1RUlGSlRRU0JRVWtsV1FWUkZJRXRGV1MwdExTMHRDZz09"],"snippet":{"before":"lIVUx3WURSTmJyNjVZb2RhRU9WRE1ITW5ZUWptMWlNUElLM3QyMkdmcEErCktyRENHMUVlZnZkUTU0eGhreUtZVjZOZko0R0gyOVJicXNkeVMyS1hDUXJINDlqV1h3RHpRZ09iK1BUcW9nNCsKNzVVemhsQ2pYT3FPRFFaY1JjWXVyZTJjK2Z1elU2cVJ1L1o5SlJZM0MyOD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\n tls.key: 
","matching":"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBc3dXU2tpZU9nNkJhdElGTUJ0VldzZW9nTWw0UkhYNnRJQ1lQM0EwVzRJSjhrc1g2CmxvWEVxMys3NWxTZ0U1RzRGS2NXYUV5UzdjcGEycERUNTN5RmN1NTdtT0tRVkh2OG5VczVrM3hXcUFXNis3M0EKOTZlMHZtenlKbG9ORUkwOU1pS0ZsM3dsSzVLN2VCRlFpb2tRb3YxSlFqOUZ4VGFIMDBSMGpqK003L3hWSWFIKwpxamU3N0xaSjBqdDJDVXdKVG94UTMyREYwVDQ4MHh4cDNPM3ZYOERCTnhZS3M1KzVNQzExMkpxZzBrYmpTaVM1Ci8vZXF3SXFwTzUzZUxDN2F1UVltTU5Db1ZMWnNmaW1teDFwTXN4bEhuTEJxcVNGbW5seVBoOVpveVdSZDNQdHcKUWU3V205ZmprZStXNW5yaHc3M25uMXFKNzN0QXU5ZFQyT2lFcWZQNjM3MXdTdHpZUzBGa0llVWlVZERxdWFXUAorR1M3UkRQanNLMXZMK3lmWlBMbDNYSktvK0YzRUJrb2djb3NLdzNueEhscEI0TWtTRnJEb2dJQUk0RU9VSzR3CkhsRjdMcStDd04xaXFCdlZvbWFOSjVWTFVJVTVjcmpVL1I5SXdINzNNNHZXSFZWd3hQNWFqUm5pOG1weWpZS20KeGwrR1pRL25nbE1LTW9ScG9uSDVqL0RNNk81WTE5dVg2OUlmTEFSaDhROGN0R0pmOG04ZGt5elhHK2dSYlo4cgpKdVM0TUV6bmpSa2JraGhtL2g1TTdxZC9FdTJnbmlTOW9yY3VUMGE3NUd0RmlsSlMya3pmTldaUjhNcEFJQW5qCmhubXIycDU2MTBBRENEMnJqRnBTRUtPSzlPNUF5alRQelZsZDRwZXY5M2ljcW1PTzk0bDBxcUpHcmM4Q0F3RUEKQVFLQ0FnRUFvOE5TUEM3QldMbGtvRUN5K29GSDZFTERKekFUdWdaNmVyRTJ5dGFTZWRnRCt3UHVZcW1GcWxpVAoyM1RJWm41amhVai8zM05FNlErMklzQmZpQi9KdDhSZS9oRXJkSTFqSGNTRnE5d1BMVzdvaUljaHNhc1JWUzAyCktrMHdMRmIzaHhnN1dzTHZBYmdkbEExS3Z4LzVQeEF5WnM2WWxYcDNyNVNzaHIrdHZvNzJsV2lxSEs0VlU4bkIKSi9iSzJHQmRGeVNXaW5EdEpLY3BYdmxjQWdVS2QyNU5rWVd4VG5XckkxNUNzbDFwQmxKZ2N4KzdvbUsrS1pudQppNkd6Ym5tQ0RjV0pSQmhzRStvNHZ3UjlFMW9RVmJtblg3ZW1Lb2RxWnkxSEF6em82R25PejJxWFcwa1B1c28zCjIvVHU5bkdzZXJJSzR5UWdrQmtRczRTYnFBMno1SGFvOStKTFU0dEtLWUQvZDd2MHA1SkFJRGRSYlA3U1hRSjAKdTF2NXlqVmlSbE42bVJneWtNNk85SHcwakVXQytNRG5zNm8xS2Jxb3ZyL3NZN3VNRUZrVEZxWEcyTndYNmE1UAo2SndHWU5DSXJNaXpjaEYwdU50SUFIS1RpMm9OcjM0cVZQb0ZrVWsvVlI4V3R1NHc5czBZSlh1Z3plWlArRzN6Cm5FeHRWNjRCaXRwLy91a1p6NUc3Zko2QjFFSWZCM2VzMndYbU5uMWVnOXV6cXJhMlB1SGFpcXlha0xjMXNKTFoKdm5FNU1OWE1uY3ZjUGRGQldjOEwwRFZHQ29rdW1CWjI1ZmdPR3FpUkFPb0tEd01Uam1lMGVsNGQvQU5yZlRSdQo1TVdrMEtSSUlqKzdGcytSRWdaVTVyQk9uUjNEcnVYRytWY251K0o0RnNrZ2NJajFWa0VDZ2dFQkFNR0tkbXAyCnFteURCUmFaenFsNEtQVVF0UFViS05COUZwV2pwYUxkUGZTMUF1N0IvZ2haNzRwajh1dHljSkdZd1kydWlocTEKSG9EckNhQ1dOcnhCdzJXd0M4OHl2SWlldmFHaEpRTnJ4RnFabXBHZGs4TitRMy9lanlpZVB2S1RYY0tEOStYWQpNdkZpZVhzQmhlUnlkT3c1Y3M3OXcrMXk2NTdKOURpeUw4ZVZGVE4zaEZPTC9TWXBja3hkbEVNd2FmYk9uWE9lCnpBbEtBeGZiRWJFSlg2TGxHcDdqd2tNZmRoWWxjOXkzVHNVTjlSTDl0ZVFlUFRKSzZMUG5obWNJVDV0SHllYVUKcHBRQXVrdFYvY1RhMksyUFREY2VMK0ZocWZqQUw1NlNFcGtSd0IxRWozcTkvL2U4ZklBeVduU0NvV3AxR3FoWQpZQ0E1dWhkS3V4WlhUKzhDZ2dFQkFPekxtajNzSHZQSmFpUjd3bnhna01YOVg0b0gzRjM4R2tENFFpUHM5blc2CnRCMStDd2pKZzFGNERsT0hEUU8vcTg3d0IrYkFLRUpMbFdabGVxVXZ6ZENySjRLU3IyaGd3TmhwZyt2NHdhUVUKZndPeWcrUnNqRjZsQWRQbFZBSHM2WlYraC80enp0RzZVemtpR01qOGl2R2NnK3BDN015NU5GV0NQRmVVVHgvNgpZSnFTdWlPcW1yNjBoWU5ZcnJaZTNUZDhWVVlYamZ3YVlpb1VUaU05cDFRMTk5OEJPNnhCNXoySW1HNWQ2S1B1CnBXdGhGTFZLVVc2aXl1Qm0wR29Xek5JTUYvWHdmWktuMktmamRWQUJYUzVDTm9yZmJhZ0FkNmtPSGt6WTFHNysKaFJwQ0VuUHRDbEQzci9HYzVvRzc5dC9BdC9QNU5US09QdGg3K2JId29DRUNnZ0VCQUsxbHZFb09Id294S0hIagpRMENhSXpmTXpCTjlUY2ZkQ2tydTN3dFhaSmJGUmVkYmJwbFUya1FHNERFSnlzam5MeUljNmlhYmRzVFJ6MEpNCjBxVS8xaXhZMlh1OVhTU1o5QlBIS0Q0MFl5czR5ejhEQ3N2dlo5SThOODZRV0oxRVlwQlBZOTVOSVBjYTN2UFIKblgvYSs5NVlhZ0FCWTNSdHJhUUtGQUJueEY3dnRwcnh6V0I4MzBVOWhPWGVRV01oRVE3S2RrNk1Td2xYZ2dYeApnNXcxWmJXdnhYWHRZTEo2a1hXdUFHd1MzME8xQ1lrR1Bkd3ZLMnpQcUVUbWZZZjU3amlVek5NRkEyaytiYW5wClZQNzFnNE9jQ3hDbXZ3Y1NWUkpFUFdXNFFXbGFlZUMrK0QxajFOekR4bGVreFYzMVcyRGhvZ3lKZ1RYZUFsZXoKNElKWG40a0NnZ0VCQUp1cGRHbkVjTXg5MldMR1lLRjJzWFVXRE5vbU5MMHIzdTVheE0vdEZFOXNQcjhzZm9CMgpSL2NrWHZhVnp2NXAzNzJYbVBvZDA5bFE2cE4zbmxvOHhLV3AvYjBlUkdUdlJaWitOcFRHNERUR3JJQjN0N0JQCmt5OGNkY2tmOWt1VnZDL2tEKzRSaktIZ
VJKdElTTmJHL1dlN0poUzNqZ2Mrc0tLTmpMRmQ4MjNvSDkwOUcxUWEKOE1MdzFOdjFUbFRnbzdlMzZpNjdpVUR5N3hrT1pCZFNqazhUY3pDTUZQZzJFcGNtMWwybktpbXAxZUJ5M3pRVgorTllyK2dYdVVJK05FWFA3TXowZnJNemR2ZFVQOTZ4eVdSanh1ZjBzeDE4cmYzNElvKzZNazRtcmswQWhzeE8xCi9aUWdYdThMMzVQbFJuTHNzNENVa0YzOW1GNmprUTlpU1VFQ2dnRUFDeVRPRDlWdWpaT0FSRWE1Z0tmS2RscysKN2s5T1BSUkZ1Y2luRE11U1dWZ2MvQ3hRL28rRzgrT1NwbUg5MllQS3VJUkRTLzZKbXB2cWUyWFpJN0Z5bC8zYwpJN3cyWWdkRVM5OVIvTjNpOUh2Q0VudkFxSTIvcjRiSFZ0SUlhR2MxTm5tclh6azlobDlDeldVODc5cXB0bHB2CjZRTklzOVdlVkhlSmg0dkc2SWx6Nm5iNUxadENqa0ZXQVdIcWNRWTMwN1RnSXcxeDAzM0RXMkt3MUFDUVhXWmMKQS8wMlhJYWNUWmFHM2JNbVJ4M1U4bGduSVZsRXNqbVpZZVVmSTJneTAyYnZkakZjd3RLVVdyQ1RvaEQxYlR5Uwpqd0pQekZXSFE5OWhyUkxzY0lEZGNkZVNsSDg5R09vMm5NNXJzVnRhRmprb3Y4bGZCSjJhNVVERVEzQ2Q1QT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==\n","after":" kind: Secret\n metadata:\n creationTimestamp: \"2024-07-05T07:48:29Z\"\n generateName: sealed-secrets-key\n labels:\n sealedsecrets.bitnami.com/sealed-secrets-key: active\n name: sealed-secrets-keydp7k2\n namespace: kube-system\n resourceVe"},"structural_id":"1d846c2374cc26fbc45343177e2839083bb9c878","rule_structural_id":"35bc9170393aecc3777a72fe77e269fc150c91ce","rule_text_id":"np.pem.2","rule_name":"Base64-PEM-Encoded Private Key","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"38e3edc1387124ef61e8c76070e663d115f5bc99","rule_name":"Generic Password","rule_text_id":"np.generic.5","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","groups":["RGVmYXVsdExvZ2luUGFzc3dvcmREb05vdENoYW5nZSE="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./src/main/java/org/owasp/wrongsecrets/challenges/docker/WrongSecretsConstants.java"}],"blob_metadata":{"id":"96c52dfccb8eb84609a82a256110eedda4533660","num_bytes":351,"mime_essence":"application/octet-stream","charset":null},"blob_id":"96c52dfccb8eb84609a82a256110eedda4533660","location":{"offset_span":{"start":216,"end":261},"source_span":{"start":{"line":9,"column":30},"end":{"line":9,"column":74}}},"groups":["RGVmYXVsdExvZ2luUGFzc3dvcmREb05vdENoYW5nZSE="],"snippet":{"before":"package org.owasp.wrongsecrets.challenges.docker;\n\nimport lombok.experimental.UtilityClass;\n\n/** used for Challenges their secrets. 
*/\n@UtilityClass\npublic class WrongSecretsConstants {\n\n public static final String ","matching":"password = \"DefaultLoginPasswordDoNotChange!\"","after":";\n public static final String newKey = \"mISydD0En55Fq8FXbUfX720K8Vc6/aQYtkFmkp7ntsM=\";\n}\n"},"structural_id":"cf11aba33d3a6f47d2754fb27a0744cbf0e1b84b","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","rule_text_id":"np.generic.5","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"d8bca20ca68b780e7d767742e9ac645c94fd9eef","rule_name":"Generic Password","rule_text_id":"np.generic.5","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","groups":["WWpOQ2JHSnBRbnBhV0U1b1lsZFZQUW89"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./src/main/java/org/owasp/wrongsecrets/challenges/docker/authchallenge/Challenge37.java"}],"blob_metadata":{"id":"a8870a79c7187eee396159eb1d1b29c1a9f0cb7a","num_bytes":1439,"mime_essence":"application/octet-stream","charset":null},"blob_id":"a8870a79c7187eee396159eb1d1b29c1a9f0cb7a","location":{"offset_span":{"start":781,"end":818},"source_span":{"start":{"line":23,"column":31},"end":{"line":23,"column":67}}},"groups":["WWpOQ2JHSnBRbnBhV0U1b1lsZFZQUW89"],"snippet":{"before":"s is a challenge based on the idea of leaking a secret for an authenticated endpoint through a\n * ZAP configuration file.\n */\n@Slf4j\n@Component\npublic class Challenge37 extends FixedAnswerChallenge {\n\n private String secret;\n private static final String ","matching":"password = \"YjNCbGJpQnpaWE5oYldVPQo=\"","after":";\n\n public Challenge37(@Value(\"${DEFAULT37}\") String secret) {\n if (\"DEFAULT37\".equals(secret) || Strings.isNullOrEmpty(secret)) {\n this.secret = UUID.randomUUID().toString();\n } else {\n this.secret = secret;\n }\n }\n\n @Bean\n public Ba"},"structural_id":"f881cdead81e499145ee74abaeb4d2ddfa217a04","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","rule_text_id":"np.generic.5","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"bc3208fe4063d948df9a64801e685f850ab88bf2","rule_name":"Generic Password","rule_text_id":"np.generic.5","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","groups":["YW4zVXpSZz0="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./js/index.js"}],"blob_metadata":{"id":"d5a482b649e932ce49749c347ffed9a282e4855f","num_bytes":114,"mime_essence":"text/javascript","charset":null},"blob_id":"d5a482b649e932ce49749c347ffed9a282e4855f","location":{"offset_span":{"start":27,"end":48},"source_span":{"start":{"line":3,"column":6},"end":{"line":3,"column":26}}},"groups":["YW4zVXpSZz0="],"snippet":{"before":"\n function secret() {\n var ","matching":"password = \"an3UzRg=\"","after":" + 9 + \"vrR9\" + 6 + \"KSs=\" + 2 + \"ARBN\" + 7;\n return password;\n }\n"},"structural_id":"03f2c296a1469f1f6ea19ba04870ff258ed3f953","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","rule_text_id":"np.generic.5","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"e7d9519d23104e0652bf3b718fd34b413e0ebb86","rule_name":"Generic 
Password","rule_text_id":"np.generic.5","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","groups":["ZGVmYXVsdA=="],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./Dockerfile"}],"blob_metadata":{"id":"af07fe7fdac96700fa5059405cf1f5a184194452","num_bytes":1223,"mime_essence":null,"charset":null},"blob_id":"af07fe7fdac96700fa5059405cf1f5a184194452","location":{"offset_span":{"start":55,"end":73},"source_span":{"start":{"line":3,"column":13},"end":{"line":3,"column":30}}},"groups":["ZGVmYXVsdA=="],"snippet":{"before":"FROM eclipse-temurin:23.0.1_11-jre-alpine\n\nARG argBased","matching":"Password=\"default\"","after":"\nARG argBasedVersion=\"1.10.0\"\nARG spring_profile=\"\"\nENV SPRING_PROFILES_ACTIVE=$spring_profile\nENV ARG_BASED_PASSWORD=$argBasedPassword\nENV APP_VERSION=$argBasedVersion\nENV DOCKER_ENV_PASSWORD=\"This is it\"\nENV AZURE_KEY_VAULT_ENABLED=false\nENV SPRINGDOC_UI"},"structural_id":"eb9fcb748e976f8dc139d4e2bf0dfe13f37f768e","rule_structural_id":"4742a7e5266ce68dd5633ca6c2c634a4fa706673","rule_text_id":"np.generic.5","rule_name":"Generic Password","score":null,"comment":null,"status":null,"redundant_to":[]}]} +{"finding_id":"1d56ba5ba541032ca531a3b07fbefd3d3a58df32","rule_name":"Generic Secret","rule_text_id":"np.generic.1","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"num_matches":1,"num_redundant_matches":0,"statuses":[],"comment":null,"mean_score":null,"matches":[{"provenance":[{"kind":"file","path":"./src/main/resources/explanations/challenge25.adoc"}],"blob_metadata":{"id":"12c6ab9a78813e5cf5aa45b505ad858217861a92","num_bytes":270,"mime_essence":null,"charset":null},"blob_id":"12c6ab9a78813e5cf5aa45b505ad858217861a92","location":{"offset_span":{"start":173,"end":241},"source_span":{"start":{"line":5,"column":56},"end":{"line":5,"column":123}}},"groups":["MHg4YjcyZjdjYkFENTA2MjBjNDYyMTlhZDY3NkFkOWQzYTVBMjczNTg3"],"snippet":{"before":"=== Secrets in smart contracts part 1\n\nOn public blockchains, everything that is written on-chain is world-readable.\n\nIn this challenge, you need to read the variable named ","matching":"secret from the contract `0x8b72f7cbAD50620c46219ad676Ad9d3a5A273587","after":"` on the Goerli EVM Testnet.\n"},"structural_id":"c290822f8c5e37199afe269b4367bb37d5aeed04","rule_structural_id":"3a961eccebcf7356ad803ec8e1a711d01801b9d7","rule_text_id":"np.generic.1","rule_name":"Generic Secret","score":null,"comment":null,"status":null,"redundant_to":[]}]} \ No newline at end of file diff --git a/unittests/tools/test_noseyparker_parser.py b/unittests/tools/test_noseyparker_parser.py index 714e8a4fa7b..1456f94d8fd 100644 --- a/unittests/tools/test_noseyparker_parser.py +++ b/unittests/tools/test_noseyparker_parser.py @@ -43,3 +43,23 @@ def test_noseyparker_parser_error(self): "Invalid Nosey Parker data, make sure to use Nosey Parker v0.16.0", str(context.exception), ) self.assertIn("ECONNREFUSED", str(context.exception)) + + def test_noseyparker_version_0_22_0(self): + with open("unittests/scans/noseyparker/noseyparker_0_22_0.jsonl", encoding="utf-8") as testfile: + parser = NoseyParkerParser() + findings = parser.get_findings(testfile, Test()) + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual(798, finding.cwe) + self.assertEqual(33, len(findings)) + finding = findings[10] + self.assertEqual("High", finding.severity) + + def 
test_noseyparker_version_0_22_0_without_githistory(self): + with open("unittests/scans/noseyparker/noseyparker_0_22_0_without_githistory.jsonl", encoding="utf-8") as testfile: + parser = NoseyParkerParser() + findings = parser.get_findings(testfile, Test()) + finding = findings[0] + self.assertEqual("High", finding.severity) + self.assertEqual(798, finding.cwe) + self.assertEqual(6, len(findings)) From 9309dfe4993650ac0c45b1ca48214fe5b85e150f Mon Sep 17 00:00:00 2001 From: manuelsommer <47991713+manuel-sommer@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:08:25 +0100 Subject: [PATCH 73/99] fix ruff for version 0.9.3 (#11451) --- dojo/management/commands/import_surveys.py | 3 +-- dojo/tools/mobsf/parser.py | 2 -- dojo/utils.py | 6 ++---- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/dojo/management/commands/import_surveys.py b/dojo/management/commands/import_surveys.py index d1577648806..ad8f2c8648a 100644 --- a/dojo/management/commands/import_surveys.py +++ b/dojo/management/commands/import_surveys.py @@ -39,7 +39,6 @@ def handle(self, *args, **options): new_line = matchedLine.replace(old_id, str(ctype_id)) # Replace the all lines in the file with open(path, "w", encoding="utf-8") as fout: - for line in contents: - fout.write(line.replace(matchedLine, new_line)) + fout.writelines(line.replace(matchedLine, new_line) for line in contents) # Delete the temp question created_question.delete() diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py index 8cbc98cc54d..adcabbe25eb 100644 --- a/dojo/tools/mobsf/parser.py +++ b/dojo/tools/mobsf/parser.py @@ -148,8 +148,6 @@ def get_findings(self, filename, test): "file_path": None, } mobsf_findings.append(mobsf_item) - else: - pass # Manifest Analysis if "manifest_analysis" in data: diff --git a/dojo/utils.py b/dojo/utils.py index 206d561151e..240bf23c2f3 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -1379,8 +1379,7 @@ def handle_uploaded_threat(f, eng): Path(settings.MEDIA_ROOT + "/threat/").mkdir() with open(settings.MEDIA_ROOT + f"/threat/{eng.id}{extension}", "wb+") as destination: - for chunk in f.chunks(): - destination.write(chunk) + destination.writelines(chunk for chunk in f.chunks()) eng.tmodel_path = settings.MEDIA_ROOT + f"/threat/{eng.id}{extension}" eng.save() @@ -1390,8 +1389,7 @@ def handle_uploaded_selenium(f, cred): extension = path.suffix with open(settings.MEDIA_ROOT + f"/selenium/{cred.id}{extension}", "wb+") as destination: - for chunk in f.chunks(): - destination.write(chunk) + destination.writelines(chunk for chunk in f.chunks()) cred.selenium_script = settings.MEDIA_ROOT + f"/selenium/{cred.id}{extension}" cred.save() From 83c07877b57be39a68ca07c3457e48fc808042cd Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:09:27 +0100 Subject: [PATCH 74/99] fix(webhook notif): Reorg docs, add 'ping' (#11631) * fix(webhook notif): Reorg docs, add 'ping' * Fix mermaid --- .../notification_webhooks/_index.md | 74 ----------------- .../notification_webhooks/engagement_added.md | 2 +- .../notification_webhooks/how_to.md | 83 +++++++++++++++++++ .../open_source/notification_webhooks/ping.md | 21 +++++ .../notification_webhooks/product_added.md | 2 +- .../product_type_added.md | 2 +- .../notification_webhooks/scan_added.md | 2 +- .../notification_webhooks/test_added.md | 2 +- unittests/test_notifications.py | 11 +++ 9 files changed, 120 insertions(+), 79 deletions(-) create mode 100644 docs/content/en/open_source/notification_webhooks/how_to.md 
create mode 100644 docs/content/en/open_source/notification_webhooks/ping.md diff --git a/docs/content/en/open_source/notification_webhooks/_index.md b/docs/content/en/open_source/notification_webhooks/_index.md index 96b6ff6c734..c2097db805e 100644 --- a/docs/content/en/open_source/notification_webhooks/_index.md +++ b/docs/content/en/open_source/notification_webhooks/_index.md @@ -1,81 +1,7 @@ --- title: "Notification Webhooks (experimental)" -description: "How to setup and use webhooks" weight: 7 chapter: true sidebar: collapsed: true --- - -Webhooks are HTTP requests coming from the DefectDojo instance towards a user-defined webserver which expects this kind of incoming traffic. - -## Transition graph: - -It is not unusual that in some cases a webhook can not be delivered. It is usually connected to network issues, server misconfiguration, or running upgrades on the server. DefectDojo needs to react to these outages. It might temporarily or permanently disable related endpoints. The following graph shows how it might change the status of the webhook definition based on HTTP responses (or manual user interaction). - -```mermaid -flowchart TD - - START{{Endpoint created}} - ALL{All states} - STATUS_ACTIVE([STATUS_ACTIVE]) - STATUS_INACTIVE_TMP - STATUS_INACTIVE_PERMANENT - STATUS_ACTIVE_TMP([STATUS_ACTIVE_TMP]) - END{{Endpoint removed}} - - START ==> STATUS_ACTIVE - STATUS_ACTIVE --HTTP 200 or 201 --> STATUS_ACTIVE - STATUS_ACTIVE --HTTP 5xx
or HTTP 429
or Timeout--> STATUS_INACTIVE_TMP - STATUS_ACTIVE --Any HTTP 4xx response
or any other HTTP response
or non-HTTP error--> STATUS_INACTIVE_PERMANENT - STATUS_INACTIVE_TMP -.After 60s.-> STATUS_ACTIVE_TMP - STATUS_ACTIVE_TMP --HTTP 5xx
or HTTP 429
or Timeout
within 24h
from the first error-->STATUS_INACTIVE_TMP - STATUS_ACTIVE_TMP -.After 24h.-> STATUS_ACTIVE - STATUS_ACTIVE_TMP --HTTP 200 or 201 --> STATUS_ACTIVE_TMP - STATUS_ACTIVE_TMP --HTTP 5xx
or HTTP 429
or Timeout
within 24h from the first error
or any other HTTP response or error--> STATUS_INACTIVE_PERMANENT - ALL ==Activation by user==> STATUS_ACTIVE - ALL ==Deactivation by user==> STATUS_INACTIVE_PERMANENT - ALL ==Removal of endpoint by user==> END -``` - -Notes: - -1. Transitions: - - bold: manual changes by user - - dotted: automated by celery - - others: based on responses on webhooks -1. Nodes: - - Stadium-shaped: Active - following webhook can be sent - - Rectangles: Inactive - performing of webhook will fail (and not retried) - - Hexagonal: Initial and final states - - Rhombus: All states (meta node to make the graph more readable) - -## Body and Headers - -The body of each request is JSON which contains data about related events like names and IDs of affected elements. -Examples of bodies are on pages related to each event (see below). - -Each request contains the following headers. They might be useful for better handling of events by the server receiving them. - -```yaml -User-Agent: DefectDojo- -X-DefectDojo-Event: -X-DefectDojo-Instance: -``` -## Disclaimer - -This functionality is new and in experimental mode. This means functionality might generate breaking changes in following DefectDojo releases and might not be considered final. - -However, the community is open to feedback to make this functionality better and get it stable as soon as possible. - -## Roadmap - -There are a couple of known issues that are expected to be resolved as soon as core functionality is considered ready. - -- Support events - Not only adding products, product types, engagements, tests, or upload of new scans but also events around SLA -- User webhook - right now only admins can define webhooks; in the future, users will also be able to define their own -- Improvement in UI - add filtering and pagination of webhook endpoints - -## Events - - diff --git a/docs/content/en/open_source/notification_webhooks/engagement_added.md b/docs/content/en/open_source/notification_webhooks/engagement_added.md index 36e31586a50..689dff9cc97 100644 --- a/docs/content/en/open_source/notification_webhooks/engagement_added.md +++ b/docs/content/en/open_source/notification_webhooks/engagement_added.md @@ -1,6 +1,6 @@ --- title: "Event: engagement_added" -weight: 3 +weight: 4 chapter: true --- diff --git a/docs/content/en/open_source/notification_webhooks/how_to.md b/docs/content/en/open_source/notification_webhooks/how_to.md new file mode 100644 index 00000000000..d8bb9311ca7 --- /dev/null +++ b/docs/content/en/open_source/notification_webhooks/how_to.md @@ -0,0 +1,83 @@ +--- +title: "How to setup and use webhooks" +weight: 1 +chapter: true +--- + +Webhooks are HTTP requests coming from the DefectDojo instance towards a user-defined webserver which expects this kind of incoming traffic. + +## Transition graph: + +It is not unusual that in some cases a webhook can not be delivered. It is usually connected to network issues, server misconfiguration, or running upgrades on the server. DefectDojo needs to react to these outages. It might temporarily or permanently disable related endpoints. The following graph shows how it might change the status of the webhook definition based on HTTP responses (or manual user interaction). + +```kroki {type=mermaid} +flowchart TD + + START{{Endpoint created}} + ALL{All states} + STATUS_ACTIVE([STATUS_ACTIVE]) + STATUS_INACTIVE_TMP + STATUS_INACTIVE_PERMANENT + STATUS_ACTIVE_TMP([STATUS_ACTIVE_TMP]) + END{{Endpoint removed}} + + START ==> STATUS_ACTIVE + STATUS_ACTIVE --HTTP 200 or 201 --> STATUS_ACTIVE + STATUS_ACTIVE --HTTP 5xx
or HTTP 429
or Timeout--> STATUS_INACTIVE_TMP + STATUS_ACTIVE --Any HTTP 4xx response
or any other HTTP response
or non-HTTP error--> STATUS_INACTIVE_PERMANENT + STATUS_INACTIVE_TMP -.After 60s.-> STATUS_ACTIVE_TMP + STATUS_ACTIVE_TMP --HTTP 5xx
or HTTP 429
or Timeout
within 24h
from the first error-->STATUS_INACTIVE_TMP + STATUS_ACTIVE_TMP -.After 24h.-> STATUS_ACTIVE + STATUS_ACTIVE_TMP --HTTP 200 or 201 --> STATUS_ACTIVE_TMP + STATUS_ACTIVE_TMP --HTTP 5xx
or HTTP 429
or Timeout
within 24h from the first error
or any other HTTP response or error--> STATUS_INACTIVE_PERMANENT + ALL ==Activation by user==> STATUS_ACTIVE + ALL ==Deactivation by user==> STATUS_INACTIVE_PERMANENT + ALL ==Removal of endpoint by user==> END +``` + +Notes: + +1. Transitions: + - bold: manual changes by user + - dotted: automated by celery + - others: based on responses on webhooks +1. Nodes: + - Stadium-shaped: Active - following webhook can be sent + - Rectangles: Inactive - performing of webhook will fail (and not retried) + - Hexagonal: Initial and final states + - Rhombus: All states (meta node to make the graph more readable) + +## Body and Headers + +The body of each request is JSON which contains data about related events like names and IDs of affected elements. +Examples of bodies are on pages related to each event (see below). + +Each request contains the following headers. They might be useful for better handling of events by the server receiving them. + +```yaml +User-Agent: DefectDojo- +X-DefectDojo-Event: +X-DefectDojo-Instance: +``` +## Disclaimer + +This functionality is new and in experimental mode. This means functionality might generate breaking changes in following DefectDojo releases and might not be considered final. + +However, the community is open to feedback to make this functionality better and get it stable as soon as possible. + +## Roadmap + +There are a couple of known issues that are expected to be resolved as soon as core functionality is considered ready. + +- Support events - Not only adding products, product types, engagements, tests, or upload of new scans but also events around SLA +- User webhook - right now only admins can define webhooks; in the future, users will also be able to define their own +- Improvement in UI - add filtering and pagination of webhook endpoints + +## Events + +- [`product_type_added`](../product_type_added) +- [`product_added`](../product_added) +- [`engagement_added`](../engagement_added) +- [`test_added`](../test_added) +- [`scan_added` and `scan_added_empty`](../scan_added) +- [`ping`](../ping) diff --git a/docs/content/en/open_source/notification_webhooks/ping.md b/docs/content/en/open_source/notification_webhooks/ping.md new file mode 100644 index 00000000000..006a9d88e66 --- /dev/null +++ b/docs/content/en/open_source/notification_webhooks/ping.md @@ -0,0 +1,21 @@ +--- +title: "Event: ping" +weight: 7 +chapter: true +--- + +An event `ping` is sent during Webhook setup to test whether the endpoint is up and responding with the expected status code. 
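+
+Below is a minimal receiver sketch (standard-library Python; the handler name, port, and `print` logging are illustrative assumptions, not part of DefectDojo). It replies with the HTTP 200 that keeps the endpoint in `STATUS_ACTIVE`:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        # DefectDojo names the event type in this header (e.g. "ping")
+        event = self.headers.get("X-DefectDojo-Event", "")
+        length = int(self.headers.get("Content-Length", 0))
+        payload = json.loads(self.rfile.read(length) or b"{}")
+        print(f"{event}: {payload.get('description')}")
+        # A 200 or 201 response keeps the webhook active; other statuses
+        # push it toward the inactive states in the transition graph
+        self.send_response(200)
+        self.end_headers()
+
+
+if __name__ == "__main__":
+    HTTPServer(("", 8000), WebhookHandler).serve_forever()
+```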
+
+## Event HTTP header
+```yaml
+X-DefectDojo-Event: ping
+```
+
+## Event HTTP body
+```json
+{
+    "description": "Test webhook notification",
+    "title": "",
+    "user": null
+}
+```
diff --git a/docs/content/en/open_source/notification_webhooks/product_added.md b/docs/content/en/open_source/notification_webhooks/product_added.md
index dea3cd27f2a..e381ed401ce 100644
--- a/docs/content/en/open_source/notification_webhooks/product_added.md
+++ b/docs/content/en/open_source/notification_webhooks/product_added.md
@@ -1,6 +1,6 @@
 ---
 title: "Event: product_added"
-weight: 2
+weight: 3
 chapter: true
 ---
 
diff --git a/docs/content/en/open_source/notification_webhooks/product_type_added.md b/docs/content/en/open_source/notification_webhooks/product_type_added.md
index e5db4139297..3d9ba3f5767 100644
--- a/docs/content/en/open_source/notification_webhooks/product_type_added.md
+++ b/docs/content/en/open_source/notification_webhooks/product_type_added.md
@@ -1,6 +1,6 @@
 ---
 title: "Event: product_type_added"
-weight: 1
+weight: 2
 chapter: true
 ---
 
diff --git a/docs/content/en/open_source/notification_webhooks/scan_added.md b/docs/content/en/open_source/notification_webhooks/scan_added.md
index ea1a6bffa3d..61709ce7647 100644
--- a/docs/content/en/open_source/notification_webhooks/scan_added.md
+++ b/docs/content/en/open_source/notification_webhooks/scan_added.md
@@ -1,6 +1,6 @@
 ---
 title: "Event: scan_added and scan_added_empty"
-weight: 5
+weight: 6
 chapter: true
 ---
 
diff --git a/docs/content/en/open_source/notification_webhooks/test_added.md b/docs/content/en/open_source/notification_webhooks/test_added.md
index bf6d71dc6f5..490272c86b8 100644
--- a/docs/content/en/open_source/notification_webhooks/test_added.md
+++ b/docs/content/en/open_source/notification_webhooks/test_added.md
@@ -1,6 +1,6 @@
 ---
 title: "Event: test_added"
-weight: 4
+weight: 5
 chapter: true
 ---
 
diff --git a/unittests/test_notifications.py b/unittests/test_notifications.py
index 860c2168599..a7022b7e83e 100644
--- a/unittests/test_notifications.py
+++ b/unittests/test_notifications.py
@@ -697,6 +697,17 @@ def test_headers(self, mock):
 
     @patch("requests.request", **{"return_value.status_code": 200})
     def test_events_messages(self, mock):
+        with self.subTest("ping"):
+            manager = WebhookNotificationManger()
+            manager._test_webhooks_notification(self.sys_wh)
+            self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "ping")
+            self.maxDiff = None
+            self.assertEqual(mock.call_args.kwargs["json"], {
+                "description": "Test webhook notification",
+                "title": "",
+                "user": None,
+            })
+
         with self.subTest("product_type_added"):
             prod_type = Product_Type.objects.create(name="notif prod type")
             self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "product_type_added")

From 5b66033ca18ede356c21a5aed6007bae988ed18f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 Jan 2025 12:00:21 -0600
Subject: [PATCH 75/99] Bump boto3 from 1.36.6 to 1.36.7 (#11683)

Bumps [boto3](https://github.com/boto/boto3) from 1.36.6 to 1.36.7.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.36.6...1.36.7)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ceabec1bc54..29367842135 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.6 # Required for Celery Broker AWS (SQS) support +boto3==1.36.7 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 45fec6acc8625e87d876b538dd16ad72c87cec6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 12:00:47 -0600 Subject: [PATCH 76/99] Bump python-gitlab from 5.3.1 to 5.4.0 (#11682) Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 5.3.1 to 5.4.0. - [Release notes](https://github.com/python-gitlab/python-gitlab/releases) - [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md) - [Commits](https://github.com/python-gitlab/python-gitlab/compare/v5.3.1...v5.4.0) --- updated-dependencies: - dependency-name: python-gitlab dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 29367842135..443d525dd60 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ titlecase==2.4.1 social-auth-app-django==5.4.2 social-auth-core==4.5.4 gitpython==3.1.44 -python-gitlab==5.3.1 +python-gitlab==5.4.0 cpe==1.3.1 packageurl-python==0.16.0 django-crum==0.7.9 From 3985363787fe4c4869d9f1a963afab1ff7617043 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 12:02:21 -0600 Subject: [PATCH 77/99] chore(deps): update actions/setup-python action from v5.3.0 to v5.4.0 (.github/workflows/test-helm-chart.yml) (#11680) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/test-helm-chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index b7d30cece1b..e4dfa16bcfd 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -22,7 +22,7 @@ jobs: - name: Set up Helm uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 - - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + - uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 with: python-version: 3.9 From 9e8111ae4426e617e1515a6148e49378a1387bfa Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:44:11 +0100 Subject: [PATCH 78/99] fix: Cleanup of old files and notes (#11628) --- .github/workflows/gh-pages.yml | 4 ++-- _config.yml | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 _config.yml diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 888cd7eb3e4..5d066d60cd1 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -39,8 +39,8 @@ jobs: - name: Setup Pages id: pages - uses: 
actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0 - use this after https://github.com/DefectDojo/django-DefectDojo/pull/11329
-
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5.0.0
+
       - name: Install dependencies
         run: cd docs && npm ci
diff --git a/_config.yml b/_config.yml
deleted file mode 100644
index c4192631f25..00000000000
--- a/_config.yml
+++ /dev/null
@@ -1 +0,0 @@
-theme: jekyll-theme-cayman
\ No newline at end of file

From 09dc10dcf21f91af304588c20876b0321078ce16 Mon Sep 17 00:00:00 2001
From: maxi-bee <84531851+maxi-bee@users.noreply.github.com>
Date: Tue, 28 Jan 2025 20:45:07 +0100
Subject: [PATCH 79/99] AnchoreCTL_Vulns parser: removes URL and corrects
 mapping if no fix for mitigation field (#11623)

* Update anchorectl_vulns parser.py

- removes the URL from the mitigation field as that is already on the references
- Only writes on the mitigation field if there is actually a fix available for it, to avoid some of the following results:

```
Upgrade to libssl3 None
```

* Update anchorectl_vulns parser.py

- removes the URL from the mitigation field as that is already on the references
- Writes a better message on the mitigation field to avoid:

```
Upgrade to libssl3 None
```

- note: this should not modify default deduplication for such parser
---
 dojo/tools/anchorectl_vulns/parser.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/dojo/tools/anchorectl_vulns/parser.py b/dojo/tools/anchorectl_vulns/parser.py
index 13632e84b84..e1c725272b0 100644
--- a/dojo/tools/anchorectl_vulns/parser.py
+++ b/dojo/tools/anchorectl_vulns/parser.py
@@ -54,10 +54,14 @@ def get_findings(self, filename, test):
        if sev == "Negligible" or sev == "Unknown":
            sev = "Info"
 
-        mitigation = (
-            "Upgrade to " + item["packageName"] + " " + item["fix"] + "\n"
-        )
-        mitigation += "URL: " + item["url"] + "\n"
+        if item["fix"] != "None":
+            mitigation = (
+                "Upgrade to " + item["packageName"] + " " + item["fix"] + "\n"
+            )
+        else:
+            mitigation = (
+                "No fix available" + "\n"
+            )
 
        cvssv3_base_score = None
        if item["feed"] == "nvdv2" or item["feed"] == "vulnerabilities":

From 80ab858a012641ce84f4c9e52bee7db391c304b9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 29 Jan 2025 13:46:47 -0700
Subject: [PATCH 80/99] Bump boto3 from 1.36.7 to 1.36.8 (#11686)

Bumps [boto3](https://github.com/boto/boto3) from 1.36.7 to 1.36.8.
- [Release notes](https://github.com/boto/boto3/releases)
- [Commits](https://github.com/boto/boto3/compare/1.36.7...1.36.8)

---
updated-dependencies:
- dependency-name: boto3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 443d525dd60..e9b89914557 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.7 # Required for Celery Broker AWS (SQS) support +boto3==1.36.8 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From c41f10ced8dc4d35592c824e7b907f5c1e0a9536 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:47:07 -0700 Subject: [PATCH 81/99] Bump python-gitlab from 5.4.0 to 5.6.0 (#11687) Bumps [python-gitlab](https://github.com/python-gitlab/python-gitlab) from 5.4.0 to 5.6.0. - [Release notes](https://github.com/python-gitlab/python-gitlab/releases) - [Changelog](https://github.com/python-gitlab/python-gitlab/blob/main/CHANGELOG.md) - [Commits](https://github.com/python-gitlab/python-gitlab/compare/v5.4.0...v5.6.0) --- updated-dependencies: - dependency-name: python-gitlab dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e9b89914557..f7a24d6c342 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,7 @@ titlecase==2.4.1 social-auth-app-django==5.4.2 social-auth-core==4.5.4 gitpython==3.1.44 -python-gitlab==5.4.0 +python-gitlab==5.6.0 cpe==1.3.1 packageurl-python==0.16.0 django-crum==0.7.9 From 061b20d460cb42656faac4271222c1488163282a Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 29 Jan 2025 22:25:40 +0100 Subject: [PATCH 82/99] Ruff: Add already corrected rules (#11648) --- ruff.toml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/ruff.toml b/ruff.toml index a4a2daa69ff..e81866ef214 100644 --- a/ruff.toml +++ b/ruff.toml @@ -37,52 +37,53 @@ select = [ "W", "C90", "I", - "D2", "D3", "D403", "D411", "D413", + "N804", "N811", "N814", "N818", + "D2", "D3", "D402", "D403", "D405", "D406", "D407", "D408", "D409", "D410", "D411", "D412", "D413", "D416", "UP", "YTT", "ASYNC", - "S1", "S2", "S5", "S7", "S311", + "S1", "S2", "S302", "S303", "S304", "S305", "S306", "S307", "S31", "S323", "S401", "S402", "S406", "S407", "S408", "S409", "S41", "S5", "S601", "S602", "S604", "S605", "S606", "S607", "S609", "S610", "S612", "S7", "FBT001", "FBT003", "A003", "A004", "A005", "A006", "COM", "C4", "T10", - "DJ003", "DJ012", "DJ013", + "DJ003", "DJ01", "EM", "EXE", + "FA", "ISC001", "ICN", "LOG", - "G001", "G002", "G1", "G2", + "G001", "G002", "G01", "G1", "G2", "INP", - "RET", "PIE", "T20", "Q", "RSE", + "RET", "SLOT", "SIM", "TID", - "TCH", + "TC", "INT", "ARG003", "ARG004", "ARG005", "PTH2", "PTH10", "PTH110", "PTH111", "PTH112", "PTH113", "PTH114", "PTH115", "PTH116", "PTH117", "PTH119", "PTH120", "PTH121", "PTH122", "PTH124", "TD001", "TD004", "TD005", + "FIX001", "FIX003", "PD", "PGH", + "PLC01", "PLC0205", "PLC0208", "PLC0414", "PLC24", "PLC3", "PLE", - "PLR0915", - "PLW1", "PLW2", "PLW3", - "FIX001", - "FIX003", - "TRY003", - 
"TRY004", - "TRY2", + "PLR01", "PLR0203", "PLR0206", "PLR0915", "PLR1716", "PLR172", "PLR1733", "PLR1736", + "PLW0120", "PLW0129", "PLW013", "PLW017", "PLW02", "PLW04", "PLW07", "PLW1", "PLW2", "PLW3", + "TRY003", "TRY004", "TRY2", "FLY", "NPY", "FAST", "AIR", "FURB", + "DOC202", "DOC403", "DOC502", "RUF", ] ignore = [ @@ -91,7 +92,6 @@ ignore = [ "SIM102", "SIM113", "SIM115", - "SIM116", "SIM117", "RUF012", "RUF015", From 0e912480b89754efa517b74473644e8be19e4ab5 Mon Sep 17 00:00:00 2001 From: Jaja <16231488+littlesvensson@users.noreply.github.com> Date: Wed, 29 Jan 2025 22:26:54 +0100 Subject: [PATCH 83/99] Accessibility ergonomy filtering (#11634) * Added vertical alignment for filter and clear filter logic * Apply clear filter logic * Fix new line at the end of file * Updated unit tests * Removed forgotten console log * Updated selector for integration test --- dojo/static/dojo/css/dojo.css | 2 +- dojo/static/dojo/js/index.js | 11 +- dojo/templates/dojo/filter_js_snippet.html | 3 + dojo/templates/dojo/filter_snippet.html | 164 +++++++++++++++------ tests/group_test.py | 4 +- tests/product_group_test.py | 2 +- tests/product_type_group_test.py | 2 +- tests/user_test.py | 10 +- 8 files changed, 139 insertions(+), 59 deletions(-) diff --git a/dojo/static/dojo/css/dojo.css b/dojo/static/dojo/css/dojo.css index 281c2420d7f..c656205a35c 100644 --- a/dojo/static/dojo/css/dojo.css +++ b/dojo/static/dojo/css/dojo.css @@ -1278,7 +1278,7 @@ div.custom-search-form { } .dojo-filter-set.form-inline .filter-form-control { - width: auto!important; + width: 100%!important; vertical-align: middle; } diff --git a/dojo/static/dojo/js/index.js b/dojo/static/dojo/js/index.js index ec201f827dd..56c09501807 100644 --- a/dojo/static/dojo/js/index.js +++ b/dojo/static/dojo/js/index.js @@ -342,8 +342,13 @@ function clear_form(form){ case 'radio': this.checked = false; break; - case 'select-multiple': - $(this).val(null).trigger('change'); + case 'select-multiple': + // Clear all types of multiple select versions + if ($(this).hasClass('select2-hidden-accessible')) { + $(this).data('select2').$container.find(".select2-selection__choice").remove(); + } + $(this).val(null).trigger('change'); + break; } }); -} \ No newline at end of file +} diff --git a/dojo/templates/dojo/filter_js_snippet.html b/dojo/templates/dojo/filter_js_snippet.html index 43e1ddfe74d..2dbad1c4b33 100644 --- a/dojo/templates/dojo/filter_js_snippet.html +++ b/dojo/templates/dojo/filter_js_snippet.html @@ -8,6 +8,9 @@ $(function () { $('.similar-filters select[multiple]').not('[data-tagulous]').each(function () { + // Removing bootstrap-select class and replacing with select2 to avoid library conflicts + // leading to visually unappealing dropdowns + $(this).removeClass('selectpicker').closest('.bootstrap-select').replaceWith($(this)); $(this).select2( ) }) diff --git a/dojo/templates/dojo/filter_snippet.html b/dojo/templates/dojo/filter_snippet.html index 971cdeacd0f..94311d469b6 100644 --- a/dojo/templates/dojo/filter_snippet.html +++ b/dojo/templates/dojo/filter_snippet.html @@ -15,54 +15,67 @@ {% for field in form.hidden_fields %} {{ field }} {% endfor %} -
- {% for field in form.visible_fields %} -
- {{ field.errors }} -
\ No newline at end of file + + // Clear filter logic below should clear filters without reload and + // have the button disabled if no filters are active + + const clearFilterLink = $("#clear, #clear_js"); + + const hasActiveFilters = () => { + return $(form) + .find(":input:not([type='hidden'])") + .filter(function () { + const value = $(this).val(); + + if ($(this).is("select[multiple]")) { + // Checking if value is an array and has non-empty elements at the same time + return Array.isArray(value) && value.some(v => v && v.trim() !== ""); + } else if ($(this).is(":checkbox, :radio")) { + // Checkboxes and radio buttons + return $(this).prop("checked"); + } else if (typeof value === "string") { + // Text inputs, textareas + return value.trim() !== "" && value !== "unknown" && value !== null; + } else { + // Other input types + return value !== null && value !== undefined; + } + }).length > 0; + }; + + const updateClearFiltersState = () => { + const filtersActive = hasActiveFilters(); + + if (clearFilterLink.length) { + clearFilterLink + .toggleClass("disabled", !filtersActive) + .toggleClass("btn-outline-secondary", !filtersActive) + .toggleClass("btn-secondary", filtersActive) + .attr("aria-disabled", !filtersActive) + .css("pointer-events", filtersActive ? "auto" : "none"); + } + }; + +$(form).on("input change", updateClearFiltersState); + + $(document).on('click', '#clear, #clear_js', function (event) { + event.preventDefault(); + if ($(this).attr("aria-disabled") === "true") { + return; + } + const form = $(this).closest(".filter-set").find("form"); + if (form.length) { + clear_form(form); + // Refresh some UI components to work have cleared form from all respective libraries + form.find("select.selectpicker").selectpicker("refresh"); + form.find(".multi-tag-input").val('').trigger('change'); + form.find("select.select2-hidden-accessible").val(null).trigger('change'); + form.find(".select2-selection__choice").remove(); + form.find(".select2-search__field").val(''); + // Update the state for the clear filters button + updateClearFiltersState(); + + } + }); +}); + diff --git a/tests/group_test.py b/tests/group_test.py index bde85f08f9e..a98a9cf3adf 100644 --- a/tests/group_test.py +++ b/tests/group_test.py @@ -43,7 +43,7 @@ def test_group_edit_name_and_global_role(self): driver.find_element(By.ID, "id_name").clear() driver.find_element(By.ID, "id_name").send_keys("Group Name") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed group is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuGroup").click() driver.find_element(By.ID, "editGroup").click() @@ -139,7 +139,7 @@ def test_group_delete(self): driver.find_element(By.ID, "id_name").clear() driver.find_element(By.ID, "id_name").send_keys("Another Name") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed group is now available, proceed with clicking 'Delete' button driver.find_element(By.ID, "dropdownMenuGroup").click() driver.find_element(By.ID, "deleteGroup").click() diff --git a/tests/product_group_test.py b/tests/product_group_test.py index ab0ccc7c77f..19dbd31f11e 100644 --- a/tests/product_group_test.py +++ b/tests/product_group_test.py @@ -148,7 +148,7 @@ def navigate_to_group_view(self): 
driver.find_element(By.ID, "id_name").clear() driver.find_element(By.ID, "id_name").send_keys("Group Name") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed group is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuGroup").click() driver.find_element(By.ID, "viewGroup").click() diff --git a/tests/product_type_group_test.py b/tests/product_type_group_test.py index 9e5011b6fe0..8848e5d8add 100644 --- a/tests/product_type_group_test.py +++ b/tests/product_type_group_test.py @@ -147,7 +147,7 @@ def navigate_to_group_view(self): driver.find_element(By.ID, "id_name").clear() driver.find_element(By.ID, "id_name").send_keys("Group Name") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed group is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuGroup").click() driver.find_element(By.ID, "viewGroup").click() diff --git a/tests/user_test.py b/tests/user_test.py index 607b8a7b4ea..e2b638d9708 100644 --- a/tests/user_test.py +++ b/tests/user_test.py @@ -114,7 +114,7 @@ def test_user_edit_permissions(self): driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("propersahm") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed user is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuUser").click() driver.find_element(By.ID, "editUser").click() @@ -141,7 +141,7 @@ def test_user_delete(self): driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("propersahm") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed user is now available, proceed with clicking 'View' button driver.find_element(By.ID, "dropdownMenuUser").click() driver.find_element(By.ID, "viewUser").click() @@ -169,7 +169,7 @@ def test_user_with_writer_role_delete(self): driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("userWriter") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed user is now available, proceed with clicking 'View' button driver.find_element(By.ID, "dropdownMenuUser").click() driver.find_element(By.ID, "viewUser").click() @@ -235,7 +235,7 @@ def test_user_edit_configuration(self): driver.find_element(By.ID, "id_username").clear() driver.find_element(By.ID, "id_username").send_keys("propersahm") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed user is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuUser").click() driver.find_element(By.ID, "viewUser").click() @@ -256,7 +256,7 @@ def test_user_edit_configuration(self): driver.find_element(By.ID, 
"id_username").clear() driver.find_element(By.ID, "id_username").send_keys("propersahm") # click on 'apply filter' button - driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click() + driver.find_element(By.ID, "apply").click() # only the needed user is now available, proceed with opening the context menu and clicking 'Edit' button driver.find_element(By.ID, "dropdownMenuUser").click() driver.find_element(By.ID, "viewUser").click() From bd613d19ed18c1178a1f3b32fe61ebbb89cf0c7c Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:27:55 -0600 Subject: [PATCH 84/99] Add different pro banner for databases over 100k findings and endpoints (#11665) * Add different pro banner for databases over 100k * Update message --- dojo/settings/settings.dist.py | 4 ++++ dojo/tasks.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 5a55400ecd3..ecb67768183 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1147,6 +1147,10 @@ def saml2_attrib_map_format(dict): "task": "dojo.notifications.helper.webhook_status_cleanup", "schedule": timedelta(minutes=1), }, + "trigger_evaluate_pro_proposition": { + "task": "dojo.tasks.evaluate_pro_proposition", + "schedule": timedelta(hours=8), + }, # 'jira_status_reconciliation': { # 'task': 'dojo.tasks.jira_status_reconciliation_task', # 'schedule': timedelta(hours=12), diff --git a/dojo/tasks.py b/dojo/tasks.py index 9e96c258605..3257115682c 100644 --- a/dojo/tasks.py +++ b/dojo/tasks.py @@ -10,7 +10,7 @@ from django.utils import timezone from dojo.celery import app -from dojo.models import Alerts, Engagement, Finding, Product, System_Settings, User +from dojo.models import Alerts, Announcement, Endpoint, Engagement, Finding, Product, System_Settings, User from dojo.notifications.helper import create_notification from dojo.utils import calculate_grade, sla_compute_and_notify @@ -190,3 +190,30 @@ def jira_status_reconciliation_task(*args, **kwargs): def fix_loop_duplicates_task(*args, **kwargs): from dojo.finding.helper import fix_loop_duplicates return fix_loop_duplicates() + + +@app.task +def evaluate_pro_proposition(*args, **kwargs): + # Ensure we should be doing this + if not settings.CREATE_CLOUD_BANNER: + return + # Get the announcement object + announcement = Announcement.objects.get_or_create(id=1)[0] + # Quick check for a user has modified the current banner - if not, exit early as we dont want to stomp + if not any( + entry in announcement.message + for entry in [ + "", + "Cloud and On-Premise Subscriptions Now Available!", + "Findings/Endpoints in their systems", + ] + ): + return + # Count the objects the determine if the banner should be updated + object_count = Finding.objects.count() + Endpoint.objects.count() + # Unless the count is greater than 100k, exit early + if object_count < 100000: + return + # Update the announcement + announcement.message = f'Only professionals have {object_count:,} Findings and Endpoints in their systems... Get DefectDojo Pro today!' 
+ announcement.save() From 80a29ce2885c3a9b82f49aa7a62aa34da40e5580 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Wed, 29 Jan 2025 22:28:28 +0100 Subject: [PATCH 85/99] Remove makemigrations from scripts (#11638) * remove automatic make migrations * remove automatic make migrations * restore unit-tests.sh --- docker/entrypoint-initializer.sh | 26 +++++++++++++++++++---- docker/entrypoint-unit-tests-devDocker.sh | 24 ++++++++++++++++++--- docker/entrypoint-unit-tests.sh | 6 +++--- docker/unit-tests.sh | 0 readme-docs/CONTRIBUTING.md | 6 ++++++ 5 files changed, 52 insertions(+), 10 deletions(-) mode change 100755 => 100644 docker/unit-tests.sh diff --git a/docker/entrypoint-initializer.sh b/docker/entrypoint-initializer.sh index 45a67105109..7163f761d8c 100755 --- a/docker/entrypoint-initializer.sh +++ b/docker/entrypoint-initializer.sh @@ -16,7 +16,7 @@ initialize_data() python3 manage.py initialize_permissions } -create_announcement_banner() +create_announcement_banner() { # Load the announcement banner if [ -z "$DD_CREATE_CLOUD_BANNER" ]; then @@ -103,8 +103,26 @@ then exit 47 fi -echo "Making migrations" -python3 manage.py makemigrations dojo + +python3 manage.py makemigrations --no-input --check --dry-run --verbosity 3 || { + cat <<-EOF + +******************************************************************************** + +You made changes to the models without creating a DB migration for them. + +**NEVER** change existing migrations, create a new one. + +If you're not familiar with migrations in Django, please read the +great documentation thoroughly: +https://docs.djangoproject.com/en/5.0/topics/migrations/ + +******************************************************************************** + +EOF + exit 1 +} + echo "Migrating" python3 manage.py migrate @@ -139,7 +157,7 @@ fi if [ -z "${ADMIN_EXISTS}" ] then . /entrypoint-first-boot.sh - + create_announcement_banner initialize_data fi diff --git a/docker/entrypoint-unit-tests-devDocker.sh b/docker/entrypoint-unit-tests-devDocker.sh index 19fcc80768c..2a9f2619cd7 100755 --- a/docker/entrypoint-unit-tests-devDocker.sh +++ b/docker/entrypoint-unit-tests-devDocker.sh @@ -18,7 +18,25 @@ unset DD_CELERY_BROKER_URL wait_for_database_to_be_reachable -python3 manage.py makemigrations dojo +python3 manage.py makemigrations --no-input --check --dry-run --verbosity 3 || { + cat <<-EOF + +******************************************************************************** + +You made changes to the models without creating a DB migration for them. + +**NEVER** change existing migrations, create a new one. 
+ +If you're not familiar with migrations in Django, please read the +great documentation thoroughly: +https://docs.djangoproject.com/en/5.0/topics/migrations/ + +******************************************************************************** + +EOF + exit 1 +} + python3 manage.py migrate # do the check with Django stack @@ -56,10 +74,10 @@ echo "------------------------------------------------------------" # Removing parallel and shuffle for now to maintain stability python3 manage.py test unittests -v 3 --keepdb --no-input --exclude-tag="non-parallel" || { - exit 1; + exit 1; } python3 manage.py test unittests -v 3 --keepdb --no-input --tag="non-parallel" || { - exit 1; + exit 1; } # you can select a single file to "test" unit tests diff --git a/docker/entrypoint-unit-tests.sh b/docker/entrypoint-unit-tests.sh index 30abb5d4f4f..8b6ba002ca3 100755 --- a/docker/entrypoint-unit-tests.sh +++ b/docker/entrypoint-unit-tests.sh @@ -64,7 +64,7 @@ You made changes to the models without creating a DB migration for them. If you're not familiar with migrations in Django, please read the great documentation thoroughly: -https://docs.djangoproject.com/en/1.11/topics/migrations/ +https://docs.djangoproject.com/en/5.0/topics/migrations/ ******************************************************************************** @@ -82,8 +82,8 @@ echo "------------------------------------------------------------" # Removing parallel and shuffle for now to maintain stability python3 manage.py test unittests -v 3 --keepdb --no-input --exclude-tag="non-parallel" || { - exit 1; + exit 1; } python3 manage.py test unittests -v 3 --keepdb --no-input --tag="non-parallel" || { - exit 1; + exit 1; } \ No newline at end of file diff --git a/docker/unit-tests.sh b/docker/unit-tests.sh old mode 100755 new mode 100644 diff --git a/readme-docs/CONTRIBUTING.md b/readme-docs/CONTRIBUTING.md index dd4eb25dbd2..3beb9dd62d5 100644 --- a/readme-docs/CONTRIBUTING.md +++ b/readme-docs/CONTRIBUTING.md @@ -65,6 +65,12 @@ For changes that require additional settings, you can now use local_settings.py ## Python3 Version For compatibility reasons, the code in dev branch should be python3.11 compliant. +## Database migrations +When changes are made to the database model, a database migration is needed. This migration can be generated using something like +`docker compose exec uwsgi bash -c "python manage.py makemigrations"`. +This will result in a new file in the `dojo/db_migrations` folder that can be committed to `git`. +When making downstream database model changes in your fork of Defect Dojo, please be aware of the risks of getting out of sync with our upstream migrations. +It requires proper knowledge of [Django Migrations](https://docs.djangoproject.com/en/5.0/topics/migrations/) to reconcile the migrations before you can upgrade to a newer version of Defect Dojo. ## Submitting Pull Requests From c66e86b4383c155eaf7a790b257b3f2cfa52c73b Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Wed, 29 Jan 2025 15:28:50 -0600 Subject: [PATCH 86/99] Import History: Make the absence of action more clear (#11637) --- dojo/templates/dojo/view_test.html | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dojo/templates/dojo/view_test.html b/dojo/templates/dojo/view_test.html index ea78ced62d9..ffcfacef606 100644 --- a/dojo/templates/dojo/view_test.html +++ b/dojo/templates/dojo/view_test.html @@ -277,6 +277,8 @@
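The `makemigrations --no-input --check --dry-run` guard added above fails the container start whenever model changes lack a migration. A hedged sketch of the contributor workflow it enforces, using the command documented in CONTRIBUTING.md (assumes the default compose stack is running):

```sh
# Generate the missing migration inside the running uwsgi container
docker compose exec uwsgi bash -c "python manage.py makemigrations"
# The guard exits non-zero while migrations are missing, zero once they exist
docker compose exec uwsgi bash -c "python manage.py makemigrations --no-input --check --dry-run"
# Commit the generated file under dojo/db_migrations
git add dojo/db_migrations/
```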

{% for action in finding_action_list %} {{ action.list|length }} {{ action.grouper }} {% if not forloop.last %},{% endif %} + {% empty %} + There were no findings created, closed, or modified {% endfor %} From 420bf66b43e3adaabe6b4d0bc543c474930f3d23 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 29 Jan 2025 22:35:06 +0100 Subject: [PATCH 87/99] feat(GHA): Pin azure/setup-helm (#11493) * feat(GHA): Add pinact (gha pin checker) * Update .github/workflows/gha-pin.yml Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> * Drop tool --------- Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .github/workflows/release-x-manual-helm-chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-x-manual-helm-chart.yml b/.github/workflows/release-x-manual-helm-chart.yml index 1a969e788f5..10aed4c5886 100644 --- a/.github/workflows/release-x-manual-helm-chart.yml +++ b/.github/workflows/release-x-manual-helm-chart.yml @@ -47,7 +47,7 @@ jobs: git config --global user.email "${{ env.GIT_EMAIL }}" - name: Set up Helm - uses: azure/setup-helm@v4.2.0 + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 - name: Configure HELM repos run: |- From 97477014972eb66a4d5ce0aa3de8e6df5d700e27 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:15:25 +0100 Subject: [PATCH 88/99] feat(bash-script): Drop all `dc-` scripts (#11649) Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .github/workflows/fetch-oas.yml | 2 +- .github/workflows/integration-tests.yml | 6 +-- .github/workflows/rest-framework-tests.yml | 2 +- README.md | 23 +++++---- dc-build.sh | 21 -------- dc-down.sh | 21 -------- dc-stop.sh | 21 -------- dc-up-d.sh | 11 ----- dc-up.sh | 10 ---- .../contributing/how-to-write-a-parser.md | 4 +- docs/content/en/open_source/upgrading/2.43.md | 6 ++- .../en/open_source/upgrading/_index.md | 5 +- .../open_source/upgrading/upgrading_guide.md | 5 +- readme-docs/DOCKER.md | 48 +++++++++++-------- ...ation-tests.sh => run-integration-tests.sh | 10 ++-- dc-unittest.sh => run-unittest.sh | 7 +-- 16 files changed, 59 insertions(+), 143 deletions(-) delete mode 100755 dc-build.sh delete mode 100755 dc-down.sh delete mode 100755 dc-stop.sh delete mode 100755 dc-up-d.sh delete mode 100755 dc-up.sh rename dc-integration-tests.sh => run-integration-tests.sh (74%) rename dc-unittest.sh => run-unittest.sh (84%) diff --git a/.github/workflows/fetch-oas.yml b/.github/workflows/fetch-oas.yml index 8f2b5514436..15720f1f31b 100644 --- a/.github/workflows/fetch-oas.yml +++ b/.github/workflows/fetch-oas.yml @@ -33,7 +33,7 @@ jobs: docker images - name: Start Dojo - run: docker compose up --no-deps -d postgres nginx uwsgi + run: docker compose up -d postgres nginx uwsgi env: DJANGO_VERSION: ${{ env.release_version }}-alpine NGINX_VERSION: ${{ env.release_version }}-alpine diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index c60cb6f3403..11a5d69d6b2 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -63,21 +63,21 @@ jobs: run: ln -s docker-compose.override.integration_tests.yml docker-compose.override.yml - name: Start Dojo - run: docker compose up --no-deps -d postgres nginx celerybeat celeryworker mailhog uwsgi redis + run: docker compose up -d postgres nginx celerybeat celeryworker mailhog uwsgi 
redis env: DJANGO_VERSION: ${{ matrix.os }} NGINX_VERSION: ${{ matrix.os }} - name: Initialize timeout-minutes: 10 - run: docker compose up --no-deps --exit-code-from initializer initializer + run: docker compose up --exit-code-from initializer initializer env: DJANGO_VERSION: ${{ matrix.os }} NGINX_VERSION: ${{ matrix.os }} - name: Integration tests timeout-minutes: 10 - run: docker compose up --no-deps --exit-code-from integration-tests integration-tests + run: docker compose up --exit-code-from integration-tests integration-tests env: DD_INTEGRATION_TEST_FILENAME: ${{ matrix.test-case }} INTEGRATION_TESTS_VERSION: debian diff --git a/.github/workflows/rest-framework-tests.yml b/.github/workflows/rest-framework-tests.yml index 63056587431..309eee1240d 100644 --- a/.github/workflows/rest-framework-tests.yml +++ b/.github/workflows/rest-framework-tests.yml @@ -44,7 +44,7 @@ jobs: # no celery or initializer needed for unit tests - name: Unit tests timeout-minutes: 10 - run: docker compose up --no-deps --exit-code-from uwsgi uwsgi + run: docker compose up --exit-code-from uwsgi uwsgi env: DJANGO_VERSION: ${{ matrix.os }} diff --git a/README.md b/README.md index 6ca297579b3..1acf956d43a 100644 --- a/README.md +++ b/README.md @@ -50,12 +50,15 @@ docker-compose features and flags. You can run Compose V2 by replacing the hyphe git clone https://github.com/DefectDojo/django-DefectDojo cd django-DefectDojo +# Check if your installed toolkit is compatible +./docker/docker-compose-check.sh + # Building Docker images -./dc-build.sh +docker compose build # Run the application (for other profiles besides postgres-redis see # https://github.com/DefectDojo/django-DefectDojo/blob/dev/readme-docs/DOCKER.md) -./dc-up-d.sh postgres-redis +docker compose up -d # Obtain admin credentials. The initializer can take up to 3 minutes to run. # Use docker compose logs -f initializer to track its progress. @@ -64,17 +67,13 @@ docker compose logs initializer | grep "Admin password:" ## For Docker Compose V1 -You can run Compose V1 by editing the files below to add the hyphen (-) between `docker compose`. +You can run Compose V1 by calling `docker-compose` (i.e. by adding the hyphen (-) back into `docker compose`). + +The following commands still use the original version, so you might need to adjust them: ```sh - dc-build.sh - dc-down.sh - dc-stop.sh - dc-unittest.sh - dc-up-d.sh - dc-up.sh - docker/docker-compose-check.sh - docker/entrypoint-initializer.sh - docker/setEnv.sh +docker/docker-compose-check.sh +docker/entrypoint-initializer.sh +docker/setEnv.sh ``` Navigate to `http://localhost:8080` to see your new instance! diff --git a/dc-build.sh b/dc-build.sh deleted file mode 100755 index fd908428207..00000000000 --- a/dc-build.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -bash ./docker/docker-compose-check.sh -if [[ $? -eq 1 ]]; then exit 1; fi - -if [ $# -eq 0 ] -then - echo "Building docker compose" - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags. You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose build -else - echo "Building docker compose with additional parameter $1 ..." - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags.
You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose build "$1" -fi diff --git a/dc-down.sh b/dc-down.sh deleted file mode 100755 index 3ef3c667c49..00000000000 --- a/dc-down.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -bash ./docker/docker-compose-check.sh -if [[ $? -eq 1 ]]; then exit 1; fi - -if [ $# -eq 0 ] -then - echo "Stopping docker compose and removing containers" - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags. You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose down -else - echo "Stopping docker compose and removing containers with additional parameter $1 ..." - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags. You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose down "$1" -fi diff --git a/dc-stop.sh b/dc-stop.sh deleted file mode 100755 index 8dc82e09a60..00000000000 --- a/dc-stop.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -bash ./docker/docker-compose-check.sh -if [[ $? -eq 1 ]]; then exit 1; fi - -if [ $# -eq 0 ] -then - echo "Stopping docker compose" - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags. You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose stop -else - echo "Stopping docker compose with additional parameter $1 ..." - # Compose V2 integrates compose functions into the Docker platform, - # continuing to support most of the previous docker-compose features - # and flags. You can run Compose V2 by replacing the hyphen (-) with - # a space, using docker compose, instead of docker-compose. - docker compose stop "$1" -fi diff --git a/dc-up-d.sh b/dc-up-d.sh deleted file mode 100755 index abe694783cf..00000000000 --- a/dc-up-d.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -bash ./docker/docker-compose-check.sh -if [[ $? -eq 1 ]]; then exit 1; fi - -echo "Starting docker compose in the background ..." - -# Compose V2 integrates compose functions into the Docker platform, continuing to support -# most of the previous docker-compose features and flags. You can run Compose V2 by -# replacing the hyphen (-) with a space, using docker compose, instead of docker-compose. -docker compose up --no-deps -d diff --git a/dc-up.sh b/dc-up.sh deleted file mode 100755 index 347f844fc85..00000000000 --- a/dc-up.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -bash ./docker/docker-compose-check.sh -if [[ $? -eq 1 ]]; then exit 1; fi - -# Compose V2 integrates compose functions into the Docker platform, continuing to support -# most of the previous docker-compose features and flags. You can run Compose V2 by -# replacing the hyphen (-) with a space, using docker compose, instead of docker-compose. -docker compose up --no-deps -echo "Starting docker compose in the foreground ..." 
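Taken together, the deleted wrappers above collapse into plain Compose V2 invocations; the mapping, as restated in the updated README and DOCKER.md, is:

```sh
docker compose build   # replaces ./dc-build.sh
docker compose up      # replaces ./dc-up.sh
docker compose up -d   # replaces ./dc-up-d.sh
docker compose stop    # replaces ./dc-stop.sh
docker compose down    # replaces ./dc-down.sh
```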
\ No newline at end of file diff --git a/docs/content/en/open_source/contributing/how-to-write-a-parser.md b/docs/content/en/open_source/contributing/how-to-write-a-parser.md index 3ee5622506b..fde9fe76a8c 100644 --- a/docs/content/en/open_source/contributing/how-to-write-a-parser.md +++ b/docs/content/en/open_source/contributing/how-to-write-a-parser.md @@ -295,7 +295,7 @@ $ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.}} -$ ./dc-unittest.sh --test-case unittests.tools.. +$ ./run-unittest.sh --test-case unittests.tools.. {{< /highlight >}} Example for the blackduck hub parser: @@ -307,7 +307,7 @@ $ docker compose exec uwsgi bash -c 'python manage.py test unittests.tools.test_ or like this: {{< highlight bash >}} -$ ./dc-unittest.sh --test-case unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser +$ ./run-unittest.sh --test-case unittests.tools.test_blackduck_csv_parser.TestBlackduckHubParser {{< /highlight >}} If you want to run all unit tests, simply run `$ docker-compose exec uwsgi bash -c 'python manage.py test unittests -v2'` diff --git a/docs/content/en/open_source/upgrading/2.43.md b/docs/content/en/open_source/upgrading/2.43.md index 4b5efcecf2f..59a4a1faeea 100644 --- a/docs/content/en/open_source/upgrading/2.43.md +++ b/docs/content/en/open_source/upgrading/2.43.md @@ -2,11 +2,13 @@ title: 'Upgrading to DefectDojo Version 2.43.x' toc_hide: true weight: -20250106 -description: Disclaimer field renamed/split. +description: Disclaimer field renamed/split and removal of `dc-` scripts. --- [Pull request #10902](https://github.com/DefectDojo/django-DefectDojo/pull/10902) introduced different kinds of disclaimers within the DefectDojo instance. The original content of the disclaimer was copied to all new fields where it had been used until now (so this change does not require any action on the user's side). However, if users were managing the original disclaimer via API (endpoint `/api/v2/system_settings/1/`, field `disclaimer`), be aware that the fields are now called `disclaimer_notifications` and `disclaimer_reports` (plus there is one additional, previously unused field called `disclaimer_notes`). +In the past, when DefectDojo supported different databases and message brokers, `dc-` scripts were added to simplify starting the Dojo stack. As these backends are no longer supported, the scripts are no longer needed. From now on, we recommend using the standard `docker compose` (or `docker-compose`) commands as described in the [README.md](https://github.com/DefectDojo/django-DefectDojo/blob/master/README.md). + **Hash Code changes** The Rusty Hog parser has been [updated](https://github.com/DefectDojo/django-DefectDojo/pull/11433) to populate more fields. Some of these fields are part of the hash code calculation. To recalculate the hash code and deduplicate existing Rusty Hog findings, please execute the following command: @@ -19,4 +21,4 @@ The Rusty Hog parser has been [updated](https://github.com/DefectDojo/django-Def This command has various command line arguments to tweak its behaviour, for example to trigger a run of the deduplication process. See [dedupe.py](https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/management/commands/dedupe.py) for more information. -Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release.
\ No newline at end of file +Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.43.0) for the contents of the release. diff --git a/docs/content/en/open_source/upgrading/_index.md b/docs/content/en/open_source/upgrading/_index.md index d453def6346..5eee9e16873 100644 --- a/docs/content/en/open_source/upgrading/_index.md +++ b/docs/content/en/open_source/upgrading/_index.md @@ -41,9 +41,8 @@ The generic upgrade method for docker compose are as follows: ``` - Go to the directory where your docker-compose.yml file lives -- Stop DefectDojo: `./dc-stop.sh` -- Re-start DefectDojo, allowing for container recreation: - `./dc-up-d.sh` +- Stop DefectDojo: `docker compose stop` +- Re-start DefectDojo, allowing for container recreation: `docker compose up -d` - Database migrations will be run automatically by the initializer. Check the output via `docker compose logs initializer` or relevant k8s command - If you have the initializer disabled (or if you want to be on the diff --git a/docs/content/en/open_source/upgrading/upgrading_guide.md b/docs/content/en/open_source/upgrading/upgrading_guide.md index e7662f7e575..792d024dfc2 100644 --- a/docs/content/en/open_source/upgrading/upgrading_guide.md +++ b/docs/content/en/open_source/upgrading/upgrading_guide.md @@ -41,9 +41,8 @@ The generic upgrade method for docker compose are as follows: ``` - Go to the directory where your docker-compose.yml file lives -- Stop DefectDojo: `./dc-stop.sh` -- Re-start DefectDojo, allowing for container recreation: - `./dc-up-d.sh` +- Stop DefectDojo: `docker compose stop` +- Re-start DefectDojo, allowing for container recreation: `docker compose up -d` - Database migrations will be run automatically by the initializer. Check the output via `docker compose logs initializer` or relevant k8s command - If you have the initializer disabled (or if you want to be on the diff --git a/readme-docs/DOCKER.md b/readme-docs/DOCKER.md index a85d9f55f26..440122f2e39 100644 --- a/readme-docs/DOCKER.md +++ b/readme-docs/DOCKER.md @@ -32,16 +32,22 @@ When running the application without building images, the application will run b # Setup via Docker Compose +## Commands + +Short summary of useful commands: + +- `docker compose build` - Build the docker images, it can take additional parameters to be used in the build process, e.g. `docker compose build --no-cache`. +- `docker compose up` - Start the docker containers in the foreground. +- `docker compose up -d` - Start the docker containers in the background. +- `docker compose stop` - Stop the docker containers, it can take additional parameters to be used in the stop process. +- `docker compose down` - Stop and remove the docker containers, it can take additional parameters to be used in the stop and remove process. + ## Scripts -6 shell scripts make life easier and avoid typing long commands: +2 shell scripts make life easier: -- `./dc-build.sh` - Build the docker images, it can take one additional parameter to be used in the build process, e.g. `./dc-build.sh --no-cache`. -- `./dc-up.sh` - Start the docker containers in the foreground. -- `./dc-up-d.sh` - Start the docker containers in the background. -- `./dc-stop.sh` - Stop the docker containers, it can take one additional parameter to be used in the stop process. -- `./dc-down.sh` - Stop and remove the docker containers, it can take one additional parameter to be used in the stop and remove process. -- `./dc-unittest.sh` - Utility script to aid in running a specific unit test class. 
+- `./run-unittest.sh` - Utility script to aid in running a specific unit test class. +- `./run-integration-tests.sh` - Utility script to aid in running a specific integration test. # Setup via Docker Compose - Building and running the application @@ -51,18 +57,18 @@ When running the application without building images, the application will run b To build images and put them in your local docker cache, run: ```zsh -./dc-build.sh +docker compose build ``` To build a single image, run: ```zsh -./dc-build.sh uwsgi +docker compose build uwsgi ``` or ``` -./dc-build.sh nginx +docker compose build nginx ``` > **_NOTE:_** It's possible to add extra fixtures in folder "/docker/extra_fixtures". @@ -72,7 +78,7 @@ To run the application based on previously built image (or based on dockerhub im ```zsh docker/setEnv.sh release -./dc-up.sh +docker compose up ``` This will run the application based on docker-compose.yml only. @@ -86,8 +92,8 @@ For development, use: ```zsh docker/setEnv.sh dev -./dc-build.sh -./dc-up.sh +docker compose build +docker compose up ``` This will run the application based on merged configurations from docker-compose.yml and docker-compose.override.dev.yml. @@ -198,13 +204,13 @@ aedc404d6dee defectdojo/defectdojo-nginx:1.0.0 "/entrypoint-nginx.sh" Removes all containers ```zsh -./dc-down.sh +docker compose down ``` Removes all containers, networks and the database volume ```zsh -./dc-down.sh --volumes +docker compose down --volumes ``` # Run with Docker Compose using https @@ -231,7 +237,7 @@ chmod 400 nginx/*.key ``` rm -f docker-compose.override.yml ln -s docker-compose.override.https.yml docker-compose.override.yml -./dc-up.sh +docker compose up ``` ## Create credentials on the fly @@ -241,7 +247,7 @@ ln -s docker-compose.override.https.yml docker-compose.override.yml ``` rm -f docker-compose.override.yml ln -s docker-compose.override.https.yml docker-compose.override.yml -./dc-up.sh +docker compose up ``` The default https port is 8443. @@ -267,14 +273,14 @@ This will run all unit-tests and leave the uwsgi container up: ``` docker/setEnv.sh unit_tests -./dc-up.sh +docker compose up ``` ### Limited tests If you want to enter the container to run more tests or a single test case, leave setEnv in normal or dev mode: ``` docker/setEnv.sh dev -./dc-up.sh +docker compose up ``` Then ``` @@ -312,10 +318,10 @@ This will run all integration-tests and leave the containers up: ``` docker/setEnv.sh integration_tests -./dc-up.sh +docker compose up ``` -NB: the first time you run it, initializing the database may be too long for the tests to succeed. In that case, you'll need to wait for the initializer container to end, then re-run `./dc-up.sh` +NB: the first time you run it, initializing the database may be too long for the tests to succeed. 
 In that case, you'll need to wait for the initializer container to end, then re-run `docker compose up` Check the logs with: ``` diff --git a/dc-integration-tests.sh b/run-integration-tests.sh similarity index 74% rename from dc-integration-tests.sh rename to run-integration-tests.sh index 88267f8f6f1..953fbbab31f 100755 --- a/dc-integration-tests.sh +++ b/run-integration-tests.sh @@ -15,7 +15,7 @@ usage() { echo echo echo "Example command:" - echo './dc-unittest.sh --test-case "Finding integration tests"' + echo './run-integration-tests.sh --test-case "Finding integration tests"' } while [[ $# -gt 0 ]]; do @@ -49,10 +49,10 @@ echo "Running docker compose unit tests with profile postgres-redis and test cas # a space, using docker compose, instead of docker-compose. echo "Building images..." ./docker/setEnv.sh integration_tests -./dc-build.sh +docker compose build echo "Setting up DefectDojo with Postgres and Redis..." -DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose --no-deps -d postgres nginx celerybeat celeryworker mailhog uwsgi redis +DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose up -d postgres nginx celerybeat celeryworker mailhog uwsgi redis echo "Initializing DefectDojo..." -DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose --no-deps --exit-code-from initializer initializer +DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose up --exit-code-from initializer initializer echo "Running the integration tests..." -DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose --no-deps --exit-code-from integration-tests integration-tests +DD_INTEGRATION_TEST_FILENAME="$TEST_CASE" docker compose up --exit-code-from integration-tests integration-tests diff --git a/dc-unittest.sh b/run-unittest.sh similarity index 84% rename from dc-unittest.sh rename to run-unittest.sh index 5f8e4788a88..6aaa8c78cb4 100755 --- a/dc-unittest.sh +++ b/run-unittest.sh @@ -16,16 +16,11 @@ usage() { echo "You must specify a test case (arg)!"
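For quick reference, example invocations of the two renamed helper scripts (test-case values taken from the usage texts in this patch):

```sh
./run-unittest.sh --test-case unittests.tools.test_stackhawk_parser.TestStackHawkParser
./run-integration-tests.sh --test-case "Finding integration tests"
```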
echo echo "Example command:" - echo "./dc-unittest.sh --test-case unittests.tools.test_stackhawk_parser.TestStackHawkParser" + echo "./run-unittest.sh --test-case unittests.tools.test_stackhawk_parser.TestStackHawkParser" } while [[ $# -gt 0 ]]; do case $1 in - -p|--profile) - # Leaving this here for backwards compatability - shift # past argument - shift # past value - ;; -t|--test-case) TEST_CASE="$2" shift # past argument From e15e9aa40795186c124f753bf23db1cc9abeee54 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:16:37 +0100 Subject: [PATCH 89/99] Ruff: Fix PTH118, merge PTH11 (#11503) --- dojo/forms.py | 2 +- dojo/jira_link/helper.py | 5 +- dojo/models.py | 3 +- dojo/settings/settings.dist.py | 6 +- dojo/tools/factory.py | 2 +- dojo/views.py | 5 +- ruff.toml | 2 +- tests/file_test.py | 12 ++-- tests/finding_test.py | 8 +-- tests/ibm_appscan_test.py | 4 +- unittests/dojo_test_case.py | 13 +++-- unittests/test_endpoint_meta_import.py | 12 ++-- unittests/test_factory.py | 11 ++-- unittests/test_import_reimport.py | 55 +++++++++---------- unittests/test_importers_closeold.py | 14 ++--- unittests/test_importers_importer.py | 14 ++--- unittests/test_jira_config_engagement_epic.py | 2 +- unittests/test_jira_import_and_pushing_api.py | 14 +++-- unittests/test_jira_template.py | 6 +- unittests/test_parsers.py | 24 ++++---- unittests/test_rest_framework.py | 7 +-- unittests/test_tags.py | 10 ++-- unittests/tools/test_acunetix_parser.py | 24 ++++---- unittests/tools/test_anchore_engine_parser.py | 10 ++-- .../tools/test_anchore_enterprise_parser.py | 12 ++-- unittests/tools/test_anchore_grype_parser.py | 18 +++--- .../tools/test_anchorectl_policies_parser.py | 10 ++-- .../tools/test_anchorectl_vulns_parser.py | 8 +-- unittests/tools/test_api_blackduck_parser.py | 4 +- unittests/tools/test_api_bugcrowd_parser.py | 15 +++-- unittests/tools/test_api_cobalt_parser.py | 26 ++++----- unittests/tools/test_api_edgescan_parser.py | 11 ++-- .../tools/test_api_sonarqube_importer.py | 20 +++---- unittests/tools/test_api_sonarqube_parser.py | 10 ++-- unittests/tools/test_api_vulners_parser.py | 10 ++-- ...appcheck_web_application_scanner_parser.py | 15 +++-- unittests/tools/test_appspider_parser.py | 6 +- unittests/tools/test_aqua_parser.py | 26 ++++----- unittests/tools/test_arachni_parser.py | 8 +-- unittests/tools/test_asff_parser.py | 5 +- unittests/tools/test_auditjs_parser.py | 12 ++-- unittests/tools/test_aws_inspector2_parser.py | 9 +-- unittests/tools/test_aws_prowler_parser.py | 18 +++--- .../tools/test_aws_prowler_v3plus_parser.py | 18 +++--- unittests/tools/test_awssecurityhub_parser.py | 30 +++++----- ..._security_center_recommendations_parser.py | 6 +- unittests/tools/test_bandit_parser.py | 12 ++-- unittests/tools/test_bearer_cli_parser.py | 5 +- .../test_blackduck_binary_analysis_parser.py | 9 ++- .../test_blackduck_component_risk_parser.py | 8 +-- unittests/tools/test_blackduck_parser.py | 19 +++---- unittests/tools/test_brakeman_parser.py | 8 +-- unittests/tools/test_bugcrowd_parser.py | 8 +-- unittests/tools/test_bundler_audit_parser.py | 8 +-- unittests/tools/test_burp_api_parser.py | 8 +-- unittests/tools/test_burp_dastardly_parser.py | 6 +- .../tools/test_burp_enterprise_parser.py | 8 +-- unittests/tools/test_burp_graphql_parser.py | 16 +++--- unittests/tools/test_burp_parser.py | 14 ++--- unittests/tools/test_cargo_audit_parser.py | 6 +- .../test_checkmarx_cxflow_sast_parser.py | 8 +-- 
unittests/tools/test_checkmarx_one_parser.py | 14 ++--- unittests/tools/test_checkmarx_osa_parser.py | 18 +++--- unittests/tools/test_checkmarx_parser.py | 50 ++++++++--------- unittests/tools/test_checkov_parser.py | 14 ++--- unittests/tools/test_chefinspect_parser.py | 8 +-- unittests/tools/test_clair_parser.py | 14 ++--- unittests/tools/test_cloudsploit_parser.py | 8 +-- unittests/tools/test_cobalt_parser.py | 8 +-- unittests/tools/test_codechecker_parser.py | 10 ++-- unittests/tools/test_contrast_parser.py | 6 +- unittests/tools/test_coverity_api_parser.py | 14 ++--- unittests/tools/test_coverity_scan_parser.py | 10 ++-- .../tools/test_crashtest_security_parser.py | 8 +-- unittests/tools/test_cred_scan_parser.py | 8 +-- unittests/tools/test_crunch42_parser.py | 6 +- unittests/tools/test_cyclonedx_parser.py | 32 +++++------ unittests/tools/test_dawnscanner_parser.py | 6 +- .../test_deepfence_threatmapper_parser.py | 10 ++-- .../tools/test_dependency_check_parser.py | 16 +++--- .../tools/test_dependency_track_parser.py | 22 ++++---- unittests/tools/test_detect_secrets_parser.py | 6 +- unittests/tools/test_dockerbench_parser.py | 8 +-- unittests/tools/test_dockle_parser.py | 6 +- unittests/tools/test_drheader_parser.py | 12 ++-- unittests/tools/test_dsop_parser.py | 6 +- unittests/tools/test_eslint_parser.py | 8 +-- unittests/tools/test_fortify_parser.py | 14 ++--- .../tools/test_gcloud_artifact_scan_parser.py | 4 +- unittests/tools/test_generic_parser.py | 24 ++++---- unittests/tools/test_ggshield_parser.py | 8 +-- .../tools/test_github_vulnerability_parser.py | 28 +++++----- .../tools/test_gitlab_api_fuzzing_parser.py | 10 ++-- .../test_gitlab_container_scan_parser.py | 16 +++--- unittests/tools/test_gitlab_dast_parser.py | 12 ++-- .../tools/test_gitlab_dep_scan_parser.py | 16 +++--- unittests/tools/test_gitlab_sast_parser.py | 32 +++++------ ...t_gitlab_secret_detection_report_parser.py | 12 ++-- unittests/tools/test_gitleaks_parser.py | 16 +++--- unittests/tools/test_gosec_parser.py | 4 +- unittests/tools/test_govulncheck_parser.py | 14 ++--- unittests/tools/test_h1_parser.py | 20 +++---- unittests/tools/test_hadolint_parser.py | 6 +- .../tools/test_harbor_vulnerability_parser.py | 12 ++-- unittests/tools/test_hcl_appscan_parser.py | 10 ++-- unittests/tools/test_hcl_asoc_sast_parser.py | 8 +-- unittests/tools/test_horusec_parser.py | 16 +++--- unittests/tools/test_humble_parser.py | 6 +- unittests/tools/test_huskyci_parser.py | 10 ++-- unittests/tools/test_hydra_parser.py | 16 +++--- unittests/tools/test_ibm_app_parser.py | 4 +- unittests/tools/test_immuniweb_parser.py | 8 +-- unittests/tools/test_intsights_parser.py | 16 +++--- unittests/tools/test_invicti_parser.py | 10 ++-- ..._jfrog_xray_api_summary_artifact_parser.py | 10 ++-- ...jfrog_xray_on_demand_binary_scan_parser.py | 10 ++-- .../tools/test_jfrog_xray_unified_parser.py | 12 ++-- unittests/tools/test_jfrogxray_parser.py | 8 +-- unittests/tools/test_kics_parser.py | 6 +- unittests/tools/test_kiuwan_parser.py | 12 ++-- unittests/tools/test_kiuwan_sca_parser.py | 10 ++-- unittests/tools/test_krakend_audit_parser.py | 6 +- unittests/tools/test_kubeaudit_parser.py | 4 +- unittests/tools/test_kubebench_parser.py | 10 ++-- unittests/tools/test_kubehunter_parser.py | 11 ++-- unittests/tools/test_kubescape_parser.py | 8 +-- unittests/tools/test_legitify_parser.py | 8 +-- unittests/tools/test_mend_parser.py | 18 +++--- unittests/tools/test_meterian_parser.py | 16 +++--- .../test_microfocus_webinspect_parser.py | 12 ++-- 
unittests/tools/test_mobsf_parser.py | 18 +++--- .../tools/test_mobsf_scorecard_parser.py | 14 ++--- unittests/tools/test_mobsfscan_parser.py | 8 +-- .../tools/test_mozilla_observatory_parser.py | 18 +++--- unittests/tools/test_ms_defender_parser.py | 16 +++--- unittests/tools/test_nancy_parser.py | 10 ++-- unittests/tools/test_netsparker_parser.py | 12 ++-- .../tools/test_neuvector_compliance_parser.py | 10 ++-- unittests/tools/test_neuvector_parser.py | 10 ++-- unittests/tools/test_nexpose_parser.py | 12 ++-- unittests/tools/test_nikto_parser.py | 20 +++---- unittests/tools/test_nmap_parser.py | 12 ++-- unittests/tools/test_noseyparker_parser.py | 13 ++--- .../tools/test_npm_audit_7_plus_parser.py | 12 ++-- unittests/tools/test_npm_audit_parser.py | 22 ++++---- unittests/tools/test_nsp_parser.py | 6 +- unittests/tools/test_nuclei_parser.py | 20 +++---- unittests/tools/test_openscap_parser.py | 10 ++-- unittests/tools/test_openvas_parser.py | 16 +++--- unittests/tools/test_ort_parser.py | 4 +- .../tools/test_ossindex_devaudit_parser.py | 32 +++++------ unittests/tools/test_osv_scanner_parser.py | 10 ++-- unittests/tools/test_outpost24_parser.py | 8 +-- .../test_php_security_audit_v2_parser.py | 6 +- .../test_php_symfony_security_check_parser.py | 8 +-- unittests/tools/test_pip_audit_parser.py | 14 ++--- unittests/tools/test_pmd_parser.py | 8 +-- unittests/tools/test_popeye_parser.py | 8 +-- unittests/tools/test_progpilot_parser.py | 10 ++-- unittests/tools/test_ptart_parser.py | 19 +++---- unittests/tools/test_pwn_sast_parser.py | 12 ++-- .../test_qualys_hacker_guardian_parser.py | 10 ++-- .../test_qualys_infrascan_webgui_parser.py | 10 ++-- unittests/tools/test_qualys_parser.py | 18 +++--- unittests/tools/test_qualys_webapp_parser.py | 12 ++-- unittests/tools/test_rapplex_parser.py | 10 ++-- .../tools/test_redhatsatellite_parser.py | 10 ++-- unittests/tools/test_retirejs_parser.py | 4 +- unittests/tools/test_risk_recon_parser.py | 8 +-- unittests/tools/test_rubocop_parser.py | 10 ++-- unittests/tools/test_rusty_hog_parser.py | 34 ++++++------ unittests/tools/test_sarif_parser.py | 53 +++++++++--------- unittests/tools/test_scantist_parser.py | 8 +-- unittests/tools/test_scout_suite_parser.py | 10 ++-- unittests/tools/test_semgrep_parser.py | 26 ++++----- unittests/tools/test_skf_parser.py | 4 +- unittests/tools/test_snyk_code_parser.py | 6 +- unittests/tools/test_snyk_parser.py | 32 +++++------ .../tools/test_solar_appscreener_parser.py | 8 +-- unittests/tools/test_sonarqube_parser.py | 44 +++++++-------- unittests/tools/test_sonatype_parser.py | 20 +++---- unittests/tools/test_spotbugs_parser.py | 24 ++++---- unittests/tools/test_ssh_audit_parser.py | 8 +-- unittests/tools/test_ssl_labs_parser.py | 14 ++--- unittests/tools/test_sslscan_parser.py | 8 +-- unittests/tools/test_sslyze_parser.py | 31 +++++------ unittests/tools/test_stackhawk_parser.py | 20 +++---- unittests/tools/test_sysdig_reports_parser.py | 17 +++--- unittests/tools/test_talisman_parser.py | 8 +-- unittests/tools/test_tenable_parser.py | 47 ++++++++-------- unittests/tools/test_terrascan_parser.py | 6 +- unittests/tools/test_testssl_parser.py | 20 +++---- unittests/tools/test_tfsec_parser.py | 10 ++-- unittests/tools/test_threagile_parser.py | 18 +++--- .../tools/test_threat_composer_parser.py | 18 +++--- unittests/tools/test_trivy_operator_parser.py | 5 +- unittests/tools/test_trivy_parser.py | 5 +- unittests/tools/test_trufflehog3_parser.py | 5 +- unittests/tools/test_trufflehog_parser.py | 5 +- 
.../tools/test_trustwave_fusion_api_parser.py | 8 +-- unittests/tools/test_trustwave_parser.py | 5 +- unittests/tools/test_twistlock_parser.py | 24 ++++---- unittests/tools/test_veracode_parser.py | 34 ++++++------ unittests/tools/test_veracode_sca_parser.py | 8 +-- unittests/tools/test_wapiti_parser.py | 10 ++-- unittests/tools/test_wazuh_parser.py | 10 ++-- unittests/tools/test_wfuzz_parser.py | 14 ++--- unittests/tools/test_whispers_parser.py | 15 +++-- .../tools/test_whitehat_sentinel_parser.py | 10 ++-- unittests/tools/test_wiz_parser.py | 10 ++-- unittests/tools/test_wizcli_dir_parser.py | 8 +-- unittests/tools/test_wizcli_iac_parser.py | 8 +-- unittests/tools/test_wizcli_img_parser.py | 8 +-- unittests/tools/test_wpscan_parser.py | 18 +++--- unittests/tools/test_xanitizer_parser.py | 10 ++-- unittests/tools/test_yarn_audit_parser.py | 18 +++--- unittests/tools/test_zap_parser.py | 24 ++++---- 217 files changed, 1361 insertions(+), 1431 deletions(-) diff --git a/dojo/forms.py b/dojo/forms.py index f9a52f9530f..ac88e5e797b 100644 --- a/dojo/forms.py +++ b/dojo/forms.py @@ -2428,7 +2428,7 @@ def get_jira_issue_template_dir_choices(): for dirname in dirnames: clean_base_dir = base_dir.removeprefix(settings.TEMPLATE_DIR_PREFIX) - template_dir_list.append((os.path.join(clean_base_dir, dirname), dirname)) + template_dir_list.append((str(Path(clean_base_dir) / dirname), dirname)) logger.debug("templates: %s", template_dir_list) return template_dir_list diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index a557a05a3c1..56c33771c5e 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -1,7 +1,6 @@ import io import json import logging -import os from pathlib import Path from typing import Any @@ -333,8 +332,8 @@ def get_jira_issue_template(obj): template_dir = "issue-trackers/jira_full/" if isinstance(obj, Finding_Group): - return os.path.join(template_dir, "jira-finding-group-description.tpl") - return os.path.join(template_dir, "jira-description.tpl") + return Path(template_dir) / "jira-finding-group-description.tpl" + return Path(template_dir) / "jira-description.tpl" def get_jira_creation(obj): diff --git a/dojo/models.py b/dojo/models.py index 67f30ee20ab..ddb03290194 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -2,7 +2,6 @@ import copy import hashlib import logging -import os import re import warnings from contextlib import suppress @@ -150,7 +149,7 @@ def __call__(self, model_instance, filename): filename += ext if self.directory is None: return filename - return os.path.join(now().strftime(self.directory), filename) + return Path(now().strftime(self.directory)) / filename class Regulation(models.Model): diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index ecb67768183..f959cbf605a 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -442,7 +442,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
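As an editorial aside on the PTH118 rewrites that follow: `os.path.join(a, b)` and `Path(a) / b` build the same path, with `str()` still needed where an API insists on a plain string. A minimal sketch:

```python
import os.path
from pathlib import Path

base, leaf = "components", "node_modules"
# The two spellings are equivalent path-wise
assert Path(os.path.join(base, leaf)) == Path(base) / leaf
# Explicit conversion for string-only consumers
as_string = str(Path(base) / leaf)
print(as_string)  # components/node_modules (components\node_modules on Windows)
```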
- os.path.join(Path(DOJO_ROOT).parent, "components", "node_modules"), + Path(DOJO_ROOT).parent / "components" / "node_modules", ) # List of finder classes that know how to find static files in @@ -912,8 +912,6 @@ def saml2_attrib_map_format(dict): SAML2_LOGIN_BUTTON_TEXT = env("DD_SAML2_LOGIN_BUTTON_TEXT") SAML2_LOGOUT_URL = env("DD_SAML2_LOGOUT_URL") if SAML2_ENABLED: - from os import path - import saml2 import saml2.saml # SSO_URL = env('DD_SSO_URL') @@ -949,7 +947,7 @@ def saml2_attrib_map_format(dict): "entityid": str(SAML2_ENTITY_ID), # directory with attribute mapping - "attribute_map_dir": path.join(BASEDIR, "attribute-maps"), + "attribute_map_dir": Path(BASEDIR) / "attribute-maps", # do now discard attributes not specified in attribute-maps "allow_unknown_attributes": SAML_ALLOW_UNKNOWN_ATTRIBUTES, # this block states what services we provide diff --git a/dojo/tools/factory.py b/dojo/tools/factory.py index b69fea12ac0..daddd62f00c 100644 --- a/dojo/tools/factory.py +++ b/dojo/tools/factory.py @@ -117,7 +117,7 @@ def requires_tool_type(scan_type): package_dir = str(Path(__file__).resolve().parent) for module_name in os.listdir(package_dir): # noqa: PTH208 # check if it's dir - if Path(os.path.join(package_dir, module_name)).is_dir(): + if (Path(package_dir) / module_name).is_dir(): try: # check if it's a Python module if find_spec(f"dojo.tools.{module_name}.parser"): diff --git a/dojo/views.py b/dojo/views.py index df65be4d6bd..8149671618e 100644 --- a/dojo/views.py +++ b/dojo/views.py @@ -1,5 +1,4 @@ import logging -import os from pathlib import Path from auditlog.models import LogEntry @@ -151,7 +150,7 @@ def manage_files(request, oid, obj_type): for o in files_formset.deleted_objects: logger.debug("removing file: %s", o.file.name) - Path(os.path.join(settings.MEDIA_ROOT, o.file.name)).unlink() + (Path(settings.MEDIA_ROOT) / o.file.name).unlink() for o in files_formset.new_objects: logger.debug("adding file: %s", o.file.name) @@ -162,7 +161,7 @@ def manage_files(request, oid, obj_type): finding__isnull=True) for o in orphan_files: logger.debug("purging orphan file: %s", o.file.name) - Path(os.path.join(settings.MEDIA_ROOT, o.file.name)).unlink() + (Path(settings.MEDIA_ROOT) / o.file.name).unlink() o.delete() messages.add_message( diff --git a/ruff.toml b/ruff.toml index e81866ef214..848c249f71a 100644 --- a/ruff.toml +++ b/ruff.toml @@ -68,7 +68,7 @@ select = [ "TC", "INT", "ARG003", "ARG004", "ARG005", - "PTH2", "PTH10", "PTH110", "PTH111", "PTH112", "PTH113", "PTH114", "PTH115", "PTH116", "PTH117", "PTH119", "PTH120", "PTH121", "PTH122", "PTH124", + "PTH2", "PTH10", "PTH11", "PTH120", "PTH121", "PTH122", "PTH124", "TD001", "TD004", "TD005", "FIX001", "FIX003", "PD", diff --git a/tests/file_test.py b/tests/file_test.py index 686f133e4d0..6d603c7e0b3 100644 --- a/tests/file_test.py +++ b/tests/file_test.py @@ -35,9 +35,9 @@ def test_add_file_finding_level(self): driver.find_element(By.LINK_TEXT, "Manage Files").click() # select first file input field: form-0-image # Set full image path for image file 'strange.png - image_path = os.path.join(dir_path, "finding_image.png") + image_path = dir_path / "finding_image.png" driver.find_element(By.ID, "id_form-0-title").send_keys("Finding Title") - driver.find_element(By.ID, "id_form-0-file").send_keys(image_path) + driver.find_element(By.ID, "id_form-0-file").send_keys(str(image_path)) # Save uploaded image with WaitForPageLoad(driver, timeout=50): driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click() @@ -76,9 +76,9 @@ 
def test_add_file_test_level(self): driver.find_element(By.NAME, "Manage Files").click() # select first file input field: form-0-image # Set full image path for image file 'strange.png - image_path = os.path.join(dir_path, "finding_image.png") + image_path = dir_path / "finding_image.png" driver.find_element(By.ID, "id_form-0-title").send_keys("Test Title") - driver.find_element(By.ID, "id_form-0-file").send_keys(image_path) + driver.find_element(By.ID, "id_form-0-file").send_keys(str(image_path)) # Save uploaded image with WaitForPageLoad(driver, timeout=50): driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click() @@ -116,9 +116,9 @@ def test_add_file_engagement_level(self): driver.find_element(By.NAME, "Manage Files").click() # select first file input field: form-0-image # Set full image path for image file 'strange.png - image_path = os.path.join(dir_path, "finding_image.png") + image_path = dir_path / "finding_image.png" driver.find_element(By.ID, "id_form-0-title").send_keys("Engagement Title") - driver.find_element(By.ID, "id_form-0-file").send_keys(image_path) + driver.find_element(By.ID, "id_form-0-file").send_keys(str(image_path)) # Save uploaded image with WaitForPageLoad(driver, timeout=50): driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click() diff --git a/tests/finding_test.py b/tests/finding_test.py index 4e08744c5e9..751a4a26605 100644 --- a/tests/finding_test.py +++ b/tests/finding_test.py @@ -146,8 +146,8 @@ def test_add_image(self): driver.find_element(By.LINK_TEXT, "Manage Files").click() # select first file input field: form-0-image # Set full image path for image file 'strange.png - image_path = os.path.join(dir_path, "finding_image.png") - driver.find_element(By.ID, "id_form-0-file").send_keys(image_path) + image_path = dir_path / "finding_image.png" + driver.find_element(By.ID, "id_form-0-file").send_keys(str(image_path)) driver.find_element(By.ID, "id_form-0-title").send_keys("Image Title") # Save uploaded image with WaitForPageLoad(driver, timeout=50): @@ -466,8 +466,8 @@ def test_import_scan_result(self): # Select `Default` as the Environment Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development") # upload scan file - file_path = os.path.join(dir_path, "zap_sample.xml") - driver.find_element(By.NAME, "file").send_keys(file_path) + file_path = dir_path / "zap_sample.xml" + driver.find_element(By.NAME, "file").send_keys(str(file_path)) # Click Submit button with WaitForPageLoad(driver, timeout=50): driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click() diff --git a/tests/ibm_appscan_test.py b/tests/ibm_appscan_test.py index 451e387db1c..3aee47c7eaf 100644 --- a/tests/ibm_appscan_test.py +++ b/tests/ibm_appscan_test.py @@ -31,8 +31,8 @@ def test_import_ibm_app_scan_result(self): # Select `Default` as the Environment Select(driver.find_element(By.ID, "id_environment")).select_by_visible_text("Development") # Upload Scan result file - scanner_file = os.path.join(dir_path, "ibm_appscan_xml_file.xml") - driver.find_element(By.NAME, "file").send_keys(scanner_file) + scanner_file = dir_path / "ibm_appscan_xml_file.xml" + driver.find_element(By.NAME, "file").send_keys(str(scanner_file)) # click on upload button driver.find_elements(By.CSS_SELECTOR, "button.btn.btn-primary")[1].click() # Query the site to determine if the finding has been added diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py index d22073e2730..0de526b358b 100644 --- a/unittests/dojo_test_case.py +++ 
b/unittests/dojo_test_case.py @@ -1,7 +1,6 @@ import copy import json import logging -import os from functools import wraps from itertools import chain from pathlib import Path @@ -40,7 +39,11 @@ def get_unit_tests_path(): - return str(Path(os.path.realpath(__file__)).parent) + return Path(__file__).parent + + +def get_unit_tests_scans_path(parser): + return Path(__file__).parent / "scans" / parser def toggle_system_setting_boolean(flag_name, value): @@ -504,7 +507,7 @@ def import_scan_with_params(self, filename, scan_type="ZAP Scan", engagement=1, product_name=None, product_type_name=None, auto_create_context=None, expected_http_status_code=201, test_title=None, scan_date=None, service=None, forceActive=True, forceVerified=True): - with open(get_unit_tests_path() + "/" + filename, encoding="utf-8") as testfile: + with open(get_unit_tests_path() / filename, encoding="utf-8") as testfile: payload = { "minimum_severity": minimum_severity, "active": active, @@ -556,7 +559,7 @@ def import_scan_with_params(self, filename, scan_type="ZAP Scan", engagement=1, def reimport_scan_with_params(self, test_id, filename, scan_type="ZAP Scan", engagement=1, minimum_severity="Low", active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, group_by=None, engagement_name=None, scan_date=None, product_name=None, product_type_name=None, auto_create_context=None, expected_http_status_code=201, test_title=None): - with open(get_unit_tests_path() + "/" + filename, encoding="utf-8") as testfile: + with open(filename, encoding="utf-8") as testfile: payload = { "minimum_severity": minimum_severity, "active": active, @@ -605,7 +608,7 @@ def reimport_scan_with_params(self, test_id, filename, scan_type="ZAP Scan", eng def endpoint_meta_import_scan_with_params(self, filename, product=1, product_name=None, create_endpoints=True, create_tags=True, create_dojo_meta=True, expected_http_status_code=201): - with open(get_unit_tests_path() + "/" + filename, encoding="utf-8") as testfile: + with open(filename, encoding="utf-8") as testfile: payload = { "create_endpoints": create_endpoints, "create_tags": create_tags, diff --git a/unittests/test_endpoint_meta_import.py b/unittests/test_endpoint_meta_import.py index d159dbd4f2a..817c14303bc 100644 --- a/unittests/test_endpoint_meta_import.py +++ b/unittests/test_endpoint_meta_import.py @@ -16,11 +16,11 @@ # test methods to be used both by API Test and UI Test class EndpointMetaImportMixin: def __init__(self, *args, **kwargs): - self.meta_import_full = "endpoint_meta_import/full_endpoint_meta_import.csv" - self.meta_import_no_hostname = "endpoint_meta_import/no_hostname_endpoint_meta_import.csv" - self.meta_import_updated_added = "endpoint_meta_import/updated_added_endpoint_meta_import.csv" - self.meta_import_updated_removed = "endpoint_meta_import/updated_removed_endpoint_meta_import.csv" - self.meta_import_updated_changed = "endpoint_meta_import/updated_changed_endpoint_meta_import.csv" + self.meta_import_full = get_unit_tests_path() / "endpoint_meta_import" / "full_endpoint_meta_import.csv" + self.meta_import_no_hostname = get_unit_tests_path() / "endpoint_meta_import" / "no_hostname_endpoint_meta_import.csv" + self.meta_import_updated_added = get_unit_tests_path() / "endpoint_meta_import" / "updated_added_endpoint_meta_import.csv" + self.meta_import_updated_removed = get_unit_tests_path() / "endpoint_meta_import" / "updated_removed_endpoint_meta_import.csv" + self.meta_import_updated_changed = get_unit_tests_path() / "endpoint_meta_import" / 
"updated_changed_endpoint_meta_import.csv" self.updated_tag_host = "feedback.internal.google.com" def test_endpoint_meta_import_endpoint_create_tag_create_meta_create(self): @@ -206,7 +206,7 @@ def endpoint_meta_import_ui(self, product, payload): def endpoint_meta_import_scan_with_params_ui(self, filename, product=1, create_endpoints=True, create_tags=True, create_dojo_meta=True, expected_http_status_code=201): - with open(get_unit_tests_path() + "/" + filename, encoding="utf-8") as testfile: + with open(filename, encoding="utf-8") as testfile: payload = { "create_endpoints": create_endpoints, "create_tags": create_tags, diff --git a/unittests/test_factory.py b/unittests/test_factory.py index 5d8b4040dd5..39c4a789aea 100644 --- a/unittests/test_factory.py +++ b/unittests/test_factory.py @@ -1,5 +1,4 @@ import logging -import os from importlib import import_module from importlib.util import find_spec from inspect import isclass @@ -16,25 +15,25 @@ class TestFactory(DojoTestCase): def test_get_parser(self): with self.subTest(scan_type="Acunetix Scan"): scan_type = "Acunetix Scan" - testfile = open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml", encoding="utf-8") + testfile = open(get_unit_tests_path() / "scans" / "acunetix" / "one_finding.xml", encoding="utf-8") parser = get_parser(scan_type) parser.get_findings(testfile, Test()) testfile.close() with self.subTest(scan_type="Anchore Engine Scan"): scan_type = "Anchore Engine Scan" - testfile = open(get_unit_tests_path() + "/scans/anchore_engine/one_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_path() / "scans" / "anchore_engine" / "one_vuln.json", encoding="utf-8") parser = get_parser(scan_type) parser.get_findings(testfile, Test()) testfile.close() with self.subTest(scan_type="Tenable Scan"): scan_type = "Tenable Scan" - testfile = open(get_unit_tests_path() + "/scans/tenable/nessus/nessus_v_unknown.xml", encoding="utf-8") + testfile = open(get_unit_tests_path() / "scans" / "tenable/nessus" / "nessus_v_unknown.xml", encoding="utf-8") parser = get_parser(scan_type) parser.get_findings(testfile, Test()) testfile.close() with self.subTest(scan_type="ZAP Scan"): scan_type = "ZAP Scan" - testfile = open(get_unit_tests_path() + "/scans/zap/some_2.9.0.xml", encoding="utf-8") + testfile = open(get_unit_tests_path() / "scans" / "zap" / "some_2.9.0.xml", encoding="utf-8") parser = get_parser(scan_type) parser.get_findings(testfile, Test()) testfile.close() @@ -73,7 +72,7 @@ def test_parser_name_matches_module(self): for module_name in module_names: if module_name in excluded_parsers: continue - if Path(os.path.join(package_dir, module_name)).is_dir(): + if (Path(package_dir) / module_name).is_dir(): found = False if find_spec(f"dojo.tools.{module_name}.parser"): module = import_module(f"dojo.tools.{module_name}.parser") diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py index 02548ccb57c..89e19f11db1 100644 --- a/unittests/test_import_reimport.py +++ b/unittests/test_import_reimport.py @@ -12,7 +12,7 @@ from dojo.models import Finding, Test, Test_Type, User -from .dojo_test_case import DojoAPITestCase, get_unit_tests_path +from .dojo_test_case import DojoAPITestCase, get_unit_tests_scans_path from .test_utils import assertTestImportModelsCreated logger = logging.getLogger(__name__) @@ -54,52 +54,51 @@ # test methods to be used both by API Test and UI Test class ImportReimportMixin: def __init__(self, *args, **kwargs): - self.scans_path = "/scans/" - self.zap_sample0_filename = 
self.scans_path + "zap/0_zap_sample.xml" - self.zap_sample1_filename = self.scans_path + "zap/1_zap_sample_0_and_new_absent.xml" - self.zap_sample2_filename = self.scans_path + "zap/2_zap_sample_0_and_new_endpoint.xml" - self.zap_sample3_filename = self.scans_path + "zap/3_zap_sampl_0_and_different_severities.xml" + self.zap_sample0_filename = get_unit_tests_scans_path("zap") / "0_zap_sample.xml" + self.zap_sample1_filename = get_unit_tests_scans_path("zap") / "1_zap_sample_0_and_new_absent.xml" + self.zap_sample2_filename = get_unit_tests_scans_path("zap") / "2_zap_sample_0_and_new_endpoint.xml" + self.zap_sample3_filename = get_unit_tests_scans_path("zap") / "3_zap_sampl_0_and_different_severities.xml" - self.anchore_file_name = self.scans_path + "anchore_engine/one_vuln_many_files.json" + self.anchore_file_name = get_unit_tests_scans_path("anchore_engine") / "one_vuln_many_files.json" self.scan_type_anchore = "Anchore Engine Scan" - self.acunetix_file_name = self.scans_path + "acunetix/one_finding.xml" + self.acunetix_file_name = get_unit_tests_scans_path("acunetix") / "one_finding.xml" self.scan_type_acunetix = "Acunetix Scan" - self.gitlab_dep_scan_components_filename = f"{self.scans_path}gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json" + self.gitlab_dep_scan_components_filename = get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-many-vuln_v15.json" self.scan_type_gtlab_dep_scan = "GitLab Dependency Scanning Report" - self.sonarqube_file_name1 = self.scans_path + "sonarqube/sonar-6-findings.html" - self.sonarqube_file_name2 = self.scans_path + "sonarqube/sonar-6-findings-1-unique_id_changed.html" + self.sonarqube_file_name1 = get_unit_tests_scans_path("sonarqube") / "sonar-6-findings.html" + self.sonarqube_file_name2 = get_unit_tests_scans_path("sonarqube") / "sonar-6-findings-1-unique_id_changed.html" self.scan_type_sonarqube_detailed = "SonarQube Scan detailed" - self.veracode_many_findings = self.scans_path + "veracode/many_findings.xml" - self.veracode_same_hash_code_different_unique_id = self.scans_path + "veracode/many_findings_same_hash_code_different_unique_id.xml" - self.veracode_same_unique_id_different_hash_code = self.scans_path + "veracode/many_findings_same_unique_id_different_hash_code.xml" - self.veracode_different_hash_code_different_unique_id = self.scans_path + "veracode/many_findings_different_hash_code_different_unique_id.xml" - self.veracode_mitigated_findings = self.scans_path + "veracode/mitigated_finding.xml" + self.veracode_many_findings = get_unit_tests_scans_path("veracode") / "many_findings.xml" + self.veracode_same_hash_code_different_unique_id = get_unit_tests_scans_path("veracode") / "many_findings_same_hash_code_different_unique_id.xml" + self.veracode_same_unique_id_different_hash_code = get_unit_tests_scans_path("veracode") / "many_findings_same_unique_id_different_hash_code.xml" + self.veracode_different_hash_code_different_unique_id = get_unit_tests_scans_path("veracode") / "many_findings_different_hash_code_different_unique_id.xml" + self.veracode_mitigated_findings = get_unit_tests_scans_path("veracode") / "mitigated_finding.xml" self.scan_type_veracode = "Veracode Scan" - self.clair_few_findings = self.scans_path + "clair/clair_few_vuln.json" - self.clair_empty = self.scans_path + "clair/clair_empty.json" + self.clair_few_findings = get_unit_tests_scans_path("clair") / "clair_few_vuln.json" + self.clair_empty = get_unit_tests_scans_path("clair") / "clair_empty.json" self.scan_type_clair = "Clair Scan" 
self.scan_type_generic = "Generic Findings Import" - self.generic_filename_with_file = self.scans_path + "generic/test_with_image.json" - self.generic_import_1 = self.scans_path + "generic/test_import_report1.json" - self.generic_import_2 = self.scans_path + "generic/test_import_report2.json" + self.generic_filename_with_file = get_unit_tests_scans_path("generic") / "test_with_image.json" + self.generic_import_1 = get_unit_tests_scans_path("generic") / "test_import_report1.json" + self.generic_import_2 = get_unit_tests_scans_path("generic") / "test_import_report2.json" - self.aws_prowler_file_name = self.scans_path + "aws_prowler/many_vuln.json" - self.aws_prowler_file_name_plus_one = self.scans_path + "aws_prowler/many_vuln_plus_one.json" + self.aws_prowler_file_name = get_unit_tests_scans_path("aws_prowler") / "many_vuln.json" + self.aws_prowler_file_name_plus_one = get_unit_tests_scans_path("aws_prowler") / "many_vuln_plus_one.json" self.scan_type_aws_prowler = "AWS Prowler Scan" - self.nuclei_empty = self.scans_path + "nuclei/empty.jsonl" + self.nuclei_empty = get_unit_tests_scans_path("nuclei") / "empty.jsonl" - self.gitlab_dast_file_name = f"{self.scans_path}gitlab_dast/gitlab_dast_one_vul_v15.json" + self.gitlab_dast_file_name = get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_one_vul_v15.json" self.scan_type_gitlab_dast = "GitLab DAST Report" - self.anchore_grype_file_name = self.scans_path + "anchore_grype/check_all_fields.json" + self.anchore_grype_file_name = get_unit_tests_scans_path("anchore_grype") / "check_all_fields.json" self.anchore_grype_scan_type = "Anchore Grype" # import zap scan, testing: @@ -1823,7 +1822,7 @@ def import_scan_with_params_ui(self, filename, scan_type="ZAP Scan", engagement= elif not verified: verifiedPayload = "force_to_false" - with open(get_unit_tests_path() + filename, encoding="utf-8") as testfile: + with open(filename, encoding="utf-8") as testfile: payload = { "minimum_severity": minimum_severity, "active": activePayload, @@ -1861,7 +1860,7 @@ def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan", if not verified: verifiedPayload = "force_to_false" - with open(get_unit_tests_path() + filename, encoding="utf-8") as testfile: + with open(filename, encoding="utf-8") as testfile: payload = { "minimum_severity": minimum_severity, "active": activePayload, diff --git a/unittests/test_importers_closeold.py b/unittests/test_importers_closeold.py index 2a9d82978f2..df36f1509b8 100644 --- a/unittests/test_importers_closeold.py +++ b/unittests/test_importers_closeold.py @@ -5,7 +5,7 @@ from dojo.importers.default_importer import DefaultImporter from dojo.models import Development_Environment, Engagement, Product, Product_Type, User -from .dojo_test_case import DojoTestCase, get_unit_tests_path +from .dojo_test_case import DojoTestCase, get_unit_tests_scans_path logger = logging.getLogger(__name__) @@ -37,19 +37,19 @@ def test_close_old_same_engagement(self): "scan_type": scan_type, } # Import first test - with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: + with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: importer = DefaultImporter(close_old_findings=False, **import_options) _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan) self.assertEqual(4, len_new_findings) self.assertEqual(0, len_closed_findings) # Import same test, should close no findings 
- with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: + with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: importer = DefaultImporter(close_old_findings=True, **import_options) _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan) self.assertEqual(4, len_new_findings) self.assertEqual(0, len_closed_findings) # Import test with only one finding. Remaining findings should close - with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+", encoding="utf-8") as single_finding_scan: + with open(get_unit_tests_scans_path("acunetix") / "one_finding.xml", "r+", encoding="utf-8") as single_finding_scan: importer = DefaultImporter(close_old_findings=True, **import_options) _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(single_finding_scan) self.assertEqual(1, len_new_findings) @@ -95,19 +95,19 @@ def test_close_old_same_product_scan(self): "scan_type": scan_type, } # Import first test - with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: + with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: importer = DefaultImporter(engagement=engagement1, close_old_findings=False, **import_options) _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan) self.assertEqual(4, len_new_findings) self.assertEqual(0, len_closed_findings) # Import same test, should close no findings - with open(f"{get_unit_tests_path()}/scans/acunetix/many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: + with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", "r+", encoding="utf-8") as many_findings_scan: importer = DefaultImporter(engagement=engagement2, close_old_findings=True, **import_options) _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(many_findings_scan) self.assertEqual(4, len_new_findings) self.assertEqual(0, len_closed_findings) # Import test with only one finding. 
-        with open(f"{get_unit_tests_path()}/scans/acunetix/one_finding.xml", "r+", encoding="utf-8") as single_finding_scan:
+        with open(get_unit_tests_scans_path("acunetix") / "one_finding.xml", "r+", encoding="utf-8") as single_finding_scan:
            importer = DefaultImporter(engagement=engagement3, close_old_findings=True, **import_options)
            _, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(single_finding_scan)
            self.assertEqual(1, len_new_findings)
diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py
index 41baf6d78e9..79ed5bbbe96 100644
--- a/unittests/test_importers_importer.py
+++ b/unittests/test_importers_importer.py
@@ -12,16 +12,16 @@
 from dojo.tools.sarif.parser import SarifParser
 from dojo.utils import get_object_or_none
-from .dojo_test_case import DojoAPITestCase, DojoTestCase, get_unit_tests_path
+from .dojo_test_case import DojoAPITestCase, DojoTestCase, get_unit_tests_path, get_unit_tests_scans_path
 from .test_utils import assertImportModelsCreated
 logger = logging.getLogger(__name__)
-NPM_AUDIT_NO_VULN_FILENAME = "scans/npm_audit/no_vuln.json"
+NPM_AUDIT_NO_VULN_FILENAME = get_unit_tests_scans_path("npm_audit") / "no_vuln.json"
 NPM_AUDIT_SCAN_TYPE = "NPM Audit Scan"
-ACUNETIX_AUDIT_ONE_VULN_FILENAME = "scans/acunetix/one_finding.xml"
-ENDPOINT_META_IMPORTER_FILENAME = "endpoint_meta_import/no_endpoint_meta_import.csv"
+ACUNETIX_AUDIT_ONE_VULN_FILENAME = get_unit_tests_scans_path("acunetix") / "one_finding.xml"
+ENDPOINT_META_IMPORTER_FILENAME = get_unit_tests_path() / "endpoint_meta_import" / "no_endpoint_meta_import.csv"
 ENGAGEMENT_NAME_DEFAULT = "Engagement 1"
 ENGAGEMENT_NAME_NEW = "Engagement New 1"
@@ -39,7 +39,7 @@ class TestDojoDefaultImporter(DojoTestCase):
     def test_parse_findings(self):
-        with open(get_unit_tests_path() + "/scans/acunetix/one_finding.xml", encoding="utf-8") as scan:
+        with open(get_unit_tests_path() / "scans" / "acunetix" / "one_finding.xml", encoding="utf-8") as scan:
             scan_type = "Acunetix Scan"
             user, _created = User.objects.get_or_create(username="admin")
             product_type, _created = Product_Type.objects.get_or_create(name="test")
@@ -80,7 +80,7 @@ def test_parse_findings(self):
             self.assertIn(finding.numerical_severity, ["S0", "S1", "S2", "S3", "S4"])
     def test_import_scan(self):
-        with open(get_unit_tests_path() + "/scans/sarif/spotbugs.sarif", encoding="utf-8") as scan:
+        with open(get_unit_tests_path() / "scans" / "sarif" / "spotbugs.sarif", encoding="utf-8") as scan:
             scan_type = SarifParser().get_scan_types()[0]  # SARIF format implement the new method
             user, _ = User.objects.get_or_create(username="admin")
             product_type, _ = Product_Type.objects.get_or_create(name="test2")
@@ -114,7 +114,7 @@ def test_import_scan(self):
         self.assertEqual(0, len_closed_findings)
     def test_import_scan_without_test_scan_type(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as scan:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-1-vuln_v15.json", encoding="utf-8") as scan:
             # GitLabSastParser implements get_tests but report has no scanner name
             scan_type = GitlabSastParser().get_scan_types()[0]
             user, _ = User.objects.get_or_create(username="admin")
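Note that the import helpers and tests above now hand the Path straight to open() with no string concatenation. This relies on open() accepting any os.PathLike object, which it has done since Python 3.6 (PEP 519). A hypothetical illustration of those call sites:

    from pathlib import Path

    # No str() conversion is needed; open() takes os.PathLike directly.
    report = Path("unittests") / "scans" / "acunetix" / "one_finding.xml"
    with open(report, encoding="utf-8") as testfile:
        data = testfile.read()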
diff --git a/unittests/test_jira_config_engagement_epic.py b/unittests/test_jira_config_engagement_epic.py
index 7b6b753416e..afced117877 100644
--- a/unittests/test_jira_config_engagement_epic.py
+++ b/unittests/test_jira_config_engagement_epic.py
@@ -27,7 +27,7 @@ def _get_vcr(self, **kwargs):
         my_vcr.record_mode = "once"
         my_vcr.path_transformer = VCR.ensure_suffix(".yaml")
         my_vcr.filter_headers = ["Authorization", "X-Atlassian-Token"]
-        my_vcr.cassette_library_dir = get_unit_tests_path() + "/vcr/jira/"
+        my_vcr.cassette_library_dir = str(get_unit_tests_path() / "vcr" / "jira")
         # filters headers doesn't seem to work for cookies, so use callbacks to filter cookies from being recorded
         my_vcr.before_record_request = self.before_record_request
         my_vcr.before_record_response = self.before_record_response
diff --git a/unittests/test_jira_import_and_pushing_api.py b/unittests/test_jira_import_and_pushing_api.py
index 52f1c936fe6..6c0bcb0d26a 100644
--- a/unittests/test_jira_import_and_pushing_api.py
+++ b/unittests/test_jira_import_and_pushing_api.py
@@ -11,7 +11,12 @@
 from dojo.jira_link import helper as jira_helper
 from dojo.models import Finding, Finding_Group, JIRA_Instance, Risk_Acceptance, User
-from .dojo_test_case import DojoVCRAPITestCase, get_unit_tests_path, toggle_system_setting_boolean
+from .dojo_test_case import (
+    DojoVCRAPITestCase,
+    get_unit_tests_path,
+    get_unit_tests_scans_path,
+    toggle_system_setting_boolean,
+)
 logger = logging.getLogger(__name__)
@@ -52,7 +57,7 @@ def _get_vcr(self, **kwargs):
         my_vcr.record_mode = "once"
         my_vcr.path_transformer = VCR.ensure_suffix(".yaml")
         my_vcr.filter_headers = ["Authorization", "X-Atlassian-Token"]
-        my_vcr.cassette_library_dir = get_unit_tests_path() + "/vcr/jira/"
+        my_vcr.cassette_library_dir = str(get_unit_tests_path() / "vcr" / "jira")
         # filters headers doesn't seem to work for cookies, so use callbacks to filter cookies from being recorded
         my_vcr.before_record_request = self.before_record_request
         my_vcr.before_record_response = self.before_record_response
@@ -67,9 +72,8 @@ def setUp(self):
         token = Token.objects.get(user=self.testuser)
         self.client = APIClient()
         self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key)
-        self.scans_path = "/scans/"
-        self.zap_sample5_filename = self.scans_path + "zap/5_zap_sample_one.xml"
-        self.npm_groups_sample_filename = self.scans_path + "npm_audit/many_vuln_with_groups.json"
+        self.zap_sample5_filename = get_unit_tests_scans_path("zap") / "5_zap_sample_one.xml"
+        self.npm_groups_sample_filename = get_unit_tests_scans_path("npm_audit") / "many_vuln_with_groups.json"
         self.client.force_login(self.get_test_admin())
     def test_import_no_push_to_jira(self):
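Unlike the open() calls, the cassette directory in both _get_vcr hunks is converted with str(...) before assignment. vcrpy's configuration long predates pathlib, so passing a plain string is presumably the safe choice; a sketch of the pattern under that assumption:

    from pathlib import Path

    import vcr

    my_vcr = vcr.VCR()
    # Explicit str() mirrors the hunks above: cassette_library_dir is a real
    # vcrpy setting, and handing it a plain string avoids Path/str mixing.
    my_vcr.cassette_library_dir = str(Path(__file__).parent / "vcr" / "jira")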
diff --git a/unittests/test_jira_template.py b/unittests/test_jira_template.py
index f62c693a6a3..961cdefbf9b 100644
--- a/unittests/test_jira_template.py
+++ b/unittests/test_jira_template.py
@@ -25,14 +25,14 @@ def test_get_jira_issue_template_dir_from_project(self):
         jira_project.issue_template_dir = "issue-trackers/jira_full_extra"
         jira_project.save()
-        self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full_extra/jira-description.tpl")
+        self.assertEqual(str(jira_helper.get_jira_issue_template(product)), "issue-trackers/jira_full_extra/jira-description.tpl")
     def test_get_jira_issue_template_dir_from_instance(self):
         product = Product.objects.get(id=1)
         jira_project = jira_helper.get_jira_project(product)
         jira_project.issue_template_dir = None
         jira_project.save()
-        self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full/jira-description.tpl")
+        self.assertEqual(str(jira_helper.get_jira_issue_template(product)), "issue-trackers/jira_full/jira-description.tpl")
     def test_get_jira_project_and_instance_no_issue_template_dir(self):
         product = Product.objects.get(id=1)
@@ -43,4 +43,4 @@ def test_get_jira_project_and_instance_no_issue_template_dir(self):
         jira_instance.issue_template_dir = None
         jira_instance.save()
         # no template should return default
-        self.assertEqual(jira_helper.get_jira_issue_template(product), "issue-trackers/jira_full/jira-description.tpl")
+        self.assertEqual(str(jira_helper.get_jira_issue_template(product)), "issue-trackers/jira_full/jira-description.tpl")
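The str(...) wrappers added to the three test_jira_template.py assertions are needed because jira_helper.get_jira_issue_template evidently returns a Path after this refactor, and pathlib objects never compare equal to plain strings:

    from pathlib import PurePosixPath

    tpl = PurePosixPath("issue-trackers/jira_full/jira-description.tpl")
    print(tpl == "issue-trackers/jira_full/jira-description.tpl")       # False
    print(str(tpl) == "issue-trackers/jira_full/jira-description.tpl")  # True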
diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py
index 2e61c48273b..9a7da594d13 100644
--- a/unittests/test_parsers.py
+++ b/unittests/test_parsers.py
@@ -5,13 +5,13 @@
 from .dojo_test_case import DojoTestCase, get_unit_tests_path
-basedir = os.path.join(get_unit_tests_path(), "..")
+basedir = get_unit_tests_path().parent
 @test_tag("parser-supplement-tests")
 class TestParsers(DojoTestCase):
     def test_file_existence(self):
-        for parser_dir in os.scandir(os.path.join(basedir, "dojo", "tools")):
+        for parser_dir in os.scandir(Path(basedir) / "dojo" / "tools"):
             if parser_dir.is_file() or parser_dir.name == "__pycache__":
                 continue  # this is not parser dir but some support file
@@ -28,7 +28,7 @@ def test_file_existence(self):
                 "wizcli_common_parsers",  # common class for other wizcli parsers
             ]:
                 with self.subTest(parser=parser_dir.name, category="docs"):
-                    doc_file = os.path.join(basedir, "docs", "content", "en", "connecting_your_tools", "parsers", category, f"{doc_name}.md")
+                    doc_file = Path(basedir) / "docs" / "content" / "en" / "connecting_your_tools" / "parsers" / category / f"{doc_name}.md"
                     self.assertTrue(
                         Path(doc_file).is_file(),
                         f"Documentation file '{doc_file}' is missing or using different name",
@@ -53,7 +53,7 @@ def test_file_existence(self):
                 "wizcli_common_parsers",  # common class for other wizcli parsers
             ]:
                 with self.subTest(parser=parser_dir.name, category="parser"):
-                    parser_test_file = os.path.join(basedir, "unittests", "tools", f"test_{parser_dir.name}_parser.py")
+                    parser_test_file = Path(basedir) / "unittests" / "tools" / f"test_{parser_dir.name}_parser.py"
                     self.assertTrue(
                         Path(parser_test_file).is_file(),
                         f"Unittest of parser '{parser_test_file}' is missing or using different name",
@@ -64,7 +64,7 @@ def test_file_existence(self):
                 "wizcli_common_parsers",  # common class for other wizcli parsers
             ]:
                 with self.subTest(parser=parser_dir.name, category="testfiles"):
-                    scan_dir = os.path.join(basedir, "unittests", "scans", parser_dir.name)
+                    scan_dir = Path(basedir) / "unittests" / "scans" / parser_dir.name
                     self.assertTrue(
                         Path(scan_dir).is_dir(),
                         f"Test files for unittest of parser '{scan_dir}' are missing or using different name",
@@ -76,16 +76,16 @@ def test_file_existence(self):
                 "api_vulners",  # TODO: tests should be implemented also for this parser
             ]:
                 with self.subTest(parser=parser_dir.name, category="importer"):
-                    importer_test_file = os.path.join(basedir, "unittests", "tools", f"test_{parser_dir.name}_importer.py")
+                    importer_test_file = Path(basedir) / "unittests" / "tools" / f"test_{parser_dir.name}_importer.py"
                     self.assertTrue(
                         Path(importer_test_file).is_file(),
                         f"Unittest of importer '{importer_test_file}' is missing or using different name",
                     )
-        for file in os.scandir(os.path.join(basedir, "dojo", "tools", parser_dir.name)):
+        for file in os.scandir(Path(basedir) / "dojo" / "tools" / parser_dir.name):
             if file.is_file() and file.name != "__pycache__" and file.name != "__init__.py":
-                f = os.path.join(basedir, "dojo", "tools", parser_dir.name, file.name)
+                f_path = Path(basedir) / "dojo" / "tools" / parser_dir.name / file.name
                 read_true = False
-                with open(f, encoding="utf-8") as f:
+                with open(f_path, encoding="utf-8") as f:
                     i = 0
                     for line in f:
                         if read_true is True:
@@ -93,7 +93,7 @@ def test_file_existence(self):
                                 read_true = False
                                 i = 0
                             elif i > 4:
-                                self.assertTrue(expr=False, msg="In file " + str(os.path.join("dojo", "tools", parser_dir.name, file.name)) + " the test is failing because you don't have utf-8 after .read()")
+                                self.assertTrue(expr=False, msg=f"In file '{f_path}' the test is failing because you don't have utf-8 after .read()")
                                 i = 0
                                 read_true = False
                         else:
@@ -103,12 +103,12 @@ def test_file_existence(self):
             i = 0
     def test_parser_existence(self):
-        for docs in os.scandir(os.path.join(basedir, "docs", "content", "en", "connecting_your_tools", "parsers", "file")):
+        for docs in os.scandir(Path(basedir) / "docs" / "content" / "en" / "connecting_your_tools" / "parsers" / "file"):
             if docs.name not in [
                 "_index.md",
                 "codeql.md",
                 "edgescan.md",
             ]:
                 with self.subTest(parser=docs.name.split(".md")[0], category="parser"):
-                    parser = os.path.join(basedir, "dojo", "tools", f"{docs.name.split('.md')[0]}", "parser.py")
+                    parser = Path(basedir) / "dojo" / "tools" / f"{docs.name.split('.md')[0]}" / "parser.py"
                     self.assertTrue(
                         Path(parser).is_file(),
                         f"Parser '{parser}' is missing or using different name",
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index 9b58a538e43..2eba5278c89 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -4,7 +4,6 @@
 from collections import OrderedDict
 from enum import Enum
 from json import dumps
-from pathlib import Path
 # from drf_spectacular.renderers import OpenApiJsonRenderer
 from unittest.mock import ANY, MagicMock, call, patch
@@ -134,7 +133,7 @@
     UserContactInfo,
 )
-from .dojo_test_case import DojoAPITestCase
+from .dojo_test_case import DojoAPITestCase, get_unit_tests_scans_path
 logger = logging.getLogger(__name__)
@@ -1120,7 +1119,7 @@ def test_request_response_post_and_download(self):
         # Test the creation
         for level in self.url_levels:
             length = FileUpload.objects.count()
-            with open(f"{self.path}/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile:
+            with open(get_unit_tests_scans_path("acunetix") / "one_finding.xml", encoding="utf-8") as testfile:
                 payload = {
                     "title": level,
                     "file": testfile,
@@ -1132,7 +1131,7 @@ def test_request_response_post_and_download(self):
             self.url_levels[level] = response.data.get("id")
         # Test the download
-        file_data = Path(f"{self.path}/scans/acunetix/one_finding.xml").read_text(encoding="utf-8")
+        file_data = (get_unit_tests_scans_path("acunetix") / "one_finding.xml").read_text(encoding="utf-8")
         for level, file_id in self.url_levels.items():
             response = self.client.get(f"/api/v2/{level}/files/download/{file_id}/")
             self.assertEqual(200, response.status_code)
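test_rest_framework.py can drop its own pathlib import because the helper already returns a Path, and Path.read_text replaces the usual open/read/close dance in the download check:

    from pathlib import Path

    p = Path("example.txt")
    p.write_text("hello\n", encoding="utf-8")
    # read_text() opens, reads, and closes in one call, equivalent to:
    #     with open(p, encoding="utf-8") as f:
    #         data = f.read()
    data = p.read_text(encoding="utf-8")
    assert data == "hello\n"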
get_unit_tests_scans_path("zap") + self.zap_sample5_filename = self.scans_path / "5_zap_sample_one.xml" def create_finding_with_tags(self, tags): finding_id = Finding.objects.all().first().id @@ -251,8 +251,8 @@ def setUp(self, *args, **kwargs): self.login_as_admin() self.system_settings(enable_product_tag_inehritance=True) self.product = self.create_product("Inherited Tags Test", tags=["inherit", "these", "tags"]) - self.scans_path = "/scans/zap/" - self.zap_sample5_filename = f"{self.scans_path}5_zap_sample_one.xml" + self.scans_path = get_unit_tests_scans_path("zap") + self.zap_sample5_filename = self.scans_path / "5_zap_sample_one.xml" def _convert_instance_tags_to_list(self, instance) -> list: return [tag.name for tag in instance.tags.all()] diff --git a/unittests/tools/test_acunetix_parser.py b/unittests/tools/test_acunetix_parser.py index fe0deb95e63..699cb841104 100644 --- a/unittests/tools/test_acunetix_parser.py +++ b/unittests/tools/test_acunetix_parser.py @@ -3,13 +3,13 @@ from dojo.models import Test from dojo.tools.acunetix.parser import AcunetixParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAcunetixParser(DojoTestCase): def test_parse_file_with_one_finding(self): - with open("unittests/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("acunetix") / "one_finding.xml", encoding="utf-8") as testfile: parser = AcunetixParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -37,7 +37,7 @@ def test_parse_file_with_one_finding(self): self.assertEqual("some/path", endpoint.path) def test_parse_file_with_multiple_finding(self): - with open("unittests/scans/acunetix/many_findings.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", encoding="utf-8") as testfile: parser = AcunetixParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -132,7 +132,7 @@ def test_parse_file_with_multiple_finding(self): self.assertIsInstance(req_resp["resp"], str) def test_parse_file_with_example_com(self): - with open("unittests/scans/acunetix/XML_http_example_co_id_.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("acunetix") / "XML_http_example_co_id_.xml", encoding="utf-8") as testfile: parser = AcunetixParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -204,7 +204,7 @@ def test_parse_file_with_example_com(self): self.assertIsInstance(req_resp["resp"], str) def test_parse_file_with_one_finding_acunetix360(self): - with open("unittests/scans/acunetix/acunetix360_one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("acunetix") / "acunetix360_one_finding.json", encoding="utf-8") as testfile: parser = AcunetixParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -225,7 +225,7 @@ def test_parse_file_with_one_finding_acunetix360(self): self.assertIn("https://online.acunetix360.com/issues/detail/735f4503-e9eb-4b4c-4306-ad49020a4c4b", finding.references) def test_parse_file_with_one_finding_false_positive(self): - with open("unittests/scans/acunetix/acunetix360_one_finding_false_positive.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("acunetix") / "acunetix360_one_finding_false_positive.json", encoding="utf-8") as testfile: parser = AcunetixParser() findings = 
diff --git a/unittests/tools/test_acunetix_parser.py b/unittests/tools/test_acunetix_parser.py
index fe0deb95e63..699cb841104 100644
--- a/unittests/tools/test_acunetix_parser.py
+++ b/unittests/tools/test_acunetix_parser.py
@@ -3,13 +3,13 @@
 from dojo.models import Test
 from dojo.tools.acunetix.parser import AcunetixParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestAcunetixParser(DojoTestCase):
     def test_parse_file_with_one_finding(self):
-        with open("unittests/scans/acunetix/one_finding.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "one_finding.xml", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -37,7 +37,7 @@ def test_parse_file_with_one_finding(self):
         self.assertEqual("some/path", endpoint.path)
     def test_parse_file_with_multiple_finding(self):
-        with open("unittests/scans/acunetix/many_findings.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "many_findings.xml", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -132,7 +132,7 @@ def test_parse_file_with_multiple_finding(self):
             self.assertIsInstance(req_resp["resp"], str)
     def test_parse_file_with_example_com(self):
-        with open("unittests/scans/acunetix/XML_http_example_co_id_.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "XML_http_example_co_id_.xml", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -204,7 +204,7 @@ def test_parse_file_with_example_com(self):
             self.assertIsInstance(req_resp["resp"], str)
     def test_parse_file_with_one_finding_acunetix360(self):
-        with open("unittests/scans/acunetix/acunetix360_one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "acunetix360_one_finding.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -225,7 +225,7 @@ def test_parse_file_with_one_finding_acunetix360(self):
         self.assertIn("https://online.acunetix360.com/issues/detail/735f4503-e9eb-4b4c-4306-ad49020a4c4b", finding.references)
     def test_parse_file_with_one_finding_false_positive(self):
-        with open("unittests/scans/acunetix/acunetix360_one_finding_false_positive.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "acunetix360_one_finding_false_positive.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -245,7 +245,7 @@ def test_parse_file_with_one_finding_false_positive(self):
         self.assertTrue(finding.false_p)
     def test_parse_file_with_one_finding_risk_accepted(self):
-        with open("unittests/scans/acunetix/acunetix360_one_finding_accepted_risk.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "acunetix360_one_finding_accepted_risk.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -265,7 +265,7 @@ def test_parse_file_with_one_finding_risk_accepted(self):
         self.assertTrue(finding.risk_accepted)
     def test_parse_file_with_multiple_finding_acunetix360(self):
-        with open("unittests/scans/acunetix/acunetix360_many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "acunetix360_many_findings.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(16, len(findings))
@@ -306,7 +306,7 @@ def test_parse_file_with_multiple_finding_acunetix360(self):
             self.assertEqual(str(endpoint), "http://php.testsparker.com")
     def test_parse_file_with_mulitple_cwe(self):
-        with open("unittests/scans/acunetix/acunetix360_multiple_cwe.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "acunetix360_multiple_cwe.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -325,19 +325,19 @@ def test_parse_file_with_mulitple_cwe(self):
         self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
     def test_parse_file_issue_10370(self):
-        with open("unittests/scans/acunetix/issue_10370.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "issue_10370.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
     def test_parse_file_issue_10435(self):
-        with open("unittests/scans/acunetix/issue_10435.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "issue_10435.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
     def test_parse_file_issue_11206(self):
-        with open("unittests/scans/acunetix/issue_11206.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("acunetix") / "issue_11206.json", encoding="utf-8") as testfile:
             parser = AcunetixParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_anchore_engine_parser.py b/unittests/tools/test_anchore_engine_parser.py
index 60a4e511f30..007c1166f75 100644
--- a/unittests/tools/test_anchore_engine_parser.py
+++ b/unittests/tools/test_anchore_engine_parser.py
@@ -1,29 +1,29 @@
 from dojo.models import Test
 from dojo.tools.anchore_engine.parser import AnchoreEngineParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestAnchoreEngineParser(DojoTestCase):
     def test_anchore_engine_parser_has_no_finding(self):
-        with open("unittests/scans/anchore_engine/no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_engine") / "no_vuln.json", encoding="utf-8") as testfile:
             parser = AnchoreEngineParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_anchore_engine_parser_has_one_finding(self):
-        with open("unittests/scans/anchore_engine/one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_engine") / "one_vuln.json", encoding="utf-8") as testfile:
             parser = AnchoreEngineParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
     def test_anchore_engine_parser_has_many_findings(self):
-        with open("unittests/scans/anchore_engine/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_engine") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = AnchoreEngineParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(23, len(findings))
     def test_anchore_engine_parser_has_many_findings_2_4_1(self):
-        with open("unittests/scans/anchore_engine/many_vulns_2.4.1.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_engine") / "many_vulns_2.4.1.json", encoding="utf-8") as testfile:
             parser = AnchoreEngineParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(51, len(findings))
diff --git a/unittests/tools/test_anchore_enterprise_parser.py b/unittests/tools/test_anchore_enterprise_parser.py
index 6025fb736a8..a2ae81304ae 100644
--- a/unittests/tools/test_anchore_enterprise_parser.py
+++ b/unittests/tools/test_anchore_enterprise_parser.py
@@ -1,26 +1,24 @@
-from os import path
-from pathlib import Path
 from dojo.models import Test
 from dojo.tools.anchore_enterprise.parser import AnchoreEnterpriseParser, extract_vulnerability_id, search_filepath
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestAnchoreEnterpriseParser(DojoTestCase):
     def test_anchore_policy_check_parser_has_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/anchore_enterprise/no_checks.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_enterprise") / "no_checks.json", encoding="utf-8") as testfile:
             parser = AnchoreEnterpriseParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_anchore_policy_check_parser_has_one_finding(self):
-        with open(path.join(Path(__file__).parent, "../scans/anchore_enterprise/one_check.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_enterprise") / "one_check.json", encoding="utf-8") as testfile:
             parser = AnchoreEnterpriseParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
     def test_anchore_policy_check_parser_has_multiple_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/anchore_enterprise/many_checks.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchore_enterprise") / "many_checks.json", encoding="utf-8") as testfile:
             parser = AnchoreEnterpriseParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(57, len(findings))
@@ -29,7 +27,7 @@ def test_anchore_policy_check_parser_has_multiple_findings(self):
         self.assertEqual("CVE-2015-2992", finding.unsaved_vulnerability_ids[0])
     def test_anchore_policy_check_parser_invalid_format(self):
open(get_unit_tests_scans_path("anchore_enterprise") / "invalid_checks_format.json", encoding="utf-8") as testfile: with self.assertRaises(Exception): parser = AnchoreEnterpriseParser() parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_anchore_grype_parser.py b/unittests/tools/test_anchore_grype_parser.py index c706e0c384f..ffff8673662 100644 --- a/unittests/tools/test_anchore_grype_parser.py +++ b/unittests/tools/test_anchore_grype_parser.py @@ -1,19 +1,19 @@ from dojo.models import Finding, Test from dojo.tools.anchore_grype.parser import AnchoreGrypeParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAnchoreGrypeParser(DojoTestCase): def test_parser_has_no_findings(self): - with open("unittests/scans/anchore_grype/no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "no_vuln.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parser_has_many_findings(self): found = False - with open("unittests/scans/anchore_grype/many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "many_vulns.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1509, len(findings)) @@ -35,7 +35,7 @@ def test_parser_has_many_findings(self): def test_grype_parser_with_one_criticle_vuln_has_one_findings(self): found = False - with open("unittests/scans/anchore_grype/many_vulns2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "many_vulns2.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1567, len(findings)) @@ -56,7 +56,7 @@ def test_grype_parser_with_one_criticle_vuln_has_one_findings(self): def test_grype_parser_with_many_vulns3(self): found = False - with open("unittests/scans/anchore_grype/many_vulns3.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "many_vulns3.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(327, len(findings)) @@ -77,7 +77,7 @@ def test_grype_parser_with_many_vulns3(self): def test_grype_parser_with_new_matcher_list(self): found = False - with open("unittests/scans/anchore_grype/many_vulns4.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "many_vulns4.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) @@ -97,7 +97,7 @@ def test_grype_parser_with_new_matcher_list(self): self.assertTrue(found) def test_check_all_fields(self): - with open("unittests/scans/anchore_grype/check_all_fields.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "check_all_fields.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) @@ -266,13 +266,13 @@ def test_check_all_fields(self): self.assertEqual(2, finding.nb_occurences) def test_grype_issue_9618(self): - with open("unittests/scans/anchore_grype/issue_9618.json", encoding="utf-8") as testfile: + with 
open(get_unit_tests_scans_path("anchore_grype") / "issue_9618.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(35, len(findings)) def test_grype_issue_9942(self): - with open("unittests/scans/anchore_grype/issue_9942.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchore_grype") / "issue_9942.json", encoding="utf-8") as testfile: parser = AnchoreGrypeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) diff --git a/unittests/tools/test_anchorectl_policies_parser.py b/unittests/tools/test_anchorectl_policies_parser.py index 1ad4eb91cca..83806163767 100644 --- a/unittests/tools/test_anchorectl_policies_parser.py +++ b/unittests/tools/test_anchorectl_policies_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.anchorectl_policies.parser import AnchoreCTLPoliciesParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAnchoreCTLPoliciesParser(DojoTestCase): def test_anchore_engine_parser_has_no_finding(self): - with open("unittests/scans/anchorectl_policies/no_violation.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchorectl_policies") / "no_violation.json", encoding="utf-8") as testfile: parser = AnchoreCTLPoliciesParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self): - with open("unittests/scans/anchorectl_policies/one_violation.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchorectl_policies") / "one_violation.json", encoding="utf-8") as testfile: parser = AnchoreCTLPoliciesParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -21,13 +21,13 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self): self.assertEqual(singleFinding.description, "User root found as effective user, which is not on the allowed list") def test_anchore_engine_parser_has_many_findings(self): - with open("unittests/scans/anchorectl_policies/many_violations.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchorectl_policies") / "many_violations.json", encoding="utf-8") as testfile: parser = AnchoreCTLPoliciesParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) def test_anchore_engine_parser_has_one_finding_and_description_has_severity(self): - with open("unittests/scans/anchorectl_policies/one_violation_description_severity.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("anchorectl_policies") / "one_violation_description_severity.json", encoding="utf-8") as testfile: parser = AnchoreCTLPoliciesParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) diff --git a/unittests/tools/test_anchorectl_vulns_parser.py b/unittests/tools/test_anchorectl_vulns_parser.py index 1ba824fe765..9254331c0f0 100644 --- a/unittests/tools/test_anchorectl_vulns_parser.py +++ b/unittests/tools/test_anchorectl_vulns_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.anchorectl_vulns.parser import AnchoreCTLVulnsParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class 
 class TestAnchoreCTLVulnsParser(DojoTestCase):
     def test_anchore_engine_parser_has_no_finding(self):
-        with open("unittests/scans/anchorectl_vulns/no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchorectl_vulns") / "no_vuln.json", encoding="utf-8") as testfile:
             parser = AnchoreCTLVulnsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
-        with open("unittests/scans/anchorectl_vulns/one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchorectl_vulns") / "one_vuln.json", encoding="utf-8") as testfile:
             parser = AnchoreCTLVulnsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_anchore_engine_parser_has_one_finding_and_it_is_correctly_parsed(self):
         self.assertEqual(singleFinding.description, "**Image hash**: None\n\n**Package**: libgnutls30-3.5.8-5+deb9u4\n\n**Package path**: None\n\n**Package type**: dpkg\n\n**Feed**: vulnerabilities/debian:9\n\n**CPE**: None\n\n**Description**: That test description\n\n")
     def test_anchore_engine_parser_has_many_findings(self):
-        with open("unittests/scans/anchorectl_vulns/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("anchorectl_vulns") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = AnchoreCTLVulnsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(23, len(findings))
diff --git a/unittests/tools/test_api_blackduck_parser.py b/unittests/tools/test_api_blackduck_parser.py
index f58613ca710..438318eefd2 100644
--- a/unittests/tools/test_api_blackduck_parser.py
+++ b/unittests/tools/test_api_blackduck_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import SEVERITIES, Test
 from dojo.tools.api_blackduck.parser import ApiBlackduckParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestApiBlackduckParser(DojoTestCase):
     def test_bandit_parser_has_many_findings(self):
-        with open("unittests/scans/api_blackduck/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_blackduck") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = ApiBlackduckParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
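From test_api_bugcrowd_parser.py onward, several parser tests (api_bugcrowd, api_edgescan, appcheck_web_application_scanner) also swap their base class from django.test.TestCase to DojoTestCase, presumably so all parser tests share one base class alongside the path helper. The migrated shape, sketched with the aqua parser (the class name below is illustrative, not taken from the diffs):

    from dojo.models import Test
    from dojo.tools.aqua.parser import AquaParser  # any migrated parser reads the same way
    from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path

    class TestAquaParserShape(DojoTestCase):  # was django.test.TestCase in the old style
        def test_no_findings(self):
            with open(get_unit_tests_scans_path("aqua") / "no_vuln.json", encoding="utf-8") as testfile:
                self.assertEqual(0, len(AquaParser().get_findings(testfile, Test())))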
diff --git a/unittests/tools/test_api_bugcrowd_parser.py b/unittests/tools/test_api_bugcrowd_parser.py
index 48e748633cf..e1d851b8e84 100644
--- a/unittests/tools/test_api_bugcrowd_parser.py
+++ b/unittests/tools/test_api_bugcrowd_parser.py
@@ -1,20 +1,19 @@
 import datetime
-from django.test import TestCase
-
 from dojo.models import Product_API_Scan_Configuration, Test
 from dojo.tools.api_bugcrowd.parser import ApiBugcrowdParser
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
-class TestApiBugcrowdParser(TestCase):
+class TestApiBugcrowdParser(DojoTestCase):
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/api_bugcrowd/bugcrowd_empty.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_bugcrowd") / "bugcrowd_empty.json", encoding="utf-8") as testfile:
             parser = ApiBugcrowdParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_parse_file_with_one_vuln_has_one_findings(self):
-        with open("unittests/scans/api_bugcrowd/bugcrowd_one.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_bugcrowd") / "bugcrowd_one.json", encoding="utf-8") as testfile:
             # description = """
             # Vulnerability Name: JWT alg none
@@ -51,7 +50,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
             endpoint.clean()
     def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/api_bugcrowd/bugcrowd_many.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_bugcrowd") / "bugcrowd_many.json", encoding="utf-8") as testfile:
             parser = ApiBugcrowdParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -117,7 +116,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
     def test_parse_file_with_not_reproducible_finding(self):
         with open(
-            "unittests/scans/api_bugcrowd/bugcrowd_not_reproducible.json", encoding="utf-8",
+            get_unit_tests_scans_path("api_bugcrowd") / "bugcrowd_not_reproducible.json", encoding="utf-8",
         ) as testfile:
             # description = """
@@ -149,7 +148,7 @@ def test_parse_file_with_not_reproducible_finding(self):
             endpoint.clean()
     def test_parse_file_with_broken_bug_url(self):
-        with open("unittests/scans/api_bugcrowd/bugcrowd_broken_bug_url.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_bugcrowd") / "bugcrowd_broken_bug_url.json", encoding="utf-8") as testfile:
             parser = ApiBugcrowdParser()
             with self.assertLogs("dojo.tools.api_bugcrowd.parser", level="ERROR") as cm:
                 parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_api_cobalt_parser.py b/unittests/tools/test_api_cobalt_parser.py
index afb45d902f7..e64f407a9e6 100644
--- a/unittests/tools/test_api_cobalt_parser.py
+++ b/unittests/tools/test_api_cobalt_parser.py
@@ -3,19 +3,19 @@
 from dojo.models import Test, Test_Type
 from dojo.tools.api_cobalt.parser import ApiCobaltParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestApiCobaltParser(DojoTestCase):
     def test_cobalt_api_parser_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/api_cobalt/cobalt_api_zero_vul.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_zero_vul.json", encoding="utf-8") as testfile:
             parser = ApiCobaltParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_cobalt_api_parser_with_many_vuln_has_many_findings(self):
-        with open("unittests/scans/api_cobalt/cobalt_api_many_vul.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_many_vul.json", encoding="utf-8") as testfile:
             parser = ApiCobaltParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -24,7 +24,7 @@ def test_cobalt_api_parser_with_many_vuln_has_many_findings(self):
         self.assertEqual(3, len(findings))
     def test_cobalt_api_parser_with_carried_over_finding(self):
-        with open("unittests/scans/api_cobalt/cobalt_api_one_vul_carried_over.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_carried_over.json", encoding="utf-8") as testfile:
             parser = ApiCobaltParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -51,7 +51,7 @@ def test_cobalt_api_parser_with_carried_over_finding(self):
         self.assertTrue(finding.dynamic_finding)
     def test_cobalt_api_parser_with_check_fix_finding(self):
open("unittests/scans/api_cobalt/cobalt_api_one_vul_check_fix.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_check_fix.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -78,7 +78,7 @@ def test_cobalt_api_parser_with_check_fix_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_invalid_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_invalid.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_invalid.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -105,7 +105,7 @@ def test_cobalt_api_parser_with_invalid_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_need_fix_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_need_fix.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_need_fix.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -132,7 +132,7 @@ def test_cobalt_api_parser_with_need_fix_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_new_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_new.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_new.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -159,7 +159,7 @@ def test_cobalt_api_parser_with_new_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_out_of_scope_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_out_of_scope.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_out_of_scope.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -186,7 +186,7 @@ def test_cobalt_api_parser_with_out_of_scope_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_triaging_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_triaging.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_triaging.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -213,7 +213,7 @@ def test_cobalt_api_parser_with_triaging_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_valid_fix_finding(self): - with open("unittests/scans/api_cobalt/cobalt_api_one_vul_valid_fix.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_valid_fix.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -240,7 +240,7 @@ def test_cobalt_api_parser_with_valid_fix_finding(self): self.assertTrue(finding.dynamic_finding) def test_cobalt_api_parser_with_wont_fix_finding(self): - with 
open("unittests/scans/api_cobalt/cobalt_api_one_vul_wont_fix.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_one_vul_wont_fix.json", encoding="utf-8") as testfile: parser = ApiCobaltParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -268,7 +268,7 @@ def test_cobalt_api_parser_with_wont_fix_finding(self): @patch("dojo.tools.api_cobalt.importer.CobaltApiImporter.get_findings") def test_cobalt_api_parser_with_api(self, mock): - with open(get_unit_tests_path() + "/scans/api_cobalt/cobalt_api_many_vul.json", encoding="utf-8") as api_findings_file: + with open(get_unit_tests_scans_path("api_cobalt") / "cobalt_api_many_vul.json", encoding="utf-8") as api_findings_file: api_findings = json.load(api_findings_file) mock.return_value = api_findings diff --git a/unittests/tools/test_api_edgescan_parser.py b/unittests/tools/test_api_edgescan_parser.py index 944b721f016..567150da209 100644 --- a/unittests/tools/test_api_edgescan_parser.py +++ b/unittests/tools/test_api_edgescan_parser.py @@ -1,10 +1,9 @@ -from django.test import TestCase - from dojo.models import Test from dojo.tools.api_edgescan.parser import ApiEdgescanParser +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path -class TestApiEdgescanParser(TestCase): +class TestApiEdgescanParser(DojoTestCase): def test_get_scan_types(self): parser = ApiEdgescanParser() @@ -32,13 +31,13 @@ def test_requires_tool_type(self): self.assertEqual(parser.requires_tool_type("scan_type"), "Edgescan") def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/api_edgescan/no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_edgescan") / "no_vuln.json", encoding="utf-8") as testfile: parser = ApiEdgescanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/api_edgescan/one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_edgescan") / "one_vuln.json", encoding="utf-8") as testfile: parser = ApiEdgescanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -61,7 +60,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self): self.assertEqual(finding.unsaved_endpoints[0].protocol, None) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/api_edgescan/many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("api_edgescan") / "many_vulns.json", encoding="utf-8") as testfile: parser = ApiEdgescanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py index 2c5564fbecf..b81826b48a0 100644 --- a/unittests/tools/test_api_sonarqube_importer.py +++ b/unittests/tools/test_api_sonarqube_importer.py @@ -5,51 +5,51 @@ from dojo.models import Engagement, Product, Product_API_Scan_Configuration, Test from dojo.tools.api_sonarqube.importer import SonarQubeApiImporter -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def dummy_product(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json", encoding="utf-8") as json_file: + with 
open(get_unit_tests_scans_path("api_sonarqube") / "product.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_issues(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "issues.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_rule(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "rule.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_rule_wo_html_desc(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "rule_wo_html_desc.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_no_hotspot(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "no_vuln.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_one_hotspot(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "one_vuln.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_many_hotspots(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "many_vulns.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_hotspot_rule(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "rule.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_hotspot_rule_wo_risk_description(self, *args, **kwargs): - with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "rule_wo_risk_description.json", encoding="utf-8") as json_file: return json.load(json_file) diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py index 176219291a5..6f419ec48c3 100644 --- a/unittests/tools/test_api_sonarqube_parser.py +++ b/unittests/tools/test_api_sonarqube_parser.py @@ -11,26 +11,26 @@ Tool_Type, ) from dojo.tools.api_sonarqube.parser import ApiSonarQubeParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def dummy_product(self, *args, **kwargs): - with open("unittests/scans/api_sonarqube/product.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "product.json", encoding="utf-8") as json_file: return json.load(json_file) def dummy_issues(self, *args, **kwargs): - with open("unittests/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file: + with open(get_unit_tests_scans_path("api_sonarqube") / "issues.json", encoding="utf-8") as 
+    with open(get_unit_tests_scans_path("api_sonarqube") / "issues.json", encoding="utf-8") as json_file:
         return json.load(json_file)
 def dummy_rule(self, *args, **kwargs):
-    with open("unittests/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file:
+    with open(get_unit_tests_scans_path("api_sonarqube") / "rule.json", encoding="utf-8") as json_file:
         return json.load(json_file)
 def dummy_hotspot_rule(self, *args, **kwargs):
-    with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file:
+    with open(get_unit_tests_scans_path("api_sonarqube") / "hotspots" / "rule.json", encoding="utf-8") as json_file:
         return json.load(json_file)
diff --git a/unittests/tools/test_api_vulners_parser.py b/unittests/tools/test_api_vulners_parser.py
index e532e1ee272..d66c1f93037 100644
--- a/unittests/tools/test_api_vulners_parser.py
+++ b/unittests/tools/test_api_vulners_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.api_vulners.parser import ApiVulnersParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 class TestApiVulnersParser(DojoTestCase):
     def test_parse_many_findings(self):
-        with open("unittests/scans/api_vulners/report_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_vulners") / "report_many_vulns.json", encoding="utf-8") as testfile:
             parser = ApiVulnersParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -19,7 +19,7 @@ def test_parse_many_findings(self):
         self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H", finding.cvssv3)
     def test_parse_one_finding(self):
-        with open("unittests/scans/api_vulners/report_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_vulners") / "report_one_vuln.json", encoding="utf-8") as testfile:
             parser = ApiVulnersParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -31,13 +31,13 @@ def test_parse_one_finding(self):
         self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H", finding.cvssv3)
     def test_parse_no_finding(self):
-        with open("unittests/scans/api_vulners/report_no_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_vulners") / "report_no_vulns.json", encoding="utf-8") as testfile:
             parser = ApiVulnersParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
     def test_parse_no_description(self):
-        with open("unittests/scans/api_vulners/report_no_description.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("api_vulners") / "report_no_description.json", encoding="utf-8") as testfile:
             parser = ApiVulnersParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_appcheck_web_application_scanner_parser.py b/unittests/tools/test_appcheck_web_application_scanner_parser.py
index 9360eb9209f..133b773d9a3 100644
--- a/unittests/tools/test_appcheck_web_application_scanner_parser.py
+++ b/unittests/tools/test_appcheck_web_application_scanner_parser.py
@@ -1,7 +1,5 @@
 import string
-from django.test import TestCase
-
 from dojo.models import Finding, Test
 from dojo.tools.appcheck_web_application_scanner.engines.appcheck import AppCheckScanningEngineParser
 from dojo.tools.appcheck_web_application_scanner.engines.base import (
@@ -12,18 +10,19 @@
 )
 from dojo.tools.appcheck_web_application_scanner.engines.nmap import NmapScanningEngineParser
dojo.tools.appcheck_web_application_scanner.parser import AppCheckWebApplicationScannerParser +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path -class TestAppCheckWebApplicationScannerParser(TestCase): +class TestAppCheckWebApplicationScannerParser(DojoTestCase): def test_appcheck_web_application_scanner_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("appcheck_web_application_scanner") / "appcheck_web_application_scanner_zero_vul.json", encoding="utf-8") as testfile: parser = AppCheckWebApplicationScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_appcheck_web_application_scanner_parser_with_one_criticle_vuln_has_one_findings(self): - with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("appcheck_web_application_scanner") / "appcheck_web_application_scanner_one_vul.json", encoding="utf-8") as testfile: parser = AppCheckWebApplicationScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -57,7 +56,7 @@ def test_appcheck_web_application_scanner_parser_with_one_criticle_vuln_has_one_ self.assertEqual("0.0.0.1", endpoint.host) def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_findings(self): - with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("appcheck_web_application_scanner") / "appcheck_web_application_scanner_many_vul.json", encoding="utf-8") as testfile: parser = AppCheckWebApplicationScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(6, len(findings)) @@ -223,14 +222,14 @@ def test_appcheck_web_application_scanner_parser_with_many_vuln_has_many_finding self.assertEqual("ajax/ShelfEdgeLabel/ShelfEdgeLabelsPromotionalBatch", endpoint.path) def test_appcheck_web_application_scanner_parser_dupes(self): - with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_dupes.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("appcheck_web_application_scanner") / "appcheck_web_application_scanner_dupes.json", encoding="utf-8") as testfile: parser = AppCheckWebApplicationScannerParser() findings = parser.get_findings(testfile, Test()) # Test has 5 entries, but we should only return 3 findings. 
self.assertEqual(3, len(findings)) def test_appcheck_web_application_scanner_parser_http2(self): - with open("unittests/scans/appcheck_web_application_scanner/appcheck_web_application_scanner_http2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("appcheck_web_application_scanner") / "appcheck_web_application_scanner_http2.json", encoding="utf-8") as testfile: parser = AppCheckWebApplicationScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) diff --git a/unittests/tools/test_appspider_parser.py b/unittests/tools/test_appspider_parser.py index 207db2d2d6b..10fe201691f 100644 --- a/unittests/tools/test_appspider_parser.py +++ b/unittests/tools/test_appspider_parser.py @@ -1,9 +1,7 @@ -from os import path -from pathlib import Path from dojo.models import Engagement, Finding, Product, Test from dojo.tools.appspider.parser import AppSpiderParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAppSpiderParser(DojoTestCase): @@ -11,7 +9,7 @@ def test_appspider_parser_has_one_finding(self): test = Test() test.engagement = Engagement() test.engagement.product = Product() - testfile = open(path.join(Path(__file__).parent, "../scans/appspider/one_vuln.xml"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("appspider") / "one_vuln.xml", encoding="utf-8") parser = AppSpiderParser() findings = parser.get_findings(testfile, test) for finding in findings: diff --git a/unittests/tools/test_aqua_parser.py b/unittests/tools/test_aqua_parser.py index 3cb28ee7eea..380c18a2ed1 100644 --- a/unittests/tools/test_aqua_parser.py +++ b/unittests/tools/test_aqua_parser.py @@ -2,18 +2,18 @@ from dojo.models import Test from dojo.tools.aqua.parser import AquaParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAquaParser(DojoTestCase): def test_aqua_parser_has_no_finding(self): - with open("unittests/scans/aqua/no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "no_vuln.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_aqua_parser_has_one_finding(self): - with open("unittests/scans/aqua/one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "one_vuln.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -30,13 +30,13 @@ def test_aqua_parser_has_one_finding(self): self.assertEqual("CVE-2019-14697", finding.unsaved_vulnerability_ids[0]) def test_aqua_parser_has_many_findings(self): - with open("unittests/scans/aqua/many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "many_vulns.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(24, len(findings)) def test_aqua_parser_v2_has_one_finding(self): - with open("unittests/scans/aqua/one_v2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "one_v2.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -49,13 +49,13 @@ def test_aqua_parser_v2_has_one_finding(self): 
self.assertEqual("CVE-2019-15601", finding.unsaved_vulnerability_ids[0]) def test_aqua_parser_v2_has_many_findings(self): - with open("unittests/scans/aqua/many_v2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "many_v2.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) def test_aqua_parser_cvssv3_has_no_finding(self): - with open("unittests/scans/aqua/many_v2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "many_v2.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) nb_cvssv3 = 0 @@ -66,7 +66,7 @@ def test_aqua_parser_cvssv3_has_no_finding(self): self.assertEqual(0, nb_cvssv3) def test_aqua_parser_cvssv3_has_many_findings(self): - with open("unittests/scans/aqua/many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "many_vulns.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) nb_cvssv3 = 0 @@ -77,7 +77,7 @@ def test_aqua_parser_cvssv3_has_many_findings(self): self.assertEqual(16, nb_cvssv3) def test_aqua_parser_for_aqua_severity(self): - with open("unittests/scans/aqua/vulns_with_aqua_severity.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "vulns_with_aqua_severity.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) sevs = [] @@ -93,27 +93,27 @@ def test_aqua_parser_for_aqua_severity(self): self.assertEqual(7, d["Info"]) def test_aqua_parser_issue_10585(self): - with open("unittests/scans/aqua/issue_10585.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "issue_10585.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_aqua_parser_aqua_devops_issue_10611(self): - with open("unittests/scans/aqua/aqua_devops_issue_10611.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "aqua_devops_issue_10611.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(101, len(findings)) self.assertEqual("server.key - server.key (/juice-shop/node_modules/node-gyp/test/fixtures/server.key) ", findings[83].title) def test_aqua_parser_aqua_devops_issue_10849(self): - with open("unittests/scans/aqua/issue_10849.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "issue_10849.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0.0006, findings[0].epss_score) self.assertEqual(0.23474, findings[0].epss_percentile) def test_aqua_parser_aqua_devops_empty(self): - with open("unittests/scans/aqua/empty_aquadevops.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aqua") / "empty_aquadevops.json", encoding="utf-8") as testfile: parser = AquaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) diff --git a/unittests/tools/test_arachni_parser.py b/unittests/tools/test_arachni_parser.py index 266d45dc05d..6e31186b7ae 100644 --- a/unittests/tools/test_arachni_parser.py +++ b/unittests/tools/test_arachni_parser.py @@ -2,13 +2,13 @@ from dojo.models import Test from dojo.tools.arachni.parser import 
ArachniParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestArachniParser(DojoTestCase): def test_parser_has_one_finding(self): - with open("unittests/scans/arachni/arachni.afr.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("arachni") / "arachni.afr.json", encoding="utf-8") as testfile: parser = ArachniParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -23,7 +23,7 @@ def test_parser_has_one_finding(self): self.assertEqual(datetime.datetime(2017, 11, 14, 2, 57, 29, tzinfo=datetime.UTC), finding.date) def test_parser_has_many_finding(self): - with open("unittests/scans/arachni/dd.com.afr.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("arachni") / "dd.com.afr.json", encoding="utf-8") as testfile: parser = ArachniParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -54,7 +54,7 @@ def test_parser_has_many_finding(self): self.assertIn("server", finding.unsaved_tags) def test_parser_has_many_finding2(self): - with open("unittests/scans/arachni/js.com.afr.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("arachni") / "js.com.afr.json", encoding="utf-8") as testfile: parser = ArachniParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_asff_parser.py b/unittests/tools/test_asff_parser.py index fe01bb06cfd..75a99be24c2 100644 --- a/unittests/tools/test_asff_parser.py +++ b/unittests/tools/test_asff_parser.py @@ -1,14 +1,13 @@ import json -import os.path from datetime import datetime from dojo.models import Endpoint, Test from dojo.tools.asff.parser import AsffParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path(), "scans/asff", file_name) + return get_unit_tests_scans_path("asff") / file_name class TestAsffParser(DojoTestCase): diff --git a/unittests/tools/test_auditjs_parser.py b/unittests/tools/test_auditjs_parser.py index 4a367a7ca2c..1df420952cc 100644 --- a/unittests/tools/test_auditjs_parser.py +++ b/unittests/tools/test_auditjs_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.auditjs.parser import AuditJSParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAuditJSParser(DojoTestCase): def test_auditjs_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/auditjs/auditjs_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("auditjs") / "auditjs_zero_vul.json", encoding="utf-8") as testfile: parser = AuditJSParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_auditjs_parser_with_one_criticle_vuln_has_one_findings(self): - with open("unittests/scans/auditjs/auditjs_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("auditjs") / "auditjs_one_vul.json", encoding="utf-8") as testfile: parser = AuditJSParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -32,7 +32,7 @@ def test_auditjs_parser_with_one_criticle_vuln_has_one_findings(self): findings[0].references) def test_auditjs_parser_with_many_vuln_has_many_findings(self): - with 
open("unittests/scans/auditjs/auditjs_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("auditjs") / "auditjs_many_vul.json", encoding="utf-8") as testfile: parser = AuditJSParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -59,7 +59,7 @@ def test_auditjs_parser_with_many_vuln_has_many_findings(self): def test_auditjs_parser_empty_with_error(self): with self.assertRaises(ValueError) as context: - with open("unittests/scans/auditjs/empty_with_error.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("auditjs") / "empty_with_error.json", encoding="utf-8") as testfile: parser = AuditJSParser() parser.get_findings(testfile, Test()) @@ -68,7 +68,7 @@ def test_auditjs_parser_empty_with_error(self): ) def test_auditjs_parser_with_package_name_has_namespace(self): - with open("unittests/scans/auditjs/auditjs_with_package_namespace.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("auditjs") / "auditjs_with_package_namespace.json", encoding="utf-8") as testfile: parser = AuditJSParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_aws_inspector2_parser.py b/unittests/tools/test_aws_inspector2_parser.py index f023bec88a2..ff47cbd5177 100644 --- a/unittests/tools/test_aws_inspector2_parser.py +++ b/unittests/tools/test_aws_inspector2_parser.py @@ -2,19 +2,20 @@ from dojo.models import Test from dojo.tools.aws_inspector2.parser import AWSInspector2Parser +from unittests.dojo_test_case import get_unit_tests_scans_path class TestAWSInspector2Parser(TestCase): def test_aws_inspector2_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/aws_inspector2/aws_inspector2_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aws_inspector2") / "aws_inspector2_zero_vul.json", encoding="utf-8") as testfile: parser = AWSInspector2Parser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_aws_inspector2_parser_with_one_vuln_has_one_findings(self): - with open("unittests/scans/aws_inspector2/aws_inspector2_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aws_inspector2") / "aws_inspector2_one_vul.json", encoding="utf-8") as testfile: parser = AWSInspector2Parser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -26,7 +27,7 @@ def test_aws_inspector2_parser_with_one_vuln_has_one_findings(self): self.assertEqual("Medium", findings[0].severity) def test_aws_inspector2_parser_with_many_vuln_has_many_findings(self): - with open("unittests/scans/aws_inspector2/aws_inspector2_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aws_inspector2") / "aws_inspector2_many_vul.json", encoding="utf-8") as testfile: parser = AWSInspector2Parser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -37,7 +38,7 @@ def test_aws_inspector2_parser_with_many_vuln_has_many_findings(self): def test_aws_inspector2_parser_empty_with_error(self): with self.assertRaises(TypeError) as context: - with open("unittests/scans/aws_inspector2/empty_with_error.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("aws_inspector2") / "empty_with_error.json", encoding="utf-8") as testfile: parser = AWSInspector2Parser() parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_aws_prowler_parser.py 
b/unittests/tools/test_aws_prowler_parser.py index 91da95da6dc..c24e0c5d37f 100644 --- a/unittests/tools/test_aws_prowler_parser.py +++ b/unittests/tools/test_aws_prowler_parser.py @@ -2,7 +2,7 @@ from dojo.models import Test from dojo.tools.aws_prowler.parser import AWSProwlerParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAwsProwlerParser(DojoTestCase): @@ -14,12 +14,12 @@ def setup(self, testfile): def test_aws_prowler_parser_with_no_vuln_has_no_findings(self): findings = self.setup( - open("unittests/scans/aws_prowler/no_vuln.csv", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "no_vuln.csv", encoding="utf-8")) self.assertEqual(0, len(findings)) def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self): findings = self.setup( - open("unittests/scans/aws_prowler/one_vuln.csv", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "one_vuln.csv", encoding="utf-8")) self.assertEqual(1, len(findings)) self.assertEqual( "Root user in the account wasn't accessed in the last 1 days", findings[0].title, @@ -27,7 +27,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self): def test_aws_prowler_parser_with_many_vuln_has_many_findings(self): findings = self.setup( - open("unittests/scans/aws_prowler/many_vuln.csv", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "many_vuln.csv", encoding="utf-8")) self.assertEqual(4, len(findings)) self.assertEqual( "Root user in the account wasn't accessed in the last 1 days", findings[0].title) @@ -42,7 +42,7 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings(self): def test_aws_prowler_parser_with_many_vuln_has_many_findings2(self): findings = self.setup( - open("unittests/scans/aws_prowler/many_vuln2.csv", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "many_vuln2.csv", encoding="utf-8")) self.assertEqual(174, len(findings)) self.assertEqual("Root user in the account wasn't accessed in the last 1 days", findings[0].title) self.assertEqual("Info", findings[0].severity) @@ -52,7 +52,7 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings2(self): def test_aws_prowler_parser_issue4450(self): findings = self.setup( - open("unittests/scans/aws_prowler/issue4450.csv", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "issue4450.csv", encoding="utf-8")) self.assertEqual(4, len(findings)) with self.subTest(i=0): finding = findings[0] @@ -72,12 +72,12 @@ def test_aws_prowler_parser_issue4450(self): def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self): findings = self.setup( - open("unittests/scans/aws_prowler/no_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "no_vuln.json", encoding="utf-8")) self.assertEqual(0, len(findings)) def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self): findings = self.setup( - open("unittests/scans/aws_prowler/one_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "one_vuln.json", encoding="utf-8")) self.assertEqual(1, len(findings)) self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title) self.assertIn("012345678912", findings[0].description) @@ -97,7 +97,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self): def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): findings = self.setup( - 
open("unittests/scans/aws_prowler/many_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler") / "many_vuln.json", encoding="utf-8")) self.assertEqual(4, len(findings)) with self.subTest(i=0): self.assertEqual("eu-central-1: Only Virtual MFA is enabled for root", findings[0].title) diff --git a/unittests/tools/test_aws_prowler_v3plus_parser.py b/unittests/tools/test_aws_prowler_v3plus_parser.py index 5ef20b764af..db273f3bf7e 100644 --- a/unittests/tools/test_aws_prowler_v3plus_parser.py +++ b/unittests/tools/test_aws_prowler_v3plus_parser.py @@ -2,7 +2,7 @@ from dojo.models import Test from dojo.tools.aws_prowler_v3plus.parser import AWSProwlerV3plusParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAwsProwlerV3plusParser(DojoTestCase): @@ -14,12 +14,12 @@ def setup(self, testfile): def test_aws_prowler_parser_with_no_vuln_has_no_findings_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/no_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "no_vuln.json", encoding="utf-8")) self.assertEqual(0, len(findings)) def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/one_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "one_vuln.json", encoding="utf-8")) self.assertEqual(1, len(findings)) self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.sandbox.partner.teste.com", findings[0].unique_id_from_tool) self.assertIn("Check if ACM Certificates are about to expire in specific days or less", findings[0].description) @@ -29,7 +29,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_json(self): def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/many_vuln.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "many_vuln.json", encoding="utf-8")) self.assertEqual(3, len(findings)) with self.subTest(i=0): self.assertEqual("prowler-aws-acm_certificates_expiration_check-999999999999-us-east-1-api.teste.teste.com", findings[0].unique_id_from_tool) @@ -46,12 +46,12 @@ def test_aws_prowler_parser_with_many_vuln_has_many_findings_json(self): def test_aws_prowler_parser_with_no_vuln_has_no_findings_ocsf_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/no_vuln.ocsf.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "no_vuln.ocsf.json", encoding="utf-8")) self.assertEqual(0, len(findings)) def test_aws_prowler_parser_after_4_5_0_with_critical_vuln_has_one_findings_ocsf_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/one_vuln_after_4_5_0.ocsf.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "one_vuln_after_4_5_0.ocsf.json", encoding="utf-8")) self.assertEqual(1, len(findings)) self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description) @@ -61,7 +61,7 @@ def test_aws_prowler_parser_after_4_5_0_with_critical_vuln_has_one_findings_ocsf def 
test_aws_prowler_parser_after_4_5_0_with_many_vuln_has_many_findings_ocsf_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/many_vuln_after_4_5_0.ocsf.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "many_vuln_after_4_5_0.ocsf.json", encoding="utf-8")) self.assertEqual(2, len(findings)) with self.subTest(i=0): self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) @@ -74,7 +74,7 @@ def test_aws_prowler_parser_after_4_5_0_with_many_vuln_has_many_findings_ocsf_js def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/one_vuln.ocsf.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "one_vuln.ocsf.json", encoding="utf-8")) self.assertEqual(1, len(findings)) self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) self.assertIn("Ensure IAM Roles with attached AdministratorAccess policy have a well defined trust relationship", findings[0].description) @@ -84,7 +84,7 @@ def test_aws_prowler_parser_with_critical_vuln_has_one_findings_ocsf_json(self): def test_aws_prowler_parser_with_many_vuln_has_many_findings_ocsf_json(self): findings = self.setup( - open("unittests/scans/aws_prowler_v3plus/many_vuln.ocsf.json", encoding="utf-8")) + open(get_unit_tests_scans_path("aws_prowler_v3plus") / "many_vuln.ocsf.json", encoding="utf-8")) self.assertEqual(2, len(findings)) with self.subTest(i=0): self.assertEqual("prowler-aws-iam_role_administratoraccess_policy_permissive_trust_relationship-123456789012-us-east-1-myAdministratorExecutionRole", findings[0].unique_id_from_tool) diff --git a/unittests/tools/test_awssecurityhub_parser.py b/unittests/tools/test_awssecurityhub_parser.py index 5885852b348..cf875c6aed1 100644 --- a/unittests/tools/test_awssecurityhub_parser.py +++ b/unittests/tools/test_awssecurityhub_parser.py @@ -1,18 +1,18 @@ -import os.path + from dojo.models import Test from dojo.tools.awssecurityhub.parser import AwsSecurityHubParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name: str): - return os.path.join("/scans/awssecurityhub", file_name) + return get_unit_tests_scans_path("awssecurityhub") / file_name class TestAwsSecurityHubParser(DojoTestCase): def test_one_finding(self): - with open(get_unit_tests_path() + sample_path("config_one_finding.json"), encoding="utf-8") as test_file: + with open(sample_path("config_one_finding.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) @@ -23,7 +23,7 @@ def test_one_finding(self): self.assertEqual("https://docs.aws.amazon.com/console/securityhub/IAM.5/remediation", finding.references) def test_one_finding_active(self): - with open(get_unit_tests_path() + sample_path("config_one_finding_active.json"), encoding="utf-8") as test_file: + with open(sample_path("config_one_finding_active.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) @@ -33,7 +33,7 @@ def 
test_one_finding_active(self): self.assertTrue(finding.active) def test_many_findings(self): - with open(get_unit_tests_path() + sample_path("config_many_findings.json"), encoding="utf-8") as test_file: + with open(sample_path("config_many_findings.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(3, len(findings)) @@ -42,13 +42,13 @@ def test_many_findings(self): self.assertEqual("This is a Security Hub Finding \nThis AWS control checks whether AWS Multi-Factor Authentication (MFA) is enabled for all AWS Identity and Access Management (IAM) users that use a console password.\n**AWS Finding ARN:** arn:aws:securityhub:us-east-1:012345678912:subscription/aws-foundational-security-best-practices/v/1.0.0/IAM.5/finding/de861909-2d26-4e45-bd86-19d2ab6ceef1\n**Resource IDs:** AWS::::Account:012345678912\n**AwsAccountId:** 012345678912\n**Generator ID:** aws-foundational-security-best-practices/v/1.0.0/IAM.5\n", finding.description) def test_repeated_findings(self): - with open(get_unit_tests_path() + sample_path("config_repeated_findings.json"), encoding="utf-8") as test_file: + with open(sample_path("config_repeated_findings.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) def test_unique_id(self): - with open(get_unit_tests_path() + sample_path("config_one_finding.json"), encoding="utf-8") as test_file: + with open(sample_path("config_one_finding.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual( @@ -57,7 +57,7 @@ def test_unique_id(self): ) def test_inspector_ec2(self): - with open(get_unit_tests_path() + sample_path("inspector_ec2_cve.json"), encoding="utf-8") as test_file: + with open(sample_path("inspector_ec2_cve.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(5, len(findings)) @@ -71,7 +71,7 @@ def test_inspector_ec2(self): self.assertEqual("AwsEc2Instance arn:aws:ec2:us-east-1:XXXXXXXXXXXX:i-11111111111111111", endpoint.host) def test_inspector_ec2_with_no_vulnerabilities(self): - with open(get_unit_tests_path() + sample_path("inspector_ec2_cve_no_vulnerabilities.json"), encoding="utf-8") as test_file: + with open(sample_path("inspector_ec2_cve_no_vulnerabilities.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) @@ -79,7 +79,7 @@ def test_inspector_ec2_with_no_vulnerabilities(self): self.assertEqual(finding.component_name, "AwsEc2Instance") def test_inspector_ec2_ghsa(self): - with open(get_unit_tests_path() + sample_path("inspector_ec2_ghsa.json"), encoding="utf-8") as test_file: + with open(sample_path("inspector_ec2_ghsa.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) @@ -94,7 +94,7 @@ def test_inspector_ec2_ghsa(self): self.assertEqual("AwsEc2Instance arn:aws:ec2:eu-central-1:012345678912:instance/i-07c11cc535d830123", endpoint.host) def test_inspector_ecr(self): - with open(get_unit_tests_path() + sample_path("inspector_ecr.json"), encoding="utf-8") as test_file: + with open(sample_path("inspector_ecr.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = 
parser.get_findings(test_file, Test()) self.assertEqual(7, len(findings)) @@ -111,7 +111,7 @@ def test_inspector_ecr(self): self.assertEqual("AwsEcrContainerImage arn:aws:ecr:eu-central-1:123456789012:repository/repo-os/sha256:af965ef68c78374a5f987fce98c0ddfa45801df2395bf012c50b863e65978d74", endpoint.host) def test_guardduty(self): - with open(get_unit_tests_path() + sample_path("guardduty.json"), encoding="utf-8") as test_file: + with open(sample_path("guardduty.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(4, len(findings)) @@ -128,7 +128,7 @@ def test_guardduty(self): self.assertEqual("This is a GuardDuty Finding\nAPIs commonly used in Discovery tactics were invoked by user AssumedRole : 123123123, under anomalous circumstances. Such activity is not typically seen from this user.\n**AWS Finding ARN:** arn:aws:guardduty:us-east-1:123456789012:detector/123456789/finding/2123123123123\n**SourceURL:** [https://us-east-1.console.aws.amazon.com/guardduty/home?region=us-east-1#/findings?macros=current&fId=2123123123123](https://us-east-1.console.aws.amazon.com/guardduty/home?region=us-east-1#/findings?macros=current&fId=2123123123123)\n**AwsAccountId:** 123456789012\n**Region:** us-east-1\n**Generator ID:** arn:aws:guardduty:us-east-1:123456789012:detector/123456789\n", finding.description) def test_issue_10956(self): - with open(get_unit_tests_path() + sample_path("issue_10956.json"), encoding="utf-8") as test_file: + with open(sample_path("issue_10956.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) @@ -136,7 +136,7 @@ def test_issue_10956(self): self.assertEqual("0.00239", finding.epss_score) def test_missing_account_id(self): - with open(get_unit_tests_path() + sample_path("missing_account_id.json"), encoding="utf-8") as test_file: + with open(sample_path("missing_account_id.json"), encoding="utf-8") as test_file: parser = AwsSecurityHubParser() findings = parser.get_findings(test_file, Test()) self.assertEqual(1, len(findings)) diff --git a/unittests/tools/test_azure_security_center_recommendations_parser.py b/unittests/tools/test_azure_security_center_recommendations_parser.py index 3ee1beefe3f..b61055fa07d 100644 --- a/unittests/tools/test_azure_security_center_recommendations_parser.py +++ b/unittests/tools/test_azure_security_center_recommendations_parser.py @@ -2,19 +2,19 @@ from dojo.models import Test from dojo.tools.azure_security_center_recommendations.parser import AzureSecurityCenterRecommendationsParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestAzureSecurityCenterRecommendationsParser(DojoTestCase): def test_parse_file_with_no_findings(self): - with open("unittests/scans/azure_security_center_recommendations/zero_vulns.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("azure_security_center_recommendations") / "zero_vulns.csv", encoding="utf-8") as testfile: parser = AzureSecurityCenterRecommendationsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_multiple_findings(self): - with open("unittests/scans/azure_security_center_recommendations/many_vulns.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("azure_security_center_recommendations") / "many_vulns.csv", 
encoding="utf-8") as testfile: parser = AzureSecurityCenterRecommendationsParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_bandit_parser.py b/unittests/tools/test_bandit_parser.py index 6e51768ced9..5603d8ebfe5 100644 --- a/unittests/tools/test_bandit_parser.py +++ b/unittests/tools/test_bandit_parser.py @@ -4,18 +4,18 @@ from dojo.models import Test from dojo.tools.bandit.parser import BanditParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestBanditParser(DojoTestCase): def test_bandit_parser_has_no_finding(self): - with open("unittests/scans/bandit/no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("bandit") / "no_vuln.json", encoding="utf-8") as testfile: parser = BanditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_bandit_parser_has_one_finding(self): - with open("unittests/scans/bandit/one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("bandit") / "one_vuln.json", encoding="utf-8") as testfile: parser = BanditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -33,7 +33,7 @@ def test_bandit_parser_has_one_finding(self): self.assertIn("https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html", item.references) def test_bandit_parser_has_many_findings(self): - with open("unittests/scans/bandit/many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("bandit") / "many_vulns.json", encoding="utf-8") as testfile: parser = BanditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(214, len(findings)) @@ -48,7 +48,7 @@ def test_bandit_parser_has_many_findings(self): self.assertIn("https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html", item.references) def test_bandit_parser_has_many_findings_recent(self): - with open("unittests/scans/bandit/dd.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("bandit") / "dd.json", encoding="utf-8") as testfile: parser = BanditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(47, len(findings)) @@ -62,7 +62,7 @@ def test_bandit_parser_has_many_findings_recent(self): self.assertEqual("Certain", item.get_scanner_confidence_text()) def test_bandit_parser_has_many_findings_recent2(self): - with open("unittests/scans/bandit/dd2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("bandit") / "dd2.json", encoding="utf-8") as testfile: parser = BanditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(165, len(findings)) diff --git a/unittests/tools/test_bearer_cli_parser.py b/unittests/tools/test_bearer_cli_parser.py index 92a7b55098a..1da0e02a775 100644 --- a/unittests/tools/test_bearer_cli_parser.py +++ b/unittests/tools/test_bearer_cli_parser.py @@ -2,12 +2,13 @@ from dojo.models import Test from dojo.tools.bearer_cli.parser import BearerCLIParser +from unittests.dojo_test_case import get_unit_tests_scans_path class TestBearerParser(TestCase): def test_bearer_parser_with_one_vuln_has_one_findings(self): - testfile = open("unittests/scans/bearer_cli/bearer_cli_one_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("bearer_cli") / "bearer_cli_one_vul.json", encoding="utf-8") parser = BearerCLIParser() findings = parser.get_findings(testfile, Test()) 
         testfile.close()
@@ -21,7 +22,7 @@ def test_bearer_parser_with_one_vuln_has_one_findings(self):
         self.assertEqual(581, findings[0].line)
 
     def test_bearer_parser_with_many_vuln_has_many_findings(self):
-        testfile = open("unittests/scans/bearer_cli/bearer_cli_many_vul.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("bearer_cli") / "bearer_cli_many_vul.json", encoding="utf-8")
         parser = BearerCLIParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_blackduck_binary_analysis_parser.py b/unittests/tools/test_blackduck_binary_analysis_parser.py
index 92d92c111b0..22a810cfce7 100644
--- a/unittests/tools/test_blackduck_binary_analysis_parser.py
+++ b/unittests/tools/test_blackduck_binary_analysis_parser.py
@@ -1,19 +1,18 @@
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.blackduck_binary_analysis.parser import BlackduckBinaryAnalysisParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBlackduckBinaryAnalysisParser(DojoTestCase):
    def test_parse_no_vulns(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck_binary_analysis/no_vuln.csv")
+        testfile = get_unit_tests_scans_path("blackduck_binary_analysis") / "no_vuln.csv"
         parser = BlackduckBinaryAnalysisParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(0, len(findings))
 
     def test_parse_one_vuln(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck_binary_analysis/one_vuln.csv")
+        testfile = get_unit_tests_scans_path("blackduck_binary_analysis") / "one_vuln.csv"
         parser = BlackduckBinaryAnalysisParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(1, len(findings))
@@ -46,7 +45,7 @@ def test_parse_one_vuln(self):
         self.assertIsNotNone(finding.unique_id_from_tool)
 
     def test_parse_many_vulns(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck_binary_analysis/many_vulns.csv")
+        testfile = get_unit_tests_scans_path("blackduck_binary_analysis") / "many_vulns.csv"
         parser = BlackduckBinaryAnalysisParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(5, len(findings))
diff --git a/unittests/tools/test_blackduck_component_risk_parser.py b/unittests/tools/test_blackduck_component_risk_parser.py
index ccb613ce9e3..605c738281d 100644
--- a/unittests/tools/test_blackduck_component_risk_parser.py
+++ b/unittests/tools/test_blackduck_component_risk_parser.py
@@ -1,16 +1,12 @@
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.blackduck_component_risk.parser import BlackduckComponentRiskParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBlackduckComponentRiskParser(DojoTestCase):
     def test_blackduck_enhanced_zip_upload(self):
-        testfile = Path(
-            get_unit_tests_path() + "/scans/blackduck_component_risk/"
-            "blackduck_hub_component_risk.zip",
-        )
+        testfile = get_unit_tests_scans_path("blackduck_component_risk") / "blackduck_hub_component_risk.zip"
         parser = BlackduckComponentRiskParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(12, len(findings))
diff --git a/unittests/tools/test_blackduck_parser.py b/unittests/tools/test_blackduck_parser.py
index d2d16c6942c..aaa9b723185 100644
--- a/unittests/tools/test_blackduck_parser.py
+++ b/unittests/tools/test_blackduck_parser.py
@@ -1,25 +1,24 @@
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.blackduck.parser import BlackduckParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBlackduckHubParser(DojoTestCase):
     def test_blackduck_csv_parser_has_no_finding(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck/no_vuln.csv")
+        testfile = get_unit_tests_scans_path("blackduck") / "no_vuln.csv"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(0, len(findings))
 
     def test_blackduck_csv_parser_has_one_finding(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck/one_vuln.csv")
+        testfile = get_unit_tests_scans_path("blackduck") / "one_vuln.csv"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(1, len(findings))
 
     def test_blackduck_csv_parser_has_many_findings(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck/many_vulns.csv")
+        testfile = get_unit_tests_scans_path("blackduck") / "many_vulns.csv"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(24, len(findings))
@@ -32,7 +31,7 @@ def test_blackduck_csv_parser_has_many_findings(self):
         self.assertEqual(findings[2].component_version, "4.5.2")
 
     def test_blackduck_csv_parser_new_format_has_many_findings(self):
-        testfile = Path(get_unit_tests_path() + "/scans/blackduck/many_vulns_new_format.csv")
+        testfile = get_unit_tests_scans_path("blackduck") / "many_vulns_new_format.csv"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         findings = list(findings)
@@ -43,17 +42,13 @@ def test_blackduck_csv_parser_new_format_has_many_findings(self):
         self.assertEqual(findings[2].component_version, "2.9.9.3")
 
     def test_blackduck_enhanced_has_many_findings(self):
-        testfile = Path(
-            get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest.zip",
-        )
+        testfile = get_unit_tests_scans_path("blackduck") / "blackduck_enhanced_py3_unittest.zip"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(11, len(findings))
 
     def test_blackduck_enhanced_zip_upload(self):
-        testfile = Path(
-            get_unit_tests_path() + "/scans/blackduck/blackduck_enhanced_py3_unittest_v2.zip",
-        )
+        testfile = get_unit_tests_scans_path("blackduck") / "blackduck_enhanced_py3_unittest_v2.zip"
         parser = BlackduckParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(11, len(findings))
diff --git a/unittests/tools/test_brakeman_parser.py b/unittests/tools/test_brakeman_parser.py
index 185c7f22042..34b8cc1bc23 100644
--- a/unittests/tools/test_brakeman_parser.py
+++ b/unittests/tools/test_brakeman_parser.py
@@ -1,24 +1,24 @@
 from dojo.models import Test
 from dojo.tools.brakeman.parser import BrakemanParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBrakemanParser(DojoTestCase):
     def test_parse_file_no_finding(self):
-        with open("unittests/scans/brakeman/no_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("brakeman") / "no_finding.json", encoding="utf-8") as testfile:
             parser = BrakemanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_file_has_two_findings(self):
-        with open("unittests/scans/brakeman/two_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("brakeman") / "two_findings.json", encoding="utf-8") as testfile:
             parser = BrakemanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
 
     def test_parse_file_has_many_findings(self):
-        with open("unittests/scans/brakeman/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("brakeman") / "many_findings.json", encoding="utf-8") as testfile:
             parser = BrakemanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(18, len(findings))
diff --git a/unittests/tools/test_bugcrowd_parser.py b/unittests/tools/test_bugcrowd_parser.py
index 87a3083ffb2..12e0a6b00d0 100644
--- a/unittests/tools/test_bugcrowd_parser.py
+++ b/unittests/tools/test_bugcrowd_parser.py
@@ -2,13 +2,13 @@
 
 from dojo.models import Test
 from dojo.tools.bugcrowd.parser import BugCrowdParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBugCrowdParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/bugcrowd/BugCrowd-zero.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("bugcrowd") / "BugCrowd-zero.csv", encoding="utf-8") as testfile:
             parser = BugCrowdParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -17,7 +17,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
         self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_findings(self):
-        with open("unittests/scans/bugcrowd/BugCrowd-one.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("bugcrowd") / "BugCrowd-one.csv", encoding="utf-8") as testfile:
             parser = BugCrowdParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -27,7 +27,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
         self.assertEqual(findings[0].date, datetime(2020, 3, 1, 6, 15, 6, tzinfo=UTC))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/bugcrowd/BugCrowd-many.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("bugcrowd") / "BugCrowd-many.csv", encoding="utf-8") as testfile:
             parser = BugCrowdParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_bundler_audit_parser.py b/unittests/tools/test_bundler_audit_parser.py
index 2fa4b4bc31d..66df114691b 100644
--- a/unittests/tools/test_bundler_audit_parser.py
+++ b/unittests/tools/test_bundler_audit_parser.py
@@ -1,14 +1,12 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.bundler_audit.parser import BundlerAuditParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBundlerAuditParser(DojoTestCase):
     def test_get_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/bundler_audit/bundler-audit_v0.6.1.txt"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("bundler_audit") / "bundler-audit_v0.6.1.txt", encoding="utf-8") as testfile:
             parser = BundlerAuditParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -30,7 +28,7 @@ def test_get_findings(self):
             self.assertEqual("2.2.3", finding.component_version)
 
     def test_get_findings_version9(self):
-        with open(path.join(Path(__file__).parent, "../scans/bundler_audit/version_9.0.txt"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("bundler_audit") / "version_9.0.txt", encoding="utf-8") as testfile:
             parser = BundlerAuditParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_burp_api_parser.py b/unittests/tools/test_burp_api_parser.py
index af34ecac7af..282ab428e6b 100644
--- a/unittests/tools/test_burp_api_parser.py
+++ b/unittests/tools/test_burp_api_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.burp_api.parser import BurpApiParser, convert_confidence, convert_severity
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestParser(DojoTestCase):
     def test_example_report(self):
-        testfile = get_unit_tests_path() + "/scans/burp_api/example.json"
+        testfile = get_unit_tests_scans_path("burp_api") / "example.json"
         with open(testfile, encoding="utf-8") as f:
             parser = BurpApiParser()
             findings = parser.get_findings(f, Test())
@@ -24,7 +24,7 @@ def test_example_report(self):
             self.assertIsNotNone(item.impact)
 
     def test_validate_more(self):
-        testfile = get_unit_tests_path() + "/scans/burp_api/many_vulns.json"
+        testfile = get_unit_tests_scans_path("burp_api") / "many_vulns.json"
         with open(testfile, encoding="utf-8") as f:
             parser = BurpApiParser()
             findings = parser.get_findings(f, Test())
@@ -61,7 +61,7 @@ def test_convert_confidence(self):
         self.assertIsNone(convert_confidence({}))
 
     def test_fix_issue_9128(self):
-        testfile = get_unit_tests_path() + "/scans/burp_api/fix_issue_9128.json"
+        testfile = get_unit_tests_scans_path("burp_api") / "fix_issue_9128.json"
         with open(testfile, encoding="utf-8") as f:
             parser = BurpApiParser()
             findings = parser.get_findings(f, Test())
diff --git a/unittests/tools/test_burp_dastardly_parser.py b/unittests/tools/test_burp_dastardly_parser.py
index 8bf959b23c4..2972acfd2ee 100644
--- a/unittests/tools/test_burp_dastardly_parser.py
+++ b/unittests/tools/test_burp_dastardly_parser.py
@@ -1,15 +1,13 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.burp_dastardly.parser import BurpDastardlyParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBurpParser(DojoTestCase):
 
     def test_burp_dastardly_multiple_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_dastardly/many_findings.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_dastardly") / "many_findings.xml", encoding="utf-8") as test_file:
             parser = BurpDastardlyParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
diff --git a/unittests/tools/test_burp_enterprise_parser.py b/unittests/tools/test_burp_enterprise_parser.py
index cc0ce744e0e..bf493dacc1b 100644
--- a/unittests/tools/test_burp_enterprise_parser.py
+++ b/unittests/tools/test_burp_enterprise_parser.py
@@ -1,15 +1,13 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.burp_enterprise.parser import BurpEnterpriseParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBurpEnterpriseParser(DojoTestCase):
 
     def test_burp_enterprise_with_multiple_vulns(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_enterprise/many_vulns.html"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_enterprise") / "many_vulns.html", encoding="utf-8") as test_file:
             parser = BurpEnterpriseParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -38,7 +36,7 @@ def test_burp_enterprise_with_multiple_vulns(self):
         self.assertIn("**Issue detail**:\nFingerprint Details:\n\nWAF Type : redacted\nWAF tech. details : Cloud-based CDN, WAF & DDoS prevention", finding.description)
 
     def test_burp_enterprise_with_multiple_vulns_newer_format(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_enterprise/many_vulns_updated_format.html"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_enterprise") / "many_vulns_updated_format.html", encoding="utf-8") as test_file:
             parser = BurpEnterpriseParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
diff --git a/unittests/tools/test_burp_graphql_parser.py b/unittests/tools/test_burp_graphql_parser.py
index e163c52fead..bfa5e057eca 100644
--- a/unittests/tools/test_burp_graphql_parser.py
+++ b/unittests/tools/test_burp_graphql_parser.py
@@ -1,15 +1,13 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.burp_graphql.parser import BurpGraphQLParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBurpGraphQLParser(DojoTestCase):
 
     def test_burp_one_finding(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/one_finding.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "one_finding.json", encoding="utf-8") as test_file:
             parser = BurpGraphQLParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -34,7 +32,7 @@ def test_burp_one_finding(self):
         self.assertIn("CWE-79", findings[0].references)
 
     def test_burp_two_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/two_findings.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "two_findings.json", encoding="utf-8") as test_file:
             parser = BurpGraphQLParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -50,27 +48,27 @@ def test_burp_two_findings(self):
         self.assertIn("description 3", findings[1].description)
 
     def test_burp_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/no_findings.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "no_findings.json", encoding="utf-8") as test_file:
             parser = BurpGraphQLParser()
             findings = parser.get_findings(test_file, Test())
 
             self.assertEqual(0, len(findings))
 
     def test_burp_null_title(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/null_title.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "null_title.json", encoding="utf-8") as test_file:
             with self.assertRaises(ValueError):
                 parser = BurpGraphQLParser()
                 parser.get_findings(test_file, Test())
 
     def test_burp_null_request_segments(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/null_request_segments.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "null_request_segments.json", encoding="utf-8") as test_file:
             parser = BurpGraphQLParser()
             findings = parser.get_findings(test_file, Test())
 
             self.assertEqual(1, len(findings))
 
     def test_burp_null_data(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp_graphql/null_data.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp_graphql") / "null_data.json", encoding="utf-8") as test_file:
             parser = BurpGraphQLParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
diff --git a/unittests/tools/test_burp_parser.py b/unittests/tools/test_burp_parser.py
index a75ea4f9148..8145ea86e1e 100644
--- a/unittests/tools/test_burp_parser.py
+++ b/unittests/tools/test_burp_parser.py
@@ -1,15 +1,13 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.burp.parser import BurpParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestBurpParser(DojoTestCase):
 
     def test_burp_with_one_vuln_has_one_finding(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp/one_finding.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp") / "one_finding.xml", encoding="utf-8") as test_file:
             parser = BurpParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -21,7 +19,7 @@ def test_burp_with_one_vuln_has_one_finding(self):
         self.assertEqual(3, len(findings[0].unsaved_endpoints))
 
     def test_burp_with_multiple_vulns_has_multiple_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp/seven_findings.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp") / "seven_findings.xml", encoding="utf-8") as test_file:
             parser = BurpParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -35,7 +33,7 @@ def test_burp_with_multiple_vulns_has_multiple_findings(self):
         self.assertEqual("Frameable response (potential Clickjacking)", finding.title)
 
     def test_burp_with_one_vuln_with_blank_response(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp/one_finding_with_blank_response.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp") / "one_finding_with_blank_response.xml", encoding="utf-8") as test_file:
             parser = BurpParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -51,7 +49,7 @@ def test_burp_with_one_vuln_with_blank_response(self):
         self.assertEqual("High", findings[0].severity)
 
     def test_burp_with_one_vuln_with_cwe(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp/one_finding_with_cwe.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp") / "one_finding_with_cwe.xml", encoding="utf-8") as test_file:
             parser = BurpParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
@@ -67,7 +65,7 @@ def test_burp_with_one_vuln_with_cwe(self):
         self.assertEqual("Info", findings[0].severity)
 
     def test_burp_issue4399(self):
-        with open(path.join(Path(__file__).parent, "../scans/burp/issue4399.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("burp") / "issue4399.xml", encoding="utf-8") as test_file:
             parser = BurpParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
diff --git a/unittests/tools/test_cargo_audit_parser.py b/unittests/tools/test_cargo_audit_parser.py
index d8c4ac7fd22..556aab0b9e3 100644
--- a/unittests/tools/test_cargo_audit_parser.py
+++ b/unittests/tools/test_cargo_audit_parser.py
@@ -1,18 +1,18 @@
from dojo.models import Test from dojo.tools.cargo_audit.parser import CargoAuditParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCargoAuditParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/cargo_audit/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cargo_audit") / "no_findings.json", encoding="utf-8") as testfile: parser = CargoAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_many_findings(self): - with open("unittests/scans/cargo_audit/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cargo_audit") / "many_findings.json", encoding="utf-8") as testfile: parser = CargoAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) diff --git a/unittests/tools/test_checkmarx_cxflow_sast_parser.py b/unittests/tools/test_checkmarx_cxflow_sast_parser.py index 7481002e3d2..cc85078f01c 100644 --- a/unittests/tools/test_checkmarx_cxflow_sast_parser.py +++ b/unittests/tools/test_checkmarx_cxflow_sast_parser.py @@ -2,7 +2,7 @@ from dojo.models import Engagement, Product, Test from dojo.tools.checkmarx_cxflow_sast.parser import CheckmarxCXFlowSastParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCheckmarxCxflowSast(DojoTestCase): @@ -18,7 +18,7 @@ def init(self, reportFilename): def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self): my_file_handle, _, _, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/no_finding.json", + get_unit_tests_scans_path("checkmarx_cxflow_sast") / "no_finding.json", ) parser = CheckmarxCXFlowSastParser() findings = parser.get_findings(my_file_handle, test) @@ -26,7 +26,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_1_finding(self): my_file_handle, _, _, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/1-finding.json", + get_unit_tests_scans_path("checkmarx_cxflow_sast") / "1-finding.json", ) parser = CheckmarxCXFlowSastParser() findings = parser.get_findings(my_file_handle, test) @@ -55,7 +55,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_1_finding(s def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_4_findings(self): my_file_handle, _, _, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_cxflow_sast/4-findings.json", + get_unit_tests_scans_path("checkmarx_cxflow_sast") / "4-findings.json", ) parser = CheckmarxCXFlowSastParser() findings = parser.get_findings(my_file_handle, test) diff --git a/unittests/tools/test_checkmarx_one_parser.py b/unittests/tools/test_checkmarx_one_parser.py index 2c1efcce117..0039c09db11 100644 --- a/unittests/tools/test_checkmarx_one_parser.py +++ b/unittests/tools/test_checkmarx_one_parser.py @@ -3,7 +3,7 @@ from dojo.models import Test from dojo.tools.checkmarx_one.parser import CheckmarxOneParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path logger = logging.getLogger(__name__) @@ -11,7 +11,7 @@ class TestCheckmarxOneParser(DojoTestCase): def test_checkmarx_one_many_vulns(self): - with 
open("unittests/scans/checkmarx_one/checkmarx_one.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "checkmarx_one.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -29,13 +29,13 @@ def test_checkmarx_one_many_vulns(self): self.assertEqual("/src/helpers/Constants.ts", finding_test.file_path) def test_checkmarx_one_no_findings(self): - with open("unittests/scans/checkmarx_one/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "no_findings.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_checkmarx_one_many_findings(self): - with open("unittests/scans/checkmarx_one/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "many_findings.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(6, len(findings)) @@ -52,7 +52,7 @@ def test_checkmarx_one_many_findings(self): self.assertEqual("/qe/testharness/Dockerfile", finding_test.file_path) def test_checkmarx_one_sca_10770(self): - with open("unittests/scans/checkmarx_one/checkmarx_one_sca_10770.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "checkmarx_one_sca_10770.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(8, len(findings)) @@ -69,7 +69,7 @@ def test_checkmarx_one_sca_10770(self): self.assertEqual(89, finding_test.cwe) def test_checkmarx_one_no_description(self): - with open("unittests/scans/checkmarx_one/checkmarx_one_format_two.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "checkmarx_one_format_two.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -135,7 +135,7 @@ def test_sca_finding(finding): # Not implemented yet pass - with open("unittests/scans/checkmarx_one/vulnerabilities_from_scan_results.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkmarx_one") / "vulnerabilities_from_scan_results.json", encoding="utf-8") as testfile: parser = CheckmarxOneParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(146, len(findings)) diff --git a/unittests/tools/test_checkmarx_osa_parser.py b/unittests/tools/test_checkmarx_osa_parser.py index 74592b51246..3a696f7a4af 100644 --- a/unittests/tools/test_checkmarx_osa_parser.py +++ b/unittests/tools/test_checkmarx_osa_parser.py @@ -2,7 +2,7 @@ from dojo.models import Engagement, Product, Test from dojo.tools.checkmarx_osa.parser import CheckmarxOsaParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCheckmarxOsaParser(DojoTestCase): @@ -28,7 +28,7 @@ def test_checkmarx_osa_parse_file_with_no_vulnerabilities_has_no_findings( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/no_finding.json", + get_unit_tests_scans_path("checkmarx_osa") / "no_finding.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -42,7 +42,7 @@ def 
test_checkmarx_osa_parse_file_with_single_vulnerability_has_single_finding( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -94,7 +94,7 @@ def test_checkmarx_osa_parse_file_with_false_positive_is_false_positive( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_false_positive.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding_false_positive.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -115,7 +115,7 @@ def test_checkmarx_osa_parse_file_with_confirmed_is_verified( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_confirmed.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding_confirmed.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -136,7 +136,7 @@ def test_checkmarx_osa_parse_file_with_multiple_findings( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/multiple_findings.json", + get_unit_tests_scans_path("checkmarx_osa") / "multiple_findings.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -150,7 +150,7 @@ def test_checkmarx_osa_parse_file_with_no_score( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_score.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding_no_score.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -166,7 +166,7 @@ def test_checkmarx_osa_parse_file_with_no_url( self, ): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_url.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding_no_url.json", ) parser = CheckmarxOsaParser() findings = parser.get_findings(my_file_handle, test) @@ -183,7 +183,7 @@ def test_checkmarx_osa_parse_file_with_no_libraryId_raises_ValueError( ): with self.assertRaises(ValueError) as context: my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx_osa/single_finding_no_libraryId.json", + get_unit_tests_scans_path("checkmarx_osa") / "single_finding_no_libraryId.json", ) with my_file_handle: parser = CheckmarxOsaParser() diff --git a/unittests/tools/test_checkmarx_parser.py b/unittests/tools/test_checkmarx_parser.py index 322b28faa3b..50d830936e9 100644 --- a/unittests/tools/test_checkmarx_parser.py +++ b/unittests/tools/test_checkmarx_parser.py @@ -4,7 +4,7 @@ from dojo.models import Engagement, Product, Test from dojo.tools.checkmarx.parser import CheckmarxParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCheckmarxParser(DojoTestCase): @@ -30,7 +30,7 @@ def teardown(self, my_file_handle): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings(self, mock): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + 
"/scans/checkmarx/no_finding.xml", + get_unit_tests_scans_path("checkmarx") / "no_finding.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -41,7 +41,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock): """Checkmarx detailed scanner, with all vulnerabilities from checkmarx""" my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/no_finding.xml", + get_unit_tests_scans_path("checkmarx") / "no_finding.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -52,7 +52,7 @@ def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self, mock) @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_finding(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding.xml", + get_unit_tests_scans_path("checkmarx") / "single_finding.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -80,7 +80,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding.xml", + get_unit_tests_scans_path("checkmarx") / "single_finding.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -214,7 +214,7 @@ def check_parse_file_with_single_vulnerability_has_single_finding(self, findings @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml", + get_unit_tests_scans_path("checkmarx") / "single_finding_false_positive.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -226,7 +226,7 @@ def test_file_name_aggregated_parse_file_with_false_positive_is_false_positive(s @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_false_positive_is_false_positive(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding_false_positive.xml", + get_unit_tests_scans_path("checkmarx") / "single_finding_false_positive.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -255,7 +255,7 @@ def check_parse_file_with_false_positive_is_false_positive(self, findings): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_false_p(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/two_aggregated_findings_one_is_false_positive.xml", + get_unit_tests_scans_path("checkmarx") / "two_aggregated_findings_one_is_false_positive.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -281,7 +281,7 @@ def test_file_name_aggregated_parse_file_with_two_aggregated_findings_one_is_fal @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock): 
my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -300,7 +300,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -323,7 +323,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sinkFilename_is_aggregated(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings_different_sourceFilename_same_sinkFilename.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -337,7 +337,7 @@ def test_file_name_aggregated_parse_file_with_different_sourceFilename_same_sink @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_not_aggregated(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings_different_sourceFilename_same_sinkFilename.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings_different_sourceFilename_same_sinkFilename.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -354,7 +354,7 @@ def test_detailed_parse_file_with_different_sourceFilename_same_sinkFilename_is_ @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings_same_sourceFilename_different_sinkFilename.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -366,7 +366,7 @@ def test_file_name_aggregated_parse_file_with_same_sourceFilename_different_sink @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_not_aggregated(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_sourceFilename_different_sinkFilename.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings_same_sourceFilename_different_sinkFilename.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -381,7 +381,7 @@ def test_detailed_parse_file_with_same_sourceFilename_different_sinkFilename_is_ @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + 
"/scans/checkmarx/utf8_replacement_char.xml", + get_unit_tests_scans_path("checkmarx") / "utf8_replacement_char.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -409,7 +409,7 @@ def test_file_name_aggregated_parse_file_with_utf8_replacement_char(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_utf8_replacement_char(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/utf8_replacement_char.xml", + get_unit_tests_scans_path("checkmarx") / "utf8_replacement_char.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -527,7 +527,7 @@ def check_parse_file_with_utf8_replacement_char(self, findings): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml", + get_unit_tests_scans_path("checkmarx") / "utf8_various_non_ascii_char.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -555,7 +555,7 @@ def test_file_name_aggregated_parse_file_with_utf8_various_non_ascii_char(self, @patch("dojo.tools.checkmarx.parser.add_language") def test_detailed_parse_file_with_utf8_various_non_ascii_char(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/utf8_various_non_ascii_char.xml", + get_unit_tests_scans_path("checkmarx") / "utf8_various_non_ascii_char.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -673,7 +673,7 @@ def check_parse_file_with_utf8_various_non_ascii_char(self, findings): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings_same_query_id.xml", + get_unit_tests_scans_path("checkmarx") / "multiple_findings_same_query_id.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -693,7 +693,7 @@ def test_file_with_multiple_findings_is_aggregated_with_query_id(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_with_empty_filename(self, mock): my_file_handle, product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_no_filename.xml", + get_unit_tests_scans_path("checkmarx") / "single_no_filename.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -713,7 +713,7 @@ def test_file_with_empty_filename(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_with_many_aggregated_findings(self, mock): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/many_aggregated_findings.xml", + get_unit_tests_scans_path("checkmarx") / "many_aggregated_findings.xml", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, test) @@ -730,7 +730,7 @@ def test_file_with_many_aggregated_findings(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_with_many_findings_json(self, mock): my_file_handle, _product, _engagement, _test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json", + get_unit_tests_scans_path("checkmarx") / "multiple_findings.json", ) parser = CheckmarxParser() 
findings = parser.get_findings(my_file_handle, Test()) @@ -764,7 +764,7 @@ def test_file_with_many_findings_json(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_file_issue6956(self, mock): my_file_handle, _product, _engagement, _test = self.init( - get_unit_tests_path() + "/scans/checkmarx/sample_report.json", + get_unit_tests_scans_path("checkmarx") / "sample_report.json", ) parser = CheckmarxParser() findings = parser.get_findings(my_file_handle, Test()) @@ -827,7 +827,7 @@ def test_file_issue6956(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_finding_date_should_be_date_xml(self, mock): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/single_finding.xml", + get_unit_tests_scans_path("checkmarx") / "single_finding.xml", ) parser = CheckmarxParser() parser.set_mode("detailed") @@ -838,7 +838,7 @@ def test_finding_date_should_be_date_xml(self, mock): @patch("dojo.tools.checkmarx.parser.add_language") def test_finding_date_should_be_date_json(self, mock): my_file_handle, _product, _engagement, test = self.init( - get_unit_tests_path() + "/scans/checkmarx/multiple_findings.json", + get_unit_tests_scans_path("checkmarx") / "multiple_findings.json", ) parser = CheckmarxParser() parser.set_mode("detailed") diff --git a/unittests/tools/test_checkov_parser.py b/unittests/tools/test_checkov_parser.py index 9e4cd58cbdb..5980c4ac83a 100644 --- a/unittests/tools/test_checkov_parser.py +++ b/unittests/tools/test_checkov_parser.py @@ -1,36 +1,36 @@ from dojo.models import Test from dojo.tools.checkov.parser import CheckovParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCheckovParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/checkov/checkov-report-0-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkov") / "checkov-report-0-vuln.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_no_vuln_has_no_findings_v2(self): - with open("unittests/scans/checkov/checkov2-report-0-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkov") / "checkov2-report-0-vuln.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/checkov/checkov-report-1-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkov") / "checkov-report-1-vuln.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/checkov/checkov-report-many-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkov") / "checkov-report-many-vuln.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) self.assertGreater(len(findings), 2) def test_parse_file_with_multiple_check_type_has_multiple_check_type(self): - with open("unittests/scans/checkov/checkov-report-multiple-check_type.json", encoding="utf-8") as testfile: + with 
open(get_unit_tests_scans_path("checkov") / "checkov-report-multiple-check_type.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) @@ -80,7 +80,7 @@ def test_parse_file_with_multiple_check_type_has_multiple_check_type(self): ) def test_parse_file_with_specified_severity(self): - with open("unittests/scans/checkov/checkov-report-severity.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("checkov") / "checkov-report-severity.json", encoding="utf-8") as testfile: parser = CheckovParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) diff --git a/unittests/tools/test_chefinspect_parser.py b/unittests/tools/test_chefinspect_parser.py index 65aa6262810..ba4e5a4e21c 100644 --- a/unittests/tools/test_chefinspect_parser.py +++ b/unittests/tools/test_chefinspect_parser.py @@ -1,24 +1,24 @@ from dojo.models import Test from dojo.tools.chefinspect.parser import ChefInspectParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestChefInspectParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/chefinspect/no_finding.log", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("chefinspect") / "no_finding.log", encoding="utf-8") as testfile: parser = ChefInspectParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/chefinspect/one_finding.log", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("chefinspect") / "one_finding.log", encoding="utf-8") as testfile: parser = ChefInspectParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/chefinspect/many_findings.log", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("chefinspect") / "many_findings.log", encoding="utf-8") as testfile: parser = ChefInspectParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(10, len(findings)) diff --git a/unittests/tools/test_clair_parser.py b/unittests/tools/test_clair_parser.py index 858215fab6f..2712d1cbc4d 100644 --- a/unittests/tools/test_clair_parser.py +++ b/unittests/tools/test_clair_parser.py @@ -1,25 +1,25 @@ from dojo.tools.clair.parser import ClairParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestClairParser(DojoTestCase): def test_no_findings_clair(self): - my_file_handle = open("unittests/scans/clair/clair_empty.json", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clair_empty.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(0, len(findings)) def test_few_findings_clair(self): - my_file_handle = open("unittests/scans/clair/clair_few_vuln.json", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clair_few_vuln.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(4, len(findings)) def test_many_findings_clair(self): - my_file_handle = open("unittests/scans/clair/clair_many_vul.json", 
encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clair_many_vul.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() @@ -32,21 +32,21 @@ def test_many_findings_clair(self): self.assertEqual("CVE-2018-20839", finding.unsaved_vulnerability_ids[0]) def test_parse_no_content_no_findings_clairklar(self): - my_file_handle = open("unittests/scans/clair/clairklar_empty.json", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clairklar_empty.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(0, len(findings)) def test_high_findings_clairklar(self): - my_file_handle = open("unittests/scans/clair/clairklar_high.json", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clairklar_high.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(6, len(findings)) def test_mixed_findings_clairklar(self): - my_file_handle = open("unittests/scans/clair/clairklar_mixed.json", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("clair") / "clairklar_mixed.json", encoding="utf-8") parser = ClairParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() diff --git a/unittests/tools/test_cloudsploit_parser.py b/unittests/tools/test_cloudsploit_parser.py index a919b03ff9e..885b017ee4d 100644 --- a/unittests/tools/test_cloudsploit_parser.py +++ b/unittests/tools/test_cloudsploit_parser.py @@ -1,26 +1,26 @@ from dojo.models import Test from dojo.tools.cloudsploit.parser import CloudsploitParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCloudsploitParser(DojoTestCase): def test_cloudsploit_parser_with_no_vuln_has_no_findings(self): - testfile = open("unittests/scans/cloudsploit/cloudsploit_zero_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("cloudsploit") / "cloudsploit_zero_vul.json", encoding="utf-8") parser = CloudsploitParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_cloudsploit_parser_with_one_criticle_vuln_has_one_findings(self): - testfile = open("unittests/scans/cloudsploit/cloudsploit_one_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("cloudsploit") / "cloudsploit_one_vul.json", encoding="utf-8") parser = CloudsploitParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(1, len(findings)) def test_cloudsploit_parser_with_many_vuln_has_many_findings(self): - testfile = open("unittests/scans/cloudsploit/cloudsploit_many_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("cloudsploit") / "cloudsploit_many_vul.json", encoding="utf-8") parser = CloudsploitParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_cobalt_parser.py b/unittests/tools/test_cobalt_parser.py index ad93c3a83ed..bfec5fe50ea 100644 --- a/unittests/tools/test_cobalt_parser.py +++ b/unittests/tools/test_cobalt_parser.py @@ -1,25 +1,25 @@ from dojo.models import Test from dojo.tools.cobalt.parser import CobaltParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class 
TestCobaltParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/cobalt/cobalt_no_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cobalt") / "cobalt_no_vuln.csv", encoding="utf-8") as testfile: parser = CobaltParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/cobalt/cobalt_one_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cobalt") / "cobalt_one_vuln.csv", encoding="utf-8") as testfile: parser = CobaltParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/cobalt/cobalt_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cobalt") / "cobalt_many_vuln.csv", encoding="utf-8") as testfile: parser = CobaltParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) diff --git a/unittests/tools/test_codechecker_parser.py b/unittests/tools/test_codechecker_parser.py index 4f81bca4570..ff6c7125e4e 100644 --- a/unittests/tools/test_codechecker_parser.py +++ b/unittests/tools/test_codechecker_parser.py @@ -1,13 +1,13 @@ from dojo.models import Test from dojo.tools.codechecker.parser import CodeCheckerParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCodeCheckerParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): with open( - get_unit_tests_path() + "/scans/codechecker/cc-report-0-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("codechecker") / "cc-report-0-vuln.json", encoding="utf-8", ) as testfile: parser = CodeCheckerParser() findings = parser.get_findings(testfile, Test()) @@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self): def test_parse_file_with_one_vuln_has_one_finding(self): with open( - get_unit_tests_path() + "/scans/codechecker/cc-report-1-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("codechecker") / "cc-report-1-vuln.json", encoding="utf-8", ) as testfile: parser = CodeCheckerParser() findings = parser.get_findings(testfile, Test()) @@ -33,7 +33,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): def test_parse_file_with_multiple_vuln_has_multiple_findings(self): with open( - get_unit_tests_path() + "/scans/codechecker/cc-report-many-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("codechecker") / "cc-report-many-vuln.json", encoding="utf-8", ) as testfile: parser = CodeCheckerParser() findings = parser.get_findings(testfile, Test()) @@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self): def test_parse_file_with_various_review_statuses(self): with open( - get_unit_tests_path() + "/scans/codechecker/cc-report-review-status.json", encoding="utf-8", + get_unit_tests_scans_path("codechecker") / "cc-report-review-status.json", encoding="utf-8", ) as testfile: parser = CodeCheckerParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_contrast_parser.py b/unittests/tools/test_contrast_parser.py index 479da4d4735..421ec7d294b 100644 --- a/unittests/tools/test_contrast_parser.py +++ b/unittests/tools/test_contrast_parser.py @@ -2,7 +2,7 @@ from dojo.models import Engagement, Product, 
Test from dojo.tools.contrast.parser import ContrastParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestContrastParser(DojoTestCase): @@ -11,7 +11,7 @@ def test_example_report(self): test = Test() test.engagement = Engagement() test.engagement.product = Product() - with open("unittests/scans/contrast/contrast-node-goat.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("contrast") / "contrast-node-goat.csv", encoding="utf-8") as testfile: parser = ContrastParser() findings = parser.get_findings(testfile, test) for finding in findings: @@ -56,7 +56,7 @@ def test_example2_report(self): test = Test() test.engagement = Engagement() test.engagement.product = Product() - with open("unittests/scans/contrast/vulnerabilities2020-09-21.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("contrast") / "vulnerabilities2020-09-21.csv", encoding="utf-8") as testfile: parser = ContrastParser() findings = parser.get_findings(testfile, test) for finding in findings: diff --git a/unittests/tools/test_coverity_api_parser.py b/unittests/tools/test_coverity_api_parser.py index f6f468cfa0c..9be4e0d1251 100644 --- a/unittests/tools/test_coverity_api_parser.py +++ b/unittests/tools/test_coverity_api_parser.py @@ -2,31 +2,31 @@ from dojo.models import Test from dojo.tools.coverity_api.parser import CoverityApiParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestZapParser(DojoTestCase): def test_parse_wrong_file(self): with self.assertRaises(ValueError): - with open("unittests/scans/coverity_api/wrong.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "wrong.json", encoding="utf-8") as testfile: parser = CoverityApiParser() parser.get_findings(testfile, Test()) def test_parse_no_findings(self): - with open("unittests/scans/coverity_api/empty.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "empty.json", encoding="utf-8") as testfile: parser = CoverityApiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_only_quality(self): """This report only have quality findings""" - with open("unittests/scans/coverity_api/only_quality.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "only_quality.json", encoding="utf-8") as testfile: parser = CoverityApiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_some_findings(self): - with open("unittests/scans/coverity_api/few_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "few_findings.json", encoding="utf-8") as testfile: parser = CoverityApiParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -43,7 +43,7 @@ def test_parse_some_findings(self): self.assertEqual(22463, finding.unique_id_from_tool) def test_parse_few_findings_triaged_as_bug(self): - with open("unittests/scans/coverity_api/few_findings_triaged_as_bug.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "few_findings_triaged_as_bug.json", encoding="utf-8") as testfile: parser = CoverityApiParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -60,7 +60,7 @@ def 
test_parse_few_findings_triaged_as_bug(self): self.assertEqual(22248, finding.unique_id_from_tool) def test_parse_some_findings_mitigated(self): - with open("unittests/scans/coverity_api/few_findings_mitigated.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("coverity_api") / "few_findings_mitigated.json", encoding="utf-8") as testfile: parser = CoverityApiParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) diff --git a/unittests/tools/test_coverity_scan_parser.py b/unittests/tools/test_coverity_scan_parser.py index c3720884e87..c5bcb59442e 100644 --- a/unittests/tools/test_coverity_scan_parser.py +++ b/unittests/tools/test_coverity_scan_parser.py @@ -1,20 +1,20 @@ from dojo.models import Test from dojo.tools.coverity_scan.parser import CoverityScanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path -SCANS_PATH = "unittests/scans/coverity_scan/" +SCANS_PATH = get_unit_tests_scans_path("coverity_scan") class TestCoverityScanParser(DojoTestCase): def test_parse_no_findings(self): - with open(f"{SCANS_PATH}/no_vuln.json", encoding="utf-8") as testfile: + with open(SCANS_PATH / "no_vuln.json", encoding="utf-8") as testfile: parser = CoverityScanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding(self): - with open(f"{SCANS_PATH}/one_vuln.json", encoding="utf-8") as testfile: + with open(SCANS_PATH / "one_vuln.json", encoding="utf-8") as testfile: parser = CoverityScanParser() findings = parser.get_findings(testfile, Test()) @@ -31,7 +31,7 @@ def test_parse_one_finding(self): ) def test_parse_many_findings(self): - with open(f"{SCANS_PATH}/many_vulns.json", encoding="utf-8") as testfile: + with open(SCANS_PATH / "many_vulns.json", encoding="utf-8") as testfile: parser = CoverityScanParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_crashtest_security_parser.py b/unittests/tools/test_crashtest_security_parser.py index 88aa859cad6..b708ab60901 100644 --- a/unittests/tools/test_crashtest_security_parser.py +++ b/unittests/tools/test_crashtest_security_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.crashtest_security.parser import CrashtestSecurityParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCrashtestSecurityParser(DojoTestCase): def test_crashtest_security_json_parser_empty_file_has_no_findings(self): - testfile = open("unittests/scans/crashtest_security/empty.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("crashtest_security") / "empty.json", encoding="utf-8") parser = CrashtestSecurityParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_crashtest_security_json_parser_full_file_has_many_findings(self): - testfile = open("unittests/scans/crashtest_security/full.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("crashtest_security") / "full.json", encoding="utf-8") parser = CrashtestSecurityParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -25,7 +25,7 @@ def test_crashtest_security_json_parser_full_file_has_many_findings(self): def test_crashtest_security_json_parser_extracted_data_file_has_many_findings(self): testfile = open( - get_unit_tests_path() + 
"/scans/crashtest_security/data_extracted.json", encoding="utf-8", + get_unit_tests_scans_path("crashtest_security") / "data_extracted.json", encoding="utf-8", ) parser = CrashtestSecurityParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_cred_scan_parser.py b/unittests/tools/test_cred_scan_parser.py index a913e591ed7..8f5f976b37b 100644 --- a/unittests/tools/test_cred_scan_parser.py +++ b/unittests/tools/test_cred_scan_parser.py @@ -2,19 +2,19 @@ from dojo.models import Test from dojo.tools.cred_scan.parser import CredScanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCredScanParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/cred_scan/cred_scan_no_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cred_scan") / "cred_scan_no_vuln.csv", encoding="utf-8") as testfile: parser = CredScanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/cred_scan/cred_scan_one_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cred_scan") / "cred_scan_one_vuln.csv", encoding="utf-8") as testfile: parser = CredScanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -25,7 +25,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self): self.assertEqual(datetime.date(2021, 4, 10), datetime.datetime.date(finding.date)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/cred_scan/cred_scan_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("cred_scan") / "cred_scan_many_vuln.csv", encoding="utf-8") as testfile: parser = CredScanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) diff --git a/unittests/tools/test_crunch42_parser.py b/unittests/tools/test_crunch42_parser.py index 79565e95023..099813fad61 100644 --- a/unittests/tools/test_crunch42_parser.py +++ b/unittests/tools/test_crunch42_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.crunch42.parser import Crunch42Parser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCrunch42Parser(DojoTestCase): def test_crunch42parser_single_has_many_findings(self): - with open("unittests/scans/crunch42/crunch42_many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("crunch42") / "crunch42_many_findings.json", encoding="utf-8") as testfile: parser = Crunch42Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(8, len(findings)) @@ -18,7 +18,7 @@ def test_crunch42parser_single_has_many_findings(self): self.assertGreater(len(finding.description), 0) def test_crunch42parser_single_has_many_findings2(self): - with open("unittests/scans/crunch42/crunch42_many_findings2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("crunch42") / "crunch42_many_findings2.json", encoding="utf-8") as testfile: parser = Crunch42Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py index 65e377496fe..0816b61e8fa 100644 --- 
a/unittests/tools/test_cyclonedx_parser.py +++ b/unittests/tools/test_cyclonedx_parser.py @@ -2,12 +2,12 @@ from dojo.models import Finding, Test from dojo.tools.cyclonedx.parser import CycloneDXParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestCyclonedxParser(DojoTestCase): def test_grype_report(self): - with open("unittests/scans/cyclonedx/grype_dd_1_14_1.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "grype_dd_1_14_1.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = list(parser.get_findings(file, Test())) for finding in findings: @@ -31,7 +31,7 @@ def test_grype_report(self): def test_spec1_report(self): """Test a report from the spec itself""" - with open("unittests/scans/cyclonedx/spec1.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "spec1.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = list(parser.get_findings(file, Test())) for finding in findings: @@ -54,7 +54,7 @@ def test_spec1_report(self): def test_spec1_report_low_first(self): """Test a report from the spec itself""" - with open("unittests/scans/cyclonedx/spec1_lowfirst.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "spec1_lowfirst.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = list(parser.get_findings(file, Test())) for finding in findings: @@ -74,7 +74,7 @@ def test_spec1_report_low_first(self): self.assertEqual("Upgrade\n", finding.mitigation) def test_cyclonedx_bom_report(self): - with open("unittests/scans/cyclonedx/cyclonedx_bom.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "cyclonedx_bom.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -83,7 +83,7 @@ def test_cyclonedx_bom_report(self): def test_cyclonedx_jake_report(self): """Test a report generated by Jake""" - with open("unittests/scans/cyclonedx/jake.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "jake.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -92,7 +92,7 @@ def test_cyclonedx_jake_report(self): def test_cyclonedx_retirejs_report(self): """Test a report generated by RetireJS""" - with open("unittests/scans/cyclonedx/retirejs.latest.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "retirejs.latest.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -101,7 +101,7 @@ def test_cyclonedx_retirejs_report(self): def test_cyclonedx_grype_11_report(self): """Test a report generated by Grype 0.11""" - with open("unittests/scans/cyclonedx/dd_1_15_0.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "dd_1_15_0.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -142,7 +142,7 @@ def test_cyclonedx_grype_11_report(self): def test_cyclonedx_1_4_xml(self): """CycloneDX version 1.4 XML format""" - with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "valid-vulnerability-1.4.xml", encoding="utf-8") as file: parser = CycloneDXParser() 
findings = parser.get_findings(file, Test()) for finding in findings: @@ -188,7 +188,7 @@ def test_cyclonedx_1_4_xml(self): def test_cyclonedx_1_4_json(self): """CycloneDX version 1.4 JSON format""" - with open("unittests/scans/cyclonedx/valid-vulnerability-1.4.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "valid-vulnerability-1.4.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -232,7 +232,7 @@ def test_cyclonedx_1_4_json(self): def test_cyclonedx_1_4_jake_json(self): """CycloneDX version 1.4 JSON format produced by jake 1.4.1""" - with open("unittests/scans/cyclonedx/jake2.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "jake2.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) self.assertEqual(7, len(findings)) @@ -286,7 +286,7 @@ def test_cyclonedx_1_4_jake_json(self): def test_cyclonedx_1_4_xml_cvssv31(self): """CycloneDX version 1.4 XML format""" - with open("unittests/scans/cyclonedx/log4j.xml", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "log4j.xml", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -303,7 +303,7 @@ def test_cyclonedx_1_4_xml_cvssv31(self): def test_cyclonedx_1_4_json_cvssv31(self): """CycloneDX version 1.4 JSON format""" - with open("unittests/scans/cyclonedx/log4j.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "log4j.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -320,7 +320,7 @@ def test_cyclonedx_1_4_json_cvssv31(self): def test_cyclonedx_1_4_json_nested_cvssv31(self): """CycloneDX version 1.4 JSON format""" - with open("unittests/scans/cyclonedx/nested-component-log4j.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "nested-component-log4j.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -337,7 +337,7 @@ def test_cyclonedx_1_4_json_nested_cvssv31(self): def test_cyclonedx_issue_9277(self): """CycloneDX version 1.5 JSON format""" - with open("unittests/scans/cyclonedx/issue_9277.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "issue_9277.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: @@ -350,7 +350,7 @@ def test_cyclonedx_issue_9277(self): def test_cyclonedx_issue_8022(self): """CycloneDX version 1.4 JSON format""" - with open("unittests/scans/cyclonedx/issue_8022.json", encoding="utf-8") as file: + with open(get_unit_tests_scans_path("cyclonedx") / "issue_8022.json", encoding="utf-8") as file: parser = CycloneDXParser() findings = parser.get_findings(file, Test()) for finding in findings: diff --git a/unittests/tools/test_dawnscanner_parser.py b/unittests/tools/test_dawnscanner_parser.py index 5b7a161cc99..3956137d868 100644 --- a/unittests/tools/test_dawnscanner_parser.py +++ b/unittests/tools/test_dawnscanner_parser.py @@ -1,15 +1,13 @@ import datetime -from os import path -from pathlib import Path from dojo.models import Test from dojo.tools.dawnscanner.parser import DawnScannerParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case 
import DojoTestCase, get_unit_tests_scans_path


 class TestDawnScannerParser(DojoTestCase):
     def test_burp_with_one_vuln_has_one_finding(self):
-        with open(path.join(Path(__file__).parent, "../scans/dawnscanner/dawnscanner_v1.6.9.json"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("dawnscanner") / "dawnscanner_v1.6.9.json", encoding="utf-8") as test_file:
             parser = DawnScannerParser()
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
diff --git a/unittests/tools/test_deepfence_threatmapper_parser.py b/unittests/tools/test_deepfence_threatmapper_parser.py
index e4e6070dfe5..e97b9ce2af5 100644
--- a/unittests/tools/test_deepfence_threatmapper_parser.py
+++ b/unittests/tools/test_deepfence_threatmapper_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.deepfence_threatmapper.parser import DeepfenceThreatmapperParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDeepfenceThreatmapperParser(DojoTestCase):

     def test_parse_file_compliance_report(self):
-        with open("unittests/scans/deepfence_threatmapper/compliance_report.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("deepfence_threatmapper") / "compliance_report.xlsx", "rb") as testfile:
             parser = DeepfenceThreatmapperParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(7, len(findings))
@@ -14,7 +14,7 @@ def test_parse_file_compliance_report(self):
         self.assertEqual(findings[0].severity, "Info")

     def test_parse_file_malware_report(self):
-        with open("unittests/scans/deepfence_threatmapper/malware_report.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("deepfence_threatmapper") / "malware_report.xlsx", "rb") as testfile:
             parser = DeepfenceThreatmapperParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(9, len(findings))
@@ -23,7 +23,7 @@ def test_parse_file_malware_report(self):
         self.assertEqual(findings[0].file_path, "/tmp/Deepfence/YaraHunter/df_db09257b02e615049e0aecc05be2dc2401735e67db4ab74225df777c62c39753/usr/sbin/mkfs.cramfs")

     def test_parse_file_secret_report(self):
-        with open("unittests/scans/deepfence_threatmapper/secret_report.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("deepfence_threatmapper") / "secret_report.xlsx", "rb") as testfile:
             parser = DeepfenceThreatmapperParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(7, len(findings))
@@ -32,7 +32,7 @@ def test_parse_file_secret_report(self):
         self.assertEqual(findings[0].file_path, "usr/share/doc/curl-8.3.0/TheArtOfHttpScripting.md")

     def test_parse_file_vulnerability_report(self):
-        with open("unittests/scans/deepfence_threatmapper/vulnerability_report.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("deepfence_threatmapper") / "vulnerability_report.xlsx", "rb") as testfile:
             parser = DeepfenceThreatmapperParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_dependency_check_parser.py b/unittests/tools/test_dependency_check_parser.py
index 4ad945a39cc..14306334a45 100644
--- a/unittests/tools/test_dependency_check_parser.py
+++ b/unittests/tools/test_dependency_check_parser.py
@@ -1,13 +1,11 @@
 import logging
 from datetime import UTC, datetime
-from os import path
-from pathlib import Path

 from dateutil.tz import tzlocal, tzoffset

 from dojo.models import Test
 from dojo.tools.dependency_check.parser import DependencyCheckParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path

 logger = logging.getLogger(__name__)
@@ -23,13 +21,13 @@ def __init__(self, name, content):
 class TestDependencyCheckParser(DojoTestCase):

     def test_parse_empty_file(self):
-        with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_check") / "single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
             parser = DependencyCheckParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_single_vulnerability_has_single_finding(self):
-        with open("unittests/scans/dependency_check/single_vuln.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_check") / "single_vuln.xml", encoding="utf-8") as testfile:
             parser = DependencyCheckParser()
             findings = parser.get_findings(testfile, Test())
             items = findings
@@ -47,14 +45,14 @@ def test_parse_file_with_single_vulnerability_has_single_finding(self):
             self.assertEqual(items[i].date, datetime(2016, 11, 5, 14, 52, 15, 748000, tzinfo=tzoffset(None, -14400)))

     def test_parse_file_with_single_dependency_with_related_no_vulnerability(self):
-        with open("unittests/scans/dependency_check/single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_check") / "single_dependency_with_related_no_vulnerability.xml", encoding="utf-8") as testfile:
             parser = DependencyCheckParser()
             findings = parser.get_findings(testfile, Test())
             items = findings
             self.assertEqual(0, len(items))

     def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):
-        with open("unittests/scans/dependency_check/multiple_vulnerabilities_has_multiple_findings.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_check") / "multiple_vulnerabilities_has_multiple_findings.xml", encoding="utf-8") as testfile:
             parser = DependencyCheckParser()
             findings = parser.get_findings(testfile, Test())
             items = findings
@@ -256,7 +254,7 @@ def test_parse_file_with_multiple_vulnerabilities_has_multiple_findings(self):

     def test_parse_java_6_5_3(self):
         """Test with version 6.5.3"""
-        with open(path.join(Path(__file__).parent, "../scans/dependency_check/version-6.5.3.xml"), encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("dependency_check") / "version-6.5.3.xml", encoding="utf-8") as test_file:
             parser = DependencyCheckParser()
             findings = parser.get_findings(test_file, Test())
             items = findings
@@ -275,7 +273,7 @@ def test_parse_java_6_5_3(self):
             self.assertEqual(items[i].date, datetime(2022, 1, 15, 14, 31, 13, 42600, tzinfo=UTC))

     def test_parse_file_pr6439(self):
-        with open("unittests/scans/dependency_check/PR6439.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_check") / "PR6439.xml", encoding="utf-8") as testfile:
             parser = DependencyCheckParser()
             findings = parser.get_findings(testfile, Test())
             items = findings
diff --git a/unittests/tools/test_dependency_track_parser.py b/unittests/tools/test_dependency_track_parser.py
index 60db4cedc35..48fad97a536 100644
--- a/unittests/tools/test_dependency_track_parser.py
+++ b/unittests/tools/test_dependency_track_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.dependency_track.parser import DependencyTrackParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDependencyTrackParser(DojoTestCase):

     def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_empty_list.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "no_findings_because_findings_key_is_empty_list.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_dependency_track_parser_with_empty_list_for_findings_key_has_no_finding

     def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_missing.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "no_findings_because_findings_key_is_missing.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -23,7 +23,7 @@ def test_dependency_track_parser_with_missing_findings_key_has_no_findings(self)

     def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/no_findings_because_findings_key_is_null.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "no_findings_because_findings_key_is_null.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -31,7 +31,7 @@ def test_dependency_track_parser_with_null_findings_key_has_no_findings(self):

     def test_dependency_track_parser_has_many_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/many_findings.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "many_findings.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -49,7 +49,7 @@ def test_dependency_track_parser_has_many_findings(self):

     def test_dependency_track_parser_has_one_finding(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/one_finding.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "one_finding.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -57,7 +57,7 @@ def test_dependency_track_parser_has_one_finding(self):

     def test_dependency_track_parser_v3_8_0(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/dependency_track_3.8.0_2021-01-18.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "dependency_track_3.8.0_2021-01-18.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -67,7 +67,7 @@ def test_dependency_track_parser_v3_8_0(self):

     def test_dependency_track_parser_findings_with_alias(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/many_findings_with_alias.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "many_findings_with_alias.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -79,7 +79,7 @@ def test_dependency_track_parser_findings_with_empty_alias(self):
         with open(
-            get_unit_tests_path() + "/scans/dependency_track/many_findings_with_empty_alias.json", encoding="utf-8",
+            get_unit_tests_scans_path("dependency_track") / "many_findings_with_empty_alias.json", encoding="utf-8",
         ) as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
@@ -88,7 +88,7 @@ def test_dependency_track_parser_findings_with_empty_alias(self):
         self.assertIn("CVE-2022-2053", findings[11].unsaved_vulnerability_ids)

     def test_dependency_track_parser_findings_with_cvssV3_score(self):
-        with open(f"{get_unit_tests_path()}/scans/dependency_track/many_findings_with_cvssV3_score.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_track") / "many_findings_with_cvssV3_score.json", encoding="utf-8") as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(12, len(findings))
@@ -98,7 +98,7 @@ def test_dependency_track_parser_findings_with_cvssV3_score(self):
         self.assertEqual(8.3, findings[0].cvssv3_score)

     def test_dependency_track_parser_findings_with_epss_score(self):
-        with open(f"{get_unit_tests_path()}/scans/dependency_track/dependency_track_4.10_2024_02_11.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dependency_track") / "dependency_track_4.10_2024_02_11.json", encoding="utf-8") as testfile:
             parser = DependencyTrackParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_detect_secrets_parser.py b/unittests/tools/test_detect_secrets_parser.py
index 421d393f845..b3e6a6ce194 100644
--- a/unittests/tools/test_detect_secrets_parser.py
+++ b/unittests/tools/test_detect_secrets_parser.py
@@ -4,19 +4,19 @@
 from dojo.models import Test
 from dojo.tools.detect_secrets.parser import DetectSecretsParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDetectSecretsParser(DojoTestCase):

     def test_parse_no_findings(self):
-        with open("unittests/scans/detect_secrets/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("detect_secrets") / "no_findings.json", encoding="utf-8") as testfile:
             parser = DetectSecretsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_many_findings(self):
-        with open("unittests/scans/detect_secrets/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("detect_secrets") / "many_findings.json", encoding="utf-8") as testfile:
             parser = DetectSecretsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(4, len(findings))
diff --git a/unittests/tools/test_dockerbench_parser.py b/unittests/tools/test_dockerbench_parser.py
index 22c0d66f28d..c1e8594609a 100644
--- a/unittests/tools/test_dockerbench_parser.py
+++ b/unittests/tools/test_dockerbench_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.dockerbench.parser import DockerBenchParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDockerBenchParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-zero-vulns.json", encoding="utf-8",
+            get_unit_tests_scans_path("dockerbench") / "docker-bench-report-zero-vulns.json", encoding="utf-8",
         ) as testfile:
             parser = DockerBenchParser()
             findings = parser.get_findings(testfile, Test())
@@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):

     def test_parse_file_with_one_vuln_has_one_finding(self):
         with open(
-            get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-single-vuln.json", encoding="utf-8",
+            get_unit_tests_scans_path("dockerbench") / "docker-bench-report-single-vuln.json", encoding="utf-8",
         ) as testfile:
             parser = DockerBenchParser()
             findings = parser.get_findings(testfile, Test())
@@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):

     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/dockerbench/docker-bench-report-many-vulns.json", encoding="utf-8",
+            get_unit_tests_scans_path("dockerbench") / "docker-bench-report-many-vulns.json", encoding="utf-8",
         ) as testfile:
             parser = DockerBenchParser()
             findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_dockle_parser.py b/unittests/tools/test_dockle_parser.py
index 314112299c2..b614dd98c43 100644
--- a/unittests/tools/test_dockle_parser.py
+++ b/unittests/tools/test_dockle_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.dockle.parser import DockleParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDockleParser(DojoTestCase):

     def test_parse_no_findings(self):
-        with open("unittests/scans/dockle/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dockle") / "no_findings.json", encoding="utf-8") as testfile:
             parser = DockleParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_many_findings(self):
-        with open("unittests/scans/dockle/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("dockle") / "many_findings.json", encoding="utf-8") as testfile:
             parser = DockleParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_drheader_parser.py b/unittests/tools/test_drheader_parser.py
index 9eb07664bfd..7f4cbbe095e 100644
--- a/unittests/tools/test_drheader_parser.py
+++ b/unittests/tools/test_drheader_parser.py
@@ -1,40 +1,40 @@
 from dojo.models import Test
 from dojo.tools.drheader.parser import DrHeaderParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDrHeaderParser(DojoTestCase):

     def test_parse_file_has_no_findings(self):
-        testfile = open("unittests/scans/drheader/no_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("drheader") / "no_vulns.json", encoding="utf-8")
         parser = DrHeaderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))

     def test_parse_file_has_many_finding_one_tool(self):
-        testfile = open("unittests/scans/drheader/scan.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("drheader") / "scan.json", encoding="utf-8")
         parser = DrHeaderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(6, len(findings))

     def test_parse_file_has_many_finding_one_tool2(self):
-        testfile = open("unittests/scans/drheader/scan2.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("drheader") / "scan2.json", encoding="utf-8")
         parser = DrHeaderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(6, len(findings))

     def test_parse_file_has_many_finding_one_tool3(self):
-        testfile = open("unittests/scans/drheader/scan3.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("drheader") / "scan3.json", encoding="utf-8")
         parser = DrHeaderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(11, len(findings))

     def test_parse_file_has_many_finding_multiple_urls(self):
-        testfile = open("unittests/scans/drheader/multiple_urls.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("drheader") / "multiple_urls.json", encoding="utf-8")
         parser = DrHeaderParser()
         findings = parser.get_findings(testfile, Test())
         for finding in findings:
diff --git a/unittests/tools/test_dsop_parser.py b/unittests/tools/test_dsop_parser.py
index e3a1b8d9848..c22abed8399 100644
--- a/unittests/tools/test_dsop_parser.py
+++ b/unittests/tools/test_dsop_parser.py
@@ -1,17 +1,17 @@
 from dojo.models import Test
 from dojo.tools.dsop.parser import DsopParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestDsopParser(DojoTestCase):
     def test_zero_findings(self):
-        with open("unittests/scans/dsop/zero_vuln.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("dsop") / "zero_vuln.xlsx", "rb") as testfile:
             parser = DsopParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 0)

     def test_many_findings(self):
-        with open("unittests/scans/dsop/many_vuln.xlsx", "rb") as testfile:
+        with open(get_unit_tests_scans_path("dsop") / "many_vuln.xlsx", "rb") as testfile:
             parser = DsopParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 4)
diff --git a/unittests/tools/test_eslint_parser.py b/unittests/tools/test_eslint_parser.py
index c1574c574e4..621cd738a1b 100644
--- a/unittests/tools/test_eslint_parser.py
+++ b/unittests/tools/test_eslint_parser.py
@@ -1,25 +1,25 @@
 from dojo.models import Test
 from dojo.tools.eslint.parser import ESLintParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestESLintParser(DojoTestCase):
     def test_parse_file_has_two_findings(self):
-        testfile = open("unittests/scans/eslint/scan.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("eslint") / "scan.json", encoding="utf-8")
         parser = ESLintParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(2, len(findings))

     def test_parse_empty_file(self):
-        testfile = open("unittests/scans/eslint/empty.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("eslint") / "empty.json", encoding="utf-8")
         parser = ESLintParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))

     def test_parse_file_with_no_finding(self):
-        testfile = open("unittests/scans/eslint/no_finding.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("eslint") / "no_finding.json", encoding="utf-8")
         parser = ESLintParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_fortify_parser.py b/unittests/tools/test_fortify_parser.py
index 43ab4a911ec..ad532d7b716 100644
--- a/unittests/tools/test_fortify_parser.py
+++ b/unittests/tools/test_fortify_parser.py
@@ -1,11 +1,11 @@
 from dojo.models import Test
 from dojo.tools.fortify.parser import FortifyParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestFortifyParser(DojoTestCase):
     def test_fortify_many_findings(self):
-        with open("unittests/scans/fortify/fortify_many_findings.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "fortify_many_findings.xml", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(324, len(findings))
@@ -17,7 +17,7 @@ def test_fortify_many_findings(self):
             self.assertEqual(81, finding.line)

     def test_fortify_few_findings(self):
-        with open("unittests/scans/fortify/fortify_few_findings.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "fortify_few_findings.xml", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -30,7 +30,7 @@ def test_fortify_few_findings(self):
             self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)

     def test_fortify_few_findings_count_chart(self):
-        with open("unittests/scans/fortify/fortify_few_findings_count_chart.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "fortify_few_findings_count_chart.xml", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -43,7 +43,7 @@ def test_fortify_few_findings_count_chart(self):
             self.assertEqual("53C25D2FC6950554F16D3CEF9E41EF6F", finding.unique_id_from_tool)

     def test_fortify_issue6260(self):
-        with open("unittests/scans/fortify/issue6260.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "issue6260.xml", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(16, len(findings))
@@ -56,7 +56,7 @@ def test_fortify_issue6260(self):
             self.assertEqual("7A2F1C728BDDBB17C7CB31CEDF5D8F85", finding.unique_id_from_tool)

     def test_fortify_issue6082(self):
-        with open("unittests/scans/fortify/issue6082.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "issue6082.xml", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -76,7 +76,7 @@ def test_fortify_issue6082(self):
             self.assertEqual("B5B15F27E10F4D7799BD0ED1E6D34C5D", finding.unique_id_from_tool)

     def test_fortify_many_fdr_findings(self):
-        with open("unittests/scans/fortify/many_findings.fpr", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("fortify") / "many_findings.fpr", encoding="utf-8") as testfile:
             parser = FortifyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(61, len(findings))
diff --git a/unittests/tools/test_gcloud_artifact_scan_parser.py b/unittests/tools/test_gcloud_artifact_scan_parser.py
index 88a2b0ec099..8063ce56e3c 100644
--- a/unittests/tools/test_gcloud_artifact_scan_parser.py
+++ b/unittests/tools/test_gcloud_artifact_scan_parser.py
@@ -1,11 +1,11 @@
 from dojo.models import Test
 from dojo.tools.gcloud_artifact_scan.parser import GCloudArtifactScanParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGCloudArtifactScanParser(DojoTestCase):
     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
-        with open(f"{get_unit_tests_path()}/scans/gcloud_artifact_scan/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gcloud_artifact_scan") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = GCloudArtifactScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(7, len(findings))
diff --git a/unittests/tools/test_generic_parser.py b/unittests/tools/test_generic_parser.py
index b7fce8efed0..6d113e5b679 100644
--- a/unittests/tools/test_generic_parser.py
+++ b/unittests/tools/test_generic_parser.py
@@ -2,7 +2,7 @@
 from dojo.models import Engagement, Finding, Product, Test
 from dojo.tools.generic.parser import GenericParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestFile:
@@ -25,7 +25,7 @@ def setUp(self):
         self.test = Test(engagement=self.engagement)

     def test_parse_report1(self):
-        with open("unittests/scans/generic/generic_report1.csv", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report1.csv", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, self.test)
             for finding in findings:
@@ -434,7 +434,7 @@ def test_column_order_is_flexible(self):
         self.assertEqual(fields1, fields2)

     def test_parse_json(self):
-        with open("unittests/scans/generic/generic_report1.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report1.json", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             for finding in findings:
@@ -465,7 +465,7 @@ def test_parse_json(self):
             self.assertIn(finding.severity, Finding.SEVERITIES)

     def test_parse_json2(self):
-        with open("unittests/scans/generic/generic_report2.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report2.json", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             for finding in findings:
@@ -488,7 +488,7 @@ def test_parse_json2(self):
         self.assertEqual("Some mitigation", finding.mitigation)

     def test_parse_json3(self):
-        with open("unittests/scans/generic/generic_report3.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report3.json", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             self.assertEqual(3, len(findings))
@@ -526,7 +526,7 @@ def test_parse_json3(self):
         self.assertEqual("test-pest", endpoint.path)

     def test_parse_endpoints_and_vulnerability_ids_json(self):
-        with open("unittests/scans/generic/generic_report4.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report4.json", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             self.assertEqual(1, len(findings))
@@ -557,7 +557,7 @@ def test_parse_endpoints_and_vulnerability_ids_json(self):
         self.assertEqual("CVE-2015-9235", finding.unsaved_vulnerability_ids[1])

     def test_parse_host_and_vulnerability_id_csv(self):
-        with open("unittests/scans/generic/generic_report4.csv", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_report4.csv", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             self.assertEqual(4, len(findings))
@@ -599,7 +599,7 @@ def test_parse_host_and_vulnerability_id_csv(self):
         self.assertIsNone(finding.unsaved_vulnerability_ids)

     def test_parse_json_with_image(self):
-        with open("unittests/scans/generic/test_with_image.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "test_with_image.json", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, Test())
             self.assertEqual(1, len(findings))
@@ -612,7 +612,7 @@ def test_parse_json_with_image(self):
             self.assertIn("data", image)

     def test_parse_json_custom_test(self):
-        with open("unittests/scans/generic/generic_custom_test.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_custom_test.json", encoding="utf-8") as file:
             parser = GenericParser()
             tests = parser.get_tests(parser.get_scan_types()[0], file)
             self.assertEqual(1, len(tests))
@@ -637,21 +637,21 @@ def test_parse_json_custom_test(self):
         self.assertEqual("TEST1", finding.vuln_id_from_tool)

     def test_parse_json_empty_finding(self):
-        with open("unittests/scans/generic/generic_empty.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_empty.json", encoding="utf-8") as file:
             parser = GenericParser()
             with self.assertRaisesMessage(ValueError, "Required fields are missing: ['description', 'severity', 'title']"):
                 parser.get_findings(file, Test())

     def test_parse_json_invalid_finding(self):
-        with open("unittests/scans/generic/generic_invalid.json", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_invalid.json", encoding="utf-8") as file:
             parser = GenericParser()
             with self.assertRaisesMessage(ValueError, "Not allowed fields are present: ['invalid_field', 'last_status_update']"):
                 parser.get_findings(file, Test())

     def test_parse_csv_with_epss(self):
-        with open("unittests/scans/generic/generic_csv_with_epss.csv", encoding="utf-8") as file:
+        with open(get_unit_tests_scans_path("generic") / "generic_csv_with_epss.csv", encoding="utf-8") as file:
             parser = GenericParser()
             findings = parser.get_findings(file, self.test)
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_ggshield_parser.py b/unittests/tools/test_ggshield_parser.py
index 27d4e267663..03cdbfd96c9 100644
--- a/unittests/tools/test_ggshield_parser.py
+++ b/unittests/tools/test_ggshield_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.ggshield.parser import GgshieldParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGgshieldParser(DojoTestCase):

     def test_parse_empty(self):
-        with open("unittests/scans/ggshield/no_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ggshield") / "no_finding.json", encoding="utf-8") as testfile:
             parser = GgshieldParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_one_finding(self):
-        with open("unittests/scans/ggshield/one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ggshield") / "one_finding.json", encoding="utf-8") as testfile:
             parser = GgshieldParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -23,7 +23,7 @@ def test_parse_one_finding(self):
         self.assertEqual("2021-07-05", finding.date)

     def test_parse_many_finding(self):
-        with open("unittests/scans/ggshield/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ggshield") / "many_findings.json", encoding="utf-8") as testfile:
             parser = GgshieldParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_github_vulnerability_parser.py b/unittests/tools/test_github_vulnerability_parser.py
index 00321647bf1..5fda63d312b 100644
--- a/unittests/tools/test_github_vulnerability_parser.py
+++ b/unittests/tools/test_github_vulnerability_parser.py
@@ -4,20 +4,20 @@
 from dojo.models import Test
 from dojo.tools.github_vulnerability.parser import GithubVulnerabilityParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGithubVulnerabilityParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
         """Sample with zero vulnerability"""
-        with open("unittests/scans/github_vulnerability/github-0-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github-0-vuln.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_findings(self):
         """Sample with one vulnerability"""
-        with open("unittests/scans/github_vulnerability/github-1-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github-1-vuln.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -37,7 +37,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):

     def test_parse_file_with_one_vuln_has_one_finding_and_dependabot_direct_link(self):
         """Sample with one vulnerability"""
-        with open("unittests/scans/github_vulnerability/github-1-vuln-repo-dependabot-link.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github-1-vuln-repo-dependabot-link.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -57,13 +57,13 @@ def test_parse_file_with_one_vuln_has_one_finding_and_dependabot_direct_link(sel

     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         """Sample with five vulnerability"""
-        with open("unittests/scans/github_vulnerability/github-5-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github-5-vuln.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(5, len(findings))

     def test_parse_file_issue2984(self):
-        with open("unittests/scans/github_vulnerability/github_issue2984.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_issue2984.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(4, len(findings))
@@ -87,7 +87,7 @@ def test_parse_file_issue2984(self):
         self.assertEqual(finding.unique_id_from_tool, "DASFMMFKLNKDSAKFSDLANJKKFDSNJSAKDFNJKDFS=")

     def test_parse_file_search(self):
-        with open("unittests/scans/github_vulnerability/github_search.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_search.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -115,7 +115,7 @@ def test_parse_file_search(self):

     def test_parse_file_search2(self):
         """Search result with more data/attributes"""
-        with open("unittests/scans/github_vulnerability/github_search2.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_search2.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -143,7 +143,7 @@ def test_parse_file_search2(self):

     def test_parse_file_search3(self):
         """Search result with more data/attributes"""
-        with open("unittests/scans/github_vulnerability/github_search3.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_search3.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -175,7 +175,7 @@ def test_parse_file_search3(self):

     def test_parse_file_search4_null_cvss_vector(self):
         """Search result with more data/attributes"""
-        with open("unittests/scans/github_vulnerability/github_search4_null_cvss_vector.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_search4_null_cvss_vector.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -206,7 +206,7 @@ def test_parse_file_search4_null_cvss_vector(self):
         self.assertEqual(finding.unique_id_from_tool, "MDI4OlJlcG9zaXRvcnlWdWxuZXJhYmlsaXR5QWxlcnQ1NTE5NTI2OTM=")

     def test_parse_cwe_and_date(self):
-        with open("unittests/scans/github_vulnerability/github_h2.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_h2.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -229,7 +229,7 @@ def test_parse_cwe_and_date(self):
         self.assertEqual(finding.active, True)

     def test_parse_state(self):
-        with open("unittests/scans/github_vulnerability/github_shiro.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github_shiro.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -253,7 +253,7 @@ def test_parse_state(self):
         self.assertEqual(finding.is_mitigated, True)

     def test_parser_version(self):
-        with open("unittests/scans/github_vulnerability/github-vuln-version.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "github-vuln-version.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -268,7 +268,7 @@ def test_parser_version(self):
         self.assertEqual(finding.component_version, "5.3.29")

     def test_parse_file_issue_9582(self):
-        with open("unittests/scans/github_vulnerability/issue_9582.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("github_vulnerability") / "issue_9582.json", encoding="utf-8") as testfile:
             parser = GithubVulnerabilityParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_gitlab_api_fuzzing_parser.py b/unittests/tools/test_gitlab_api_fuzzing_parser.py
index 0da1fadde85..376db6f3ea8 100644
--- a/unittests/tools/test_gitlab_api_fuzzing_parser.py
+++ b/unittests/tools/test_gitlab_api_fuzzing_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.gitlab_api_fuzzing.parser import GitlabAPIFuzzingParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabAPIFuzzingParser(DojoTestCase):
     def test_gitlab_api_fuzzing_parser_with_no_vuln_has_no_findings(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_0_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_api_fuzzing") / "gitlab_api_fuzzing_0_vuln.json", encoding="utf-8") as testfile:
             parser = GitlabAPIFuzzingParser()
             findings = parser.get_findings(testfile, Test())
             testfile.close()
             self.assertEqual(0, len(findings))

     def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_api_fuzzing") / "gitlab_api_fuzzing_1_vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabAPIFuzzingParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -28,7 +28,7 @@ def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v14(s
         )

     def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_1_vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_api_fuzzing") / "gitlab_api_fuzzing_1_vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabAPIFuzzingParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -44,7 +44,7 @@ def test_gitlab_api_fuzzing_parser_with_one_criticle_vuln_has_one_findings_v15(s
         )

     def test_gitlab_api_fuzzing_parser_with_invalid_json(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_api_fuzzing/gitlab_api_fuzzing_invalid.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_api_fuzzing") / "gitlab_api_fuzzing_invalid.json", encoding="utf-8") as testfile:
             # Something is wrong with JSON file
             with self.assertRaises((KeyError, ValueError)):
                 parser = GitlabAPIFuzzingParser()
diff --git a/unittests/tools/test_gitlab_container_scan_parser.py b/unittests/tools/test_gitlab_container_scan_parser.py
index 4bc69cd809e..1cb7aad2af1 100644
--- a/unittests/tools/test_gitlab_container_scan_parser.py
+++ b/unittests/tools/test_gitlab_container_scan_parser.py
@@ -2,18 +2,18 @@
 from dojo.models import Test
 from dojo.tools.gitlab_container_scan.parser import GitlabContainerScanParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabContainerScanParser(DojoTestCase):

     def test_gitlab_container_scan_parser_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "gl-container-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v14(self):
-        with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "gl-container-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -32,7 +32,7 @@ def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v14(self):
         self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)

     def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v15(self):
-        with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "gl-container-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -51,7 +51,7 @@ def test_gitlab_container_scan_parser_with_one_vuln_has_one_findings_v15(self):
         self.assertEqual("df52bc8ce9a2ae56bbcb0c4ecda62123fbd6f69b", first_finding.unique_id_from_tool)

     def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v14(self):
-        with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "gl-container-scanning-report-5-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -60,7 +60,7 @@ def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v14(self)
         self.assertEqual(5, len(findings))

     def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v15(self):
-        with open("unittests/scans/gitlab_container_scan/gl-container-scanning-report-5-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "gl-container-scanning-report-5-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -69,7 +69,7 @@ def test_gitlab_container_scan_parser_with_five_vuln_has_five_findings_v15(self)
         self.assertEqual(5, len(findings))

     def test_gitlab_container_scan_parser_with_fless_data_v14(self):
-        with open("unittests/scans/gitlab_container_scan/issue6639_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "issue6639_v14.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -104,7 +104,7 @@ def test_gitlab_container_scan_parser_with_fless_data_v14(self):
         self.assertEqual("CVE-2022-0778", finding.unique_id_from_tool)

     def test_gitlab_container_scan_parser_with_fless_data_v15(self):
-        with open("unittests/scans/gitlab_container_scan/issue6639_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_container_scan") / "issue6639_v15.json", encoding="utf-8") as testfile:
             parser = GitlabContainerScanParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_gitlab_dast_parser.py b/unittests/tools/test_gitlab_dast_parser.py
index 01107a3a93a..56c94debd79 100644
--- a/unittests/tools/test_gitlab_dast_parser.py
+++ b/unittests/tools/test_gitlab_dast_parser.py
@@ -1,17 +1,17 @@
 from dojo.models import Test
 from dojo.tools.gitlab_dast.parser import GitlabDastParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabDastParser(DojoTestCase):
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/gitlab_dast/gitlab_dast_zero_vul.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_zero_vul.json", encoding="utf-8") as testfile:
             parser = GitlabDastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_finding_v14(self):
-        with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_one_vul_v14.json", encoding="utf-8") as testfile:
             parser = GitlabDastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -39,7 +39,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
         self.assertEqual(359, finding.cwe)

     def test_parse_file_with_one_vuln_has_one_finding_v15(self):
-        with open("unittests/scans/gitlab_dast/gitlab_dast_one_vul_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_one_vul_v15.json", encoding="utf-8") as testfile:
             parser = GitlabDastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -67,7 +67,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
         self.assertEqual(359, finding.cwe)

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
-        with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_many_vul_v14.json", encoding="utf-8") as testfile:
             parser = GitlabDastParser()
             findings = parser.get_findings(testfile, Test())

@@ -105,7 +105,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
         self.assertIn("Ensure that your web server,", finding.mitigation)

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
-        with open("unittests/scans/gitlab_dast/gitlab_dast_many_vul_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dast") / "gitlab_dast_many_vul_v15.json", encoding="utf-8") as testfile:
             parser = GitlabDastParser()
             findings = parser.get_findings(testfile, Test())

diff --git a/unittests/tools/test_gitlab_dep_scan_parser.py b/unittests/tools/test_gitlab_dep_scan_parser.py
index ea306247c22..ff5c87aee85 100644
--- a/unittests/tools/test_gitlab_dep_scan_parser.py
+++ b/unittests/tools/test_gitlab_dep_scan_parser.py
@@ -1,30 +1,30 @@
 from dojo.models import Test
 from dojo.tools.gitlab_dep_scan.parser import GitlabDepScanParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabDepScanParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-0-vuln.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_finding_v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-1-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))

     def test_parse_file_with_one_vuln_has_one_finding_v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-1-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))

     def test_parse_file_with_two_vuln_has_one_missing_component__v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-2-vuln-missing-component_v14.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -36,7 +36,7 @@ def test_parse_file_with_two_vuln_has_one_missing_component__v14(self):
         self.assertEqual("v0.0.0-20190308221718-c2843e01d9a2", finding.component_version)

     def test_parse_file_with_two_vuln_has_one_missing_component__v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-2-vuln-missing-component_v15.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -48,7 +48,7 @@ def test_parse_file_with_two_vuln_has_one_missing_component__v15(self):
         self.assertEqual("v0.0.0-20190308221718-c2843e01d9a2", finding.component_version)

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-many-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertGreater(len(findings), 2)
@@ -57,7 +57,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
         self.assertEqual("CVE-2020-29652", findings[0].unsaved_vulnerability_ids[0])

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_dep_scan") / "gl-dependency-scanning-report-many-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabDepScanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertGreater(len(findings), 2)
diff --git a/unittests/tools/test_gitlab_sast_parser.py b/unittests/tools/test_gitlab_sast_parser.py
index e0757ac6694..7a64fa6eb3d 100644
--- a/unittests/tools/test_gitlab_sast_parser.py
+++ b/unittests/tools/test_gitlab_sast_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.gitlab_sast.parser import GitlabSastParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabSastParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-0-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-0-vuln.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_finding_v14(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v14(self):
         self.assertEqual("Critical", finding.severity)

     def test_parse_file_with_one_vuln_has_one_finding_v15(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -30,7 +30,7 @@ def test_parse_file_with_one_vuln_has_one_finding_v15(self):
         self.assertEqual("Critical", finding.severity)

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-many-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(219, len(findings))
@@ -45,7 +45,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v14(self):
         self.assertEqual("Critical", finding.severity)

     def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-many-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-many-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(219, len(findings))
@@ -60,7 +60,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_v15(self):
         self.assertEqual("Critical", finding.severity)

     def test_parse_file_with_various_confidences_v14(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 8)
@@ -79,7 +79,7 @@ def test_parse_file_with_various_confidences_v14(self):
         self.assertEqual("Certain", finding.get_scanner_confidence_text())

     def test_parse_file_with_various_confidences_v15(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_sast/gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 8)
@@ -98,7 +98,7 @@ def test_parse_file_with_various_confidences_v15(self):
         self.assertEqual("", finding.get_scanner_confidence_text())

     def test_parse_file_with_various_cwes_v14(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-cwe_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 3)
@@ -107,7 +107,7 @@ def test_parse_file_with_various_cwes_v14(self):
         self.assertEqual(None, findings[2].cwe)

     def test_parse_file_with_various_cwes_v15(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-cwe_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-cwe_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(len(findings), 3)
@@ -116,7 +116,7 @@ def test_parse_file_with_various_cwes_v15(self):
         self.assertEqual(None, findings[2].cwe)

     def test_parse_file_issue4336_v14(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report_issue4344_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -124,7 +124,7 @@ def test_parse_file_issue4336_v14(self):
         self.assertEqual("[None severity] Potential XSS vulnerability", finding.title)

     def test_parse_file_issue4336_v15(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report_issue4344_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report_issue4344_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -132,7 +132,7 @@ def test_parse_file_issue4336_v15(self):
         self.assertEqual("[None severity] Potential XSS vulnerability", finding.title)

     def test_without_scan_v14(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-1-vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             tests = parser.get_tests(None, testfile)
             self.assertEqual(1, len(tests))
@@ -144,7 +144,7 @@ def test_without_scan_v14(self):
         self.assertEqual(1, len(findings))

     def test_without_scan_v15(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-1-vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             tests = parser.get_tests(None, testfile)
             self.assertEqual(1, len(tests))
@@ -156,7 +156,7 @@ def test_without_scan_v15(self):
         self.assertEqual(1, len(findings))

     def test_with_scan_v14(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-confidence_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             tests = parser.get_tests(None, testfile)
             self.assertEqual(1, len(tests))
@@ -168,7 +168,7 @@ def test_with_scan_v14(self):
         self.assertEqual(8, len(findings))

     def test_with_scan_v15(self):
-        with open("unittests/scans/gitlab_sast/gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_sast") / "gl-sast-report-confidence_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSastParser()
             tests = parser.get_tests(None, testfile)
             self.assertEqual(1, len(tests))
diff --git a/unittests/tools/test_gitlab_secret_detection_report_parser.py b/unittests/tools/test_gitlab_secret_detection_report_parser.py
index 2d1df5bbe18..14d860bc383 100644
--- a/unittests/tools/test_gitlab_secret_detection_report_parser.py
+++ b/unittests/tools/test_gitlab_secret_detection_report_parser.py
@@ -4,12 +4,12 @@
 from dojo.tools.gitlab_secret_detection_report.parser import (
     GitlabSecretDetectionReportParser,
 )
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitlabSecretDetectionReportParser(DojoTestCase):

     def test_gitlab_secret_detection_report_parser_with_no_vuln_has_no_findings(self):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_0_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_secret_detection_report") / "gitlab_secret_detection_report_0_vuln.json", encoding="utf-8") as testfile:
             parser = GitlabSecretDetectionReportParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
@@ -17,7 +17,7 @@ def test_gitlab_secret_detection_report_parser_with_no_vuln_has_no_findings(self
     def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v14(
         self,
     ):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_secret_detection_report") / "gitlab_secret_detection_report_1_vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSecretDetectionReportParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -38,7 +38,7 @@ def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v1
         self,
     ):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_1_vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_secret_detection_report") / "gitlab_secret_detection_report_1_vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSecretDetectionReportParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -59,7 +59,7 @@ def test_gitlab_secret_detection_report_parser_with_one_vuln_has_one_findings_v1
     def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_v14(
         self,
     ):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v14.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_secret_detection_report") / "gitlab_secret_detection_report_3_vuln_v14.json", encoding="utf-8") as testfile:
             parser = GitlabSecretDetectionReportParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -70,7 +70,7 @@ def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_
     def test_gitlab_secret_detection_report_parser_with_many_vuln_has_many_findings_v15(
         self,
     ):
-        with open(f"{get_unit_tests_path()}/scans/gitlab_secret_detection_report/gitlab_secret_detection_report_3_vuln_v15.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitlab_secret_detection_report") / "gitlab_secret_detection_report_3_vuln_v15.json", encoding="utf-8") as testfile:
             parser = GitlabSecretDetectionReportParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_gitleaks_parser.py b/unittests/tools/test_gitleaks_parser.py
index 1b70f854dd4..ebee0949add 100644
--- a/unittests/tools/test_gitleaks_parser.py
+++ b/unittests/tools/test_gitleaks_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.gitleaks.parser import GitleaksParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGitleaksParser(DojoTestCase):

     def test_parse_file_legacy_with_no_findings(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "no_findings.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_legacy_with_one_finding(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/data_one.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "data_one.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -24,7 +24,7 @@ def test_parse_file_legacy_with_one_finding(self):
         self.assertIn("AsymmetricPrivateKey", finding.unsaved_tags)

     def test_parse_file_legacy_with_multiple_finding(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/data_many.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "data_many.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
@@ -36,13 +36,13 @@ def test_parse_file_legacy_with_multiple_finding(self):
         self.assertIn("Github", finding.unsaved_tags)

     def test_parse_file_legacy_with_multiple_redacted_finding(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/redacted_data_many.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "redacted_data_many.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(6, len(findings))

     def test_parse_file_legacy_from_issue4336(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/issue4336.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "issue4336.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -53,7 +53,7 @@ def test_parse_file_legacy_from_issue4336(self):
         self.assertEqual(23, finding.line)

     def test_parse_file_from_version_7_5_0(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/version_7.5.0.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "version_7.5.0.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(4, len(findings))
@@ -84,7 +84,7 @@ def test_parse_file_from_version_7_5_0(self):
         self.assertIn("AWS", finding.unsaved_tags)

     def test_parse_file_from_version_8(self):
-        with open(get_unit_tests_path() + "/scans/gitleaks/gitleaks8_many.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gitleaks") / "gitleaks8_many.json", encoding="utf-8") as testfile:
             parser = GitleaksParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_gosec_parser.py b/unittests/tools/test_gosec_parser.py
index c696692a44b..45d22664d8c 100644
--- a/unittests/tools/test_gosec_parser.py
+++ b/unittests/tools/test_gosec_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.gosec.parser import GosecParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGosecParser(DojoTestCase):

     def test_parse_file_with_one_finding(self):
-        with open("unittests/scans/gosec/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("gosec") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = GosecParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(28, len(findings))
diff --git a/unittests/tools/test_govulncheck_parser.py b/unittests/tools/test_govulncheck_parser.py
index 1865ff3c5d2..7d0bdbff265 100644
--- a/unittests/tools/test_govulncheck_parser.py
+++ b/unittests/tools/test_govulncheck_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.govulncheck.parser import GovulncheckParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestGovulncheckParser(DojoTestCase):

     def test_parse_empty(self):
         with self.assertRaises(ValueError) as exp:
-            with open("unittests/scans/govulncheck/empty.json", encoding="utf-8") as testfile:
+            with open(get_unit_tests_scans_path("govulncheck") / "empty.json", encoding="utf-8") as testfile:
                 parser = GovulncheckParser()
                 parser.get_findings(testfile, Test())
         self.assertIn(
@@ -15,13 +15,13 @@ def test_parse_empty(self):
         )

     def test_parse_no_findings(self):
-        with open("unittests/scans/govulncheck/no_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("govulncheck") / "no_vulns.json", encoding="utf-8") as testfile:
             parser = GovulncheckParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_many_findings(self):
-        with open("unittests/scans/govulncheck/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("govulncheck") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = GovulncheckParser()
findings = parser.get_findings(testfile, Test()) @@ -67,13 +67,13 @@ def test_parse_many_findings(self): self.assertEqual("https://groups.google.com/g/golang-announce/c/x49AQzIVX-s", finding.references) def test_parse_new_version_no_findings(self): - with open("unittests/scans/govulncheck/no_vulns_new_version.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("govulncheck") / "no_vulns_new_version.json", encoding="utf-8") as testfile: parser = GovulncheckParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_new_version_many_findings(self): - with open("unittests/scans/govulncheck/many_vulns_new_version.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("govulncheck") / "many_vulns_new_version.json", encoding="utf-8") as testfile: parser = GovulncheckParser() findings = parser.get_findings(testfile, Test()) @@ -94,7 +94,7 @@ def test_parse_new_version_many_findings(self): self.assertIsNotNone(finding.references) def test_parse_new_version_many_findings_custom_severity(self): - with open("unittests/scans/govulncheck/many_vulns_new_version_custom_severity.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("govulncheck") / "many_vulns_new_version_custom_severity.json", encoding="utf-8") as testfile: parser = GovulncheckParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_h1_parser.py b/unittests/tools/test_h1_parser.py index 36f1ef17247..0aee422f69d 100644 --- a/unittests/tools/test_h1_parser.py +++ b/unittests/tools/test_h1_parser.py @@ -2,24 +2,24 @@ from dojo.models import Test from dojo.tools.h1.parser import H1Parser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class HackerOneVulnerabilityDisclosureProgramTests(DojoTestCase): def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/h1/vuln_disclosure_many.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "vuln_disclosure_many.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/h1/vuln_disclosure_one.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "vuln_disclosure_one.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_no_vuln_has_no_finding(self): - with open("unittests/scans/h1/vuln_disclosure_zero.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "vuln_disclosure_zero.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) @@ -27,7 +27,7 @@ def test_parse_file_with_no_vuln_has_no_finding(self): class HackerOneBugBountyProgramTests(DojoTestCase): def test_bug_bounty_hacker_one_many_findings_json(self): - with open("unittests/scans/h1/bug_bounty_many.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_many.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -71,7 +71,7 @@ def test_bug_bounty_hacker_one_many_findings_json(self): 
self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids) def test_bug_bounty_hacker_one_one_findings_json(self): - with open("unittests/scans/h1/bug_bounty_one.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_one.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -89,13 +89,13 @@ def test_bug_bounty_hacker_one_one_findings_json(self): self.assertIn("**Reporter**: reporter", finding.description) def test_bug_bounty_hacker_one_zero_findings_json(self): - with open("unittests/scans/h1/bug_bounty_zero.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_zero.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_bug_bounty_hacker_one_many_findings_csv(self): - with open("unittests/scans/h1/bug_bounty_many.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_many.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -139,7 +139,7 @@ def test_bug_bounty_hacker_one_many_findings_csv(self): self.assertIn("CVE-2017-12615", finding.unsaved_vulnerability_ids) def test_bug_bounty_hacker_one_one_findings_csv(self): - with open("unittests/scans/h1/bug_bounty_one.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_one.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -157,7 +157,7 @@ def test_bug_bounty_hacker_one_one_findings_csv(self): self.assertIn("**Reporter**: reporter", finding.description) def test_bug_bounty_hacker_one_zero_findings_csv(self): - with open("unittests/scans/h1/bug_bounty_zero.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("h1") / "bug_bounty_zero.json", encoding="utf-8") as testfile: parser = H1Parser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) diff --git a/unittests/tools/test_hadolint_parser.py b/unittests/tools/test_hadolint_parser.py index 1e96dfe287a..74b24f54517 100644 --- a/unittests/tools/test_hadolint_parser.py +++ b/unittests/tools/test_hadolint_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.hadolint.parser import HadolintParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TesthadolintParser(DojoTestCase): def test_parse_file_with_one_dockerfile(self): - testfile = open("unittests/scans/hadolint/one_dockerfile.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("hadolint") / "one_dockerfile.json", encoding="utf-8") parser = HadolintParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -16,7 +16,7 @@ def test_parse_file_with_one_dockerfile(self): self.assertEqual(finding.file_path, "django-DefectDojo\\Dockerfile.django") def test_parse_file_with_many_dockerfile(self): - testfile = open("unittests/scans/hadolint/many_dockerfile.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("hadolint") / "many_dockerfile.json", encoding="utf-8") parser = HadolintParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git 
a/unittests/tools/test_harbor_vulnerability_parser.py b/unittests/tools/test_harbor_vulnerability_parser.py index 6659f23d604..52347034c9b 100644 --- a/unittests/tools/test_harbor_vulnerability_parser.py +++ b/unittests/tools/test_harbor_vulnerability_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.harbor_vulnerability.parser import HarborVulnerabilityParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHarborVulnerabilityParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/harbor_vulnerability/harbor-0-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("harbor_vulnerability") / "harbor-0-vuln.json", encoding="utf-8") as testfile: parser = HarborVulnerabilityParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) @@ -14,7 +14,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self): # Sample with One Test # + also verify data with one test def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/harbor_vulnerability/harbor-1-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("harbor_vulnerability") / "harbor-1-vuln.json", encoding="utf-8") as testfile: parser = HarborVulnerabilityParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -35,7 +35,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self): # Sample with Multiple Test def test_parse_file_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/harbor_vulnerability/harbor-5-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("harbor_vulnerability") / "harbor-5-vuln.json", encoding="utf-8") as testfile: parser = HarborVulnerabilityParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) @@ -47,7 +47,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self): # Sample with Trivy Test def test_parse_file_with_multiple_vuln_has_multiple_trivy_findings(self): - with open("unittests/scans/harbor_vulnerability/harbor-trivy-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("harbor_vulnerability") / "harbor-trivy-vuln.json", encoding="utf-8") as testfile: parser = HarborVulnerabilityParser() findings = parser.get_findings(testfile, Test()) @@ -57,7 +57,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_trivy_findings(self): # Sample with harborapi pip def test_parse_file_with_multiple_vuln_has_harborapi_pip_package(self): - with open("unittests/scans/harbor_vulnerability/harborapipip.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("harbor_vulnerability") / "harborapipip.json", encoding="utf-8") as testfile: parser = HarborVulnerabilityParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) diff --git a/unittests/tools/test_hcl_appscan_parser.py b/unittests/tools/test_hcl_appscan_parser.py index 6ee0f083ccc..718cde5e376 100644 --- a/unittests/tools/test_hcl_appscan_parser.py +++ b/unittests/tools/test_hcl_appscan_parser.py @@ -1,18 +1,18 @@ from dojo.tools.hcl_appscan.parser import HCLAppScanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHCLAppScanParser(DojoTestCase): def test_no_findings(self): - 
my_file_handle = open("unittests/scans/hcl_appscan/no_findings.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_appscan") / "no_findings.xml", encoding="utf-8") parser = HCLAppScanParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(0, len(findings)) def test_many_findings(self): - my_file_handle = open("unittests/scans/hcl_appscan/many_findings.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_appscan") / "many_findings.xml", encoding="utf-8") parser = HCLAppScanParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() @@ -26,7 +26,7 @@ def test_many_findings(self): self.assertEqual(findings[9].cwe, 522) def test_issue_9279(self): - my_file_handle = open("unittests/scans/hcl_appscan/issue_9279.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_appscan") / "issue_9279.xml", encoding="utf-8") parser = HCLAppScanParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() @@ -39,7 +39,7 @@ def test_issue_9279(self): self.assertEqual(findings[10].cwe, 1275) def test_issue_10074(self): - with open("unittests/scans/hcl_appscan/issue_10074.xml", encoding="utf-8") as my_file_handle: + with open(get_unit_tests_scans_path("hcl_appscan") / "issue_10074.xml", encoding="utf-8") as my_file_handle: parser = HCLAppScanParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() diff --git a/unittests/tools/test_hcl_asoc_sast_parser.py b/unittests/tools/test_hcl_asoc_sast_parser.py index d9adbde8c24..107897d55df 100644 --- a/unittests/tools/test_hcl_asoc_sast_parser.py +++ b/unittests/tools/test_hcl_asoc_sast_parser.py @@ -1,18 +1,18 @@ from dojo.tools.hcl_asoc_sast.parser import HCLASoCSASTParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHCLASoCSASTParser(DojoTestCase): def test_no_findings(self): - my_file_handle = open("unittests/scans/hcl_asoc_sast/no_issues.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_asoc_sast") / "no_issues.xml", encoding="utf-8") parser = HCLASoCSASTParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() self.assertEqual(0, len(findings)) def test_one_finding(self): - my_file_handle = open("unittests/scans/hcl_asoc_sast/one_issue.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_asoc_sast") / "one_issue.xml", encoding="utf-8") parser = HCLASoCSASTParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() @@ -22,7 +22,7 @@ def test_one_finding(self): self.assertEqual(findings[0].cwe, 266) def test_many_findings(self): - my_file_handle = open("unittests/scans/hcl_asoc_sast/many_issues.xml", encoding="utf-8") + my_file_handle = open(get_unit_tests_scans_path("hcl_asoc_sast") / "many_issues.xml", encoding="utf-8") parser = HCLASoCSASTParser() findings = parser.get_findings(my_file_handle, None) my_file_handle.close() diff --git a/unittests/tools/test_horusec_parser.py b/unittests/tools/test_horusec_parser.py index b2a39b75ca2..81ce6f2d64a 100644 --- a/unittests/tools/test_horusec_parser.py +++ b/unittests/tools/test_horusec_parser.py @@ -1,16 +1,14 @@ import datetime -from os import path -from pathlib import Path from dojo.models import Test from dojo.tools.horusec.parser import HorusecParser -from unittests.dojo_test_case import DojoTestCase +from 
unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHorusecParser(DojoTestCase): def test_get_findings(self): """Version 2.6.3 with big project in Python""" - with open(path.join(Path(__file__).parent, "../scans/horusec/version_2.6.3.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "version_2.6.3.json", encoding="utf-8") as testfile: parser = HorusecParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(267, len(findings)) @@ -18,7 +16,7 @@ def test_get_findings(self): def test_get_tests(self): """Version 2.6.3 with big project in Python""" - with open(path.join(Path(__file__).parent, "../scans/horusec/version_2.6.3.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "version_2.6.3.json", encoding="utf-8") as testfile: parser = HorusecParser() tests = parser.get_tests("Horusec Scan", testfile) self.assertEqual(1, len(tests)) @@ -50,7 +48,7 @@ def test_get_tests(self): def test_get_tests_ok(self): """Version 2.6.3 with big project in Python""" - with open(path.join(Path(__file__).parent, "../scans/horusec/horres3.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "horres3.json", encoding="utf-8") as testfile: parser = HorusecParser() tests = parser.get_tests("Horusec Scan", testfile) self.assertEqual(1, len(tests)) @@ -82,7 +80,7 @@ def test_get_tests_ok(self): def test_get_tests_issue_6258(self): """""" - with open(path.join(Path(__file__).parent, "../scans/horusec/issue_6258.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "issue_6258.json", encoding="utf-8") as testfile: parser = HorusecParser() tests = parser.get_tests("Horusec Scan", testfile) self.assertEqual(1, len(tests)) @@ -118,7 +116,7 @@ def test_get_tests_issue_6258(self): def test_get_tests_pr_6563(self): """""" - with open(path.join(Path(__file__).parent, "../scans/horusec/pr_6563.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "pr_6563.json", encoding="utf-8") as testfile: parser = HorusecParser() tests = parser.get_tests("Horusec Scan", testfile) self.assertEqual(1, len(tests)) @@ -137,7 +135,7 @@ def test_get_tests_pr_6563(self): def test_issue_9939(self): """""" - with open(path.join(Path(__file__).parent, "../scans/horusec/issue_9939.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("horusec") / "issue_9939.json", encoding="utf-8") as testfile: parser = HorusecParser() tests = parser.get_tests("Horusec Scan", testfile) self.assertEqual(1, len(tests)) diff --git a/unittests/tools/test_humble_parser.py b/unittests/tools/test_humble_parser.py index d4284e1692b..85497d00f66 100644 --- a/unittests/tools/test_humble_parser.py +++ b/unittests/tools/test_humble_parser.py @@ -1,11 +1,11 @@ from dojo.models import Test from dojo.tools.humble.parser import HumbleParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHumbleParser(DojoTestCase): def test_humble_parser_with_many_findings(self): - with open("unittests/scans/humble/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("humble") / "many_findings.json", encoding="utf-8") as testfile: parser = HumbleParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -19,7 +19,7 @@ def test_humble_parser_with_many_findings(self): self.assertEqual("Deprecated 
header: Strict-Transport-Security (Recommended Values)", finding.title) def test_humble_parser_with_many_findings2(self): - with open("unittests/scans/humble/many_findings2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("humble") / "many_findings2.json", encoding="utf-8") as testfile: parser = HumbleParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_huskyci_parser.py b/unittests/tools/test_huskyci_parser.py index 7c7bb1ad59a..b2bb4147dcf 100644 --- a/unittests/tools/test_huskyci_parser.py +++ b/unittests/tools/test_huskyci_parser.py @@ -1,19 +1,19 @@ from dojo.models import Test from dojo.tools.huskyci.parser import HuskyCIParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHuskyCIParser(DojoTestCase): def test_parse_file_no_finding(self): - with open("unittests/scans/huskyci/huskyci_report_no_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("huskyci") / "huskyci_report_no_finding.json", encoding="utf-8") as testfile: parser = HuskyCIParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_has_one_finding_one_tool(self): with open( - get_unit_tests_path() + "/scans/huskyci/huskyci_report_one_finding_one_tool.json", encoding="utf-8", + get_unit_tests_scans_path("huskyci") / "huskyci_report_one_finding_one_tool.json", encoding="utf-8", ) as testfile: parser = HuskyCIParser() findings = parser.get_findings(testfile, Test()) @@ -21,7 +21,7 @@ def test_parse_file_has_one_finding_one_tool(self): def test_parse_file_has_many_finding_one_tool(self): with open( - get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_one_tool.json", encoding="utf-8", + get_unit_tests_scans_path("huskyci") / "huskyci_report_many_finding_one_tool.json", encoding="utf-8", ) as testfile: parser = HuskyCIParser() findings = parser.get_findings(testfile, Test()) @@ -29,7 +29,7 @@ def test_parse_file_has_many_finding_one_tool(self): def test_parse_file_has_many_finding_two_tools(self): with open( - get_unit_tests_path() + "/scans/huskyci/huskyci_report_many_finding_two_tools.json", encoding="utf-8", + get_unit_tests_scans_path("huskyci") / "huskyci_report_many_finding_two_tools.json", encoding="utf-8", ) as testfile: parser = HuskyCIParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_hydra_parser.py b/unittests/tools/test_hydra_parser.py index a4045e2c261..e83b17acb8f 100644 --- a/unittests/tools/test_hydra_parser.py +++ b/unittests/tools/test_hydra_parser.py @@ -2,32 +2,32 @@ from dojo.models import Finding, Test from dojo.tools.hydra.parser import HydraParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestHydraParser(DojoTestCase): __test_datetime = datetime(2019, 3, 1, 14, 44, 22) def test_invalid_json_format(self): - with open("unittests/scans/hydra/invalid.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "invalid.json", encoding="utf-8") as testfile: parser = HydraParser() with self.assertRaises(ValueError): parser.get_findings(testfile, Test()) def test_parser_ensures_data_is_for_hydra_before_parsing(self): - with open("unittests/scans/hydra/oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile: + with 
open(get_unit_tests_scans_path("hydra") / "oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile: parser = HydraParser() with self.assertRaises(ValueError): parser.get_findings(testfile, Test()) def test_hydra_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/hydra/hydra_report_no_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "hydra_report_no_finding.json", encoding="utf-8") as testfile: parser = HydraParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_hydra_parser_with_one_finding_has_one_finding(self): - with open("unittests/scans/hydra/hydra_report_one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "hydra_report_one_finding.json", encoding="utf-8") as testfile: parser = HydraParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -45,7 +45,7 @@ def test_hydra_parser_with_one_finding_has_one_finding(self): ) def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self): - with open("unittests/scans/hydra/hydra_report_one_finding_missing_date.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "hydra_report_one_finding_missing_date.json", encoding="utf-8") as testfile: parser = HydraParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -63,7 +63,7 @@ def test_hydra_parser_with_one_finding_and_missing_date_has_one_finding(self): ) def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self): - with open("unittests/scans/hydra/hydra_report_two_findings_with_one_incomplete.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "hydra_report_two_findings_with_one_incomplete.json", encoding="utf-8") as testfile: parser = HydraParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -81,7 +81,7 @@ def test_hydra_parser_with_two_findings_with_one_incomplete_has_one_finding(self ) def test_hydra_parser_with_many_findings_has_many_findings(self): - with open("unittests/scans/hydra/hydra_report_many_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("hydra") / "hydra_report_many_finding.json", encoding="utf-8") as testfile: parser = HydraParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) diff --git a/unittests/tools/test_ibm_app_parser.py b/unittests/tools/test_ibm_app_parser.py index 7e6fc5d4474..55d039ef321 100644 --- a/unittests/tools/test_ibm_app_parser.py +++ b/unittests/tools/test_ibm_app_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.ibm_app.parser import IbmAppParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestIbmAppParser(DojoTestCase): def test_parse_file(self): - testfile = open("unittests/scans/ibm_app/testfire.xml", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("ibm_app") / "testfire.xml", encoding="utf-8") parser = IbmAppParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_immuniweb_parser.py b/unittests/tools/test_immuniweb_parser.py index 413a2c0a1e8..d8161f92a50 100644 --- a/unittests/tools/test_immuniweb_parser.py +++ b/unittests/tools/test_immuniweb_parser.py @@ -1,18 
+1,18 @@ from dojo.models import Test from dojo.tools.immuniweb.parser import ImmuniwebParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestImmuniwebParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/immuniweb/ImmuniWeb-0-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("immuniweb") / "ImmuniWeb-0-vuln.xml", encoding="utf-8") as testfile: parser = ImmuniwebParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/immuniweb/ImmuniWeb-1-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("immuniweb") / "ImmuniWeb-1-vuln.xml", encoding="utf-8") as testfile: parser = ImmuniwebParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/immuniweb/ImmuniWeb-multiple-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("immuniweb") / "ImmuniWeb-multiple-vuln.xml", encoding="utf-8") as testfile: parser = ImmuniwebParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_intsights_parser.py b/unittests/tools/test_intsights_parser.py index c4460d9464c..b2bfb34c041 100644 --- a/unittests/tools/test_intsights_parser.py +++ b/unittests/tools/test_intsights_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.intsights.parser import IntSightsParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestIntSightsParser(DojoTestCase): def test_intsights_parser_with_one_critical_vuln_has_one_findings_json( self): - with open("unittests/scans/intsights/intsights_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_one_vul.json", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) @@ -27,7 +27,7 @@ def test_intsights_parser_with_one_critical_vuln_has_one_findings_json( def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv( self): - with open("unittests/scans/intsights/intsights_one_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_one_vuln.csv", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -42,31 +42,31 @@ def test_intsights_parser_with_one_critical_vuln_has_one_findings_csv( finding.title) def test_intsights_parser_with_many_vuln_has_many_findings_json(self): - with open("unittests/scans/intsights/intsights_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_many_vul.json", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) def test_intsights_parser_with_many_vuln_has_many_findings_csv(self): - with open("unittests/scans/intsights/intsights_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / 
"intsights_many_vuln.csv", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) def test_intsights_parser_invalid_text_with_error_csv(self): with self.assertRaises(ValueError): - with open("unittests/scans/intsights/intsights_invalid_file.txt", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_invalid_file.txt", encoding="utf-8") as testfile: parser = IntSightsParser() parser.get_findings(testfile, Test()) def test_intsights_parser_with_no_alerts_json(self): - with open("unittests/scans/intsights/intsights_zero_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_zero_vuln.json", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_intsights_parser_with_no_alerts_csv(self): - with open("unittests/scans/intsights/intsights_zero_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("intsights") / "intsights_zero_vuln.csv", encoding="utf-8") as testfile: parser = IntSightsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) diff --git a/unittests/tools/test_invicti_parser.py b/unittests/tools/test_invicti_parser.py index aca5bfadd78..324ba73ca38 100644 --- a/unittests/tools/test_invicti_parser.py +++ b/unittests/tools/test_invicti_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.invicti.parser import InvictiParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestInvictiParser(DojoTestCase): def test_parse_file_with_one_finding(self): - with open("unittests/scans/invicti/invicti_one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("invicti") / "invicti_one_finding.json", encoding="utf-8") as testfile: parser = InvictiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -26,7 +26,7 @@ def test_parse_file_with_one_finding(self): self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php") def test_parse_file_with_multiple_finding(self): - with open("unittests/scans/invicti/invicti_many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("invicti") / "invicti_many_findings.json", encoding="utf-8") as testfile: parser = InvictiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(16, len(findings)) @@ -70,7 +70,7 @@ def test_parse_file_with_multiple_finding(self): self.assertEqual(str(endpoint), "http://php.testsparker.com") def test_parse_file_issue_9816(self): - with open("unittests/scans/invicti/issue_9816.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("invicti") / "issue_9816.json", encoding="utf-8") as testfile: parser = InvictiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) @@ -84,7 +84,7 @@ def test_parse_file_issue_9816(self): self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y")) def test_parse_file_issue_10311(self): - with open("unittests/scans/invicti/issue_10311.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("invicti") / "issue_10311.json", encoding="utf-8") as testfile: parser = InvictiParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, 
len(findings)) diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py index dae26302328..945c8ac6c43 100644 --- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py +++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py @@ -4,19 +4,19 @@ from dojo.tools.jfrog_xray_api_summary_artifact.parser import ( JFrogXrayApiSummaryArtifactParser, ) -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestJFrogXrayApiSummaryArtifactParser(DojoTestCase): def test_parse_file_with_no_vuln(self): - testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/no_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_api_summary_artifact") / "no_vuln.json", encoding="utf-8") parser = JFrogXrayApiSummaryArtifactParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln(self): - testfile = open("unittests/scans/jfrog_xray_api_summary_artifact/one_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_api_summary_artifact") / "one_vuln.json", encoding="utf-8") parser = JFrogXrayApiSummaryArtifactParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -52,7 +52,7 @@ def test_parse_file_with_one_vuln(self): def test_parse_file_with_many_vulns(self): testfile = open( - "unittests/scans/jfrog_xray_api_summary_artifact/many_vulns.json", encoding="utf-8", + get_unit_tests_scans_path("jfrog_xray_api_summary_artifact") / "many_vulns.json", encoding="utf-8", ) parser = JFrogXrayApiSummaryArtifactParser() findings = parser.get_findings(testfile, Test()) @@ -64,7 +64,7 @@ def test_parse_file_with_many_vulns(self): def test_parse_file_with_malformed_cvssv3_score(self): testfile = open( - "unittests/scans/jfrog_xray_api_summary_artifact/malformed_cvssv3.json", encoding="utf-8", + get_unit_tests_scans_path("jfrog_xray_api_summary_artifact") / "malformed_cvssv3.json", encoding="utf-8", ) parser = JFrogXrayApiSummaryArtifactParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py index 257a88dd49f..a8b89a1ee52 100644 --- a/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py +++ b/unittests/tools/test_jfrog_xray_on_demand_binary_scan_parser.py @@ -4,13 +4,13 @@ clean_title, get_component_name_version, ) -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestJFrogXrayOnDemandBinaryScanParser(DojoTestCase): def test_parse_file_with_one_vuln(self): - testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/one_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_on_demand_binary_scan") / "one_vuln.json", encoding="utf-8") parser = JFrogXrayOnDemandBinaryScanParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln(self): self.assertEqual("High", item.severity) def test_parse_file_with_many_vulns(self): - testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_on_demand_binary_scan") / "many_vulns.json", encoding="utf-8") 
parser = JFrogXrayOnDemandBinaryScanParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -50,14 +50,14 @@ def test_clean_title(self): self.assertEqual("Processing some specially crafted ASN.1 object identifiers or", clean_title("Issue summary: Processing some specially crafted ASN.1 object identifiers or\ndata containing them may be very slow.")) def test_parse_file_with_many_vulns_docker(self): - testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_docker.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_on_demand_binary_scan") / "many_vulns_docker.json", encoding="utf-8") parser = JFrogXrayOnDemandBinaryScanParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(4, len(findings)) def test_parse_file_with_many_vulns_pypi(self): - testfile = open("unittests/scans/jfrog_xray_on_demand_binary_scan/many_vulns_pypi.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_on_demand_binary_scan") / "many_vulns_pypi.json", encoding="utf-8") parser = JFrogXrayOnDemandBinaryScanParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_jfrog_xray_unified_parser.py b/unittests/tools/test_jfrog_xray_unified_parser.py index 25349d89838..8325ca52240 100644 --- a/unittests/tools/test_jfrog_xray_unified_parser.py +++ b/unittests/tools/test_jfrog_xray_unified_parser.py @@ -2,20 +2,20 @@ from dojo.models import Test from dojo.tools.jfrog_xray_unified.parser import JFrogXrayUnifiedParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestJFrogXrayUnifiedParser(DojoTestCase): def test_parse_file_with_no_vuln(self): - testfile = open("unittests/scans/jfrog_xray_unified/no_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_unified") / "no_vuln.json", encoding="utf-8") parser = JFrogXrayUnifiedParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln(self): - testfile = open("unittests/scans/jfrog_xray_unified/one_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_unified") / "one_vuln.json", encoding="utf-8") parser = JFrogXrayUnifiedParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -45,14 +45,14 @@ def test_parse_file_with_one_vuln(self): self.assertEqual("XRAY-139239", item.unique_id_from_tool) def test_parse_file_with_many_vulns(self): - testfile = open("unittests/scans/jfrog_xray_unified/many_vulns.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_unified") / "many_vulns.json", encoding="utf-8") parser = JFrogXrayUnifiedParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(3, len(findings)) def test_parse_file_with_very_many_vulns(self): - testfile = open("unittests/scans/jfrog_xray_unified/very_many_vulns.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_unified") / "very_many_vulns.json", encoding="utf-8") parser = JFrogXrayUnifiedParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -339,7 +339,7 @@ def test_parse_file_with_very_many_vulns(self): # **finished various packages** def test_parse_file_with_another_report(self): - testfile = open("unittests/scans/jfrog_xray_unified/Vulnerabilities-Report-XRAY_Unified.json", 
encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrog_xray_unified") / "Vulnerabilities-Report-XRAY_Unified.json", encoding="utf-8") parser = JFrogXrayUnifiedParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_jfrogxray_parser.py b/unittests/tools/test_jfrogxray_parser.py index 0a4aeb2e39b..e7afb51ea24 100644 --- a/unittests/tools/test_jfrogxray_parser.py +++ b/unittests/tools/test_jfrogxray_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.jfrogxray.parser import JFrogXrayParser, decode_cwe_number -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestJfrogJFrogXrayParser(DojoTestCase): def test_parse_file_with_one_vuln(self): - testfile = open("unittests/scans/jfrogxray/one_vuln.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrogxray") / "one_vuln.json", encoding="utf-8") parser = JFrogXrayParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -19,14 +19,14 @@ def test_parse_file_with_one_vuln(self): self.assertEqual(787, item.cwe) def test_parse_file_with_many_vulns(self): - testfile = open("unittests/scans/jfrogxray/many_vulns.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrogxray") / "many_vulns.json", encoding="utf-8") parser = JFrogXrayParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(3, len(findings)) def test_parse_file_with_many_vulns2(self): - testfile = open("unittests/scans/jfrogxray/many_vulns2.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("jfrogxray") / "many_vulns2.json", encoding="utf-8") parser = JFrogXrayParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_kics_parser.py b/unittests/tools/test_kics_parser.py index 03fe1ddaac9..fdef69aa6da 100644 --- a/unittests/tools/test_kics_parser.py +++ b/unittests/tools/test_kics_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.kics.parser import KICSParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKICSParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/kics/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kics") / "no_findings.json", encoding="utf-8") as testfile: parser = KICSParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_many_findings(self): - with open("unittests/scans/kics/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kics") / "many_findings.json", encoding="utf-8") as testfile: parser = KICSParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(18, len(findings)) diff --git a/unittests/tools/test_kiuwan_parser.py b/unittests/tools/test_kiuwan_parser.py index d27f003bbbb..90655ddaca7 100644 --- a/unittests/tools/test_kiuwan_parser.py +++ b/unittests/tools/test_kiuwan_parser.py @@ -1,36 +1,36 @@ from dojo.models import Test from dojo.tools.kiuwan.parser import KiuwanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKiuwanParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/kiuwan/kiuwan_no_vuln.csv", 
encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan") / "kiuwan_no_vuln.csv", encoding="utf-8") as testfile: parser = KiuwanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_two_vuln_has_two_findings(self): - with open("unittests/scans/kiuwan/kiuwan_two_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan") / "kiuwan_two_vuln.csv", encoding="utf-8") as testfile: parser = KiuwanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/kiuwan/kiuwan_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan") / "kiuwan_many_vuln.csv", encoding="utf-8") as testfile: parser = KiuwanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(131, len(findings)) def test_parse_file_with_defects(self): - with open("unittests/scans/kiuwan/kiuwan_defects.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan") / "kiuwan_defects.csv", encoding="utf-8") as testfile: parser = KiuwanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_issue_9308(self): - with open("unittests/scans/kiuwan/issue_9308.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan") / "issue_9308.csv", encoding="utf-8") as testfile: parser = KiuwanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) diff --git a/unittests/tools/test_kiuwan_sca_parser.py b/unittests/tools/test_kiuwan_sca_parser.py index 3c868c483c8..1545480757b 100644 --- a/unittests/tools/test_kiuwan_sca_parser.py +++ b/unittests/tools/test_kiuwan_sca_parser.py @@ -1,32 +1,32 @@ from dojo.models import Test from dojo.tools.kiuwan_sca.parser import KiuwanSCAParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path # ./dc-unittest.sh --profile postgres-redis --test-case unittests.tools.test_kiuwan_sca_parser.TestKiuwanSCAParser class TestKiuwanSCAParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/kiuwan_sca/kiuwan_sca_no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan_sca") / "kiuwan_sca_no_vuln.json", encoding="utf-8") as testfile: parser = KiuwanSCAParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_two_vuln_has_two_findings(self): - with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan_sca") / "kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile: parser = KiuwanSCAParser() findings = parser.get_findings(testfile, Test()) # file contains 3, but we only get 2 as "muted" ones are ignored: self.assertEqual(2, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/kiuwan_sca/kiuwan_sca_many_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan_sca") / "kiuwan_sca_many_vuln.json", encoding="utf-8") as testfile: parser = KiuwanSCAParser() findings = parser.get_findings(testfile, Test()) # also tests deduplication as there are 28 findings in the file: self.assertEqual(27, len(findings)) def 
test_correct_mapping(self): - with open("unittests/scans/kiuwan_sca/kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kiuwan_sca") / "kiuwan_sca_two_vuln.json", encoding="utf-8") as testfile: parser = KiuwanSCAParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_krakend_audit_parser.py b/unittests/tools/test_krakend_audit_parser.py index 60f44d51ec1..4d5f4d03940 100644 --- a/unittests/tools/test_krakend_audit_parser.py +++ b/unittests/tools/test_krakend_audit_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.krakend_audit.parser import KrakenDAuditParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKrakenDAuditParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/krakend_audit/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("krakend_audit") / "no_findings.json", encoding="utf-8") as testfile: parser = KrakenDAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_many_findings(self): - with open("unittests/scans/krakend_audit/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("krakend_audit") / "many_findings.json", encoding="utf-8") as testfile: parser = KrakenDAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) diff --git a/unittests/tools/test_kubeaudit_parser.py b/unittests/tools/test_kubeaudit_parser.py index dea4e51e5bf..a7d74002eeb 100644 --- a/unittests/tools/test_kubeaudit_parser.py +++ b/unittests/tools/test_kubeaudit_parser.py @@ -1,12 +1,12 @@ from dojo.models import Test from dojo.tools.kubeaudit.parser import KubeAuditParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKubeAuditParser(DojoTestCase): def test_parse_file_has_no_findings(self): - testfile = open("unittests/scans/kubeaudit/kubeaudit.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("kubeaudit") / "kubeaudit.json", encoding="utf-8") parser = KubeAuditParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_kubebench_parser.py b/unittests/tools/test_kubebench_parser.py index 25b77faaf77..8afc53f1b18 100644 --- a/unittests/tools/test_kubebench_parser.py +++ b/unittests/tools/test_kubebench_parser.py @@ -1,13 +1,13 @@ from dojo.models import Test from dojo.tools.kubebench.parser import KubeBenchParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKubeBenchParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): with open( - get_unit_tests_path() + "/scans/kubebench/kube-bench-report-zero-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("kubebench") / "kube-bench-report-zero-vuln.json", encoding="utf-8", ) as testfile: parser = KubeBenchParser() findings = parser.get_findings(testfile, Test()) @@ -15,7 +15,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self): def test_parse_file_with_one_vuln_has_one_finding(self): with open( - get_unit_tests_path() + "/scans/kubebench/kube-bench-report-one-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("kubebench") / "kube-bench-report-one-vuln.json", 
encoding="utf-8", ) as testfile: parser = KubeBenchParser() findings = parser.get_findings(testfile, Test()) @@ -23,7 +23,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): def test_parse_file_with_multiple_vuln_has_multiple_findings(self): with open( - get_unit_tests_path() + "/scans/kubebench/kube-bench-report-many-vuln.json", encoding="utf-8", + get_unit_tests_scans_path("kubebench") / "kube-bench-report-many-vuln.json", encoding="utf-8", ) as testfile: parser = KubeBenchParser() findings = parser.get_findings(testfile, Test()) @@ -33,7 +33,7 @@ def test_parse_file_with_controls_tag(self): # The testfile has been derived from https://github.com/kubernetes-sigs/wg-policy-prototypes/blob/master/policy-report/kube-bench-adapter/samples/kube-bench-output.json with open( - get_unit_tests_path() + "/scans/kubebench/kube-bench-controls.json", encoding="utf-8", + get_unit_tests_scans_path("kubebench") / "kube-bench-controls.json", encoding="utf-8", ) as testfile: parser = KubeBenchParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_kubehunter_parser.py b/unittests/tools/test_kubehunter_parser.py index 2141d59bb1d..c59a5e74fbb 100644 --- a/unittests/tools/test_kubehunter_parser.py +++ b/unittests/tools/test_kubehunter_parser.py @@ -2,18 +2,19 @@ from dojo.models import Test from dojo.tools.kubehunter.parser import KubeHunterParser +from unittests.dojo_test_case import get_unit_tests_scans_path class TestKubeHunterParser(TestCase): def test_kubehunter_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/kubehunter/kubehunter_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubehunter") / "kubehunter_zero_vul.json", encoding="utf-8") as testfile: parser = KubeHunterParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_kubehunter_parser_with_one_criticle_vuln_has_one_findings(self): - with open("unittests/scans/kubehunter/kubehunter_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubehunter") / "kubehunter_one_vul.json", encoding="utf-8") as testfile: parser = KubeHunterParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -28,7 +29,7 @@ def test_kubehunter_parser_with_one_criticle_vuln_has_one_findings(self): self.assertEqual(finding.severity, "High") def test_kubehunter_parser_with_many_vuln_has_many_findings(self): - with open("unittests/scans/kubehunter/kubehunter_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubehunter") / "kubehunter_many_vul.json", encoding="utf-8") as testfile: parser = KubeHunterParser() findings = parser.get_findings(testfile, Test()) @@ -36,7 +37,7 @@ def test_kubehunter_parser_with_many_vuln_has_many_findings(self): def test_kubehunter_parser_empty_with_error(self): with self.assertRaises(ValueError) as context: - with open("unittests/scans/kubehunter/empty.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubehunter") / "empty.json", encoding="utf-8") as testfile: parser = KubeHunterParser() parser.get_findings(testfile, Test()) @@ -45,7 +46,7 @@ def test_kubehunter_parser_empty_with_error(self): ) def test_kubehunter_parser_dupe(self): - with open("unittests/scans/kubehunter/dupe.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubehunter") / "dupe.json", encoding="utf-8") as testfile: parser = KubeHunterParser() findings = parser.get_findings(testfile, 
Test()) self.assertEqual(1, len(findings)) diff --git a/unittests/tools/test_kubescape_parser.py b/unittests/tools/test_kubescape_parser.py index 346cda04019..d0b62f6e967 100644 --- a/unittests/tools/test_kubescape_parser.py +++ b/unittests/tools/test_kubescape_parser.py @@ -1,23 +1,23 @@ from dojo.models import Test from dojo.tools.kubescape.parser import KubescapeParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestKubescapeParser(DojoTestCase): def test_parse_file_has_many_findings(self): - with open(get_unit_tests_path() + "/scans/kubescape/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubescape") / "many_findings.json", encoding="utf-8") as testfile: parser = KubescapeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(349, len(findings)) def test_parse_file_has_many_results(self): - with open(get_unit_tests_path() + "/scans/kubescape/results.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubescape") / "results.json", encoding="utf-8") as testfile: parser = KubescapeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_a_failure(self): - with open(get_unit_tests_path() + "/scans/kubescape/with_a_failure.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("kubescape") / "with_a_failure.json", encoding="utf-8") as testfile: parser = KubescapeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) diff --git a/unittests/tools/test_legitify_parser.py b/unittests/tools/test_legitify_parser.py index 66f803258b2..f5ffb33f0ba 100644 --- a/unittests/tools/test_legitify_parser.py +++ b/unittests/tools/test_legitify_parser.py @@ -1,11 +1,11 @@ from dojo.models import Test from dojo.tools.legitify.parser import LegitifyParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestLegitifyParser(DojoTestCase): def test_parse_file_with_many_findings(self): - with open(get_unit_tests_path() + "/scans/legitify/legitify_many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("legitify") / "legitify_many_findings.json", encoding="utf-8") as testfile: parser = LegitifyParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(16, len(findings)) @@ -21,7 +21,7 @@ def test_parse_file_with_many_findings(self): endpoint.clean() def test_parse_file_with_one_finding(self): - with open(get_unit_tests_path() + "/scans/legitify/legitify_one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("legitify") / "legitify_one_finding.json", encoding="utf-8") as testfile: parser = LegitifyParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -37,7 +37,7 @@ def test_parse_file_with_one_finding(self): endpoint.clean() def test_parse_file_with_no_findings(self): - with open(get_unit_tests_path() + "/scans/legitify/legitify_no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("legitify") / "legitify_no_findings.json", encoding="utf-8") as testfile: parser = LegitifyParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) diff --git a/unittests/tools/test_mend_parser.py 
diff --git a/unittests/tools/test_mend_parser.py b/unittests/tools/test_mend_parser.py
index 75bbd54bcbb..7aa28f3cd8c 100644
--- a/unittests/tools/test_mend_parser.py
+++ b/unittests/tools/test_mend_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.mend.parser import MendParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestMendParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/mend/okhttp_no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "okhttp_no_vuln.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_findings(self):
-        with open("unittests/scans/mend/okhttp_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "okhttp_one_vuln.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -23,21 +23,21 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
             self.assertEqual(5.3, finding.cvssv3_score)
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/mend/okhttp_many_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "okhttp_many_vuln.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(6, len(findings))
 
     def test_parse_file_with_multiple_vuln_cli_output(self):
         with open(
-            get_unit_tests_path() + "/scans/mend/cli_generated_many_vulns.json", encoding="utf-8",
+            get_unit_tests_scans_path("mend") / "cli_generated_many_vulns.json", encoding="utf-8",
         ) as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(20, len(findings))
 
     def test_parse_file_with_one_sca_vuln_finding(self):
-        with open("unittests/scans/mend/mend_sca_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "mend_sca_vuln.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -46,13 +46,13 @@ def test_parse_file_with_one_sca_vuln_finding(self):
         self.assertEqual("WS-2019-0379 | commons-codec-1.6.jar", finding.title)
 
     def test_parse_file_with_no_vuln_has_no_findings_platform(self):
-        with open("unittests/scans/mend/mend-sca-platform-api3-no-findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "mend-sca-platform-api3-no-findings.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_findings_platform(self):
-        with open("unittests/scans/mend/mend-sca-platform-api3-one-finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mend") / "mend-sca-platform-api3-one-finding.json", encoding="utf-8") as testfile:
             parser = MendParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -64,7 +64,7 @@ def test_parse_file_with_one_vuln_has_one_findings_platform(self):
         self.assertEqual("CVE-2024-51744 | github.com/golang-JWT/jwt-v3.2.2+incompatible", finding.title)
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_platform(self):
with open("unittests/scans/mend/mend-sca-platform-api3-multiple-findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mend") / "mend-sca-platform-api3-multiple-findings.json", encoding="utf-8") as testfile: parser = MendParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) diff --git a/unittests/tools/test_meterian_parser.py b/unittests/tools/test_meterian_parser.py index 2a5a9f3c27b..e119dc8df83 100644 --- a/unittests/tools/test_meterian_parser.py +++ b/unittests/tools/test_meterian_parser.py @@ -1,39 +1,39 @@ from dojo.models import Test from dojo.tools.meterian.parser import MeterianParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestMeterianParser(DojoTestCase): def test_meterianParser_invalid_security_report_raise_ValueError_exception(self): with self.assertRaises(ValueError): - with open("unittests/scans/meterian/report_invalid.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_invalid.json", encoding="utf-8") as testfile: parser = MeterianParser() parser.get_findings(testfile, Test()) def test_meterianParser_report_has_no_finding(self): - with open("unittests/scans/meterian/report_no_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_no_vulns.json", encoding="utf-8") as testfile: parser = MeterianParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_meterianParser_report_has_one_findings(self): - with open("unittests/scans/meterian/report_one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_one_vuln.json", encoding="utf-8") as testfile: parser = MeterianParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_meterianParser_report_has_many_findings(self): - with open("unittests/scans/meterian/report_many_vulns.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_many_vulns.json", encoding="utf-8") as testfile: parser = MeterianParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(20, len(findings)) def test_meterianParser_finding_has_fields(self): - with open("unittests/scans/meterian/report_one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_one_vuln.json", encoding="utf-8") as testfile: parser = MeterianParser() findings = parser.get_findings(testfile, Test()) @@ -63,7 +63,7 @@ def test_meterianParser_finding_has_fields(self): self.assertEqual(["nodejs"], finding.tags) def test_meterianParser_finding_has_no_remediation(self): - with open("unittests/scans/meterian/report_one_vuln_no_remediation.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_one_vuln_no_remediation.json", encoding="utf-8") as testfile: parser = MeterianParser() findings = parser.get_findings(testfile, Test()) @@ -73,7 +73,7 @@ def test_meterianParser_finding_has_no_remediation(self): + "issue for the safety of your application.", finding.mitigation) def test_meterianParser_dual_language_report_has_two_findins(self): - with open("unittests/scans/meterian/report_multi_language.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("meterian") / "report_multi_language.json", encoding="utf-8") as testfile: parser = 
diff --git a/unittests/tools/test_microfocus_webinspect_parser.py b/unittests/tools/test_microfocus_webinspect_parser.py
index 07f43bca936..9683168ad44 100644
--- a/unittests/tools/test_microfocus_webinspect_parser.py
+++ b/unittests/tools/test_microfocus_webinspect_parser.py
@@ -1,6 +1,6 @@
 from dojo.models import Engagement, Product, Test
 from dojo.tools.microfocus_webinspect.parser import MicrofocusWebinspectParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestMicrofocusWebinspectParser(DojoTestCase):
@@ -10,7 +10,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
         test.engagement = Engagement()
         test.engagement.product = Product()
         with open(
-            get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_no_vuln.xml", encoding="utf-8",
+            get_unit_tests_scans_path("microfocus_webinspect") / "Webinspect_no_vuln.xml", encoding="utf-8",
         ) as testfile:
             parser = MicrofocusWebinspectParser()
             findings = parser.get_findings(testfile, test)
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
         test.engagement = Engagement()
         test.engagement.product = Product()
         with open(
-            get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_one_vuln.xml", encoding="utf-8",
+            get_unit_tests_scans_path("microfocus_webinspect") / "Webinspect_one_vuln.xml", encoding="utf-8",
         ) as testfile:
             parser = MicrofocusWebinspectParser()
             findings = parser.get_findings(testfile, test)
@@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
         test.engagement = Engagement()
         test.engagement.product = Product()
         with open(
-            get_unit_tests_path() + "/scans/microfocus_webinspect/Webinspect_many_vuln.xml", encoding="utf-8",
+            get_unit_tests_scans_path("microfocus_webinspect") / "Webinspect_many_vuln.xml", encoding="utf-8",
         )as testfile:
             parser = MicrofocusWebinspectParser()
             findings = parser.get_findings(testfile, test)
@@ -73,7 +73,7 @@ def test_convert_severity(self):
         )
 
     def test_parse_file_version_18_20(self):
-        with open("unittests/scans/microfocus_webinspect/Webinspect_V18_20.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("microfocus_webinspect") / "Webinspect_V18_20.xml", encoding="utf-8") as testfile:
             parser = MicrofocusWebinspectParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -121,7 +121,7 @@ def test_parse_file_issue7690(self):
         test.engagement = Engagement()
         test.engagement.product = Product()
         with open(
-            get_unit_tests_path() + "/scans/microfocus_webinspect/issue_7690.xml", encoding="utf-8",
+            get_unit_tests_scans_path("microfocus_webinspect") / "issue_7690.xml", encoding="utf-8",
         ) as testfile:
             parser = MicrofocusWebinspectParser()
             findings = parser.get_findings(testfile, test)
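Editor's note: the MobSF hunks that follow keep the repository's older testfile = open(...) / testfile.close() pairing and change only the path expression. If those tests were ever reworked further, a with-block is the more idiomatic shape because it closes the handle even when an assertion fails mid-test; a generic sketch (names here are illustrative, not part of the patch):

    from pathlib import Path

    def read_report(path: Path) -> str:
        # The context manager guarantees the file is closed even if the
        # body raises, which a manual close() call cannot.
        with open(path, encoding="utf-8") as fh:
            return fh.read()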
open("unittests/scans/mobsf/report1.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "report1.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -36,7 +36,7 @@ def test_parse_file2(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/report2.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "report2.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -50,7 +50,7 @@ def test_parse_file_3_1_9_android(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/android.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "android.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -73,7 +73,7 @@ def test_parse_file_3_1_9_ios(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/ios.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "ios.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -94,7 +94,7 @@ def test_parse_file_mobsf_3_7_9(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/mobsf_3_7_9.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "mobsf_3_7_9.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -109,7 +109,7 @@ def test_parse_issue_9132(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/issue_9132.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "issue_9132.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -120,7 +120,7 @@ def test_parse_allsafe(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/allsafe.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "allsafe.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() @@ -131,7 +131,7 @@ def test_parse_damnvulnrablebank(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - testfile = open("unittests/scans/mobsf/damnvulnrablebank.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("mobsf") / "damnvulnrablebank.json", encoding="utf-8") parser = MobSFParser() findings = parser.get_findings(testfile, test) testfile.close() diff --git a/unittests/tools/test_mobsf_scorecard_parser.py b/unittests/tools/test_mobsf_scorecard_parser.py index a873e6bcca6..0351ba5adc6 100644 --- a/unittests/tools/test_mobsf_scorecard_parser.py +++ b/unittests/tools/test_mobsf_scorecard_parser.py @@ -1,6 +1,6 @@ from dojo.models import Test from dojo.tools.mobsf_scorecard.parser import MobSFScorecardParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestMobSFScorecardParser(DojoTestCase): @@ -9,7 +9,7 @@ def 
diff --git a/unittests/tools/test_mobsf_scorecard_parser.py b/unittests/tools/test_mobsf_scorecard_parser.py
index a873e6bcca6..0351ba5adc6 100644
--- a/unittests/tools/test_mobsf_scorecard_parser.py
+++ b/unittests/tools/test_mobsf_scorecard_parser.py
@@ -1,6 +1,6 @@
 from dojo.models import Test
 from dojo.tools.mobsf_scorecard.parser import MobSFScorecardParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestMobSFScorecardParser(DojoTestCase):
@@ -9,7 +9,7 @@ def test_parse_android_empty_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvba_4_0_7_android_empty.json", encoding="utf-8") as android_empty_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvba_4_0_7_android_empty.json", encoding="utf-8") as android_empty_file:
             android_empty_findings = parser.get_findings(android_empty_file, Test())
 
         self.assertEqual(0, len(android_empty_findings))
@@ -18,7 +18,7 @@ def test_parse_android_one_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvba_4_0_7_android_one.json", encoding="utf-8") as android_one_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvba_4_0_7_android_one.json", encoding="utf-8") as android_one_file:
             android_one_findings = parser.get_findings(android_one_file, Test())
 
         self.assertEqual(1, len(android_one_findings))
@@ -31,7 +31,7 @@ def test_parse_android_full_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvba_4_0_7_android_full.json", encoding="utf-8") as android_full_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvba_4_0_7_android_full.json", encoding="utf-8") as android_full_file:
             android_full_findings = parser.get_findings(android_full_file, Test())
 
         self.assertEqual(18, len(android_full_findings))
@@ -48,7 +48,7 @@ def test_parse_ios_empty_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvia2_4_0_7_ios_empty.json", encoding="utf-8") as ios_empty_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvia2_4_0_7_ios_empty.json", encoding="utf-8") as ios_empty_file:
             ios_empty_findings = parser.get_findings(ios_empty_file, Test())
 
         self.assertEqual(0, len(ios_empty_findings))
@@ -57,7 +57,7 @@ def test_parse_ios_one_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvia2_4_0_7_ios_one.json", encoding="utf-8") as ios_one_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvia2_4_0_7_ios_one.json", encoding="utf-8") as ios_one_file:
             ios_one_findings = parser.get_findings(ios_one_file, Test())
 
         self.assertEqual(1, len(ios_one_findings))
@@ -70,7 +70,7 @@ def test_parse_ios_full_file(self):
 
         parser = MobSFScorecardParser()
 
-        with open("unittests/scans/mobsf_scorecard/dvia2_4_0_7_ios_full.json", encoding="utf-8") as ios_full_file:
+        with open(get_unit_tests_scans_path("mobsf_scorecard") / "dvia2_4_0_7_ios_full.json", encoding="utf-8") as ios_full_file:
             ios_full_findings = parser.get_findings(ios_full_file, Test())
 
         self.assertEqual(11, len(ios_full_findings))
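Editor's note: every open() call in these diffs, old and new, pins encoding="utf-8". That is deliberate: without an explicit encoding, open() falls back to the locale's preferred encoding, which can differ between CI images and developer machines (see PEP 597). A one-liner shows the platform-dependent default:

    import locale

    # What open() would silently use if encoding were omitted.
    print(locale.getpreferredencoding(False))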
open("unittests/scans/mobsfscan/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mobsfscan") / "many_findings.json", encoding="utf-8") as testfile: parser = MobsfscanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(8, len(findings)) @@ -94,7 +94,7 @@ def test_parse_many_findings(self): self.assertIsNotNone(finding.references) def test_parse_many_findings_cwe_lower(self): - with open("unittests/scans/mobsfscan/many_findings_cwe_lower.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mobsfscan") / "many_findings_cwe_lower.json", encoding="utf-8") as testfile: parser = MobsfscanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(7, len(findings)) diff --git a/unittests/tools/test_mozilla_observatory_parser.py b/unittests/tools/test_mozilla_observatory_parser.py index 96c5d0719e2..d05f28c6e46 100644 --- a/unittests/tools/test_mozilla_observatory_parser.py +++ b/unittests/tools/test_mozilla_observatory_parser.py @@ -1,11 +1,11 @@ from dojo.models import Test from dojo.tools.mozilla_observatory.parser import MozillaObservatoryParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestMozillaObservatoryParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/mozilla_observatory/mozilla_no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mozilla_observatory") / "mozilla_no_vuln.json", encoding="utf-8") as testfile: parser = MozillaObservatoryParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -19,20 +19,20 @@ def test_parse_file_with_no_vuln_has_no_findings(self): self.assertIn("Preloaded via the HTTP Strict Transport Security (HSTS) preloading process", finding.description) def test_parse_file_with_two_vuln_has_two_findings(self): - with open("unittests/scans/mozilla_observatory/mozilla_gitlab_two_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mozilla_observatory") / "mozilla_gitlab_two_vuln.json", encoding="utf-8") as testfile: parser = MozillaObservatoryParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(2, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/mozilla_observatory/mozilla_google_many_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mozilla_observatory") / "mozilla_google_many_vuln.json", encoding="utf-8") as testfile: parser = MozillaObservatoryParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(6, len(findings)) def test_parse_file_cli_mozilla_org(self): """Test from the CLI""" - with open("unittests/scans/mozilla_observatory/mozilla_org.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mozilla_observatory") / "mozilla_org.json", encoding="utf-8") as testfile: parser = MozillaObservatoryParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(12, len(findings)) @@ -49,7 +49,7 @@ def test_parse_file_cli_mozilla_org(self): def test_parse_file_cli_demo(self): """Test from the CLI""" - with open("unittests/scans/mozilla_observatory/demo.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("mozilla_observatory") / "demo.json", encoding="utf-8") as testfile: parser = MozillaObservatoryParser() findings = 
             self.assertEqual(12, len(findings))
@@ -79,7 +79,7 @@ def test_parse_file_cli_demo(self):
 
     def test_parse_file_cli_juicy(self):
         """Test from the CLI"""
-        with open("unittests/scans/mozilla_observatory/juicy.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mozilla_observatory") / "juicy.json", encoding="utf-8") as testfile:
             parser = MozillaObservatoryParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(12, len(findings))
@@ -120,7 +120,7 @@ def test_parse_file_cli_juicy(self):
 
     def test_parse_file_cli_nmap_scanme(self):
         """Test from the CLI"""
-        with open("unittests/scans/mozilla_observatory/nmap_scanme.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mozilla_observatory") / "nmap_scanme.json", encoding="utf-8") as testfile:
             parser = MozillaObservatoryParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(12, len(findings))
@@ -179,7 +179,7 @@ def test_parse_file_cli_nmap_scanme(self):
 
     def test_parse_file_cli_nmap_scanme_no_name_attribute(self):
         """Test from the CLI"""
-        with open("unittests/scans/mozilla_observatory/nmap_scanme_2022.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("mozilla_observatory") / "nmap_scanme_2022.json", encoding="utf-8") as testfile:
             parser = MozillaObservatoryParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(12, len(findings))
diff --git a/unittests/tools/test_ms_defender_parser.py b/unittests/tools/test_ms_defender_parser.py
index 586bc401c58..272c21262f7 100644
--- a/unittests/tools/test_ms_defender_parser.py
+++ b/unittests/tools/test_ms_defender_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.ms_defender.parser import MSDefenderParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestMSDefenderParser(DojoTestCase):
 
     def test_parse_many_findings(self):
-        testfile = open("unittests/scans/ms_defender/report_many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "report_many_vulns.json", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -16,7 +16,7 @@ def test_parse_many_findings(self):
         self.assertEqual("CVE-5678-9887_wjeriowerjoiewrjoweirjeowij", finding.title)
 
     def test_parse_one_finding(self):
-        testfile = open("unittests/scans/ms_defender/report_one_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "report_one_vuln.json", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -27,14 +27,14 @@ def test_parse_one_finding(self):
         self.assertEqual("CVE-1234-5678", finding.unsaved_vulnerability_ids[0])
 
     def test_parse_no_finding(self):
-        testfile = open("unittests/scans/ms_defender/report_no_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "report_no_vuln.json", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_parser_defender_zip(self):
-        testfile = open("unittests/scans/ms_defender/defender.zip", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "defender.zip", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -47,7 +47,7 @@ def test_parser_defender_zip(self):
         self.assertEqual("1.1.1.1", finding.unsaved_endpoints[0].host)
 
     def test_parser_defender_wrong_machines_zip(self):
-        testfile = open("unittests/scans/ms_defender/defender_wrong_machines.zip", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "defender_wrong_machines.zip", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -57,7 +57,7 @@ def test_parser_defender_wrong_machines_zip(self):
         self.assertEqual("CVE-5678-9887_wjeriowerjoiewrjoweirjeowij", finding.title)
 
     def test_parser_defender_multiple_files_zip(self):
-        testfile = open("unittests/scans/ms_defender/defender_multiple_files.zip", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "defender_multiple_files.zip", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -70,7 +70,7 @@ def test_parser_defender_multiple_files_zip(self):
         self.assertEqual("1.1.1.1", finding.unsaved_endpoints[0].host)
 
     def test_parser_defender_issue_11217(self):
-        testfile = open("unittests/scans/ms_defender/issue_11217.zip", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("ms_defender") / "issue_11217.zip", encoding="utf-8")
         parser = MSDefenderParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_nancy_parser.py b/unittests/tools/test_nancy_parser.py
index 5de57ddd2e0..a97a0136884 100644
--- a/unittests/tools/test_nancy_parser.py
+++ b/unittests/tools/test_nancy_parser.py
@@ -1,20 +1,18 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.nancy.parser import NancyParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNancyParser(DojoTestCase):
     def test_nancy_parser_with_no_vuln_has_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/nancy/nancy_no_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nancy") / "nancy_no_findings.json", encoding="utf-8") as testfile:
             parser = NancyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_nancy_parser_with_one_vuln_has_one_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/nancy/nancy_one_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nancy") / "nancy_one_findings.json", encoding="utf-8") as testfile:
             parser = NancyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -28,7 +26,7 @@ def test_nancy_parser_with_one_vuln_has_one_findings(self):
             self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:L/A:N", finding.cvssv3)
 
     def test_nancy_plus_parser_with_many_vuln_has_many_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/nancy/nancy_many_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nancy") / "nancy_many_findings.json", encoding="utf-8") as testfile:
             parser = NancyParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(13, len(findings))
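Editor's note: the nancy hunks above retire a third path idiom, os.path.join() around Path(__file__).parent plus a "../" hop. Assuming the test module sits in unittests/tools/, both spellings resolve to the same file; a comparison sketch:

    from os import path
    from pathlib import Path

    # Old idiom: string join relative to the test module, with a "../" hop.
    old = path.join(Path(__file__).parent, "../scans/nancy/nancy_no_findings.json")

    # New idiom: one Path rooted one level up, no relative hop needed.
    new = Path(__file__).parent.parent / "scans" / "nancy" / "nancy_no_findings.json"

    print(Path(old).resolve() == new.resolve())  # True for a module in unittests/tools/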
diff --git a/unittests/tools/test_netsparker_parser.py b/unittests/tools/test_netsparker_parser.py
index 8537686b97b..0992cd6cb45 100644
--- a/unittests/tools/test_netsparker_parser.py
+++ b/unittests/tools/test_netsparker_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.netsparker.parser import NetsparkerParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNetsparkerParser(DojoTestCase):
     def test_parse_file_with_one_finding(self):
-        with open("unittests/scans/netsparker/netsparker_one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("netsparker") / "netsparker_one_finding.json", encoding="utf-8") as testfile:
             parser = NetsparkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -26,7 +26,7 @@ def test_parse_file_with_one_finding(self):
             self.assertEqual(str(endpoint), "http://php.testsparker.com/auth/login.php")
 
     def test_parse_file_with_multiple_finding(self):
-        with open("unittests/scans/netsparker/netsparker_many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("netsparker") / "netsparker_many_findings.json", encoding="utf-8") as testfile:
             parser = NetsparkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(16, len(findings))
@@ -70,7 +70,7 @@ def test_parse_file_with_multiple_finding(self):
             self.assertEqual(str(endpoint), "http://php.testsparker.com")
 
     def test_parse_file_issue_9816(self):
-        with open("unittests/scans/netsparker/issue_9816.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("netsparker") / "issue_9816.json", encoding="utf-8") as testfile:
             parser = NetsparkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -84,7 +84,7 @@ def test_parse_file_issue_9816(self):
         self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y"))
 
     def test_parse_file_issue_10311(self):
-        with open("unittests/scans/netsparker/issue_10311.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("netsparker") / "issue_10311.json", encoding="utf-8") as testfile:
             parser = NetsparkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -98,7 +98,7 @@ def test_parse_file_issue_10311(self):
         self.assertEqual("03/02/2019", finding.date.strftime("%d/%m/%Y"))
 
     def test_parse_file_issue_11020(self):
-        with open("unittests/scans/netsparker/issue_11020.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("netsparker") / "issue_11020.json", encoding="utf-8") as testfile:
             parser = NetsparkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_neuvector_compliance_parser.py b/unittests/tools/test_neuvector_compliance_parser.py
index 1370d61143a..3e6890bb1fd 100644
--- a/unittests/tools/test_neuvector_compliance_parser.py
+++ b/unittests/tools/test_neuvector_compliance_parser.py
@@ -1,21 +1,19 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.neuvector_compliance.parser import NeuVectorComplianceParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNeuVectorComplianceParser(DojoTestCase):
     def test_parse_file_with_no_vuln(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector_compliance/no_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector_compliance") / "no_vuln.json", encoding="utf-8")
         parser = NeuVectorComplianceParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector_compliance/one_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector_compliance") / "one_vuln.json", encoding="utf-8")
         parser = NeuVectorComplianceParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -23,7 +21,7 @@ def test_parse_file_with_one_vuln(self):
         self.assertEqual("docker_D.1.1.11", findings[0].vuln_id_from_tool)
 
     def test_parse_file_with_many_vulns(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector_compliance/many_vulns.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector_compliance") / "many_vulns.json", encoding="utf-8")
         parser = NeuVectorComplianceParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_neuvector_parser.py b/unittests/tools/test_neuvector_parser.py
index ed4507dd9dc..4064c371b20 100644
--- a/unittests/tools/test_neuvector_parser.py
+++ b/unittests/tools/test_neuvector_parser.py
@@ -1,21 +1,19 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.neuvector.parser import NeuVectorParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNeuVectorParser(DojoTestCase):
     def test_parse_file_with_no_vuln(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector/no_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector") / "no_vuln.json", encoding="utf-8")
         parser = NeuVectorParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector/one_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector") / "one_vuln.json", encoding="utf-8")
         parser = NeuVectorParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -24,7 +22,7 @@ def test_parse_file_with_one_vuln(self):
         self.assertEqual("CVE-2015-8356", findings[0].unsaved_vulnerability_ids[0])
 
     def test_parse_file_with_many_vulns(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/neuvector/many_vulns.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("neuvector") / "many_vulns.json", encoding="utf-8")
         parser = NeuVectorParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_nexpose_parser.py b/unittests/tools/test_nexpose_parser.py
index c6fc0b116ea..c512ae51724 100644
--- a/unittests/tools/test_nexpose_parser.py
+++ b/unittests/tools/test_nexpose_parser.py
@@ -4,13 +4,13 @@
 
 from dojo.models import Engagement, Product, Test
 from dojo.tools.nexpose.parser import NexposeParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNexposeParser(DojoTestCase):
 
     def test_nexpose_parser_has_no_finding(self):
-        with open("unittests/scans/nexpose/no_vuln.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nexpose") / "no_vuln.xml", encoding="utf-8") as testfile:
             parser = NexposeParser()
             findings = parser.get_findings(testfile, Test())
 
@@ -29,7 +29,7 @@ def test_nexpose_parser_has_many_finding(self):
         test = Test()
         test.engagement = Engagement()
         test.engagement.product = Product()
-        with open("unittests/scans/nexpose/many_vulns.xml", encoding="utf-8") as testfile:
open("unittests/scans/nexpose/many_vulns.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nexpose") / "many_vulns.xml", encoding="utf-8") as testfile: parser = NexposeParser() findings = parser.get_findings(testfile, test) @@ -135,7 +135,7 @@ def test_nexpose_parser_has_many_finding(self): self.assertEqual("udp", endpoint.protocol) def test_nexpose_parser_tests_outside_endpoint(self): - with open("unittests/scans/nexpose/report_auth.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nexpose") / "report_auth.xml", encoding="utf-8") as testfile: parser = NexposeParser() findings = parser.get_findings(testfile, Test()) @@ -167,7 +167,7 @@ def test_nexpose_parser_tests_outside_endpoint(self): self.assertIsNone(finding.unsaved_vulnerability_ids) def test_nexpose_parser_dns(self): - with open("unittests/scans/nexpose/dns.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nexpose") / "dns.xml", encoding="utf-8") as testfile: parser = NexposeParser() findings = parser.get_findings(testfile, Test()) @@ -208,7 +208,7 @@ def test_nexpose_parser_dns(self): @override_settings(USE_FIRST_SEEN=True) def test_nexpose_parser_use_first_seen(self): - with open("unittests/scans/nexpose/dns.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nexpose") / "dns.xml", encoding="utf-8") as testfile: parser = NexposeParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_nikto_parser.py b/unittests/tools/test_nikto_parser.py index 1c4da0b6a19..9fd29fd301c 100644 --- a/unittests/tools/test_nikto_parser.py +++ b/unittests/tools/test_nikto_parser.py @@ -1,6 +1,6 @@ from dojo.models import Engagement, Product, Test from dojo.tools.nikto.parser import NiktoParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestNiktoParser(DojoTestCase): @@ -10,7 +10,7 @@ def test_parse_file_with_old_format(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - with open("unittests/scans/nikto/nikto-report-old-format.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "nikto-report-old-format.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, test) for finding in findings: @@ -19,7 +19,7 @@ def test_parse_file_with_old_format(self): self.assertEqual(1, len(findings)) def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/nikto/nikto-report-zero-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "nikto-report-zero-vuln.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) @@ -29,7 +29,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - with open("unittests/scans/nikto/nikto-report-one-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "nikto-report-one-vuln.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, test) for finding in findings: @@ -42,7 +42,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self): engagement = Engagement() engagement.product = Product() test.engagement = engagement - with 
open("unittests/scans/nikto/nikto-report-many-vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "nikto-report-many-vuln.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, test) for finding in findings: @@ -51,7 +51,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self): self.assertEqual(len(findings), 10) def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/nikto/juice-shop.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "juice-shop.json", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -74,7 +74,7 @@ def test_parse_file_json_with_multiple_vuln_has_multiple_findings(self): self.assertEqual(140, len(finding.unsaved_endpoints)) def test_parse_file_json_with_uri_errors(self): - with open("unittests/scans/nikto/nikto-output.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "nikto-output.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -103,7 +103,7 @@ def test_parse_file_json_with_uri_errors(self): self.assertEqual("examples/servlets/index.html", endpoint.path) def test_parse_file_json_another(self): - with open("unittests/scans/nikto/tdh.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "tdh.json", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -134,7 +134,7 @@ def test_parse_file_json_another(self): self.assertIsNone(endpoint.path) def test_parse_file_xml_another(self): - with open("unittests/scans/nikto/tdh.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "tdh.xml", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -176,7 +176,7 @@ def test_parse_file_xml_another(self): self.assertIsNone(endpoint.path) def test_parse_file_issue_9274(self): - with open("unittests/scans/nikto/issue_9274.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nikto") / "issue_9274.json", encoding="utf-8") as testfile: parser = NiktoParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_nmap_parser.py b/unittests/tools/test_nmap_parser.py index 5a36d43cc15..9dbbfde77db 100644 --- a/unittests/tools/test_nmap_parser.py +++ b/unittests/tools/test_nmap_parser.py @@ -2,13 +2,13 @@ from dojo.models import Test from dojo.tools.nmap.parser import NmapParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestNmapParser(DojoTestCase): def test_parse_file_with_no_open_ports_has_no_findings(self): - with open("unittests/scans/nmap/nmap_0port.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("nmap") / "nmap_0port.xml", encoding="utf-8") as testfile: parser = NmapParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -17,7 +17,7 @@ def test_parse_file_with_no_open_ports_has_no_findings(self): self.assertEqual(0, len(findings)) def test_parse_file_with_single_open_ports_has_single_finding(self): - with open("unittests/scans/nmap/nmap_1port.xml", encoding="utf-8") as 
+        with open(get_unit_tests_scans_path("nmap") / "nmap_1port.xml", encoding="utf-8") as testfile:
             parser = NmapParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -37,7 +37,7 @@ def test_parse_file_with_single_open_ports_has_single_finding(self):
             self.assertEqual("tcp", endpoint.protocol)
 
     def test_parse_file_with_multiple_open_ports_has_multiple_finding(self):
-        with open("unittests/scans/nmap/nmap_multiple_port.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nmap") / "nmap_multiple_port.xml", encoding="utf-8") as testfile:
             parser = NmapParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -57,7 +57,7 @@ def test_parse_file_with_multiple_open_ports_has_multiple_finding(self):
             self.assertEqual("tcp", endpoint.protocol)
 
     def test_parse_file_with_script_vulner(self):
-        with open("unittests/scans/nmap/nmap_script_vulners.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nmap") / "nmap_script_vulners.xml", encoding="utf-8") as testfile:
             parser = NmapParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -83,7 +83,7 @@ def test_parse_file_with_script_vulner(self):
             self.assertEqual(datetime.datetime(2020, 2, 17, 9, 7, 25), findings[2].date)
 
     def test_parse_issue4406(self):
-        with open("unittests/scans/nmap/issue4406.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nmap") / "issue4406.xml", encoding="utf-8") as testfile:
             parser = NmapParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_noseyparker_parser.py b/unittests/tools/test_noseyparker_parser.py
index 714e8a4fa7b..595017a618b 100644
--- a/unittests/tools/test_noseyparker_parser.py
+++ b/unittests/tools/test_noseyparker_parser.py
@@ -1,19 +1,18 @@
-from django.test import TestCase
-
 from dojo.models import Test
 from dojo.tools.noseyparker.parser import NoseyParkerParser
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
-class TestNoseyParkerParser(TestCase):
+class TestNoseyParkerParser(DojoTestCase):
    def test_noseyparker_parser__no_vulns(self):
-        with open("unittests/scans/noseyparker/noseyparker_zero_vul.jsonl", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("noseyparker") / "noseyparker_zero_vul.jsonl", encoding="utf-8") as testfile:
             parser = NoseyParkerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_noseyparker_parser_one_vuln(self):
-        with open("unittests/scans/noseyparker/noseyparker_one_vul.jsonl", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("noseyparker") / "noseyparker_one_vul.jsonl", encoding="utf-8") as testfile:
             parser = NoseyParkerParser()
             findings = parser.get_findings(testfile, Test())
             finding = findings[0]
@@ -24,7 +23,7 @@ def test_noseyparker_parser_one_vuln(self):
 
     def test_noseyparker_parser_many_vulns(self):
         # Testfile contains 5 lines (Middle 2 are duplicates and line #4 has 2 of the same exact matches)
-        with open("unittests/scans/noseyparker/noseyparker_many_vul.jsonl", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("noseyparker") / "noseyparker_many_vul.jsonl", encoding="utf-8") as testfile:
             parser = NoseyParkerParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -34,7 +33,7 @@ def test_noseyparker_parser_many_vulns(self):
 
     def test_noseyparker_parser_error(self):
         with self.assertRaises(ValueError) as context:
-            with open("unittests/scans/noseyparker/empty_with_error.json", encoding="utf-8") as testfile:
+            with open(get_unit_tests_scans_path("noseyparker") / "empty_with_error.json", encoding="utf-8") as testfile:
                 parser = NoseyParkerParser()
                 findings = parser.get_findings(testfile, Test())
                 testfile.close()
diff --git a/unittests/tools/test_npm_audit_7_plus_parser.py b/unittests/tools/test_npm_audit_7_plus_parser.py
index 0d937584c82..a6e293c105f 100644
--- a/unittests/tools/test_npm_audit_7_plus_parser.py
+++ b/unittests/tools/test_npm_audit_7_plus_parser.py
@@ -1,21 +1,19 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.npm_audit_7_plus.parser import NpmAudit7PlusParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNpmAudit7PlusParser(DojoTestCase):
     def test_npm_audit_7_plus_parser_with_no_vuln_has_no_findings(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/npm_audit_7_plus/no_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("npm_audit_7_plus") / "no_vuln.json", encoding="utf-8")
         parser = NpmAudit7PlusParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_npm_audit_7_plus_parser_with_one_vuln_has_one_findings(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/npm_audit_7_plus/one_vuln.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("npm_audit_7_plus") / "one_vuln.json", encoding="utf-8")
         parser = NpmAudit7PlusParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -29,7 +27,7 @@ def test_npm_audit_7_plus_parser_with_one_vuln_has_one_findings(self):
         self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
 
     def test_npm_audit_7_plus_parser_with_many_vuln_has_many_findings(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/npm_audit_7_plus/many_vulns.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("npm_audit_7_plus") / "many_vulns.json", encoding="utf-8")
         parser = NpmAudit7PlusParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -43,7 +41,7 @@ def test_npm_audit_7_plus_parser_with_many_vuln_has_many_findings(self):
         self.assertEqual("@vercel/fun", finding.title)
 
     def test_npm_audit_7_plus_parser_issue_10801(self):
-        testfile = open(path.join(Path(__file__).parent, "../scans/npm_audit_7_plus/issue_10801.json"), encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("npm_audit_7_plus") / "issue_10801.json", encoding="utf-8")
         parser = NpmAudit7PlusParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
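Editor's note: alongside the path changes, the noseyparker hunks earlier in this patch moved a test class from django.test.TestCase onto DojoTestCase, the shared base used by the other parser tests. DojoTestCase is defined in unittests/dojo_test_case.py, outside this excerpt; its minimal shape is presumably along these lines (a hedged sketch, not the actual class body):

    from django.test import TestCase

    class DojoTestCase(TestCase):
        # A common base class gives every parser test one place for shared
        # fixtures and helper imports, instead of each module subclassing
        # django.test.TestCase directly.
        pass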
open(get_unit_tests_scans_path("npm_audit") / "no_vuln.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_npm_audit_parser_with_one_criticle_vuln_has_one_findings(self): - with open(path.join(Path(__file__).parent, "../scans/npm_audit/one_vuln.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "one_vuln.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -23,7 +21,7 @@ def test_npm_audit_parser_with_one_criticle_vuln_has_one_findings(self): self.assertEqual("1.9.2", findings[0].component_version) def test_npm_audit_parser_with_many_vuln_has_many_findings(self): - with open(path.join(Path(__file__).parent, "../scans/npm_audit/many_vuln.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "many_vuln.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) @@ -40,7 +38,7 @@ def test_npm_audit_parser_with_many_vuln_has_many_findings(self): def test_npm_audit_parser_multiple_cwes_per_finding(self): # cwes formatted as escaped list: "cwe": "[\"CWE-346\",\"CWE-453\"]", - with open(path.join(Path(__file__).parent, "../scans/npm_audit/multiple_cwes.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "multiple_cwes.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(41, len(findings)) @@ -49,14 +47,14 @@ def test_npm_audit_parser_multiple_cwes_per_finding(self): def test_npm_audit_parser_multiple_cwes_per_finding_list(self): # cwes formatted as proper list: "cwe": ["CWE-918","CWE-1333"], - with open(path.join(Path(__file__).parent, "../scans/npm_audit/multiple_cwes2.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "multiple_cwes2.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(6, len(findings)) self.assertEqual(918, findings[0].cwe) def test_npm_audit_parser_with_one_criticle_vuln_has_null_as_cwe(self): - with open(path.join(Path(__file__).parent, "../scans/npm_audit/cwe_null.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "cwe_null.json", encoding="utf-8") as testfile: parser = NpmAuditParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -66,7 +64,7 @@ def test_npm_audit_parser_with_one_criticle_vuln_has_null_as_cwe(self): def test_npm_audit_parser_empty_with_error(self): with self.assertRaises(ValueError) as context: - with open(path.join(Path(__file__).parent, "../scans/npm_audit/empty_with_error.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "empty_with_error.json", encoding="utf-8") as testfile: parser = NpmAuditParser() parser.get_findings(testfile, Test()) @@ -75,7 +73,7 @@ def test_npm_audit_parser_empty_with_error(self): def test_npm_audit_parser_many_vuln_npm7(self): with self.assertRaises(ValueError) as context: - with open(path.join(Path(__file__).parent, "../scans/npm_audit/many_vuln_npm7.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("npm_audit") / "many_vuln_npm7.json", encoding="utf-8") as testfile: parser = 
@@ -91,7 +89,7 @@ def test_npm_audit_censored_hash(self):
         self.assertEqual(censored_path, "censored_by_npm_audit>censored_by_npm_audit>lodash")
 
     def test_npm_audit_parser_issue_7897(self):
-        with open(path.join(Path(__file__).parent, "../scans/npm_audit/issue_7897.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("npm_audit") / "issue_7897.json", encoding="utf-8") as testfile:
             parser = NpmAuditParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(5, len(findings))
diff --git a/unittests/tools/test_nsp_parser.py b/unittests/tools/test_nsp_parser.py
index 289c7a996ce..099725e8143 100644
--- a/unittests/tools/test_nsp_parser.py
+++ b/unittests/tools/test_nsp_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.nsp.parser import NspParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNspParser(DojoTestCase):
 
     def test_parse_none(self):
         parser = NspParser()
-        with open("unittests/scans/nsp/none.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("nsp") / "none.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
         self.assertEqual(0, len(findings))
 
     def test_parse_ok(self):
         parser = NspParser()
-        with open("unittests/scans/nsp/scan.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("nsp") / "scan.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
         self.assertEqual(9, len(findings))
diff --git a/unittests/tools/test_nuclei_parser.py b/unittests/tools/test_nuclei_parser.py
index 6fb71d0b2f6..9389b891279 100644
--- a/unittests/tools/test_nuclei_parser.py
+++ b/unittests/tools/test_nuclei_parser.py
@@ -4,25 +4,25 @@
 
 from dojo.models import Test, Test_Type
 from dojo.tools.nuclei.parser import NucleiParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestNucleiParser(DojoTestCase):
 
     def test_parse_no_empty(self):
-        with open("unittests/scans/nuclei/empty.jsonl", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "empty.jsonl", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_no_findings(self):
-        with open("unittests/scans/nuclei/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "no_findings.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_issue_9201(self):
-        with open("unittests/scans/nuclei/issue_9201.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "issue_9201.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -32,7 +32,7 @@ def test_parse_issue_9201(self):
             self.assertEqual("example.com", finding.unsaved_endpoints[0].host)
 
     def test_parse_many_findings(self):
-        with open("unittests/scans/nuclei/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "many_findings.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -151,7 +151,7 @@ def test_parse_many_findings(self):
             self.assertEqual("mysql-native-password-bruteforce", finding.vuln_id_from_tool)
 
     def test_parse_many_findings_new(self):
-        with open("unittests/scans/nuclei/many_findings_new.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "many_findings_new.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -192,7 +192,7 @@ def test_parse_many_findings_new(self):
             self.assertEqual("prometheus-metrics", finding.vuln_id_from_tool)
 
     def test_parse_many_findings_third(self):
-        with open("unittests/scans/nuclei/many_findings_third.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "many_findings_third.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -226,7 +226,7 @@ def test_parse_many_findings_third(self):
             self.assertEqual("asp.net-favicon", finding.component_name)
 
     def test_parse_many_findings_v3(self):
-        with open("unittests/scans/nuclei/multiple_v3.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "multiple_v3.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -238,7 +238,7 @@ def test_parse_many_findings_v3(self):
             self.assertEqual("Info", finding.severity)
 
     def test_parse_invalid_cwe(self):
-        with open("unittests/scans/nuclei/invalid_cwe.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "invalid_cwe.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -249,7 +249,7 @@ def test_parse_invalid_cwe(self):
             self.assertEqual(0, finding.cwe)
 
     def test_parse_same_template_multiple_matches(self):
-        with open("unittests/scans/nuclei/multiple_matches.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("nuclei") / "multiple_matches.json", encoding="utf-8") as testfile:
             parser = NucleiParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_openscap_parser.py b/unittests/tools/test_openscap_parser.py
index 0c2d5625f46..37c915cc817 100644
--- a/unittests/tools/test_openscap_parser.py
+++ b/unittests/tools/test_openscap_parser.py
@@ -1,19 +1,19 @@
 from dojo.models import Test
 from dojo.tools.openscap.parser import OpenscapParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestOpenscapParser(DojoTestCase):
 
     def test_openscap_parser_with_no_vuln_has_no_findings(self):
-        testfile = open("unittests/scans/openscap/no_vuln_rhsa.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("openscap") / "no_vuln_rhsa.xml", encoding="utf-8")
         parser = OpenscapParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_openscap_parser_with_one_criticle_vuln_has_one_findings(self):
-        testfile = open("unittests/scans/openscap/one_vuln_rhsa.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("openscap") / "one_vuln_rhsa.xml", encoding="utf-8")
         parser = OpenscapParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -26,7 +26,7 @@ def test_openscap_parser_with_one_criticle_vuln_has_one_findings(self):
self.assertEqual("CVE-2005-1038", finding.unsaved_vulnerability_ids[0]) def test_openscap_parser_with_many_vuln_has_many_findings(self): - testfile = open("unittests/scans/openscap/many_vuln_rhsa.xml", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("openscap") / "many_vuln_rhsa.xml", encoding="utf-8") parser = OpenscapParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -51,7 +51,7 @@ def test_openscap_parser_with_many_vuln_has_many_findings(self): self.assertEqual("192.168.100.194", finding.unsaved_endpoints[6].host) def test_parser_from_spec_1_1_3(self): - testfile = open("unittests/scans/openscap/ios-sample-v1.1.3.xccdf.xml", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("openscap") / "ios-sample-v1.1.3.xccdf.xml", encoding="utf-8") parser = OpenscapParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_openvas_parser.py b/unittests/tools/test_openvas_parser.py index 5a2ba5a17ce..a5da585a643 100644 --- a/unittests/tools/test_openvas_parser.py +++ b/unittests/tools/test_openvas_parser.py @@ -1,11 +1,11 @@ from dojo.models import Engagement, Product, Test from dojo.tools.openvas.parser import OpenVASParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestOpenVASParser(DojoTestCase): def test_openvas_csv_one_vuln(self): - with open("unittests/scans/openvas/one_vuln.csv", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "one_vuln.csv", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -26,7 +26,7 @@ def test_openvas_csv_one_vuln(self): self.assertEqual(22, findings[0].unsaved_endpoints[0].port) def test_openvas_csv_many_vuln(self): - with open("unittests/scans/openvas/many_vuln.csv", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "many_vuln.csv", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -51,7 +51,7 @@ def test_openvas_csv_many_vuln(self): self.assertEqual(finding.unsaved_vulnerability_ids[0], "CVE-2011-3389") def test_openvas_csv_report_usingCVE(self): - with open("unittests/scans/openvas/report_using_CVE.csv", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "report_using_CVE.csv", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -67,7 +67,7 @@ def test_openvas_csv_report_usingCVE(self): self.assertEqual(finding.unsaved_vulnerability_ids[0], "CVE-2014-0117") def test_openvas_csv_report_usingOpenVAS(self): - with open("unittests/scans/openvas/report_using_openVAS.csv", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "report_using_openVAS.csv", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -83,7 +83,7 @@ def test_openvas_csv_report_usingOpenVAS(self): self.assertEqual(finding.unsaved_vulnerability_ids, []) def test_openvas_xml_no_vuln(self): - with open("unittests/scans/openvas/no_vuln.xml", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "no_vuln.xml", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -92,7 +92,7 @@ def test_openvas_xml_no_vuln(self): self.assertEqual(0, len(findings)) def test_openvas_xml_one_vuln(self): - with 
open("unittests/scans/openvas/one_vuln.xml", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "one_vuln.xml", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() @@ -108,7 +108,7 @@ def test_openvas_xml_one_vuln(self): self.assertEqual("Critical", finding.severity) def test_openvas_xml_many_vuln(self): - with open("unittests/scans/openvas/many_vuln.xml", encoding="utf-8") as f: + with open(get_unit_tests_scans_path("openvas") / "many_vuln.xml", encoding="utf-8") as f: test = Test() test.engagement = Engagement() test.engagement.product = Product() diff --git a/unittests/tools/test_ort_parser.py b/unittests/tools/test_ort_parser.py index d42098d1845..b33a222c507 100644 --- a/unittests/tools/test_ort_parser.py +++ b/unittests/tools/test_ort_parser.py @@ -1,6 +1,6 @@ from dojo.models import Test from dojo.tools.ort.parser import OrtParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestOrtParser(DojoTestCase): @@ -11,7 +11,7 @@ def test_parse_without_file_has_no_finding(self): def test_parse_file_has_many_finding_one_tool(self): testfile = open( - get_unit_tests_path() + "/scans/ort/evaluated-model-reporter-test-output.json", encoding="utf-8", + get_unit_tests_scans_path("ort") / "evaluated-model-reporter-test-output.json", encoding="utf-8", ) parser = OrtParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_ossindex_devaudit_parser.py b/unittests/tools/test_ossindex_devaudit_parser.py index 9b11e19cee9..ead37a61124 100644 --- a/unittests/tools/test_ossindex_devaudit_parser.py +++ b/unittests/tools/test_ossindex_devaudit_parser.py @@ -1,13 +1,13 @@ from dojo.models import Test from dojo.tools.ossindex_devaudit.parser import OssIndexDevauditParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestOssIndexDevauditParser(DojoTestCase): def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_no_vuln.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_no_vuln.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -16,7 +16,7 @@ def test_ossindex_devaudit_parser_with_no_vulns_has_no_findings(self): def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_one_vuln.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -25,7 +25,7 @@ def test_ossindex_devaudit_parser_with_one_critical_vuln_has_one_finding(self): def test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_multiple_vulns.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_multiple_vulns.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -34,7 +34,7 @@ def 
test_ossindex_devaudit_parser_with_multiple_vulns_has_multiple_finding(self) def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_vuln_no_cvssscore.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_vuln_no_cvssscore.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -43,7 +43,7 @@ def test_ossindex_devaudit_parser_with_no_cve_returns_info_severity(self): def test_ossindex_devaudit_parser_with_reference_shows_reference(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_one_vuln.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_one_vuln.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -55,7 +55,7 @@ def test_ossindex_devaudit_parser_with_reference_shows_reference(self): def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_reference.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_empty_reference.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -66,7 +66,7 @@ def test_ossindex_devaudit_parser_with_empty_reference_shows_empty_reference(sel def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_reference.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_missing_reference.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -77,7 +77,7 @@ def test_ossindex_devaudit_parser_with_missing_reference_shows_empty(self): def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_missing_cwe.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_missing_cwe.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -88,7 +88,7 @@ def test_ossindex_devaudit_parser_with_missing_cwe_shows_1035(self): def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_null_cwe.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_null_cwe.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -99,7 +99,7 @@ def test_ossindex_devaudit_parser_with_null_cwe_shows_1035(self): def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_empty_cwe.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_empty_cwe.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -110,7 +110,7 @@ def test_ossindex_devaudit_parser_with_empty_cwe_shows_1035(self): def test_ossindex_devaudit_parser_get_severity_shows_info(self): testfile = open( - get_unit_tests_path() 
+ "/scans/ossindex_devaudit/ossindex_devaudit_severity_info.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_severity_info.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -121,7 +121,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_info(self): def test_ossindex_devaudit_parser_get_severity_shows_critical(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_critical.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_severity_critical.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -132,7 +132,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_critical(self): def test_ossindex_devaudit_parser_get_severity_shows_high(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_high.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_severity_high.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -143,7 +143,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_high(self): def test_ossindex_devaudit_parser_get_severity_shows_medium(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_medium.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_severity_medium.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) @@ -154,7 +154,7 @@ def test_ossindex_devaudit_parser_get_severity_shows_medium(self): def test_ossindex_devaudit_parser_get_severity_shows_low(self): testfile = open( - get_unit_tests_path() + "/scans/ossindex_devaudit/ossindex_devaudit_severity_low.json", encoding="utf-8", + get_unit_tests_scans_path("ossindex_devaudit") / "ossindex_devaudit_severity_low.json", encoding="utf-8", ) parser = OssIndexDevauditParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_osv_scanner_parser.py b/unittests/tools/test_osv_scanner_parser.py index 196ff093367..c8634c72c75 100644 --- a/unittests/tools/test_osv_scanner_parser.py +++ b/unittests/tools/test_osv_scanner_parser.py @@ -1,20 +1,18 @@ -from os import path -from pathlib import Path from dojo.models import Test from dojo.tools.osv_scanner.parser import OSVScannerParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestOSVScannerParser(DojoTestCase): def test_no_findings(self): - with open(path.join(Path(__file__).parent, "../scans/osv_scanner/no_findings.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("osv_scanner") / "no_findings.json", encoding="utf-8") as testfile: parser = OSVScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_some_findings(self): - with open(path.join(Path(__file__).parent, "../scans/osv_scanner/some_findings.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("osv_scanner") / "some_findings.json", encoding="utf-8") as testfile: parser = OSVScannerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -26,7 +24,7 @@ def test_some_findings(self): 
diff --git a/unittests/tools/test_osv_scanner_parser.py b/unittests/tools/test_osv_scanner_parser.py
index 196ff093367..c8634c72c75 100644
--- a/unittests/tools/test_osv_scanner_parser.py
+++ b/unittests/tools/test_osv_scanner_parser.py
@@ -1,20 +1,18 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.osv_scanner.parser import OSVScannerParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestOSVScannerParser(DojoTestCase):
     def test_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/osv_scanner/no_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("osv_scanner") / "no_findings.json", encoding="utf-8") as testfile:
             parser = OSVScannerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_some_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/osv_scanner/some_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("osv_scanner") / "some_findings.json", encoding="utf-8") as testfile:
             parser = OSVScannerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -26,7 +24,7 @@ def test_some_findings(self):
         self.assertEqual(finding.severity, "Low")
 
     def test_many_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/osv_scanner/many_findings.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("osv_scanner") / "many_findings.json", encoding="utf-8") as testfile:
             parser = OSVScannerParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(66, len(findings))
diff --git a/unittests/tools/test_outpost24_parser.py b/unittests/tools/test_outpost24_parser.py
index fd132e649b3..4807fe7fac8 100644
--- a/unittests/tools/test_outpost24_parser.py
+++ b/unittests/tools/test_outpost24_parser.py
@@ -1,6 +1,6 @@
 from dojo.models import Test
 from dojo.tools.outpost24.parser import Outpost24Parser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestOutpost24Parser(DojoTestCase):
@@ -21,10 +21,10 @@ def assert_file_has_n_items(self, filename, item_count):
         self.assertEqual("CVE-2019-9315", findings[0].unsaved_vulnerability_ids[0])
 
     def test_parser_no_items(self):
-        self.assert_file_has_n_items(get_unit_tests_path() + "/scans/outpost24/none.xml", 0)
+        self.assert_file_has_n_items(get_unit_tests_scans_path("outpost24") / "none.xml", 0)
 
     def test_parser_one_item(self):
-        self.assert_file_has_n_items(get_unit_tests_path() + "/scans/outpost24/one.xml", 1)
+        self.assert_file_has_n_items(get_unit_tests_scans_path("outpost24") / "one.xml", 1)
 
     def test_parser_sample_items(self):
-        self.assert_file_has_n_items(get_unit_tests_path() + "/scans/outpost24/sample.xml", 24)
+        self.assert_file_has_n_items(get_unit_tests_scans_path("outpost24") / "sample.xml", 24)
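The outpost24 tests funnel every report through the assert_file_has_n_items() helper, and the helper body itself needs no change: open() accepts both str and os.PathLike arguments, normalizing them through the os.fspath() protocol. A standalone illustration of that property (runnable on its own, independent of DefectDojo):

    import os
    from pathlib import Path

    # os.fspath() is the protocol open() applies internally; str and Path
    # both satisfy it, so a helper written for string paths keeps working
    # unchanged after a pathlib migration.
    print(os.fspath("unittests/scans/outpost24/none.xml"))
    print(os.fspath(Path("unittests/scans/outpost24") / "none.xml"))
    # Both calls print the same relative path on POSIX systems.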
diff --git a/unittests/tools/test_php_security_audit_v2_parser.py b/unittests/tools/test_php_security_audit_v2_parser.py
index 4ae779e1304..216738bf3cd 100644
--- a/unittests/tools/test_php_security_audit_v2_parser.py
+++ b/unittests/tools/test_php_security_audit_v2_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.php_security_audit_v2.parser import PhpSecurityAuditV2Parser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPhpSecurityAuditV2ParserParser(DojoTestCase):
 
     def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
-        testfile = open("unittests/scans/php_security_audit_v2/php_security_audit_v2.0.0_unformatted.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("php_security_audit_v2") / "php_security_audit_v2.0.0_unformatted.json", encoding="utf-8")
         parser = PhpSecurityAuditV2Parser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -23,7 +23,7 @@ def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
 
     def test_php_symfony_security_check_parser_with_many_vuln(self):
         """New report with latest version"""
-        testfile = open("unittests/scans/php_security_audit_v2/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("php_security_audit_v2") / "many_vulns.json", encoding="utf-8")
         parser = PhpSecurityAuditV2Parser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_php_symfony_security_check_parser.py b/unittests/tools/test_php_symfony_security_check_parser.py
index 6786d54b9c6..fa442000a10 100644
--- a/unittests/tools/test_php_symfony_security_check_parser.py
+++ b/unittests/tools/test_php_symfony_security_check_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.php_symfony_security_check.parser import PhpSymfonySecurityCheckParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPhpSymfonySecurityCheckerParser(DojoTestCase):
 
     def test_php_symfony_security_check_parser_with_no_vuln_has_no_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_no_vuln.json", encoding="utf-8",
+            get_unit_tests_scans_path("php_symfony_security_check") / "php_symfony_no_vuln.json", encoding="utf-8",
         )
         parser = PhpSymfonySecurityCheckParser()
         findings = parser.get_findings(testfile, Test())
@@ -19,7 +19,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
         self,
     ):
         testfile = open(
-            get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_one_vuln.json", encoding="utf-8",
+            get_unit_tests_scans_path("php_symfony_security_check") / "php_symfony_one_vuln.json", encoding="utf-8",
         )
         parser = PhpSymfonySecurityCheckParser()
         findings = parser.get_findings(testfile, Test())
@@ -28,7 +28,7 @@ def test_php_symfony_security_check_parser_with_one_criticle_vuln_has_one_findin
 
     def test_php_symfony_security_check_parser_with_many_vuln_has_many_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/php_symfony_security_check/php_symfony_many_vuln.json", encoding="utf-8",
+            get_unit_tests_scans_path("php_symfony_security_check") / "php_symfony_many_vuln.json", encoding="utf-8",
         )
         parser = PhpSymfonySecurityCheckParser()
         findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_pip_audit_parser.py b/unittests/tools/test_pip_audit_parser.py
index 44c4e84085a..6a4d1b52509 100644
--- a/unittests/tools/test_pip_audit_parser.py
+++ b/unittests/tools/test_pip_audit_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.pip_audit.parser import PipAuditParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPipAuditParser(DojoTestCase):
 
     def test_parser_empty(self):
-        testfiles = ["unittests/scans/pip_audit/empty.json",
-                     "unittests/scans/pip_audit/empty_new.json"]
+        testfiles = [get_unit_tests_scans_path("pip_audit") / "empty.json",
+                     get_unit_tests_scans_path("pip_audit") / "empty_new.json"]
         for path in testfiles:
             testfile = open(path, encoding="utf-8")
             parser = PipAuditParser()
@@ -16,8 +16,8 @@ def test_parser_empty(self):
             self.assertEqual(0, len(findings))
 
     def test_parser_zero_findings(self):
-        testfiles = ["unittests/scans/pip_audit/zero_vulns.json",
-                     "unittests/scans/pip_audit/zero_vulns_new.json"]
+        testfiles = [get_unit_tests_scans_path("pip_audit") / "zero_vulns.json",
+                     get_unit_tests_scans_path("pip_audit") / "zero_vulns_new.json"]
         for path in testfiles:
             testfile = open(path, encoding="utf-8")
             parser = PipAuditParser()
@@ -26,8 +26,8 @@ def test_parser_zero_findings(self):
             self.assertEqual(0, len(findings))
 
     def test_parser_many_vulns(self):
-        testfiles = ["unittests/scans/pip_audit/many_vulns.json",
-                     "unittests/scans/pip_audit/many_vulns_new.json"]
+        testfiles = [get_unit_tests_scans_path("pip_audit") / "many_vulns.json",
+                     get_unit_tests_scans_path("pip_audit") / "many_vulns_new.json"]
         for path in testfiles:
             testfile = open(path, encoding="utf-8")
             parser = PipAuditParser()
diff --git a/unittests/tools/test_pmd_parser.py b/unittests/tools/test_pmd_parser.py
index 5fbc74d9f76..6cd691f6519 100644
--- a/unittests/tools/test_pmd_parser.py
+++ b/unittests/tools/test_pmd_parser.py
@@ -1,24 +1,24 @@
 from dojo.models import Test
 from dojo.tools.pmd.parser import PmdParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPMDParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/pmd/pmd_no_vuln.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pmd") / "pmd_no_vuln.csv", encoding="utf-8") as testfile:
             parser = PmdParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_findings(self):
-        with open("unittests/scans/pmd/pmd_one_vuln.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pmd") / "pmd_one_vuln.csv", encoding="utf-8") as testfile:
             parser = PmdParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/pmd/pmd_many_vulns.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pmd") / "pmd_many_vulns.csv", encoding="utf-8") as testfile:
             parser = PmdParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(16, len(findings))
diff --git a/unittests/tools/test_popeye_parser.py b/unittests/tools/test_popeye_parser.py
index 17bb5b6b8ec..d378ac12846 100644
--- a/unittests/tools/test_popeye_parser.py
+++ b/unittests/tools/test_popeye_parser.py
@@ -1,19 +1,19 @@
 from dojo.models import Test
 from dojo.tools.popeye.parser import PopeyeParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPopeyeParser(DojoTestCase):
 
     def test_popeye_parser_with_no_vuln_has_no_findings(self):
-        testfile = open("unittests/scans/popeye/popeye_zero_vul.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("popeye") / "popeye_zero_vul.json", encoding="utf-8")
         parser = PopeyeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_popeye_parser_with_one_warning_has_one_findings(self):
-        testfile = open("unittests/scans/popeye/popeye_one_vul.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("popeye") / "popeye_one_vul.json", encoding="utf-8")
         parser = PopeyeParser()
         findings = parser.get_findings(testfile, Test())
         finding_title = "pods test-namespace/6cff44dc94-d92km [POP-106] No resources requests/limits defined"
@@ -31,7 +31,7 @@ def test_popeye_parser_with_one_warning_has_one_findings(self):
         self.assertEqual(finding_vuln_id_from_tool, findings[0].vuln_id_from_tool)
 
     def test_popeye_parser_with_many_vuln_has_many_findings(self):
-        testfile = open("unittests/scans/popeye/popeye_many_vul.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("popeye") / "popeye_many_vul.json", encoding="utf-8")
         parser = PopeyeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_progpilot_parser.py b/unittests/tools/test_progpilot_parser.py
index 9cc11fb6c30..ce4369af281 100644
--- a/unittests/tools/test_progpilot_parser.py
+++ b/unittests/tools/test_progpilot_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.progpilot.parser import ProgpilotParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestProgpilotParser(DojoTestCase):
 
     def test_progpilotparser_single_has_many_findings(self):
-        testfile = open("unittests/scans/progpilot/progpilot.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("progpilot") / "progpilot.json", encoding="utf-8")
         parser = ProgpilotParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -22,21 +22,21 @@ def test_progpilotparser_single_has_many_findings(self):
         self.assertEqual(593, finding.line)
 
     def test_progpilotparser_single_has_one_finding(self):
-        testfile = open("unittests/scans/progpilot/progpilot2.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("progpilot") / "progpilot2.json", encoding="utf-8")
         parser = ProgpilotParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(1, len(findings))
 
     def test_progpilotparser_single_has_many_findings3(self):
-        testfile = open("unittests/scans/progpilot/progpilot3.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("progpilot") / "progpilot3.json", encoding="utf-8")
         parser = ProgpilotParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(3, len(findings))
 
     def test_progpilotparser_single_has_many_findings4(self):
-        testfile = open("unittests/scans/progpilot/progpilot4.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("progpilot") / "progpilot4.json", encoding="utf-8")
         parser = ProgpilotParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
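The pip_audit hunks a few files above keep one test method looping over two report variants (legacy and *_new); if the first file fails an assertion, the second is never exercised. unittest's subTest() would let each variant pass or fail independently. A sketch under the same file-layout assumptions, illustrative only and not part of the patch:

    # Sketch using unittest's subTest(): each report variant is reported
    # separately. File names match the hunk above; the class name is invented.
    from dojo.models import Test
    from dojo.tools.pip_audit.parser import PipAuditParser
    from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path

    class TestPipAuditParserPerReport(DojoTestCase):
        def test_parser_many_vulns(self):
            for name in ("many_vulns.json", "many_vulns_new.json"):
                with self.subTest(report=name):
                    with open(get_unit_tests_scans_path("pip_audit") / name, encoding="utf-8") as testfile:
                        findings = PipAuditParser().get_findings(testfile, Test())
                    self.assertGreater(len(findings), 0)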
self.assertEqual("Reference: https://ref.example.com", finding.references) def test_ptart_parser_with_one_assessment_has_many_findings(self): - with open("unittests/scans/ptart/ptart_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("ptart") / "ptart_many_vul.json", encoding="utf-8") as testfile: parser = PTARTParser() findings = parser.get_findings(testfile, self.test) self.assertEqual(2, len(findings)) @@ -510,7 +509,7 @@ def test_ptart_parser_with_one_assessment_has_many_findings(self): self.assertEqual(None, finding.references) def test_ptart_parser_with_multiple_assessments_has_many_findings_correctly_grouped(self): - with open("unittests/scans/ptart/ptart_vulns_with_mult_assessments.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("ptart") / "ptart_vulns_with_mult_assessments.json", encoding="utf-8") as testfile: parser = PTARTParser() findings = parser.get_findings(testfile, self.test) self.assertEqual(3, len(findings)) @@ -578,7 +577,7 @@ def test_ptart_parser_with_multiple_assessments_has_many_findings_correctly_grou self.assertEqual(None, finding.references) def test_ptart_parser_with_single_vuln_on_import_test(self): - with open("unittests/scans/ptart/ptart_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("ptart") / "ptart_one_vul.json", encoding="utf-8") as testfile: parser = PTARTParser() tests = parser.get_tests("PTART Report", testfile) self.assertEqual(1, len(tests)) @@ -624,7 +623,7 @@ def test_ptart_parser_with_single_vuln_on_import_test(self): self.assertEqual("Reference: https://ref.example.com", finding.references) def test_ptart_parser_with_retest_campaign(self): - with open("unittests/scans/ptart/ptart_vuln_plus_retest.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("ptart") / "ptart_vuln_plus_retest.json", encoding="utf-8") as testfile: parser = PTARTParser() findings = parser.get_findings(testfile, self.test) self.assertEqual(3, len(findings)) diff --git a/unittests/tools/test_pwn_sast_parser.py b/unittests/tools/test_pwn_sast_parser.py index c4b9f6033e4..f6c8ded484a 100644 --- a/unittests/tools/test_pwn_sast_parser.py +++ b/unittests/tools/test_pwn_sast_parser.py @@ -1,39 +1,39 @@ from dojo.models import Test from dojo.tools.pwn_sast.parser import PWNSASTParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestPWNSASTParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/pwn_sast/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("pwn_sast") / "no_findings.json", encoding="utf-8") as testfile: parser = PWNSASTParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding(self): - with open("unittests/scans/pwn_sast/one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("pwn_sast") / "one_finding.json", encoding="utf-8") as testfile: parser = PWNSASTParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) self.assertEqual(1, len(findings)) def test_parse_many_finding(self): - with open("unittests/scans/pwn_sast/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("pwn_sast") / "many_findings.json", encoding="utf-8") as testfile: parser = PWNSASTParser() findings = parser.get_findings(testfile, Test()) 
diff --git a/unittests/tools/test_pwn_sast_parser.py b/unittests/tools/test_pwn_sast_parser.py
index c4b9f6033e4..f6c8ded484a 100644
--- a/unittests/tools/test_pwn_sast_parser.py
+++ b/unittests/tools/test_pwn_sast_parser.py
@@ -1,39 +1,39 @@
 from dojo.models import Test
 from dojo.tools.pwn_sast.parser import PWNSASTParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestPWNSASTParser(DojoTestCase):
 
     def test_parse_no_findings(self):
-        with open("unittests/scans/pwn_sast/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pwn_sast") / "no_findings.json", encoding="utf-8") as testfile:
             parser = PWNSASTParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_one_finding(self):
-        with open("unittests/scans/pwn_sast/one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pwn_sast") / "one_finding.json", encoding="utf-8") as testfile:
             parser = PWNSASTParser()
             findings = parser.get_findings(testfile, Test())
             self.assertIsInstance(findings, list)
             self.assertEqual(1, len(findings))
 
     def test_parse_many_finding(self):
-        with open("unittests/scans/pwn_sast/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pwn_sast") / "many_findings.json", encoding="utf-8") as testfile:
             parser = PWNSASTParser()
             findings = parser.get_findings(testfile, Test())
             self.assertIsInstance(findings, list)
             self.assertEqual(3, len(findings))
 
     def test_one_dup_finding(self):
-        with open("unittests/scans/pwn_sast/one_dup_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pwn_sast") / "one_dup_finding.json", encoding="utf-8") as testfile:
             parser = PWNSASTParser()
             findings = parser.get_findings(testfile, Test())
             self.assertIsInstance(findings, list)
             self.assertEqual(1, len(findings))
 
     def test_title_is_not_none(self):
-        with open("unittests/scans/pwn_sast/one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("pwn_sast") / "one_finding.json", encoding="utf-8") as testfile:
             parser = PWNSASTParser()
             findings = parser.get_findings(testfile, Test())
             self.assertIsInstance(findings, list)
diff --git a/unittests/tools/test_qualys_hacker_guardian_parser.py b/unittests/tools/test_qualys_hacker_guardian_parser.py
index 47bd820ffb6..616341b9ece 100644
--- a/unittests/tools/test_qualys_hacker_guardian_parser.py
+++ b/unittests/tools/test_qualys_hacker_guardian_parser.py
@@ -1,21 +1,19 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.qualys_hacker_guardian.parser import QualysHackerGuardianParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestQualysHackerGuardianParser(DojoTestCase):
 
     def test_qualys_hacker_guardian_parser_with_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/qualys_hacker_guardian/zero_finding.csv"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("qualys_hacker_guardian") / "zero_finding.csv", encoding="utf-8") as testfile:
             parser = QualysHackerGuardianParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_qualys_hacker_guardian_parser_with_one_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/qualys_hacker_guardian/one_finding.csv"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("qualys_hacker_guardian") / "one_finding.csv", encoding="utf-8") as testfile:
             parser = QualysHackerGuardianParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -26,7 +24,7 @@ def test_qualys_hacker_guardian_parser_with_one_findings(self):
         self.assertEqual(len(finding.unsaved_endpoints), 2)
 
     def test_qualys_hacker_guardian_parser_with_many_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/qualys_hacker_guardian/many_finding.csv"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("qualys_hacker_guardian") / "many_finding.csv", encoding="utf-8") as testfile:
             parser = QualysHackerGuardianParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
diff --git a/unittests/tools/test_qualys_infrascan_webgui_parser.py b/unittests/tools/test_qualys_infrascan_webgui_parser.py
index b76aeba84d9..c335f428ee1 100644
--- a/unittests/tools/test_qualys_infrascan_webgui_parser.py
+++ b/unittests/tools/test_qualys_infrascan_webgui_parser.py
@@ -4,14 +4,14 @@
 
 from dojo.models import Test
 from dojo.tools.qualys_infrascan_webgui.parser import QualysInfrascanWebguiParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestQualysInfrascanWebguiParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_0.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_infrascan_webgui") / "qualys_infrascan_webgui_0.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysInfrascanWebguiParser()
             findings = parser.get_findings(testfile, Test())
@@ -21,7 +21,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
     # + also verify data with one test
     def test_parse_file_with_one_vuln_has_one_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_1.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_infrascan_webgui") / "qualys_infrascan_webgui_1.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysInfrascanWebguiParser()
             findings = parser.get_findings(testfile, Test())
@@ -38,7 +38,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
     # Sample with Multiple Test
     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_multiple.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_infrascan_webgui") / "qualys_infrascan_webgui_multiple.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysInfrascanWebguiParser()
             findings = parser.get_findings(testfile, Test())
@@ -61,7 +61,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
     # Sample with Multiple Test
     def test_parse_file_with_finding_no_dns(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys_infrascan_webgui/qualys_infrascan_webgui_3.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_infrascan_webgui") / "qualys_infrascan_webgui_3.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysInfrascanWebguiParser()
             findings = parser.get_findings(testfile, Test())
diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py
index 15840f8561d..5551c0537d4 100644
--- a/unittests/tools/test_qualys_parser.py
+++ b/unittests/tools/test_qualys_parser.py
@@ -4,7 +4,7 @@
 
 from dojo.models import Test
 from dojo.tools.qualys.parser import QualysParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestQualysParser(DojoTestCase):
@@ -18,7 +18,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self):
 
     def parse_file_with_no_vuln_has_no_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/empty.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "empty.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -35,7 +35,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
 
     def parse_file_with_multiple_vuln_has_multiple_findings(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "Qualys_Sample_Report.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -82,7 +82,7 @@ def test_parse_file_with_no_vuln_has_no_findings_csv(self):
 
     def parse_file_with_no_vuln_has_no_findings_csv(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/empty.csv", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "empty.csv", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -99,7 +99,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
 
     def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.csv", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "Qualys_Sample_Report.csv", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -136,7 +136,7 @@ def parse_file_with_multiple_vuln_has_multiple_findings_csv(self):
 
     def test_parse_file_monthly_pci_issue6932(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/monthly_pci_issue6932.csv", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "monthly_pci_issue6932.csv", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -144,7 +144,7 @@ def test_parse_file_monthly_pci_issue6932(self):
 
     def test_parse_file_with_cvss_values_and_scores(self):
         with open(
-            get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys") / "Qualys_Sample_Report.xml", encoding="utf-8",
         ) as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
@@ -179,7 +179,7 @@ def test_parse_file_with_cvss_values_and_scores(self):
         )
 
     def test_get_severity_legacy(self):
-        with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("qualys") / "Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
             counts = {}
@@ -197,7 +197,7 @@ def test_get_severity_legacy(self):
 
     @override_settings(USE_QUALYS_LEGACY_SEVERITY_PARSING=False)
     def test_get_severity(self):
-        with open(get_unit_tests_path() + "/scans/qualys/Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("qualys") / "Qualys_Sample_Report.xml", encoding="utf-8") as testfile:
             parser = QualysParser()
             findings = parser.get_findings(testfile, Test())
             counts = {}
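test_qualys_parser.py also toggles severity handling with Django's override_settings, visible in the @override_settings(USE_QUALYS_LEGACY_SEVERITY_PARSING=False) decorator above. The decorator patches the named setting only for the duration of the decorated test and restores the original value afterwards. A generic illustration; the setting name is real (it appears in the hunk above), but the test class and body are invented for demonstration:

    # Generic illustration of django.test.override_settings.
    from django.conf import settings
    from django.test import TestCase, override_settings

    class SeveritySettingExample(TestCase):
        @override_settings(USE_QUALYS_LEGACY_SEVERITY_PARSING=False)
        def test_flag_is_patched_inside_the_test(self):
            # The patched value is visible here and is restored automatically
            # when the test method returns.
            self.assertFalse(settings.USE_QUALYS_LEGACY_SEVERITY_PARSING)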
diff --git a/unittests/tools/test_qualys_webapp_parser.py b/unittests/tools/test_qualys_webapp_parser.py
index 71bd295634e..1f68e022f0b 100644
--- a/unittests/tools/test_qualys_webapp_parser.py
+++ b/unittests/tools/test_qualys_webapp_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.qualys_webapp.parser import QualysWebAppParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestQualysWebAppParser(DojoTestCase):
 
     def test_qualys_webapp_parser_with_no_vuln_has_no_findings(self):
-        testfile = open("unittests/scans/qualys_webapp/qualys_webapp_no_vuln.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("qualys_webapp") / "qualys_webapp_no_vuln.xml", encoding="utf-8")
         parser = QualysWebAppParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -18,7 +18,7 @@ def test_qualys_webapp_parser_with_no_vuln_has_no_findings(self):
         self.assertEqual(17, len(findings))
 
     def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
-        testfile = open("unittests/scans/qualys_webapp/qualys_webapp_one_vuln.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("qualys_webapp") / "qualys_webapp_one_vuln.xml", encoding="utf-8")
         parser = QualysWebAppParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -31,7 +31,7 @@ def test_qualys_webapp_parser_with_one_criticle_vuln_has_one_findings(self):
 
     def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_webapp") / "qualys_webapp_many_vuln.xml", encoding="utf-8",
         )
         parser = QualysWebAppParser()
         findings = parser.get_findings(testfile, Test())
@@ -45,7 +45,7 @@ def test_qualys_webapp_parser_with_many_vuln_has_many_findings(self):
 
     def test_qualys_webapp_parser_info_is_vuln(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/qualys_webapp/qualys_webapp_many_vuln.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_webapp") / "qualys_webapp_many_vuln.xml", encoding="utf-8",
         )
         parser = QualysWebAppParser()
         findings = parser.get_findings(testfile, Test(), enable_weakness=True)
@@ -59,7 +59,7 @@ def test_qualys_webapp_parser_info_is_vuln(self):
 
     def test_discussion_10239(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/qualys_webapp/discussion_10239.xml", encoding="utf-8",
+            get_unit_tests_scans_path("qualys_webapp") / "discussion_10239.xml", encoding="utf-8",
        )
         parser = QualysWebAppParser()
         findings = parser.get_findings(testfile, Test(), enable_weakness=True)
diff --git a/unittests/tools/test_rapplex_parser.py b/unittests/tools/test_rapplex_parser.py
index 97937cbd4d0..e0415ff2fa0 100644
--- a/unittests/tools/test_rapplex_parser.py
+++ b/unittests/tools/test_rapplex_parser.py
@@ -1,21 +1,19 @@
-from os import path
-from pathlib import Path
 
 from dojo.models import Test
 from dojo.tools.rapplex.parser import RapplexParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRapplexParser(DojoTestCase):
 
     def test_rapplex_parser_with_no_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/rapplex/rapplex_zero_vul.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rapplex") / "rapplex_zero_vul.json", encoding="utf-8") as testfile:
             parser = RapplexParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_rapplex_parser_with_one_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/rapplex/rapplex_one_vul.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rapplex") / "rapplex_one_vul.json", encoding="utf-8") as testfile:
             parser = RapplexParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -27,7 +25,7 @@ def test_rapplex_parser_with_one_findings(self):
         self.assertIsNotNone(finding.references)
 
     def test_rapplex_parser_with_many_findings(self):
-        with open(path.join(Path(__file__).parent, "../scans/rapplex/rapplex_many_vul.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rapplex") / "rapplex_many_vul.json", encoding="utf-8") as testfile:
             parser = RapplexParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(8, len(findings))
diff --git a/unittests/tools/test_redhatsatellite_parser.py b/unittests/tools/test_redhatsatellite_parser.py
index 63ab8ba3e41..1414782836a 100644
--- a/unittests/tools/test_redhatsatellite_parser.py
+++ b/unittests/tools/test_redhatsatellite_parser.py
@@ -1,24 +1,24 @@
 from dojo.models import Test
 from dojo.tools.redhatsatellite.parser import RedHatSatelliteParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRedHatSatelliteParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/redhatsatellite/no_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("redhatsatellite") / "no_findings.json", encoding="utf-8") as testfile:
             parser = RedHatSatelliteParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_finding(self):
-        with open("unittests/scans/redhatsatellite/one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("redhatsatellite") / "one_finding.json", encoding="utf-8") as testfile:
             parser = RedHatSatelliteParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_findingse(self):
-        with open("unittests/scans/redhatsatellite/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("redhatsatellite") / "many_findings.json", encoding="utf-8") as testfile:
             parser = RedHatSatelliteParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -27,7 +27,7 @@ def test_parse_file_with_multiple_findingse(self):
         self.assertEqual("CVE-1990-2", findings[0].unsaved_vulnerability_ids[2])
 
     def test_parse_file_with_many_packages(self):
-        with open("unittests/scans/redhatsatellite/many_packages.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("redhatsatellite") / "many_packages.json", encoding="utf-8") as testfile:
             parser = RedHatSatelliteParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_retirejs_parser.py b/unittests/tools/test_retirejs_parser.py
index 80090385aeb..d1c0aafabd7 100644
--- a/unittests/tools/test_retirejs_parser.py
+++ b/unittests/tools/test_retirejs_parser.py
@@ -1,11 +1,11 @@
 from dojo.models import Test
 from dojo.tools.retirejs.parser import RetireJsParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRetireJsParser(DojoTestCase):
 
     def test_parse(self):
-        with open("unittests/scans/retirejs/latest.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("retirejs") / "latest.json", encoding="utf-8") as testfile:
             parser = RetireJsParser()
             findings = parser.get_findings(testfile, Test())
             self.assertIsInstance(findings, list)
diff --git a/unittests/tools/test_risk_recon_parser.py b/unittests/tools/test_risk_recon_parser.py
index 38c8b496be6..f24b2490074 100644
--- a/unittests/tools/test_risk_recon_parser.py
+++ b/unittests/tools/test_risk_recon_parser.py
@@ -2,25 +2,25 @@
 
 from dojo.models import Test
 from dojo.tools.risk_recon.parser import RiskReconParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRiskReconAPIParser(DojoTestCase):
 
     def test_api_with_bad_url(self):
-        with open("unittests/scans/risk_recon/bad_url.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("risk_recon") / "bad_url.json", encoding="utf-8") as testfile:
             with self.assertRaises(Exception):
                 parser = RiskReconParser()
                 parser.get_findings(testfile, Test())
 
     def test_api_with_bad_key(self):
-        with open("unittests/scans/risk_recon/bad_key.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("risk_recon") / "bad_key.json", encoding="utf-8") as testfile:
             with self.assertRaises(Exception):
                 parser = RiskReconParser()
                 parser.get_findings(testfile, Test())
 
     def test_parser_without_api(self):
-        with open("unittests/scans/risk_recon/findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("risk_recon") / "findings.json", encoding="utf-8") as testfile:
             parser = RiskReconParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_rubocop_parser.py b/unittests/tools/test_rubocop_parser.py
index 0fa5d3cbdb3..8c13d30aa02 100644
--- a/unittests/tools/test_rubocop_parser.py
+++ b/unittests/tools/test_rubocop_parser.py
@@ -1,25 +1,25 @@
 from dojo.models import Test
 from dojo.tools.rubocop.parser import RubocopParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRubocopParser(DojoTestCase):
 
     def test_parser_empty(self):
-        testfile = open("unittests/scans/rubocop/empty.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("rubocop") / "empty.json", encoding="utf-8")
         parser = RubocopParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_parser_zero_findings(self):
-        testfile = open("unittests/scans/rubocop/zero_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("rubocop") / "zero_vulns.json", encoding="utf-8")
         parser = RubocopParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))
 
     def test_parser_one_vuln(self):
-        testfile = open("unittests/scans/rubocop/one_finding.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("rubocop") / "one_finding.json", encoding="utf-8")
         parser = RubocopParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -33,7 +33,7 @@ def test_parser_one_vuln(self):
         self.assertEqual("Security/MarshalLoad", finding.vuln_id_from_tool)
 
     def test_parser_many_vulns(self):
-        testfile = open("unittests/scans/rubocop/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("rubocop") / "many_vulns.json", encoding="utf-8")
         parser = RubocopParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_rusty_hog_parser.py b/unittests/tools/test_rusty_hog_parser.py
index ff2420d00ed..67d45770575 100644
--- a/unittests/tools/test_rusty_hog_parser.py
+++ b/unittests/tools/test_rusty_hog_parser.py
@@ -1,29 +1,29 @@
 from dojo.models import Test
 from dojo.tools.rusty_hog.parser import RustyhogParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestRustyhogParser(DojoTestCase):
 
     def test_parse_file_with_no_vuln_has_no_finding_choctawhog(self):
-        with open("unittests/scans/rusty_hog/choctawhog_no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "choctawhog_no_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Rusty Hog", Test())  # The outputfile is empty. A subscanner can't be classified
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_finding_choctawhog(self):
-        with open("unittests/scans/rusty_hog/choctawhog_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "choctawhog_one_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Choctaw Hog", Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog(self):
-        with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "choctawhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Choctaw Hog", Test())
             self.assertEqual(13, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog_content(self):
-        with open("unittests/scans/rusty_hog/choctawhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "choctawhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Choctaw Hog", Test())
             self.assertEqual(findings[0].title, "Email address found in Git path .github/workflows/main.yml (a7bce96377c4ff2ac16cd51fb0da7fe7ea678829)")
@@ -36,25 +36,25 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_choctawhog_content(s
         self.assertIn("Please ensure no secret material nor confidential information is kept in clear within git repositories.", findings[0].mitigation)
 
     def test_parse_file_with_no_vuln_has_no_finding_duorchog(self):
-        with open("unittests/scans/rusty_hog/durochog_no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "durochog_no_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Rusty Hog", Test())  # The outputfile is empty. A subscanner can't be classified
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_finding_durochog(self):
-        with open("unittests/scans/rusty_hog/durochog_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "durochog_one_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Duroc Hog", Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog(self):
-        with open("unittests/scans/rusty_hog/durochog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "durochog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Duroc Hog", Test())
             self.assertEqual(4, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog_content(self):
-        with open("unittests/scans/rusty_hog/durochog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "durochog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Duroc Hog", Test())
             self.assertEqual(findings[0].title, "password (Password) found in path /scan_folder/unittests/scans/sonarqube/sonar-no-finding.html")
@@ -65,25 +65,25 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_durochog_content(sel
         self.assertIn("Please ensure no secret material nor confidential information is kept in clear within directories, files, and archives.", findings[0].mitigation)
 
     def test_parse_file_with_no_vuln_has_no_finding_gottingenhog(self):
-        with open("unittests/scans/rusty_hog/gottingenhog_no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "gottingenhog_no_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Rusty Hog", Test())  # The outputfile is empty. A subscanner can't be classified
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_finding_gottingenhog(self):
-        with open("unittests/scans/rusty_hog/gottingenhog_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "gottingenhog_one_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Gottingen Hog", Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog(self):
-        with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Gottingen Hog", Test())
             self.assertEqual(10, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog_content(self):
-        with open("unittests/scans/rusty_hog/gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "gottingenhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Gottingen Hog", Test())
             self.assertEqual(findings[0].title, "password found in Jira ID TEST-123 (Issue Description)")
@@ -94,19 +94,19 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_gottingenhog_content
         self.assertIn("Please ensure no secret material nor confidential information is kept in clear within JIRA Tickets.", findings[0].mitigation)
 
     def test_parse_file_with_no_vuln_has_no_finding_essexhog(self):
-        with open("unittests/scans/rusty_hog/essexhog_no_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "essexhog_no_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Rusty Hog", Test())  # The outputfile is empty. A subscanner can't be classified
             self.assertEqual(0, len(findings))
 
     def test_parse_file_with_one_vuln_has_one_finding_essexhog(self):
-        with open("unittests/scans/rusty_hog/essexhog_one_vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "essexhog_one_vuln.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Essex Hog", Test())
             self.assertEqual(1, len(findings))
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog(self):
-        with open("unittests/scans/rusty_hog/essexhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "essexhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Essex Hog", Test())
             self.assertEqual(3, len(findings))
@@ -115,7 +115,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog(self):
         self.assertEqual("**Reason:** SSH (EC) private key", findings[0].description[:32])
 
     def test_parse_file_with_multiple_vuln_has_multiple_finding_essexhog_content(self):
-        with open("unittests/scans/rusty_hog/essexhog_many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("rusty_hog") / "essexhog_many_vulns.json", encoding="utf-8") as testfile:
             parser = RustyhogParser()
             findings = parser.get_items(testfile, "Essex Hog", Test())
             self.assertEqual(findings[0].title, "SSH (EC) private key found in Confluence Page ID 12345")
diff --git a/unittests/tools/test_sarif_parser.py b/unittests/tools/test_sarif_parser.py
index 4b63b2e3488..60a3661a730 100644
--- a/unittests/tools/test_sarif_parser.py
+++ b/unittests/tools/test_sarif_parser.py
@@ -1,10 +1,8 @@
 import datetime
-from os import path
-from pathlib import Path
 
 from dojo.models import Finding, Test
 from dojo.tools.sarif.parser import SarifParser, get_fingerprints_hashes
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path
 
 
 class TestSarifParser(DojoTestCase):
@@ -18,9 +16,8 @@ def common_checks(self, finding):
 
     def test_example_report(self):
         with open(
-            path.join(
-                get_unit_tests_path() + "/scans/sarif/DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif",
-            ), encoding="utf-8",
+            get_unit_tests_scans_path("sarif") / "DefectDojo_django-DefectDojo__2020-12-11_13 42 10__export.sarif",
+            encoding="utf-8",
         )as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
@@ -30,7 +27,7 @@ def test_example_report(self):
 
     def test_suppression_report(self):
         """Test report file having different suppression definitions"""
-        with open(path.join(Path(__file__).parent, "../scans/sarif/suppression_test.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "suppression_test.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -42,7 +39,7 @@ def test_suppression_report(self):
         self.assertEqual(True, finding.active)
 
     def test_example2_report(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "appendix_k.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -70,13 +67,13 @@ def test_example2_report(self):
         self.common_checks(finding)
 
     def test_example_k1_report(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k1.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "appendix_k1.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))
 
     def test_example_k2_report(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k2.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "appendix_k2.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -91,7 +88,7 @@ def test_example_k2_report(self):
         self.common_checks(finding)
 
     def test_example_k3_report(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k3.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "appendix_k3.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -101,7 +98,7 @@ def test_example_k3_report(self):
         self.common_checks(finding)
 
     def test_example_k4_report_mitigation(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k4.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "appendix_k4.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -118,7 +115,7 @@ def test_example_k4_report_mitigation(self):
 
     def test_example_report_ms(self):
         """Report file come from Microsoft SARIF sdk on GitHub"""
-        with open(path.join(Path(__file__).parent, "../scans/sarif/SuppressionTestCurrent.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "SuppressionTestCurrent.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(4, len(findings))
@@ -128,7 +125,7 @@ def test_example_report_ms(self):
         self.common_checks(finding)
 
     def test_example_report_semgrep(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/semgrepowasp-benchmark-sample.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "semgrepowasp-benchmark-sample.sarif", encoding="utf-8") as testfile:
             test = Test()
             parser = SarifParser()
             findings = parser.get_findings(testfile, test)
@@ -142,7 +139,7 @@ def test_example_report_semgrep(self):
         self.common_checks(finding)
 
     def test_example_report_scanlift_dependency_check(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/dependency_check.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "dependency_check.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(13, len(findings))
@@ -165,7 +162,7 @@ def test_example_report_scanlift_dependency_check(self):
         self.common_checks(finding)
 
     def test_example_report_scanlift_bash(self):
-        with open(path.join(Path(__file__).parent, "../scans/sarif/bash-report.sarif"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sarif") / "bash-report.sarif", encoding="utf-8") as testfile:
             parser = SarifParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(27, len(findings))
@@ -194,7 +191,7 @@ def
test_example_report_scanlift_bash(self): self.common_checks(finding) def test_example_report_taint_python(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/taint-python-report.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "taint-python-report.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(11, len(findings)) @@ -236,7 +233,7 @@ def test_example_report_taint_python(self): def test_njsscan(self): """Generated with opensecurity/njsscan (https://github.com/ajinabraham/njsscan)""" - with open(path.join(Path(__file__).parent, "../scans/sarif/njsscan.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "njsscan.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) @@ -263,7 +260,7 @@ def test_njsscan(self): def test_dockle(self): """Generated with goodwithtech/dockle (https://github.com/goodwithtech/dockle)""" - with open(path.join(Path(__file__).parent, "../scans/sarif/dockle_0_3_15.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "dockle_0_3_15.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -311,7 +308,7 @@ def test_dockle(self): ) def test_mobsfscan(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/mobsfscan.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "mobsfscan.json", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(18, len(findings)) @@ -319,7 +316,7 @@ def test_mobsfscan(self): self.common_checks(finding) def test_gitleaks(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/gitleaks_7.5.0.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "gitleaks_7.5.0.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(8, len(findings)) @@ -369,7 +366,7 @@ def test_gitleaks(self): self.assertEqual(37, finding.line) def test_flawfinder(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/flawfinder.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "flawfinder.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(53, len(findings)) @@ -445,7 +442,7 @@ def test_flawfinder(self): self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references) def test_flawfinder_interfacev2(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/flawfinder.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "flawfinder.sarif", encoding="utf-8") as testfile: parser = SarifParser() tests = parser.get_tests(parser.get_scan_types()[0], testfile) self.assertEqual(1, len(tests)) @@ -514,7 +511,7 @@ def test_flawfinder_interfacev2(self): self.assertEqual("https://cwe.mitre.org/data/definitions/120.html", finding.references) def test_appendix_k1_double_interfacev2(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/appendix_k1_double.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "appendix_k1_double.sarif", 
encoding="utf-8") as testfile: parser = SarifParser() tests = parser.get_tests(parser.get_scan_types()[0], testfile) self.assertEqual(2, len(tests)) @@ -530,7 +527,7 @@ def test_appendix_k1_double_interfacev2(self): self.assertEqual(0, len(findings)) def test_codeql_snippet_report(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/codeQL-output.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "codeQL-output.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(72, len(findings)) @@ -556,7 +553,7 @@ def test_codeql_snippet_report(self): self.common_checks(finding) def test_severity_cvss_from_grype(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/cxf-3.4.6.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "cxf-3.4.6.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(22, len(findings)) @@ -585,14 +582,14 @@ def test_get_fingerprints_hashes(self): ) def test_tags_from_result_properties(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/taint-python-report.sarif"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "taint-python-report.sarif", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) item = findings[0] self.assertEqual(["Scan"], item.tags) def test_severity_in_properties(self): - with open(path.join(Path(__file__).parent, "../scans/sarif/issue_10191.json"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sarif") / "issue_10191.json", encoding="utf-8") as testfile: parser = SarifParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(77, len(findings)) diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py index a2c6618b096..ce0a433f9ab 100644 --- a/unittests/tools/test_scantist_parser.py +++ b/unittests/tools/test_scantist_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.scantist.parser import ScantistParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestScantistParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/scantist/scantist-no-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("scantist") / "scantist-no-vuln.json", encoding="utf-8") as testfile: parser = ScantistParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/scantist/scantist-one-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("scantist") / "scantist-one-vuln.json", encoding="utf-8") as testfile: parser = ScantistParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -30,7 +30,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): ) # Negligible is translated to Informational def test_parse_file_with_multiple_vuln_has_multiple_findings(self): - with open("unittests/scans/scantist/scantist-many-vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("scantist") / "scantist-many-vuln.json", encoding="utf-8") as testfile: parser = ScantistParser() findings = 
diff --git a/unittests/tools/test_scantist_parser.py b/unittests/tools/test_scantist_parser.py
index a2c6618b096..ce0a433f9ab 100644
--- a/unittests/tools/test_scantist_parser.py
+++ b/unittests/tools/test_scantist_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.scantist.parser import ScantistParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestScantistParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/scantist/scantist-no-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("scantist") / "scantist-no-vuln.json", encoding="utf-8") as testfile:
             parser = ScantistParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_finding(self):
-        with open("unittests/scans/scantist/scantist-one-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("scantist") / "scantist-one-vuln.json", encoding="utf-8") as testfile:
             parser = ScantistParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -30,7 +30,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self):
         )  # Negligible is translated to Informational

     def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
-        with open("unittests/scans/scantist/scantist-many-vuln.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("scantist") / "scantist-many-vuln.json", encoding="utf-8") as testfile:
             parser = ScantistParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(17, len(findings))
diff --git a/unittests/tools/test_scout_suite_parser.py b/unittests/tools/test_scout_suite_parser.py
index f689fcc8741..52192f1960e 100644
--- a/unittests/tools/test_scout_suite_parser.py
+++ b/unittests/tools/test_scout_suite_parser.py
@@ -2,18 +2,18 @@

 from dojo.models import Test
 from dojo.tools.scout_suite.parser import ScoutSuiteParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestScoutSuiteParser(DojoTestCase):
     def test_scout_suite_parser_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/scout_suite/no_vuln.js", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("scout_suite") / "no_vuln.js", encoding="utf-8") as test_file:
             parser = ScoutSuiteParser()
             findings = parser.get_findings(test_file, Test())
             self.assertEqual(0, len(findings))

     def test_scout_suite_parser_with_two_findings(self):
-        with open("unittests/scans/scout_suite/two_findings.js", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("scout_suite") / "two_findings.js", encoding="utf-8") as test_file:
             parser = ScoutSuiteParser()
             findings = parser.get_findings(test_file, Test())
             self.assertEqual(4, len(findings))
@@ -32,7 +32,7 @@ def test_scout_suite_parser_with_two_findings(self):
         self.assertEqual("gcp:cloudstorage-bucket-no-versioning", finding.vuln_id_from_tool)

     def test_get_findings(self):
-        with open("unittests/scans/scout_suite/new2.js", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("scout_suite") / "new2.js", encoding="utf-8") as test_file:
             parser = ScoutSuiteParser()
             findings = parser.get_findings(test_file, Test())
             self.assertEqual(356, len(findings))
@@ -56,7 +56,7 @@ def test_get_findings(self):
         self.assertEqual("aws:config-recorder-not-configured", finding.vuln_id_from_tool)

     def test_get_tests(self):
-        with open("unittests/scans/scout_suite/new2.js", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("scout_suite") / "new2.js", encoding="utf-8") as test_file:
             parser = ScoutSuiteParser()
             scan_type = parser.get_scan_types()[0]
             tests = parser.get_tests(scan_type, test_file)
diff --git a/unittests/tools/test_semgrep_parser.py b/unittests/tools/test_semgrep_parser.py
index 26c0b4fdfe3..b9d395e2b38 100644
--- a/unittests/tools/test_semgrep_parser.py
+++ b/unittests/tools/test_semgrep_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.semgrep.parser import SemgrepParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSemgrepParser(DojoTestCase):

     def test_parse_empty(self):
-        with open("unittests/scans/semgrep/empty.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "empty.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_one_finding(self):
-        with open("unittests/scans/semgrep/one_finding.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "one_finding.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -27,7 +27,7 @@ def test_parse_one_finding(self):
         self.assertIn("Using CBC with PKCS5Padding is susceptible to padding orcale attacks", finding.description)

     def test_parse_many_finding(self):
-        with open("unittests/scans/semgrep/many_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "many_findings.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))
@@ -47,7 +47,7 @@ def test_parse_many_finding(self):
         self.assertEqual("java.lang.security.audit.cbc-padding-oracle.cbc-padding-oracle", finding.vuln_id_from_tool)

     def test_parse_repeated_finding(self):
-        with open("unittests/scans/semgrep/repeated_findings.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "repeated_findings.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -61,7 +61,7 @@ def test_parse_repeated_finding(self):
         self.assertEqual(2, finding.nb_occurences)

     def test_parse_many_vulns(self):
-        with open("unittests/scans/semgrep/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             testfile.close()
@@ -91,7 +91,7 @@ def test_parse_many_vulns(self):
         self.assertEqual("python.lang.security.unquoted-csv-writer.unquoted-csv-writer", finding.vuln_id_from_tool)

     def test_parse_cwe_list(self):
-        with open("unittests/scans/semgrep/cwe_list.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "cwe_list.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -105,10 +105,10 @@ def test_parse_cwe_list(self):
         self.assertIn("A CSRF middleware was not detected in your express application. Ensure you are either using one such as `csurf` or `csrf` (see rule references) and/or you are properly doing CSRF validation in your routes with a token or cookies.", finding.description)

     def test_different_lines_same_fingerprint(self):
-        with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_26.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "semgrep_version_1_30_0_line_26.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings_first = parser.get_findings(testfile, Test())
-        with open("unittests/scans/semgrep/semgrep_version_1_30_0_line_27.json", encoding="utf-8") as testfile2:
+        with open(get_unit_tests_scans_path("semgrep") / "semgrep_version_1_30_0_line_27.json", encoding="utf-8") as testfile2:
             parser = SemgrepParser()
             findings_second = parser.get_findings(testfile2, Test())
             self.assertEqual(len(findings_first), len(findings_second))
@@ -116,19 +116,19 @@ def test_different_lines_same_fingerprint(self):
             self.assertEqual(first.unique_id_from_tool, second.unique_id_from_tool)

     def test_parse_issue_8435(self):
-        with open("unittests/scans/semgrep/issue_8435.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "issue_8435.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))

     def test_parse_low_medium_high_severity(self):
-        with open("unittests/scans/semgrep/high-medium-low-severities.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "high-medium-low-severities.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))

     def test_parse_sca_deployments_vulns(self):
-        with open("unittests/scans/semgrep/sca-deployments-vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "sca-deployments-vulns.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(18, len(findings))
@@ -139,7 +139,7 @@ def test_parse_sca_deployments_vulns(self):
         self.assertEqual(617, finding.cwe)

     def test_fingerprint_requires_login_and_null(self):
-        with open("unittests/scans/semgrep/fingerprint_test.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("semgrep") / "fingerprint_test.json", encoding="utf-8") as testfile:
             parser = SemgrepParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_skf_parser.py b/unittests/tools/test_skf_parser.py
index 655395cd6b3..8ff8410baa3 100644
--- a/unittests/tools/test_skf_parser.py
+++ b/unittests/tools/test_skf_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.skf.parser import SKFParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSkfParser(DojoTestCase):
     def test_single_has_no_finding(self):
-        with open("unittests/scans/skf/export.csv", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("skf") / "export.csv", encoding="utf-8") as testfile:
             parser = SKFParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(27, len(findings))
diff --git a/unittests/tools/test_snyk_code_parser.py b/unittests/tools/test_snyk_code_parser.py
index 8d9fe8bd859..2c93fa1dcb6 100644
--- a/unittests/tools/test_snyk_code_parser.py
+++ b/unittests/tools/test_snyk_code_parser.py
@@ -1,19 +1,19 @@
 from dojo.models import Test
 from dojo.tools.snyk_code.parser import SnykCodeParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSnykCodeParser(DojoTestCase):

     def test_snykParser_single_has_many_findings(self):
-        testfile = open("unittests/scans/snyk_code/single_project_many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk_code") / "single_project_many_vulns.json", encoding="utf-8")
         parser = SnykCodeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(206, len(findings))

     def test_snykcode_issue_9270(self):
-        with open("unittests/scans/snyk_code/snykcode_issue_9270.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk_code") / "snykcode_issue_9270.json", encoding="utf-8") as testfile:
             parser = SnykCodeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(39, len(findings))
diff --git a/unittests/tools/test_snyk_parser.py b/unittests/tools/test_snyk_parser.py
index 17efff35a1b..ea2f70ec4c0 100644
--- a/unittests/tools/test_snyk_parser.py
+++ b/unittests/tools/test_snyk_parser.py
@@ -1,54 +1,54 @@
 from dojo.models import Test
 from dojo.tools.snyk.parser import SnykParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSnykParser(DojoTestCase):

     def test_snykParser_single_has_no_finding(self):
-        testfile = open("unittests/scans/snyk/single_project_no_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "single_project_no_vulns.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(0, len(findings))
         testfile.close()

     def test_snykParser_allprojects_has_no_finding(self):
-        testfile = open("unittests/scans/snyk/all-projects_no_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "all-projects_no_vulns.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(0, len(findings))
         testfile.close()

     def test_snykParser_single_has_one_finding(self):
-        testfile = open("unittests/scans/snyk/single_project_one_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "single_project_one_vuln.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         self.assertEqual(1, len(findings))
         testfile.close()

     def test_snykParser_allprojects_has_one_finding(self):
-        testfile = open("unittests/scans/snyk/all-projects_one_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "all-projects_one_vuln.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(1, len(findings))

     def test_snykParser_single_has_many_findings(self):
-        testfile = open("unittests/scans/snyk/single_project_many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "single_project_many_vulns.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(41, len(findings))

     def test_snykParser_allprojects_has_many_findings(self):
-        testfile = open("unittests/scans/snyk/all-projects_many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "all-projects_many_vulns.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(4, len(findings))

     def test_snykParser_finding_has_fields(self):
-        testfile = open("unittests/scans/snyk/single_project_one_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "single_project_one_vuln.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -89,7 +89,7 @@ def test_snykParser_finding_has_fields(self):
         )

     def test_snykParser_file_path_with_ampersand_is_preserved(self):
-        testfile = open("unittests/scans/snyk/single_project_one_vuln_with_ampersands.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "single_project_one_vuln_with_ampersands.json", encoding="utf-8")
         parser = SnykParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -102,7 +102,7 @@ def test_snykParser_file_path_with_ampersand_is_preserved(self):

     def test_snykParser_allprojects_issue4277(self):
         """Report to linked to issue 4277"""
-        testfile = open("unittests/scans/snyk/all_projects_issue4277.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("snyk") / "all_projects_issue4277.json", encoding="utf-8")
         parser = SnykParser()
         findings = list(parser.get_findings(testfile, Test()))
         testfile.close()
@@ -139,7 +139,7 @@ def test_snykParser_allprojects_issue4277(self):
         self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L/E:P/RL:O/RC:C", finding.cvssv3)

     def test_snykParser_cvssscore_none(self):
-        with open("unittests/scans/snyk/single_project_None_cvss.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "single_project_None_cvss.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -150,7 +150,7 @@ def test_snykParser_cvssscore_none(self):
         )

     def test_snykParser_target_file(self):
-        with open("unittests/scans/snyk/all_containers_target_output.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "all_containers_target_output.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(40, len(findings))
@@ -160,7 +160,7 @@ def test_snykParser_target_file(self):
         self.assertIn("target_file:Mobile-Security-Framework-MobSF/requirements.txt", finding.unsaved_tags)

     def test_snykParser_update_libs_tag(self):
-        with open("unittests/scans/snyk/single_project_upgrade_libs.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "single_project_upgrade_libs.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(254, len(findings))
@@ -172,19 +172,19 @@ def test_snykParser_update_libs_tag(self):
         self.assertIn("shell-quote@1.7.2", finding.mitigation)

     def test_snykcontainer_issue_9270(self):
-        with open("unittests/scans/snyk/snykcontainer_issue_9270.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "snykcontainer_issue_9270.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(25, len(findings))

     def test_snykcode_issue_9270(self):
-        with open("unittests/scans/snyk/snykcode_issue_9270.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "snykcode_issue_9270.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(39, len(findings))

     def test_snykcode_issue_9270_epss(self):
-        with open("unittests/scans/snyk/snykcontainer_issue_epss.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("snyk") / "snykcontainer_issue_epss.json", encoding="utf-8") as testfile:
             parser = SnykParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
diff --git a/unittests/tools/test_solar_appscreener_parser.py b/unittests/tools/test_solar_appscreener_parser.py
index b6e327c1840..3aaa0d506e7 100644
--- a/unittests/tools/test_solar_appscreener_parser.py
+++ b/unittests/tools/test_solar_appscreener_parser.py
@@ -1,13 +1,13 @@
 from dojo.models import Test
 from dojo.tools.solar_appscreener.parser import SolarAppscreenerParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSolarAppscreenerParser(DojoTestCase):
     def test_solar_appscreener_parser_with_no_vuln_has_no_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_zero_vul.csv", encoding="utf-8")
+            get_unit_tests_scans_path("solar_appscreener") / "solar_appscreener_zero_vul.csv", encoding="utf-8")
         parser = SolarAppscreenerParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -15,7 +15,7 @@ def test_solar_appscreener_parser_with_no_vuln_has_no_findings(self):

     def test_solar_appscreener_parser_with_one_criticle_vuln_has_one_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_one_vul.csv", encoding="utf-8")
+            get_unit_tests_scans_path("solar_appscreener") / "solar_appscreener_one_vul.csv", encoding="utf-8")
         parser = SolarAppscreenerParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -33,7 +33,7 @@ def test_solar_appscreener_parser_with_one_criticle_vuln_has_one_findings(self):

     def test_solar_appscreener_parser_with_many_vuln_has_many_findings(self):
         testfile = open(
-            get_unit_tests_path() + "/scans/solar_appscreener/solar_appscreener_many_vul.csv", encoding="utf-8")
+            get_unit_tests_scans_path("solar_appscreener") / "solar_appscreener_many_vul.csv", encoding="utf-8")
         parser = SolarAppscreenerParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_sonarqube_parser.py b/unittests/tools/test_sonarqube_parser.py
index 0b93fa4bb05..9edec4b00e8 100644
--- a/unittests/tools/test_sonarqube_parser.py
+++ b/unittests/tools/test_sonarqube_parser.py
@@ -1,6 +1,6 @@
 from dojo.models import Engagement, Product, Test
 from dojo.tools.sonarqube.parser import SonarQubeParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSonarQubeParser(DojoTestCase):
@@ -21,7 +21,7 @@ def test_file_name_aggregated_parse_file_with_no_vulnerabilities_has_no_findings
         self,
     ):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-no-finding.html",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -31,7 +31,7 @@
     # SonarQube Scan detailed - no finding
     def test_detailed_parse_file_with_no_vulnerabilities_has_no_findings(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-no-finding.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -44,7 +44,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi
         self,
     ):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-single-finding.html",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -93,7 +93,7 @@ def test_file_name_aggregated_parse_file_with_single_vulnerability_has_single_fi

     def test_detailed_parse_file_with_single_vulnerability_has_single_finding(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-single-finding.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -141,7 +141,7 @@ def test_detailed_parse_file_with_multiple_vulnerabilities_has_multiple_findings
         self,
     ):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-6-findings.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -154,7 +154,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
         self,
     ):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-6-findings.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -167,7 +167,7 @@ def test_file_name_aggregated_parse_file_with_multiple_vulnerabilities_has_multi
     def test_detailed_parse_file_with_table_in_table(self):
         """Test parsing when the vulnerability details include a table, with tr and td that should be ignored when looking for list of rules"""
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-table-in-table.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -247,7 +247,7 @@ def test_detailed_parse_file_with_table_in_table(self):
     def test_detailed_parse_file_with_rule_undefined(self):
         """The vulnerability's rule is not in the list of rules"""
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-rule-undefined.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-rule-undefined.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -293,7 +293,7 @@ def test_detailed_parse_file_with_rule_undefined(self):
     # SonarQube Scan - report with aggregations to be made
     def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-4-findings-3-to-aggregate.html",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -356,7 +356,7 @@ def test_file_name_aggregated_parse_file_with_vuln_on_same_filename(self):
     # SonarQube Scan detailed - report with aggregations to be made
     def test_detailed_parse_file_with_vuln_on_same_filename(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-4-findings-3-to-aggregate.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-4-findings-3-to-aggregate.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -370,7 +370,7 @@ def test_detailed_parse_file_with_vuln_issue_3725(self):
         SonarQube Scan detailed - report that crash
         see: https://github.com/DefectDojo/django-DefectDojo/issues/3725
         """
-        my_file_handle, _product, _engagement, test = self.init(get_unit_tests_path() + "/scans/sonarqube/sonar.html")
+        my_file_handle, _product, _engagement, test = self.init(get_unit_tests_scans_path("sonarqube") / "sonar.html")
         parser = SonarQubeParser()
         parser.set_mode("detailed")
         findings = parser.get_findings(my_file_handle, test)
@@ -385,7 +385,7 @@ def test_detailed_parse_file_table_has_whitespace(self):
         Data table will have some whitespaces, parser should strip it before compare or use these properties.
         """
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-table-in-table-with-whitespace.html",
+            get_unit_tests_scans_path("sonarqube") / "sonar-table-in-table-with-whitespace.html",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -464,7 +464,7 @@ def test_detailed_parse_file_table_has_whitespace(self):

     def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-no-finding.json",
+            get_unit_tests_scans_path("sonarqube") / "sonar-no-finding.json",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -474,7 +474,7 @@ def test_detailed_parse_json_file_with_no_vulnerabilities_has_no_findings(self):

     def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-single-finding.json",
+            get_unit_tests_scans_path("sonarqube") / "sonar-single-finding.json",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -553,7 +553,7 @@ def test_detailed_parse_json_file_with_single_vulnerability_has_single_finding(s

     def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_findings(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/sonar-6-findings.json",
+            get_unit_tests_scans_path("sonarqube") / "sonar-6-findings.json",
         )
         parser = SonarQubeParser()
         parser.set_mode("detailed")
@@ -565,7 +565,7 @@ def test_detailed_parse_json_file_with_multiple_vulnerabilities_has_multiple_fin

     def test_parse_json_file_from_api_with_multiple_findings_json(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/findings_over_api.json",
+            get_unit_tests_scans_path("sonarqube") / "findings_over_api.json",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -597,7 +597,7 @@ def test_parse_json_file_from_api_with_multiple_findings_json(self):

     def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/findings_over_api_hotspots.json",
+            get_unit_tests_scans_path("sonarqube") / "findings_over_api_hotspots.json",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -616,7 +616,7 @@ def test_parse_json_file_from_api_with_multiple_findings_hotspots_json(self):

     def test_parse_json_file_from_api_with_empty_json(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/findings_over_api_empty.json",
+            get_unit_tests_scans_path("sonarqube") / "findings_over_api_empty.json",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -625,7 +625,7 @@ def test_parse_json_file_from_api_with_empty_json(self):

     def test_parse_json_file_from_api_with_emppty_zip(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/empty_zip.zip",
+            get_unit_tests_scans_path("sonarqube") / "empty_zip.zip",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -634,7 +634,7 @@ def test_parse_json_file_from_api_with_emppty_zip(self):

     def test_parse_json_file_from_api_with_multiple_findings_zip(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/findings_over_api.zip",
+            get_unit_tests_scans_path("sonarqube") / "findings_over_api.zip",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
@@ -654,7 +654,7 @@ def test_parse_json_file_from_api_with_multiple_findings_zip(self):

     def test_parse_json_file_issue_10150(self):
         my_file_handle, _product, _engagement, test = self.init(
-            get_unit_tests_path() + "/scans/sonarqube/issue_10150.json",
+            get_unit_tests_scans_path("sonarqube") / "issue_10150.json",
         )
         parser = SonarQubeParser()
         findings = parser.get_findings(my_file_handle, test)
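The SonarQube tests differ from the other files in that they route file opening through a self.init() helper that also builds the Product/Engagement/Test model chain. Only its call sites appear in this patch; the sketch below is an illustration inferred from those call sites (a four-tuple of file handle, product, engagement, test), not the actual DefectDojo code:

    # Hypothetical sketch of the init() helper, grounded only in its call
    # sites above; the real method in test_sonarqube_parser.py may differ.
    from dojo.models import Engagement, Product, Test

    class TestSonarQubeParser(DojoTestCase):
        def init(self, report_path):
            my_file_handle = open(report_path, encoding="utf-8")  # caller closes it
            product = Product()
            engagement = Engagement()
            test = Test()
            engagement.product = product
            test.engagement = engagement
            return my_file_handle, product, engagement, test

Because open() is called inside the helper, migrating these tests only required changing the argument expression at each call site, which is exactly what the hunks above do.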
diff --git a/unittests/tools/test_sonatype_parser.py b/unittests/tools/test_sonatype_parser.py
index 7e6fd88fb38..a6b2018fd87 100644
--- a/unittests/tools/test_sonatype_parser.py
+++ b/unittests/tools/test_sonatype_parser.py
@@ -1,11 +1,11 @@
 from dojo.models import Test
 from dojo.tools.sonatype.parser import SonatypeParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSonatypeParser(DojoTestCase):
     def test_parse_file_with_two_vulns(self):
-        testfile = open("unittests/scans/sonatype/two_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "two_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -14,28 +14,28 @@ def test_parse_file_with_two_vulns(self):
         self.assertEqual("CVE-2016-2402", findings[0].unsaved_vulnerability_ids[0])

     def test_parse_file_with_many_vulns(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(6, len(findings))

     def test_parse_file_with_long_file_path(self):
-        testfile = open("unittests/scans/sonatype/long_file_path.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "long_file_path.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(3, len(findings))

     def test_find_no_vuln(self):
-        testfile = open("unittests/scans/sonatype/no_vuln.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "no_vuln.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(0, len(findings))

     def test_component_parsed_correctly(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -44,7 +44,7 @@ def test_component_parsed_correctly(self):
         self.assertEqual("2.6.0", findings[5].component_version)

     def test_severity_parsed_correctly(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
@@ -56,21 +56,21 @@ def test_severity_parsed_correctly(self):
         self.assertEqual("Medium", findings[5].severity)

     def test_cwe_parsed_correctly(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual("693", findings[5].cwe)

     def test_cvssv3_parsed_correctly(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual("CVSS:3.1/AV:N/AC:H/PR:N/UI:R/S:U/C:N/I:H/A:N", findings[5].cvssv3)

     def test_filepath_parsed_correctly(self):
-        testfile = open("unittests/scans/sonatype/many_vulns.json", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("sonatype") / "many_vulns.json", encoding="utf-8")
         parser = SonatypeParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_spotbugs_parser.py b/unittests/tools/test_spotbugs_parser.py
index 7a549f3639f..7f2c93df3ab 100644
--- a/unittests/tools/test_spotbugs_parser.py
+++ b/unittests/tools/test_spotbugs_parser.py
@@ -1,46 +1,46 @@
 from dojo.models import Test
 from dojo.tools.spotbugs.parser import SpotbugsParser
-from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSpotbugsParser(DojoTestCase):

     def test_no_findings(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/no_finding.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "no_finding.xml", Test())
         self.assertEqual(0, len(findings))

     def test_parse_many_finding(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         self.assertEqual(81, len(findings))

     def test_find_sast_source_line(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         self.assertEqual(95, test_finding.sast_source_line)

     def test_find_sast_source_path(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         self.assertEqual("securitytest/command/IdentityFunctionCommandInjection.kt", test_finding.sast_source_file_path)

     def test_find_source_line(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         self.assertEqual(95, test_finding.line)

     def test_find_file_path(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         self.assertEqual("securitytest/command/IdentityFunctionCommandInjection.kt", test_finding.file_path)

     def test_file(self):
         parser = SpotbugsParser()
-        testfile = open("unittests/scans/spotbugs/many_findings.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", encoding="utf-8")
         findings = parser.get_findings(testfile, Test())
         testfile.close()
         self.assertEqual(81, len(findings))
@@ -71,7 +71,7 @@ def test_file(self):

     def test_description(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         # Test if line 13 is correct
         self.assertEqual(
@@ -80,14 +80,14 @@ def test_description(self):

     def test_mitigation(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         # Test if line 8 is correct
         self.assertEqual("#### Example", test_finding.mitigation.splitlines()[7])

     def test_references(self):
         parser = SpotbugsParser()
-        findings = parser.get_findings(get_unit_tests_path() + "/scans/spotbugs/many_findings.xml", Test())
+        findings = parser.get_findings(get_unit_tests_scans_path("spotbugs") / "many_findings.xml", Test())
         test_finding = findings[0]
         # Test if line 2 is correct
         self.assertEqual(
@@ -100,7 +100,7 @@ def test_version_4_4(self):
         There was a big difference between version < 4.4.x and after
         The dictionnary is not in the report anymore
         """
-        testfile = open("unittests/scans/spotbugs/version_4.4.0.xml", encoding="utf-8")
+        testfile = open(get_unit_tests_scans_path("spotbugs") / "version_4.4.0.xml", encoding="utf-8")
         parser = SpotbugsParser()
         findings = parser.get_findings(testfile, Test())
         testfile.close()
diff --git a/unittests/tools/test_ssh_audit_parser.py b/unittests/tools/test_ssh_audit_parser.py
index ba8fd4a16fd..e30d13a1a48 100644
--- a/unittests/tools/test_ssh_audit_parser.py
+++ b/unittests/tools/test_ssh_audit_parser.py
@@ -1,12 +1,12 @@
 from dojo.models import Test
 from dojo.tools.ssh_audit.parser import SSHAuditParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSSHAuditParser(DojoTestCase):

     def test_parse_file_with_many_vuln_has_many_findings(self):
-        with open("unittests/scans/ssh_audit/many_vulns.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ssh_audit") / "many_vulns.json", encoding="utf-8") as testfile:
             parser = SSHAuditParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -20,7 +20,7 @@ def test_parse_file_with_many_vuln_has_many_findings(self):
         self.assertEqual("CVE-2021-41617", findings[0].unsaved_vulnerability_ids[0])

     def test_parse_file_with_many_vuln_has_many_findings2(self):
-        with open("unittests/scans/ssh_audit/many_vulns2.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ssh_audit") / "many_vulns2.json", encoding="utf-8") as testfile:
             parser = SSHAuditParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -33,7 +33,7 @@ def test_parse_file_with_many_vuln_has_many_findings2(self):
         self.assertEqual(findings[9].severity, "Medium")

     def test_parse_file_with_many_vuln_bug_fix(self):
-        with open("unittests/scans/ssh_audit/bug_fix.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("ssh_audit") / "bug_fix.json", encoding="utf-8") as testfile:
             parser = SSHAuditParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_ssl_labs_parser.py b/unittests/tools/test_ssl_labs_parser.py
index 575b63dbb4e..10e6de7d23a 100644
--- a/unittests/tools/test_ssl_labs_parser.py
+++ b/unittests/tools/test_ssl_labs_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.ssl_labs.parser import SslLabsParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSslLabsParser(DojoTestCase):
     def test_parse_none(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/none.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "none.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             self.assertEqual(0, len(findings))

     def test_parse_ok(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/ssl_labs_ok_v1.5.0.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "ssl_labs_ok_v1.5.0.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
                 for endpoint in finding.unsaved_endpoints:
@@ -26,7 +26,7 @@ def test_parse_ok(self):

     def test_parse_dh1024(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/ssl_labs_dh1024_v1.5.0.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "ssl_labs_dh1024_v1.5.0.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
                 for endpoint in finding.unsaved_endpoints:
@@ -41,7 +41,7 @@ def test_parse_dh1024(self):

     def test_parse_3des(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/ssl_labs_3des_v1.5.0.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "ssl_labs_3des_v1.5.0.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
                 for endpoint in finding.unsaved_endpoints:
@@ -56,7 +56,7 @@ def test_parse_3des(self):

     def test_parse_revoked(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/ssl_labs_revoked_v1.5.0.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "ssl_labs_revoked_v1.5.0.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
                 for endpoint in finding.unsaved_endpoints:
@@ -71,7 +71,7 @@ def test_parse_revoked(self):

     def test_parse_multiple(self):
         parser = SslLabsParser()
-        with open("unittests/scans/ssl_labs/ssl_labs_multiple_v1.5.0.json", encoding="utf-8") as test_file:
+        with open(get_unit_tests_scans_path("ssl_labs") / "ssl_labs_multiple_v1.5.0.json", encoding="utf-8") as test_file:
             findings = parser.get_findings(test_file, Test())
             for finding in findings:
                 for endpoint in finding.unsaved_endpoints:
diff --git a/unittests/tools/test_sslscan_parser.py b/unittests/tools/test_sslscan_parser.py
index c7bfe5abbaa..32eab6cdf7e 100644
--- a/unittests/tools/test_sslscan_parser.py
+++ b/unittests/tools/test_sslscan_parser.py
@@ -1,18 +1,18 @@
 from dojo.models import Test
 from dojo.tools.sslscan.parser import SslscanParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSslscanParser(DojoTestCase):

     def test_parse_file_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/sslscan/sslscan_no_vuln.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslscan") / "sslscan_no_vuln.xml", encoding="utf-8") as testfile:
             parser = SslscanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_file_with_one_vuln_has_one_findings(self):
-        with open("unittests/scans/sslscan/sslscan_one_vuln.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslscan") / "sslscan_one_vuln.xml", encoding="utf-8") as testfile:
             parser = SslscanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(1, len(findings))
@@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self):
             endpoint.clean()

     def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
-        with open("unittests/scans/sslscan/sslscan_many_vuln.xml", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslscan") / "sslscan_many_vuln.xml", encoding="utf-8") as testfile:
             parser = SslscanParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))
diff --git a/unittests/tools/test_sslyze_parser.py b/unittests/tools/test_sslyze_parser.py
index 05349de67a8..44af200e0c3 100644
--- a/unittests/tools/test_sslyze_parser.py
+++ b/unittests/tools/test_sslyze_parser.py
@@ -1,26 +1,23 @@
-from os import path
-from pathlib import Path
-
 from dojo.models import Test
 from dojo.tools.sslyze.parser import SslyzeParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestSslyzeJSONParser(DojoTestCase):
     def test_parse_json_file_with_one_target_has_zero_vuln_old(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_zero_vuln_old.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_zero_vuln_old.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_json_file_issue_9848(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/issue_9848.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "issue_9848.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))

     def test_parse_json_file_with_one_target_has_one_vuln_old(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_one_vuln_old.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_one_vuln_old.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
@@ -42,7 +39,7 @@ def test_parse_json_file_with_one_target_has_one_vuln_old(self):
         self.assertEqual(443, endpoint.port)

     def test_parse_json_file_with_one_target_has_four_vuln_old(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_many_vuln_old.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_many_vuln_old.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
@@ -55,20 +52,20 @@ def test_parse_json_file_with_one_target_has_four_vuln_old(self):
         self.assertEqual("CVE-2014-0224", findings[1].unsaved_vulnerability_ids[0])

     def test_parse_json_file_with_two_target_has_many_vuln_old(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/two_targets_two_vuln_old.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "two_targets_two_vuln_old.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(2, len(findings))

     def test_parse_json_file_with_one_target_has_zero_vuln_new(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_zero_vuln_new.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_zero_vuln_new.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def test_parse_json_file_with_one_target_has_one_vuln_new(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_one_vuln_new.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_one_vuln_new.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
@@ -105,13 +102,13 @@ def test_parse_json_file_with_one_target_has_one_vuln_new(self):
         self.assertEqual(443, endpoint.port)

     def test_parse_json_file_with_one_target_has_three_vuln_new(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/one_target_many_vuln_new.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "one_target_many_vuln_new.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(3, len(findings))

     def test_parse_json_file_with_two_target_has_many_vuln_new(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/two_targets_many_vuln_new.json"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "two_targets_many_vuln_new.json", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(5, len(findings))
@@ -160,7 +157,7 @@ def test_parse_json_file_with_two_target_has_many_vuln_new(self):

 class TestSSLyzeXMLParser(DojoTestCase):
     def test_parse_file_with_one_target_has_three_vuln(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/report_one_target_three_vuln.xml"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "report_one_target_three_vuln.xml", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -169,7 +166,7 @@ def test_parse_file_with_one_target_has_three_vuln(self):
         self.assertEqual(3, len(findings))

     def test_parse_xml_file_with_one_target_has_one_vuln(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/report_one_target_one_vuln.xml"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "report_one_target_one_vuln.xml", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -178,7 +175,7 @@ def test_parse_xml_file_with_one_target_has_one_vuln(self):
         self.assertEqual(1, len(findings))

     def test_parse_xml_file_with_one_target_has_three_vuln(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/report_one_target_three_vuln.xml"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "report_one_target_three_vuln.xml", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
@@ -187,7 +184,7 @@ def test_parse_xml_file_with_one_target_has_three_vuln(self):
         self.assertEqual(3, len(findings))

     def test_parse_xml_file_with_two_target_has_many_vuln(self):
-        with open(path.join(Path(__file__).parent, "../scans/sslyze/report_two_target_many_vuln.xml"), encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("sslyze") / "report_two_target_many_vuln.xml", encoding="utf-8") as testfile:
             parser = SslyzeParser()
             findings = parser.get_findings(testfile, Test())
             for finding in findings:
diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py
index 7f63ea1d458..10dcd41e862 100644
--- a/unittests/tools/test_stackhawk_parser.py
+++ b/unittests/tools/test_stackhawk_parser.py
@@ -2,32 +2,32 @@

 from dojo.models import Finding, Test
 from dojo.tools.stackhawk.parser import StackHawkParser
-from unittests.dojo_test_case import DojoTestCase
+from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path


 class TestStackHawkParser(DojoTestCase):
     __test_datetime = datetime.datetime(2022, 2, 16, 23, 7, 19, 575000, datetime.UTC)

     def test_invalid_json_format(self):
-        with open("unittests/scans/stackhawk/invalid.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("stackhawk") / "invalid.json", encoding="utf-8") as testfile:
             parser = StackHawkParser()
             with self.assertRaises(ValueError):
                 parser.get_findings(testfile, Test())

     def test_parser_ensures_data_is_for_stackhawk_before_parsing(self):
-        with open("unittests/scans/stackhawk/oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("stackhawk") / "oddly_familiar_json_that_isnt_us.json", encoding="utf-8") as testfile:
             parser = StackHawkParser()
             with self.assertRaises(ValueError):
                 parser.get_findings(testfile, Test())

     def test_stackhawk_parser_with_no_vuln_has_no_findings(self):
-        with open("unittests/scans/stackhawk/stackhawk_zero_vul.json", encoding="utf-8") as testfile:
+        with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_zero_vul.json", encoding="utf-8") as testfile:
             parser = StackHawkParser()
             findings = parser.get_findings(testfile, Test())
             self.assertEqual(0, len(findings))

     def
test_stackhawk_parser_with_one_high_vuln_has_one_findings(self): - with open("unittests/scans/stackhawk/stackhawk_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_one_vul.json", encoding="utf-8") as testfile: parser = StackHawkParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -50,7 +50,7 @@ def test_stackhawk_parser_with_one_high_vuln_has_one_findings(self): ) def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicates(self): - with open("unittests/scans/stackhawk/stackhawk_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_many_vul.json", encoding="utf-8") as testfile: parser = StackHawkParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -141,7 +141,7 @@ def test_stackhawk_parser_with_many_vuln_has_many_findings_and_removes_duplicate ) def test_that_a_scan_import_updates_the_test_description(self): - with open("unittests/scans/stackhawk/stackhawk_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_zero_vul.json", encoding="utf-8") as testfile: parser = StackHawkParser() test = Test() parser.get_findings(testfile, test) @@ -153,7 +153,7 @@ def test_that_a_scan_import_updates_the_test_description(self): ) def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_false_positive(self): - with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_false_positive.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_one_vuln_all_endpoints_false_positive.json", encoding="utf-8") as testfile: parser = StackHawkParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -173,7 +173,7 @@ def test_that_a_scan_with_all_false_positive_endpoints_on_a_finding_marks_as_fal ) def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk_accepted(self): - with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_risk_accepted.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_one_vuln_all_endpoints_risk_accepted.json", encoding="utf-8") as testfile: parser = StackHawkParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) @@ -193,7 +193,7 @@ def test_that_a_scan_with_all_risk_accepted_endpoints_on_a_finding_marks_as_risk ) def test_that_a_scan_with_endpoints_in_differing_statuses_does_not_mark_as_risk_accepted_or_false_positive(self): - with open("unittests/scans/stackhawk/stackhawk_one_vuln_all_endpoints_have_different_status.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("stackhawk") / "stackhawk_one_vuln_all_endpoints_have_different_status.json", encoding="utf-8") as testfile: parser = StackHawkParser() findings = parser.get_findings(testfile, Test()) self.__assertAllEndpointsAreClean(findings) diff --git a/unittests/tools/test_sysdig_reports_parser.py b/unittests/tools/test_sysdig_reports_parser.py index d67ea363c0a..5afc7eb2435 100644 --- a/unittests/tools/test_sysdig_reports_parser.py +++ b/unittests/tools/test_sysdig_reports_parser.py @@ -1,19 +1,18 @@ -from django.test import TestCase - from dojo.models import Test from dojo.tools.sysdig_reports.parser import SysdigReportsParser +from unittests.dojo_test_case import 
DojoTestCase, get_unit_tests_scans_path -class TestSysdigParser(TestCase): +class TestSysdigParser(DojoTestCase): def test_sysdig_parser_with_no_vuln_has_no_findings(self): - with open("unittests/scans/sysdig_reports/sysdig_reports_zero_vul.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig_reports_zero_vul.csv", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_sysdig_parser_with_one_criticle_vuln_has_one_findings(self): - with open("unittests/scans/sysdig_reports/sysdig_reports_one_vul.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig_reports_one_vul.csv", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -25,7 +24,7 @@ def test_sysdig_parser_with_one_criticle_vuln_has_one_findings(self): self.assertEqual("CVE-2018-19360", findings[0].unsaved_vulnerability_ids[0]) def test_sysdig_parser_with_many_vuln_has_many_findings(self): - with open("unittests/scans/sysdig_reports/sysdig_reports_many_vul.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig_reports_many_vul.csv", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -35,7 +34,7 @@ def test_sysdig_parser_with_many_vuln_has_many_findings(self): def test_sysdig_parser_missing_cve_field_id_from_csv_file(self): with self.assertRaises(ValueError) as context: - with open("unittests/scans/sysdig_reports/sysdig_reports_missing_cve_field.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig_reports_missing_cve_field.csv", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -47,7 +46,7 @@ def test_sysdig_parser_missing_cve_field_id_from_csv_file(self): def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self): with self.assertRaises(ValueError) as context: - with open("unittests/scans/sysdig_reports/sysdig_reports_not_starting_with_cve.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig_reports_not_starting_with_cve.csv", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -58,7 +57,7 @@ def test_sysdig_parser_missing_cve_field_not_starting_with_cve(self): ) def test_sysdig_parser_json_with_many_findings(self): - with open("unittests/scans/sysdig_reports/sysdig.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("sysdig_reports") / "sysdig.json", encoding="utf-8") as testfile: parser = SysdigReportsParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_talisman_parser.py b/unittests/tools/test_talisman_parser.py index 5f41d1da249..65be4218d0a 100644 --- a/unittests/tools/test_talisman_parser.py +++ b/unittests/tools/test_talisman_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.talisman.parser import TalismanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTalismanParser(DojoTestCase): def test_parse_empty(self): - with 
open("unittests/scans/talisman/no_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("talisman") / "no_finding.json", encoding="utf-8") as testfile: parser = TalismanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding(self): - with open("unittests/scans/talisman/one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("talisman") / "one_finding.json", encoding="utf-8") as testfile: parser = TalismanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -21,7 +21,7 @@ def test_parse_one_finding(self): self.assertIsNotNone(finding.description) def test_parse_many_finding(self): - with open("unittests/scans/talisman/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("talisman") / "many_findings.json", encoding="utf-8") as testfile: parser = TalismanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) diff --git a/unittests/tools/test_tenable_parser.py b/unittests/tools/test_tenable_parser.py index 449510393cc..2e6522bec8c 100644 --- a/unittests/tools/test_tenable_parser.py +++ b/unittests/tools/test_tenable_parser.py @@ -1,9 +1,6 @@ -from os import path -from pathlib import Path - from dojo.models import Engagement, Finding, Product, Test from dojo.tools.tenable.parser import TenableParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTenableParser(DojoTestCase): @@ -14,7 +11,7 @@ def create_test(self): return test def test_parse_some_findings_nessus_legacy(self): - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln.xml"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln.xml", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -31,7 +28,7 @@ def test_parse_some_findings_nessus_legacy(self): def test_parse_some_findings_csv_nessus_legacy(self): """Test one report provided by a user""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln.csv"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -61,7 +58,7 @@ def test_parse_some_findings_csv_nessus_legacy(self): def test_parse_some_findings_csv2_nessus_legacy(self): """Test that use default columns of Nessus Pro 8.13.1 (#257)""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln2-default.csv"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln2-default.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -83,7 +80,7 @@ def test_parse_some_findings_csv2_nessus_legacy(self): def test_parse_some_findings_csv2_all_nessus_legacy(self): """Test that use a report with all columns of Nessus Pro 8.13.1 (#257)""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / 
"nessus_many_vuln2-all.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -105,19 +102,19 @@ def test_parse_some_findings_csv2_all_nessus_legacy(self): def test_parse_some_findings_csv_bytes_nessus_legacy(self): """This tests is designed to test the parser with different read modes""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln2-all.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln2-all.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_many_vuln2-all.csv"), "rb") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_many_vuln2-all.csv", "rb") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -126,7 +123,7 @@ def test_parse_some_findings_csv_bytes_nessus_legacy(self): def test_parse_some_findings_samples_nessus_legacy(self): """Test that come from samples repo""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_v_unknown.xml"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_v_unknown.xml", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -157,7 +154,7 @@ def test_parse_some_findings_samples_nessus_legacy(self): def test_parse_some_findings_with_cvssv3_nessus_legacy(self): """Test with cvssv3""" - with open(path.join(Path(__file__).parent, "../scans/tenable/nessus/nessus_with_cvssv3.nessus"), encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_with_cvssv3.nessus", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -172,7 +169,7 @@ def test_parse_some_findings_with_cvssv3_nessus_legacy(self): self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N", finding.cvssv3) def test_parse_many_findings_xml_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / "nessus_was_many_vuln.xml", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -188,7 +185,7 @@ def test_parse_many_findings_xml_nessus_was_legacy(self): self.assertEqual("Cross-Site Scripting (XSS)", finding.title) def test_parse_one_findings_xml_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / 
"nessus_was_one_vuln.xml", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -202,7 +199,7 @@ def test_parse_one_findings_xml_nessus_was_legacy(self): self.assertEqual("Cross-Site Scripting (XSS)", finding.title) def test_parse_no_findings_xml_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / "nessus_was_no_vuln.xml", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -211,7 +208,7 @@ def test_parse_no_findings_xml_nessus_was_legacy(self): self.assertEqual(0, len(findings)) def test_parse_many_findings_csv_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / "nessus_was_many_vuln.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -229,7 +226,7 @@ def test_parse_many_findings_csv_nessus_was_legacy(self): self.assertEqual("http", finding.unsaved_endpoints[0].protocol) def test_parse_one_findings_csv_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_one_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / "nessus_was_one_vuln.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -245,13 +242,13 @@ def test_parse_one_findings_csv_nessus_was_legacy(self): self.assertEqual("http", finding.unsaved_endpoints[0].protocol) def test_parse_no_findings_csv_nessus_was_legacy(self): - with open("unittests/scans/tenable/nessus_was/nessus_was_no_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus_was") / "nessus_was_no_vuln.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) self.assertEqual(0, len(findings)) def test_parse_many_tenable_vulns(self): - with open("unittests/scans/tenable/tenable_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable") / "tenable_many_vuln.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -271,7 +268,7 @@ def test_parse_many_tenable_vulns(self): self.assertEqual("CVE-2023-32233", vulnerability_id) def test_parse_issue_6992(self): - with open("unittests/scans/tenable/nessus/issue_6992.nessus", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "issue_6992.nessus", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -281,7 +278,7 @@ def test_parse_issue_6992(self): self.assertEqual("High", findings[0].severity) def test_parse_nessus_new(self): - with open("unittests/scans/tenable/nessus/nessus_new.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable/nessus") / "nessus_new.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) self.assertEqual(99, len(findings)) @@ 
-292,7 +289,7 @@ def test_parse_nessus_new(self): self.assertEqual("3.1", finding.cvssv3_score) def test_parse_issue_9612(self): - with open("unittests/scans/tenable/issue_9612.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable") / "issue_9612.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -302,7 +299,7 @@ def test_parse_issue_9612(self): self.assertEqual("Critical", findings[0].severity) def test_parse_issue_11102(self): - with open("unittests/scans/tenable/issue_11102.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable") / "issue_11102.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: @@ -312,7 +309,7 @@ def test_parse_issue_11102(self): self.assertEqual("Reconfigure the affected application if possible to avoid use of medium strength ciphers.", findings[0].mitigation) def test_parse_issue_11127(self): - with open("unittests/scans/tenable/issue_11102.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tenable") / "issue_11102.csv", encoding="utf-8") as testfile: parser = TenableParser() findings = parser.get_findings(testfile, self.create_test()) for finding in findings: diff --git a/unittests/tools/test_terrascan_parser.py b/unittests/tools/test_terrascan_parser.py index 9046908ea2d..531fd269c63 100644 --- a/unittests/tools/test_terrascan_parser.py +++ b/unittests/tools/test_terrascan_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.terrascan.parser import TerrascanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTerrascanParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/terrascan/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("terrascan") / "no_findings.json", encoding="utf-8") as testfile: parser = TerrascanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_many_findings(self): - with open("unittests/scans/terrascan/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("terrascan") / "many_findings.json", encoding="utf-8") as testfile: parser = TerrascanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) diff --git a/unittests/tools/test_testssl_parser.py b/unittests/tools/test_testssl_parser.py index 978a48b7945..22dffec6343 100644 --- a/unittests/tools/test_testssl_parser.py +++ b/unittests/tools/test_testssl_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.testssl.parser import TestsslParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTestsslParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_finding(self): - with open("unittests/scans/testssl/defectdojo_no_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "defectdojo_no_vuln.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln_has_one_finding(self): - with open("unittests/scans/testssl/defectdojo_one_vuln.csv", 
encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "defectdojo_one_vuln.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -21,7 +21,7 @@ def test_parse_file_with_one_vuln_has_one_finding(self): self.assertEqual(1, len(findings)) def test_parse_file_with_many_vuln_has_many_findings(self): - with open("unittests/scans/testssl/defectdojo_many_vuln.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "defectdojo_many_vuln.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -45,7 +45,7 @@ def test_parse_file_with_many_vuln_has_many_findings(self): self.assertEqual(310, finding.cwe) def test_parse_file_with_many_cves(self): - with open("unittests/scans/testssl/many_cves.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "many_cves.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -66,7 +66,7 @@ def test_parse_file_with_many_cves(self): self.assertEqual(310, finding.cwe) def test_parse_file_with_31_version(self): - with open("unittests/scans/testssl/demo.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "demo.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -75,7 +75,7 @@ def test_parse_file_with_31_version(self): self.assertEqual(12, len(findings)) def test_parse_file_with_31_version2(self): - with open("unittests/scans/testssl/demo2.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "demo2.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -84,7 +84,7 @@ def test_parse_file_with_31_version2(self): self.assertEqual(3, len(findings)) def test_parse_file_with_one_vuln_has_overall_medium(self): - with open("unittests/scans/testssl/overall_medium.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "overall_medium.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -93,7 +93,7 @@ def test_parse_file_with_one_vuln_has_overall_medium(self): self.assertEqual(2, len(findings)) def test_parse_file_with_one_vuln_has_overall_critical(self): - with open("unittests/scans/testssl/overall_critical.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "overall_critical.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -102,7 +102,7 @@ def test_parse_file_with_one_vuln_has_overall_critical(self): self.assertEqual(145, len(findings)) def test_parse_file_with_one_vuln_has_failed_target(self): - with open("unittests/scans/testssl/failed_target.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("testssl") / "failed_target.csv", encoding="utf-8") as testfile: parser = TestsslParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_tfsec_parser.py b/unittests/tools/test_tfsec_parser.py index c7ce3927771..811942fff9b 100644 --- a/unittests/tools/test_tfsec_parser.py +++ 
b/unittests/tools/test_tfsec_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.tfsec.parser import TFSecParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTFSecParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/tfsec/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tfsec") / "no_findings.json", encoding="utf-8") as testfile: parser = TFSecParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding_legacy(self): - with open("unittests/scans/tfsec/one_finding_legacy.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tfsec") / "one_finding_legacy.json", encoding="utf-8") as testfile: parser = TFSecParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -31,7 +31,7 @@ def test_parse_one_finding_legacy(self): self.assertEqual(1, finding.nb_occurences) def test_parse_many_findings_legacy(self): - with open("unittests/scans/tfsec/many_findings_legacy.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tfsec") / "many_findings_legacy.json", encoding="utf-8") as testfile: parser = TFSecParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) @@ -76,7 +76,7 @@ def test_parse_many_findings_legacy(self): self.assertEqual(1, finding.nb_occurences) def test_parse_many_findings_current(self): - with open("unittests/scans/tfsec/many_findings_current.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("tfsec") / "many_findings_current.json", encoding="utf-8") as testfile: parser = TFSecParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(13, len(findings)) diff --git a/unittests/tools/test_threagile_parser.py b/unittests/tools/test_threagile_parser.py index 8bfe657fce0..71e97206446 100644 --- a/unittests/tools/test_threagile_parser.py +++ b/unittests/tools/test_threagile_parser.py @@ -1,11 +1,11 @@ from dojo.models import Test from dojo.tools.threagile.parser import ThreagileParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestThreAgileParser(DojoTestCase): def test_non_threagile_file_raises_error(self): - with open("unittests/scans/threagile/bad_formatted_risks_file.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "bad_formatted_risks_file.json", encoding="utf-8") as testfile: parser = ThreagileParser() with self.assertRaises(TypeError) as exc_context: parser.get_findings(testfile, Test()) @@ -13,13 +13,13 @@ def test_non_threagile_file_raises_error(self): self.assertEqual("Invalid ThreAgile risks file", str(exc)) def test_empty_file_returns_no_findings(self): - with open("unittests/scans/threagile/empty_file_no_risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "empty_file_no_risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_file_with_vulnerabilities_returns_correct_findings(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = 
parser.get_findings(testfile, Test()) self.assertEqual(6, len(findings)) @@ -33,28 +33,28 @@ def test_file_with_vulnerabilities_returns_correct_findings(self): self.assertEqual("policies-rego-storage-ta", finding.component_name) def test_in_discussion_is_under_review(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) finding = findings[1] self.assertTrue(finding.under_review) def test_accepted_finding_is_accepted(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) finding = findings[2] self.assertTrue(finding.risk_accepted) def test_in_progress_is_verified(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) finding = findings[3] self.assertTrue(finding.verified) def test_mitigated_is_mitigated(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) finding = findings[4] @@ -62,7 +62,7 @@ def test_mitigated_is_mitigated(self): self.assertEqual("some-runtime", finding.component_name) def test_false_positive_is_false_positive(self): - with open("unittests/scans/threagile/risks.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("threagile") / "risks.json", encoding="utf-8") as testfile: parser = ThreagileParser() findings = parser.get_findings(testfile, Test()) finding = findings[5] diff --git a/unittests/tools/test_threat_composer_parser.py b/unittests/tools/test_threat_composer_parser.py index 9dfbf524c6c..93a7b413146 100644 --- a/unittests/tools/test_threat_composer_parser.py +++ b/unittests/tools/test_threat_composer_parser.py @@ -1,24 +1,24 @@ -import os + from dojo.models import Test from dojo.tools.threat_composer.parser import ThreatComposerParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name: str): - return os.path.join("/scans/threat_composer", file_name) + return get_unit_tests_scans_path("threat_composer") / file_name class TestThreatComposerParser(DojoTestCase): def test_threat_composer_parser_with_no_threat_has_no_findings(self): - with open(get_unit_tests_path() + sample_path("threat_composer_zero_threats.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_zero_threats.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_threat_composer_parser_with_one_threat_has_one_finding(self): - with open(get_unit_tests_path() + sample_path("threat_composer_one_threat.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_one_threat.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() findings = parser.get_findings(testfile, 
Test()) self.assertEqual(1, len(findings)) @@ -38,28 +38,28 @@ def test_threat_composer_parser_with_one_threat_has_one_finding(self): self.assertFalse(finding.verified) def test_threat_composer_parser_with_many_threats_has_many_findings(self): - with open(get_unit_tests_path() + sample_path("threat_composer_many_threats.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_many_threats.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(21, len(findings)) def test_threat_composer_parser_empty_with_error(self): with self.assertRaises(ValueError) as context: - with open(get_unit_tests_path() + sample_path("threat_composer_no_threats_with_error.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_no_threats_with_error.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() parser.get_findings(testfile, Test()) self.assertNotIn("No threats found in the JSON file", str(context.exception)) def test_threat_composer_parser_with_one_threat_has_not_assumptions(self): - with open(get_unit_tests_path() + sample_path("threat_composer_broken_assumptions.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_broken_assumptions.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() findings = parser.get_findings(testfile, Test()) finding = findings[0] self.assertNotIn("Assumption", str(finding.description)) def test_threat_composer_parser_with_one_threat_has_not_mitigations(self): - with open(get_unit_tests_path() + sample_path("threat_composer_broken_mitigations.json"), encoding="utf-8") as testfile: + with open(sample_path("threat_composer_broken_mitigations.json"), encoding="utf-8") as testfile: parser = ThreatComposerParser() findings = parser.get_findings(testfile, Test()) finding = findings[0] diff --git a/unittests/tools/test_trivy_operator_parser.py b/unittests/tools/test_trivy_operator_parser.py index 2c657d5bae8..395339292ac 100644 --- a/unittests/tools/test_trivy_operator_parser.py +++ b/unittests/tools/test_trivy_operator_parser.py @@ -1,12 +1,11 @@ -import os.path from dojo.models import Test from dojo.tools.trivy_operator.parser import TrivyOperatorParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path() + "/scans/trivy_operator", file_name) + return get_unit_tests_scans_path("trivy_operator") / file_name class TestTrivyOperatorParser(DojoTestCase): diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py index 0201368798f..f566adac982 100644 --- a/unittests/tools/test_trivy_parser.py +++ b/unittests/tools/test_trivy_parser.py @@ -1,13 +1,12 @@ -import os.path import re from dojo.models import Test from dojo.tools.trivy.parser import TrivyParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path() + "/scans/trivy", file_name) + return get_unit_tests_scans_path("trivy") / file_name class TestTrivyParser(DojoTestCase): diff --git a/unittests/tools/test_trufflehog3_parser.py b/unittests/tools/test_trufflehog3_parser.py index 2e8a8523c23..f46482b35d1 100644 --- a/unittests/tools/test_trufflehog3_parser.py +++ 
b/unittests/tools/test_trufflehog3_parser.py @@ -1,13 +1,12 @@ import datetime -import os.path from dojo.models import Test from dojo.tools.trufflehog3.parser import TruffleHog3Parser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path() + "/scans/trufflehog3", file_name) + return get_unit_tests_scans_path("trufflehog3") / file_name class TestTruffleHog3Parser(DojoTestCase): diff --git a/unittests/tools/test_trufflehog_parser.py b/unittests/tools/test_trufflehog_parser.py index cfb7a6f86e2..43252645b5f 100644 --- a/unittests/tools/test_trufflehog_parser.py +++ b/unittests/tools/test_trufflehog_parser.py @@ -1,12 +1,11 @@ -import os.path from dojo.models import Test from dojo.tools.trufflehog.parser import TruffleHogParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path() + "/scans/trufflehog", file_name) + return get_unit_tests_scans_path("trufflehog") / file_name class TestTruffleHogParser(DojoTestCase): diff --git a/unittests/tools/test_trustwave_fusion_api_parser.py b/unittests/tools/test_trustwave_fusion_api_parser.py index c11c1eeb688..673e5355b6f 100644 --- a/unittests/tools/test_trustwave_fusion_api_parser.py +++ b/unittests/tools/test_trustwave_fusion_api_parser.py @@ -1,19 +1,19 @@ from dojo.models import Test from dojo.tools.trustwave_fusion_api.parser import TrustwaveFusionAPIParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTrustwaveFusionAPIParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): with open( - get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_zero_vul.json", encoding="utf-8", + get_unit_tests_scans_path("trustwave_fusion_api") / "trustwave_fusion_api_zero_vul.json", encoding="utf-8", ) as testfile: parser = TrustwaveFusionAPIParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_vuln_with_valid_cve(self): - with open("unittests/scans/trustwave_fusion_api/test_cve.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("trustwave_fusion_api") / "test_cve.json", encoding="utf-8") as testfile: parser = TrustwaveFusionAPIParser() findings = parser.get_findings(testfile, Test()) @@ -42,7 +42,7 @@ def test_vuln_with_valid_cve(self): def test_parse_file_with_multiple_vuln_has_multiple_findings(self): with open( - get_unit_tests_path() + "/scans/trustwave_fusion_api/trustwave_fusion_api_many_vul.json", encoding="utf-8", + get_unit_tests_scans_path("trustwave_fusion_api") / "trustwave_fusion_api_many_vul.json", encoding="utf-8", ) as testfile: parser = TrustwaveFusionAPIParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_trustwave_parser.py b/unittests/tools/test_trustwave_parser.py index 8f8d7150eb1..5a53268d045 100644 --- a/unittests/tools/test_trustwave_parser.py +++ b/unittests/tools/test_trustwave_parser.py @@ -1,12 +1,11 @@ -import os.path from dojo.models import Engagement, Product, Test from dojo.tools.trustwave.parser import TrustwaveParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, 
get_unit_tests_scans_path def sample_path(file_name): - return os.path.join(get_unit_tests_path() + "/scans/trustwave", file_name) + return get_unit_tests_scans_path("trustwave") / file_name class TestTrustwaveParser(DojoTestCase): diff --git a/unittests/tools/test_twistlock_parser.py b/unittests/tools/test_twistlock_parser.py index b774c704625..a6315f865e0 100644 --- a/unittests/tools/test_twistlock_parser.py +++ b/unittests/tools/test_twistlock_parser.py @@ -1,21 +1,18 @@ -from os import path -from pathlib import Path - from dojo.models import Test from dojo.tools.twistlock.parser import TwistlockParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestTwistlockParser(DojoTestCase): def test_parse_file_with_no_vuln(self): - testfile = open(path.join(Path(__file__).parent, "../scans/twistlock/no_vuln.json"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("twistlock") / "no_vuln.json", encoding="utf-8") parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_parse_file_with_one_vuln(self): - testfile = open(path.join(Path(__file__).parent, "../scans/twistlock/one_vuln.json"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("twistlock") / "one_vuln.json", encoding="utf-8") parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -24,7 +21,7 @@ def test_parse_file_with_one_vuln(self): self.assertEqual("CVE-2013-7459", findings[0].unsaved_vulnerability_ids[0]) def test_parse_file_with_no_link(self): - testfile = open(path.join(Path(__file__).parent, "../scans/twistlock/one_vuln_no_link.json"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("twistlock") / "one_vuln_no_link.json", encoding="utf-8") parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -33,14 +30,14 @@ def test_parse_file_with_no_link(self): self.assertEqual("PRISMA-2021-0013", findings[0].unsaved_vulnerability_ids[0]) def test_parse_file_with_many_vulns(self): - testfile = open(path.join(Path(__file__).parent, "../scans/twistlock/many_vulns.json"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("twistlock") / "many_vulns.json", encoding="utf-8") parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(5, len(findings)) def test_parse_file_which_contain_packages_info(self): - testfile = open(path.join(Path(__file__).parent, "../scans/twistlock/findings_include_packages.json"), encoding="utf-8") + testfile = open(get_unit_tests_scans_path("twistlock") / "findings_include_packages.json", encoding="utf-8") parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -48,7 +45,7 @@ def test_parse_file_which_contain_packages_info(self): def test_parse_file_prisma_twistlock_images_no_vuln(self): testfile = open( - path.join(Path(__file__).parent, "../scans/twistlock/scan_report_prisma_twistlock_images_no_vuln.csv"), encoding="utf-8", + get_unit_tests_scans_path("twistlock") / "scan_report_prisma_twistlock_images_no_vuln.csv", encoding="utf-8", ) parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) @@ -57,7 +54,7 @@ def test_parse_file_prisma_twistlock_images_no_vuln(self): def test_parse_file_prisma_twistlock_images_four_vulns(self): testfile = open( - path.join(Path(__file__).parent, 
"../scans/twistlock/scan_report_prisma_twistlock_images_four_vulns.csv"), encoding="utf-8", + get_unit_tests_scans_path("twistlock") / "scan_report_prisma_twistlock_images_four_vulns.csv", encoding="utf-8", ) parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) @@ -68,9 +65,8 @@ def test_parse_file_prisma_twistlock_images_four_vulns(self): def test_parse_file_prisma_twistlock_images_long_package_name(self): testfile = open( - path.join( - Path(__file__).parent, "../scans/twistlock/scan_report_prisma_twistlock_images_long_package_name.csv", - ), encoding="utf-8", + get_unit_tests_scans_path("twistlock") / "scan_report_prisma_twistlock_images_long_package_name.csv", + encoding="utf-8", ) parser = TwistlockParser() findings = parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_veracode_parser.py b/unittests/tools/test_veracode_parser.py index 1f2ab6626a8..64861a3a04e 100644 --- a/unittests/tools/test_veracode_parser.py +++ b/unittests/tools/test_veracode_parser.py @@ -4,7 +4,7 @@ from dojo.models import Endpoint, Engagement, Product, Product_Type, Test from dojo.tools.veracode.parser import VeracodeParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestVeracodeScannerParser(DojoTestCase): @@ -24,7 +24,7 @@ def test_parse_file_with_one_finding(self): self.parse_file_with_one_finding() def parse_file_with_one_finding(self): - with open("unittests/scans/veracode/one_finding.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "one_finding.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -37,7 +37,7 @@ def test_parse_file_many_findings_different_hash_code_different_unique_id(self): self.parse_file_many_findings_different_hash_code_different_unique_id() def parse_file_many_findings_different_hash_code_different_unique_id(self): - with open("unittests/scans/veracode/many_findings_different_hash_code_different_unique_id.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "many_findings_different_hash_code_different_unique_id.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -68,7 +68,7 @@ def test_parse_file_with_multiple_finding(self): self.parse_file_with_multiple_finding() def parse_file_with_multiple_finding(self): - with open("unittests/scans/veracode/many_findings.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "many_findings.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(4, len(findings)) @@ -113,7 +113,7 @@ def test_parse_file_with_multiple_finding2(self): self.assertEqual(datetime.datetime.today().date(), finding.date) def parse_file_with_multiple_finding2(self): - with open("unittests/scans/veracode/veracode_scan.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "veracode_scan.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(7, len(findings)) @@ -147,7 +147,7 @@ def test_parse_file_with_mitigated_finding(self): self.parse_file_with_mitigated_finding() def parse_file_with_mitigated_finding(self): - with 
open("unittests/scans/veracode/mitigated_finding.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "mitigated_finding.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, self.test) self.assertEqual(1, len(findings)) @@ -166,7 +166,7 @@ def test_parse_file_with_mitigated_fixed_finding(self): self.parse_file_with_mitigated_fixed_finding() def parse_file_with_mitigated_fixed_finding(self): - with open("unittests/scans/veracode/mitigated_fixed_finding.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "mitigated_fixed_finding.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -183,7 +183,7 @@ def test_parse_file_with_mitigated_sca_finding(self): self.parse_file_with_mitigated_sca_finding() def parse_file_with_mitigated_sca_finding(self): - with open("unittests/scans/veracode/veracode_scan_sca_mitigated.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "veracode_scan_sca_mitigated.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -202,7 +202,7 @@ def test_parse_file_with_dynamic_finding(self): self.assertEqual(datetime.datetime.today().date(), finding.date) def parse_file_with_dynamic_finding(self): - with open("unittests/scans/veracode/dynamic_finding.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "dynamic_finding.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -230,7 +230,7 @@ def test_parse_file_with_changed_severity(self): self.parse_file_with_changed_severity() def parse_file_with_changed_severity(self): - with open("unittests/scans/veracode/veracode_scan_changed_severity.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "veracode_scan_changed_severity.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(7, len(findings)) @@ -252,7 +252,7 @@ def test_maven_component_name(self): self.maven_component_name() def maven_component_name(self): - with open("unittests/scans/veracode/veracode_maven.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode") / "veracode_maven.xml", encoding="utf-8") as testfile: parser = VeracodeParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -329,7 +329,7 @@ def test_json_static_findings_list_format(self): self.json_static_findings_list_format() def json_static_findings_list_format(self): - self.json_static_findings_test("unittests/scans/veracode/static_findings_list_format.json") + self.json_static_findings_test(get_unit_tests_scans_path("veracode") / "static_findings_list_format.json") @override_settings(USE_FIRST_SEEN=True) def test_json_static_embedded_format_first_seen(self): @@ -339,7 +339,7 @@ def test_json_static_embedded_format(self): self.json_static_embedded_format() def json_static_embedded_format(self): - self.json_static_findings_test("unittests/scans/veracode/static_embedded_format.json") + self.json_static_findings_test(get_unit_tests_scans_path("veracode") / "static_embedded_format.json") def json_dynamic_findings_test(self, file_name): with open(file_name, 
encoding="utf-8") as testfile: @@ -401,7 +401,7 @@ def test_json_dynamic_findings_list_format(self): self.json_dynamic_findings_list_format() def json_dynamic_findings_list_format(self): - self.json_dynamic_findings_test("unittests/scans/veracode/dynamic_findings_list_format.json") + self.json_dynamic_findings_test(get_unit_tests_scans_path("veracode") / "dynamic_findings_list_format.json") @override_settings(USE_FIRST_SEEN=True) def test_json_dynamic_embedded_format_first_seen(self): @@ -411,7 +411,7 @@ def test_json_dynamic_embedded_format(self): self.json_dynamic_embedded_format() def json_dynamic_embedded_format(self): - self.json_dynamic_findings_test("unittests/scans/veracode/dynamic_embedded_format.json") + self.json_dynamic_findings_test(get_unit_tests_scans_path("veracode") / "dynamic_embedded_format.json") def json_sca_findings_test(self, file_name): with open(file_name, encoding="utf-8") as testfile: @@ -490,7 +490,7 @@ def test_json_sca_findings_list_format(self): self.json_sca_findings_list_format() def json_sca_findings_list_format(self): - self.json_sca_findings_test("unittests/scans/veracode/sca_findings_list_format.json") + self.json_sca_findings_test(get_unit_tests_scans_path("veracode") / "sca_findings_list_format.json") @override_settings(USE_FIRST_SEEN=True) def test_json_sca_embedded_format_first_seen(self): @@ -500,4 +500,4 @@ def test_json_sca_embedded_format(self): self.json_sca_embedded_format() def json_sca_embedded_format(self): - self.json_sca_findings_test("unittests/scans/veracode/sca_embedded_format.json") + self.json_sca_findings_test(get_unit_tests_scans_path("veracode") / "sca_embedded_format.json") diff --git a/unittests/tools/test_veracode_sca_parser.py b/unittests/tools/test_veracode_sca_parser.py index 0951f5024d6..2c3ebe3f32f 100644 --- a/unittests/tools/test_veracode_sca_parser.py +++ b/unittests/tools/test_veracode_sca_parser.py @@ -5,7 +5,7 @@ from dojo.models import Test from dojo.tools.veracode_sca.parser import VeracodeScaParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestVeracodeScaScannerParser(DojoTestCase): @@ -18,7 +18,7 @@ def test_parse_csv(self): self.parse_csv() def parse_csv(self): - with open("unittests/scans/veracode_sca/veracode_sca.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode_sca") / "veracode_sca.csv", encoding="utf-8") as testfile: parser = VeracodeScaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) @@ -67,7 +67,7 @@ def test_parse_json(self): self.parse_json() def parse_json(self): - with open("unittests/scans/veracode_sca/veracode_sca.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode_sca") / "veracode_sca.json", encoding="utf-8") as testfile: parser = VeracodeScaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -93,7 +93,7 @@ def test_parse_json_fixed(self): self.parse_json_fixed() def parse_json_fixed(self): - with open("unittests/scans/veracode_sca/veracode_sca_fixed.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("veracode_sca") / "veracode_sca_fixed.json", encoding="utf-8") as testfile: parser = VeracodeScaParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) diff --git a/unittests/tools/test_wapiti_parser.py b/unittests/tools/test_wapiti_parser.py index 22278904420..147f95f5720 100644 --- 
a/unittests/tools/test_wapiti_parser.py +++ b/unittests/tools/test_wapiti_parser.py @@ -1,13 +1,13 @@ from dojo.models import Test from dojo.tools.wapiti.parser import WapitiParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWapitiParser(DojoTestCase): def test_parse_file_3_0_4(self): """Generated with version 3.0.4 on OWASP Juicy Shop""" - with open("unittests/scans/wapiti/juicyshop.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wapiti") / "juicyshop.xml", encoding="utf-8") as testfile: parser = WapitiParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -28,7 +28,7 @@ def test_parse_file_3_0_4(self): def test_parse_file_demo(self): """""" - with open("unittests/scans/wapiti/demo.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wapiti") / "demo.xml", encoding="utf-8") as testfile: parser = WapitiParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -41,7 +41,7 @@ def test_parse_file_demo(self): def test_parse_file_example(self): """""" - with open("unittests/scans/wapiti/example.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wapiti") / "example.xml", encoding="utf-8") as testfile: parser = WapitiParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -54,7 +54,7 @@ def test_parse_file_example(self): def test_parse_cwe(self): """File to test CWE""" - with open("unittests/scans/wapiti/cwe.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wapiti") / "cwe.xml", encoding="utf-8") as testfile: parser = WapitiParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_wazuh_parser.py b/unittests/tools/test_wazuh_parser.py index 3c8a33b003c..67c1c90d30f 100644 --- a/unittests/tools/test_wazuh_parser.py +++ b/unittests/tools/test_wazuh_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.wazuh.parser import WazuhParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWazuhParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/wazuh/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wazuh") / "no_findings.json", encoding="utf-8") as testfile: parser = WazuhParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding(self): - with open("unittests/scans/wazuh/one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wazuh") / "one_finding.json", encoding="utf-8") as testfile: parser = WazuhParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -26,7 +26,7 @@ def test_parse_one_finding(self): self.assertEqual(5.5, finding.cvssv3_score) def test_parse_many_finding(self): - with open("unittests/scans/wazuh/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wazuh") / "many_findings.json", encoding="utf-8") as testfile: parser = WazuhParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -36,7 +36,7 @@ def test_parse_many_finding(self): self.assertEqual("2023-02-08", finding.date) def test_parse_one_finding_with_endpoint(self): - with 
open("unittests/scans/wazuh/one_finding_with_endpoint.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wazuh") / "one_finding_with_endpoint.json", encoding="utf-8") as testfile: parser = WazuhParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_wfuzz_parser.py b/unittests/tools/test_wfuzz_parser.py index 0434f419963..672a1b75a5d 100644 --- a/unittests/tools/test_wfuzz_parser.py +++ b/unittests/tools/test_wfuzz_parser.py @@ -1,18 +1,18 @@ from dojo.models import Test from dojo.tools.wfuzz.parser import WFuzzParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWFuzzParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/wfuzz/no_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "no_findings.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_one_finding(self): - with open("unittests/scans/wfuzz/one_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "one_finding.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -21,7 +21,7 @@ def test_parse_one_finding(self): self.assertEqual(1, len(findings)) def test_parse_many_finding(self): - with open("unittests/scans/wfuzz/many_findings.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "many_findings.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -30,7 +30,7 @@ def test_parse_many_finding(self): self.assertEqual(4, len(findings)) def test_one_dup_finding(self): - with open("unittests/scans/wfuzz/one_dup_finding.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "one_dup_finding.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -39,7 +39,7 @@ def test_one_dup_finding(self): self.assertEqual(4, len(findings)) def test_issue_7863(self): - with open("unittests/scans/wfuzz/issue_7863.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "issue_7863.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -49,7 +49,7 @@ def test_issue_7863(self): self.assertEqual("Medium", findings[0].severity) def test_one_finding_responsecode_missing(self): - with open("unittests/scans/wfuzz/one_finding_responsecode_missing.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wfuzz") / "one_finding_responsecode_missing.json", encoding="utf-8") as testfile: parser = WFuzzParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_whispers_parser.py b/unittests/tools/test_whispers_parser.py index e1556605268..fdf4b700cae 100644 --- a/unittests/tools/test_whispers_parser.py +++ b/unittests/tools/test_whispers_parser.py @@ -1,15 +1,14 @@ -from django.test import TestCase - from dojo.models import Test from dojo.tools.whispers.parser import WhispersParser +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path -class 
TestWhispersParser(TestCase): +class TestWhispersParser(DojoTestCase): def test_whispers_parser_severity_map(self): fixtures = [ - "unittests/scans/whispers/whispers_one_vul.json", # v2.1 format - "unittests/scans/whispers/whispers_one_vul_v2.2.json", # v2.2 format + get_unit_tests_scans_path("whispers") / "whispers_one_vul.json", # v2.1 format + get_unit_tests_scans_path("whispers") / "whispers_one_vul_v2.2.json", # v2.2 format ] expected_severity = "High" @@ -21,14 +20,14 @@ def test_whispers_parser_severity_map(self): self.assertEqual(expected_severity, findings[0].severity) def test_whispers_parser_with_no_vuln_has_no_findings(self): - testfile = open("unittests/scans/whispers/whispers_zero_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("whispers") / "whispers_zero_vul.json", encoding="utf-8") parser = WhispersParser() findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(0, len(findings)) def test_whispers_parser_with_one_critical_vuln_has_one_findings(self): - testfile = open("unittests/scans/whispers/whispers_one_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("whispers") / "whispers_one_vul.json", encoding="utf-8") parser = WhispersParser() findings = parser.get_findings(testfile, Test()) testfile.close() @@ -41,7 +40,7 @@ def test_whispers_parser_with_one_critical_vuln_has_one_findings(self): self.assertEqual("pip.conf Password", findings[0].vuln_id_from_tool) def test_whispers_parser_with_many_vuln_has_many_findings(self): - testfile = open("unittests/scans/whispers/whispers_many_vul.json", encoding="utf-8") + testfile = open(get_unit_tests_scans_path("whispers") / "whispers_many_vul.json", encoding="utf-8") parser = WhispersParser() findings = parser.get_findings(testfile, Test()) testfile.close() diff --git a/unittests/tools/test_whitehat_sentinel_parser.py b/unittests/tools/test_whitehat_sentinel_parser.py index 7cfd1ba6bb4..fa2c0203025 100644 --- a/unittests/tools/test_whitehat_sentinel_parser.py +++ b/unittests/tools/test_whitehat_sentinel_parser.py @@ -1,30 +1,30 @@ from dojo.models import Test from dojo.tools.whitehat_sentinel.parser import WhiteHatSentinelParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWhiteHatSentinelParser(DojoTestCase): def test_parse_file_with_no_vuln_has_no_findings(self): with self.assertRaises(ValueError): - with open("unittests/scans/whitehat_sentinel/empty_file.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("whitehat_sentinel") / "empty_file.json", encoding="utf-8") as testfile: parser = WhiteHatSentinelParser() parser.get_findings(testfile, Test()) def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/whitehat_sentinel/one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("whitehat_sentinel") / "one_vuln.json", encoding="utf-8") as testfile: parser = WhiteHatSentinelParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/whitehat_sentinel/many_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("whitehat_sentinel") / "many_vuln.json", encoding="utf-8") as testfile: parser = WhiteHatSentinelParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(3, len(findings)) def 
test_parse_file_with_invalid_data(self): with self.assertRaises(ValueError): - with open("unittests/scans/whitehat_sentinel/invalid_data.txt", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("whitehat_sentinel") / "invalid_data.txt", encoding="utf-8") as testfile: parser = WhiteHatSentinelParser() parser.get_findings(testfile, Test()) diff --git a/unittests/tools/test_wiz_parser.py b/unittests/tools/test_wiz_parser.py index 19d8c7bd234..3a555d9e3b3 100644 --- a/unittests/tools/test_wiz_parser.py +++ b/unittests/tools/test_wiz_parser.py @@ -1,11 +1,11 @@ from dojo.models import Test from dojo.tools.wiz.parser import WizParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWizParser(DojoTestCase): def test_no_findings(self): - with open("unittests/scans/wiz/no_findings.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wiz") / "no_findings.csv", encoding="utf-8") as testfile: parser = WizParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -14,7 +14,7 @@ def test_no_findings(self): self.assertEqual(0, len(findings)) def test_one_findings(self): - with open("unittests/scans/wiz/one_finding.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wiz") / "one_finding.csv", encoding="utf-8") as testfile: parser = WizParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -26,7 +26,7 @@ def test_one_findings(self): self.assertEqual("Informational", finding.severity) def test_multiple_findings(self): - with open("unittests/scans/wiz/multiple_findings.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wiz") / "multiple_findings.csv", encoding="utf-8") as testfile: parser = WizParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -65,7 +65,7 @@ def test_multiple_findings(self): self.assertEqual("Informational", finding.severity) def test_sca_format(self): - with open("unittests/scans/wiz/sca_format.csv", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wiz") / "sca_format.csv", encoding="utf-8") as testfile: parser = WizParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(5, len(findings)) diff --git a/unittests/tools/test_wizcli_dir_parser.py b/unittests/tools/test_wizcli_dir_parser.py index 7075aa42f90..8ebf71e43d7 100644 --- a/unittests/tools/test_wizcli_dir_parser.py +++ b/unittests/tools/test_wizcli_dir_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.wizcli_dir.parser import WizcliDirParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWizcliDirParser(DojoTestCase): def test_no_findings(self): - with open("unittests/scans/wizcli_dir/wizcli_dir_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_dir") / "wizcli_dir_zero_vul.json", encoding="utf-8") as testfile: parser = WizcliDirParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(len(findings), 0) def test_one_findings(self): - with open("unittests/scans/wizcli_dir/wizcli_dir_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_dir") / "wizcli_dir_one_vul.json", encoding="utf-8") as testfile: parser = WizcliDirParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -35,7 
+35,7 @@ def test_one_findings(self): ) def test_multiple_findings(self): - with open("unittests/scans/wizcli_dir/wizcli_dir_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_dir") / "wizcli_dir_many_vul.json", encoding="utf-8") as testfile: parser = WizcliDirParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(7, len(findings)) diff --git a/unittests/tools/test_wizcli_iac_parser.py b/unittests/tools/test_wizcli_iac_parser.py index 4d9d9d61547..3bef429ee00 100644 --- a/unittests/tools/test_wizcli_iac_parser.py +++ b/unittests/tools/test_wizcli_iac_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.wizcli_iac.parser import WizcliIaCParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWizcliIaCParser(DojoTestCase): def test_no_findings(self): - with open("unittests/scans/wizcli_iac/wizcli_iac_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_iac") / "wizcli_iac_zero_vul.json", encoding="utf-8") as testfile: parser = WizcliIaCParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(len(findings), 0) def test_one_findings(self): - with open("unittests/scans/wizcli_iac/wizcli_iac_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_iac") / "wizcli_iac_one_vul.json", encoding="utf-8") as testfile: parser = WizcliIaCParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -33,7 +33,7 @@ def test_one_findings(self): ) def test_multiple_findings(self): - with open("unittests/scans/wizcli_iac/wizcli_iac_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_iac") / "wizcli_iac_many_vul.json", encoding="utf-8") as testfile: parser = WizcliIaCParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(25, len(findings)) diff --git a/unittests/tools/test_wizcli_img_parser.py b/unittests/tools/test_wizcli_img_parser.py index 36d2f7c9dbd..a21b07a282a 100644 --- a/unittests/tools/test_wizcli_img_parser.py +++ b/unittests/tools/test_wizcli_img_parser.py @@ -1,17 +1,17 @@ from dojo.models import Test from dojo.tools.wizcli_img.parser import WizcliImgParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWizcliImgParser(DojoTestCase): def test_no_findings(self): - with open("unittests/scans/wizcli_img/wizcli_img_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_img") / "wizcli_img_zero_vul.json", encoding="utf-8") as testfile: parser = WizcliImgParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(len(findings), 0) def test_one_findings(self): - with open("unittests/scans/wizcli_img/wizcli_img_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_img") / "wizcli_img_one_vul.json", encoding="utf-8") as testfile: parser = WizcliImgParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) @@ -31,7 +31,7 @@ def test_one_findings(self): ) def test_multiple_findings(self): - with open("unittests/scans/wizcli_img/wizcli_img_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wizcli_img") / "wizcli_img_many_vul.json", encoding="utf-8") as testfile: parser = WizcliImgParser() findings = 
parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) diff --git a/unittests/tools/test_wpscan_parser.py b/unittests/tools/test_wpscan_parser.py index 0b44ee49654..68845f407c5 100644 --- a/unittests/tools/test_wpscan_parser.py +++ b/unittests/tools/test_wpscan_parser.py @@ -2,20 +2,20 @@ from dojo.models import Test from dojo.tools.wpscan.parser import WpscanParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestWpscanParser(DojoTestCase): def test_parse_file_empty(self): """Report from the tool wich have no data""" - with open("unittests/scans/wpscan/empty.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "empty.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_exemple(self): - with open("unittests/scans/wpscan/sample.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "sample.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -29,7 +29,7 @@ def test_parse_file_exemple(self): self.assertEqual(datetime.datetime(2021, 3, 26, 11, 50, 50, tzinfo=datetime.UTC), finding.date) def test_parse_file_with_no_vuln_has_no_findings(self): - with open("unittests/scans/wpscan/wordpress_no_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "wordpress_no_vuln.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -38,7 +38,7 @@ def test_parse_file_with_no_vuln_has_no_findings(self): self.assertEqual(7, len(findings)) def test_parse_file_with_one_vuln_has_one_findings(self): - with open("unittests/scans/wpscan/wordpress_one_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "wordpress_one_vuln.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -52,7 +52,7 @@ def test_parse_file_with_one_vuln_has_one_findings(self): self.assertEqual(datetime.datetime(2019, 7, 2, 19, 11, 16, tzinfo=datetime.UTC), finding.date) def test_parse_file_with_multiple_vuln_has_multiple_finding(self): - with open("unittests/scans/wpscan/wordpress_many_vuln.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "wordpress_many_vuln.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -66,7 +66,7 @@ def test_parse_file_with_multiple_vuln_has_multiple_finding(self): self.assertEqual(datetime.datetime(2019, 7, 2, 19, 11, 16, tzinfo=datetime.UTC), finding.date) def test_parse_file_with_multiple_vuln(self): - with open("unittests/scans/wpscan/wpscan.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "wpscan.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -93,7 +93,7 @@ def test_parse_file_with_multiple_vuln(self): self.assertEqual("", finding.get_scanner_confidence_text()) # data are => "confidence": 100, def test_parse_file_with_multiple_vuln_in_version(self): - with open("unittests/scans/wpscan/wordpress_vuln_version.json", encoding="utf-8") as testfile: + with 
open(get_unit_tests_scans_path("wpscan") / "wordpress_vuln_version.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -110,7 +110,7 @@ def test_parse_file_with_multiple_vuln_in_version(self): self.assertEqual("", finding.get_scanner_confidence_text()) # data are => 100% def test_parse_file_issue5774(self): - with open("unittests/scans/wpscan/issue5774.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("wpscan") / "issue5774.json", encoding="utf-8") as testfile: parser = WpscanParser() findings = parser.get_findings(testfile, Test()) for finding in findings: diff --git a/unittests/tools/test_xanitizer_parser.py b/unittests/tools/test_xanitizer_parser.py index 22b15010c98..b46a4f96959 100644 --- a/unittests/tools/test_xanitizer_parser.py +++ b/unittests/tools/test_xanitizer_parser.py @@ -1,24 +1,24 @@ from dojo.models import Test from dojo.tools.xanitizer.parser import XanitizerParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_path +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestXanitizerParser(DojoTestCase): def test_parse_file_with_no_findings(self): - with open("unittests/scans/xanitizer/no-findings.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("xanitizer") / "no-findings.xml", encoding="utf-8") as testfile: parser = XanitizerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(0, len(findings)) def test_parse_file_with_one_findings(self): - with open("unittests/scans/xanitizer/one-findings.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("xanitizer") / "one-findings.xml", encoding="utf-8") as testfile: parser = XanitizerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(1, len(findings)) def test_parse_file_with_multiple_findings(self): - with open("unittests/scans/xanitizer/multiple-findings.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("xanitizer") / "multiple-findings.xml", encoding="utf-8") as testfile: parser = XanitizerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) @@ -27,7 +27,7 @@ def test_parse_file_with_multiple_findings(self): self.assertEqual("CVE-2015-5211", finding.unsaved_vulnerability_ids[0]) def test_parse_file_with_multiple_findings_no_details(self): - with open(get_unit_tests_path() + "/scans/xanitizer/multiple-findings-no-details.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("xanitizer") / "multiple-findings-no-details.xml", encoding="utf-8") as testfile: parser = XanitizerParser() findings = parser.get_findings(testfile, Test()) self.assertEqual(9, len(findings)) diff --git a/unittests/tools/test_yarn_audit_parser.py b/unittests/tools/test_yarn_audit_parser.py index 6c95592960d..428b4ac1c51 100644 --- a/unittests/tools/test_yarn_audit_parser.py +++ b/unittests/tools/test_yarn_audit_parser.py @@ -1,6 +1,6 @@ from dojo.models import Engagement, Product, Test from dojo.tools.yarn_audit.parser import YarnAuditParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestYarnAuditParser(DojoTestCase): @@ -16,13 +16,13 @@ def test_yarn_audit_parser_without_file_has_no_findings(self): self.assertEqual(0, len(findings)) def test_yarn_audit_parser_with_no_vuln_has_no_findings(self): - with 
open("unittests/scans/yarn_audit/yarn_audit_zero_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "yarn_audit_zero_vul.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) self.assertEqual(0, len(findings)) def test_yarn_audit_parser_with_one_criticle_vuln_has_one_findings(self): - with open("unittests/scans/yarn_audit/yarn_audit_one_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "yarn_audit_one_vul.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) self.assertEqual(1, len(findings)) @@ -30,14 +30,14 @@ def test_yarn_audit_parser_with_one_criticle_vuln_has_one_findings(self): self.assertEqual("4.5.2", findings[0].component_version) def test_yarn_audit_parser_with_many_vuln_has_many_findings(self): - with open("unittests/scans/yarn_audit/yarn_audit_many_vul.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "yarn_audit_many_vul.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) self.assertEqual(3, len(findings)) def test_yarn_audit_parser_with_multiple_cwes_per_finding(self): # cwes formatted as escaped list: "cwe": "[\"CWE-346\",\"CWE-453\"]", - with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "yarn_audit_multiple_cwes.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) self.assertEqual(3, len(findings)) @@ -53,7 +53,7 @@ def test_yarn_audit_parser_with_multiple_cwes_per_finding(self): def test_yarn_audit_parser_with_multiple_cwes_per_finding_list(self): # cwes formatted as proper list: "cwe": ["CWE-918","CWE-1333"], - with open("unittests/scans/yarn_audit/yarn_audit_multiple_cwes2.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "yarn_audit_multiple_cwes2.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) self.assertEqual(2, len(findings)) @@ -64,7 +64,7 @@ def test_yarn_audit_parser_with_multiple_cwes_per_finding_list(self): def test_yarn_audit_parser_empty_with_error(self): with self.assertRaises(ValueError) as context: - with open("unittests/scans/yarn_audit/empty_with_error.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "empty_with_error.json", encoding="utf-8") as testfile: parser = YarnAuditParser() parser.get_findings(testfile, self.get_test()) self.assertIn( @@ -73,7 +73,7 @@ def test_yarn_audit_parser_empty_with_error(self): self.assertIn("ECONNREFUSED", str(context.exception)) def test_yarn_audit_parser_issue_6495(self): - with open("unittests/scans/yarn_audit/issue_6495.json", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("yarn_audit") / "issue_6495.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) testfile.close() @@ -83,7 +83,7 @@ def test_yarn_audit_parser_issue_6495(self): self.assertEqual(findings[1].cve, None) def test_yarn_audit_parser_yarn2_audit_issue9911(self): - with open("unittests/scans/yarn_audit/yarn2_audit_issue9911.json", encoding="utf-8") as testfile: + with 
open(get_unit_tests_scans_path("yarn_audit") / "yarn2_audit_issue9911.json", encoding="utf-8") as testfile: parser = YarnAuditParser() findings = parser.get_findings(testfile, self.get_test()) testfile.close() diff --git a/unittests/tools/test_zap_parser.py b/unittests/tools/test_zap_parser.py index 914bb05757b..7d7fb296305 100644 --- a/unittests/tools/test_zap_parser.py +++ b/unittests/tools/test_zap_parser.py @@ -1,18 +1,18 @@ from dojo.models import Finding, Test from dojo.tools.zap.parser import ZapParser -from unittests.dojo_test_case import DojoTestCase +from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path class TestZapParser(DojoTestCase): def test_parse_no_findings(self): - with open("unittests/scans/zap/empty_2.9.0.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "empty_2.9.0.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) self.assertEqual(0, len(findings)) def test_parse_some_findings(self): - with open("unittests/scans/zap/some_2.9.0.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "some_2.9.0.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -23,7 +23,7 @@ def test_parse_some_findings(self): endpoint.clean() def test_parse_some_findings_0(self): - with open("unittests/scans/zap/0_zap_sample.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "0_zap_sample.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -34,7 +34,7 @@ def test_parse_some_findings_0(self): endpoint.clean() def test_parse_some_findings_1(self): - with open("unittests/scans/zap/1_zap_sample_0_and_new_absent.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "1_zap_sample_0_and_new_absent.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -45,7 +45,7 @@ def test_parse_some_findings_1(self): endpoint.clean() def test_parse_some_findings_2(self): - with open("unittests/scans/zap/2_zap_sample_0_and_new_endpoint.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "2_zap_sample_0_and_new_endpoint.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -56,7 +56,7 @@ def test_parse_some_findings_2(self): endpoint.clean() def test_parse_some_findings_3(self): - with open("unittests/scans/zap/3_zap_sampl_0_and_different_severities.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "3_zap_sampl_0_and_different_severities.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -67,7 +67,7 @@ def test_parse_some_findings_3(self): endpoint.clean() def test_parse_some_findings_5(self): - with open("unittests/scans/zap/5_zap_sample_one.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "5_zap_sample_one.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -82,7 +82,7 @@ def test_parse_issue4360(self): Report from GitHub 
issue 4360 see: https://github.com/DefectDojo/django-DefectDojo/issues/4360 """ - with open("unittests/scans/zap/dvwa_baseline_dojo.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "dvwa_baseline_dojo.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) @@ -117,7 +117,7 @@ def test_parse_issue4697(self): Report from GitHub issue 4697 see: https://github.com/DefectDojo/django-DefectDojo/issues/4697 """ - with open("unittests/scans/zap/zap-results-first-scan.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "zap-results-first-scan.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -158,7 +158,7 @@ def test_parse_issue4697(self): def test_parse_juicy(self): """Generated with OWASP Juicy shop""" - with open("unittests/scans/zap/juicy2.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "juicy2.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) for finding in findings: @@ -198,7 +198,7 @@ def test_parse_juicy(self): self.assertEqual("assets", endpoint.path) def test_parse_xml_plus_format(self): - with open("unittests/scans/zap/zap-xml-plus-format.xml", encoding="utf-8") as testfile: + with open(get_unit_tests_scans_path("zap") / "zap-xml-plus-format.xml", encoding="utf-8") as testfile: parser = ZapParser() findings = parser.get_findings(testfile, Test()) for finding in findings: From 55526a084e7cd528fd1eebd2f891e67fe625ddc9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Jan 2025 11:23:04 -0600 Subject: [PATCH 90/99] Bump boto3 from 1.36.8 to 1.36.9 (#11692) Bumps [boto3](https://github.com/boto/boto3) from 1.36.8 to 1.36.9. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.8...1.36.9) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... 
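Note on the test-path refactor in the parser-test hunks above: every call site now builds report paths as get_unit_tests_scans_path(<tool>) / <filename> instead of hard-coding a "unittests/scans/..." string, so the tests stop depending on the process working directory. The helper is imported from unittests/dojo_test_case.py, but its definition is not part of this series; the following is only a minimal sketch of what such a pathlib-based helper could look like (the bodies below are assumptions for illustration, not the project's actual implementation):

    from pathlib import Path

    # Sketch only: the real helpers live in unittests/dojo_test_case.py and may differ.
    def get_unit_tests_path() -> Path:
        # Absolute path of the unittests package directory (assumed layout).
        return Path(__file__).parent.resolve()

    def get_unit_tests_scans_path(parser_name: str) -> Path:
        # Directory of sample reports for one parser, e.g. .../unittests/scans/wpscan.
        return get_unit_tests_path() / "scans" / parser_name

Because the helper returns a pathlib.Path, call sites can join filenames with the / operator, e.g. open(get_unit_tests_scans_path("wpscan") / "empty.json", encoding="utf-8"), which is the exact shape of the + lines in the hunks above.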
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f7a24d6c342..e8ffd6bc2a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.8 # Required for Celery Broker AWS (SQS) support +boto3==1.36.9 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 3a9d81c2327d59521e86856d8f60885199e479f8 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Thu, 30 Jan 2025 19:45:30 -0600 Subject: [PATCH 91/99] Jira Epic Mapping: Support for the removal of `Epic Name` custom fields (#11690) * Jira Epic Mapping: Support for the removal of `Epic Name` custom fields * Update cassettes --- dojo/jira_link/helper.py | 36 +- ...nt_with_jira_project_and_epic_mapping.yaml | 473 ++++++--- ...TestApi.test_engagement_epic_creation.yaml | 192 +++- ...enabled_create_epic_and_push_findings.yaml | 959 ++++++++++-------- 4 files changed, 1003 insertions(+), 657 deletions(-) diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py index a557a05a3c1..a691d13c22d 100644 --- a/dojo/jira_link/helper.py +++ b/dojo/jira_link/helper.py @@ -677,10 +677,26 @@ def add_issues_to_epic(jira, obj, epic_id, issue_keys, ignore_epics=True): try: return jira.add_issues_to_epic(epic_id=epic_id, issue_keys=issue_keys, ignore_epics=ignore_epics) except JIRAError as e: - logger.error("error adding issues %s to epic %s for %s", issue_keys, epic_id, obj.id) - logger.exception(e) - log_jira_alert(e.text, obj) - return False + """ + We must try to accommodate the following: + + The request contains a next-gen issue. This operation can't add next-gen issues to epics. + To add a next-gen issue to an epic, use the Edit issue operation and set the parent property + (i.e., '"parent":{"key":"PROJ-123"}' where "PROJ-123" has an issue type at level one of the issue type hierarchy). + See developer.atlassian.com for more details. + """ + try: + if "The request contains a next-gen issue." 
in str(e): + # Attempt to update the issue manually + for issue_key in issue_keys: + issue = jira.issue(issue_key) + epic = jira.issue(epic_id) + issue.update(parent={"key": epic.key}) + except JIRAError as e: + logger.error("error adding issues %s to epic %s for %s", issue_keys, epic_id, obj.id) + logger.exception(e) + log_jira_alert(e.text, obj) + return False # we need two separate celery tasks due to the decorators we're using to map to/from ids @@ -1284,13 +1300,12 @@ def update_epic(engagement, **kwargs): if not epic_name: epic_name = engagement.name - epic_priority = kwargs.get("epic_priority") - jira_issue_update_kwargs = { "summary": epic_name, "description": epic_name, - "priority": {"name": epic_priority}, } + if (epic_priority := kwargs.get("epic_priority")) is not None: + jira_issue_update_kwargs["priority"] = {"name": epic_priority} issue.update(**jira_issue_update_kwargs) return True except JIRAError as e: @@ -1319,6 +1334,7 @@ def add_epic(engagement, **kwargs): jira_instance = get_jira_instance(engagement) if jira_project.enable_engagement_epic_mapping: epic_name = kwargs.get("epic_name") + epic_issue_type_name = getattr(jira_project, "epic_issue_type_name", "Epic") if not epic_name: epic_name = engagement.name issue_dict = { @@ -1328,14 +1344,16 @@ def add_epic(engagement, **kwargs): "summary": epic_name, "description": epic_name, "issuetype": { - "name": getattr(jira_project, "epic_issue_type_name", "Epic"), + "name": epic_issue_type_name, }, - get_epic_name_field_name(jira_instance): epic_name, } if kwargs.get("epic_priority"): issue_dict["priority"] = {"name": kwargs.get("epic_priority")} try: jira = get_jira_connection(jira_instance) + # Determine if we should add the epic name or not + if (epic_name_field := get_epic_name_field_name(jira_instance)) in get_issuetype_fields(jira, jira_project.project_key, epic_issue_type_name): + issue_dict[epic_name_field] = epic_name logger.debug("add_epic: %s", issue_dict) new_issue = jira.create_issue(fields=issue_dict) j_issue = JIRA_Issue( diff --git a/unittests/vcr/jira/JIRAConfigEngagementEpicTest.test_add_engagement_with_jira_project_and_epic_mapping.yaml b/unittests/vcr/jira/JIRAConfigEngagementEpicTest.test_add_engagement_with_jira_project_and_epic_mapping.yaml index 38f217ff706..ed2cdc787fc 100644 --- a/unittests/vcr/jira/JIRAConfigEngagementEpicTest.test_add_engagement_with_jira_project_and_epic_mapping.yaml +++ b/unittests/vcr/jira/JIRAConfigEngagementEpicTest.test_add_engagement_with_jira_project_and_epic_mapping.yaml @@ -1,34 +1,112 @@ interactions: - request: - body: '{}' + body: '{"description": "Event engagement_added has occurred.", "title": "Engagement + created for "Python How-to": new engagement", "user": null, "url_ui": + "http://localhost:8080/engagement/7", "url_api": "http://localhost:8080/api/v2/engagements/7/", + "product_type": {"name": "books", "id": 1, "url_ui": "http://localhost:8080/product/type/1", + "url_api": "http://localhost:8080/api/v2/product_types/1/"}, "product": {"name": + "Python How-to", "id": 1, "url_ui": "http://localhost:8080/product/1", "url_api": + "http://localhost:8080/api/v2/products/1/"}, "engagement": {"name": "new engagement", + "id": 7, "url_ui": "http://localhost:8080/engagement/7", "url_api": "http://localhost:8080/api/v2/engagements/7/"}}' headers: Accept: - - application/json,*.*;q=0.9 + - application/json + Accept-Encoding: + - gzip, deflate + Auth: + - Token xxx + Connection: + - keep-alive + Content-Length: + - '710' + Content-Type: + - application/json + User-Agent: + - 
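Background for the add_issues_to_epic fallback above: Jira team-managed ("next-gen") projects reject the legacy add-issues-to-epic operation, and the error text quoted in the docstring points to the Edit issue operation with the parent field instead, which is what the new inner loop does via issue.update(parent={"key": epic.key}). A standalone sketch of the same idea against the jira client library, with placeholder names (it mirrors the hunk above rather than replacing it):

    from jira import JIRA, JIRAError

    def link_issue_to_epic(jira_client: JIRA, issue_key: str, epic_key: str) -> bool:
        # Team-managed projects express the epic relationship through the
        # standard "parent" field rather than an epic-link custom field.
        try:
            issue = jira_client.issue(issue_key)
            issue.update(parent={"key": epic_key})
        except JIRAError:
            return False
        return True

The add_epic change in the same hunk follows the same theme: the Epic Name custom field is only added to the issue payload when get_issuetype_fields reports that the project's epic issue type actually exposes it, since team-managed projects no longer carry that field.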
DefectDojo-2.42.3 + X-DefectDojo-Event: + - engagement_added + X-DefectDojo-Instance: + - http://localhost:8080 + method: POST + uri: http://webhook.endpoint:8080/post + response: + body: + string: "{\n \"args\": {},\n \"headers\": {\n \"Accept\": [\n \"application/json\"\n + \ ],\n \"Accept-Encoding\": [\n \"gzip, deflate\"\n ],\n \"Auth\": + [\n \"Token xxx\"\n ],\n \"Connection\": [\n \"keep-alive\"\n + \ ],\n \"Content-Length\": [\n \"710\"\n ],\n \"Content-Type\": + [\n \"application/json\"\n ],\n \"Host\": [\n \"webhook.endpoint:8080\"\n + \ ],\n \"User-Agent\": [\n \"DefectDojo-2.42.3\"\n ],\n \"X-Defectdojo-Event\": + [\n \"engagement_added\"\n ],\n \"X-Defectdojo-Instance\": [\n + \ \"http://localhost:8080\"\n ]\n },\n \"method\": \"POST\",\n \"origin\": + \"172.18.0.2:48456\",\n \"url\": \"http://webhook.endpoint:8080/post\",\n + \ \"data\": \"{\\\"description\\\": \\\"Event engagement_added has occurred.\\\", + \\\"title\\\": \\\"Engagement created for "Python How-to": new engagement\\\", + \\\"user\\\": null, \\\"url_ui\\\": \\\"http://localhost:8080/engagement/7\\\", + \\\"url_api\\\": \\\"http://localhost:8080/api/v2/engagements/7/\\\", \\\"product_type\\\": + {\\\"name\\\": \\\"books\\\", \\\"id\\\": 1, \\\"url_ui\\\": \\\"http://localhost:8080/product/type/1\\\", + \\\"url_api\\\": \\\"http://localhost:8080/api/v2/product_types/1/\\\"}, \\\"product\\\": + {\\\"name\\\": \\\"Python How-to\\\", \\\"id\\\": 1, \\\"url_ui\\\": \\\"http://localhost:8080/product/1\\\", + \\\"url_api\\\": \\\"http://localhost:8080/api/v2/products/1/\\\"}, \\\"engagement\\\": + {\\\"name\\\": \\\"new engagement\\\", \\\"id\\\": 7, \\\"url_ui\\\": \\\"http://localhost:8080/engagement/7\\\", + \\\"url_api\\\": \\\"http://localhost:8080/api/v2/engagements/7/\\\"}}\",\n + \ \"files\": {},\n \"form\": {},\n \"json\": {\n \"description\": \"Event + engagement_added has occurred.\",\n \"engagement\": {\n \"id\": 7,\n + \ \"name\": \"new engagement\",\n \"url_api\": \"http://localhost:8080/api/v2/engagements/7/\",\n + \ \"url_ui\": \"http://localhost:8080/engagement/7\"\n },\n \"product\": + {\n \"id\": 1,\n \"name\": \"Python How-to\",\n \"url_api\": + \"http://localhost:8080/api/v2/products/1/\",\n \"url_ui\": \"http://localhost:8080/product/1\"\n + \ },\n \"product_type\": {\n \"id\": 1,\n \"name\": \"books\",\n + \ \"url_api\": \"http://localhost:8080/api/v2/product_types/1/\",\n \"url_ui\": + \"http://localhost:8080/product/type/1\"\n },\n \"title\": \"Engagement + created for "Python How-to": new engagement\",\n \"url_api\": + \"http://localhost:8080/api/v2/engagements/7/\",\n \"url_ui\": \"http://localhost:8080/engagement/7\",\n + \ \"user\": null\n }\n}\n" + headers: + Access-Control-Allow-Credentials: + - 'true' + Access-Control-Allow-Origin: + - '*' + Content-Type: + - application/json; charset=utf-8 + Date: + - Wed, 29 Jan 2025 20:45:18 GMT + Transfer-Encoding: + - chunked + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json,*/*;q=0.9 Accept-Encoding: - gzip, deflate Cache-Control: - no-cache Connection: - keep-alive - Content-Length: - - '2' Content-Type: - application/json User-Agent: - - python-requests/2.28.1 + - python-requests/2.32.3 method: GET uri: https://defectdojo.atlassian.net/rest/api/2/serverInfo response: body: string: !!binary | - H4sIAAAAAAAAA1SPTUvEMBCG/0uuttlJ0ibd3EQPKrIK2z2JSNokWEmT0qTCsux/N8XFj9vwzvPM - y5xQp6I5zA5J9J7SFOVmo401fdLhI2CVnIpxUB57k1CBPs0ch+AzTAAIBgzlfnf9vL97an+3u2Xs - 
8oTkywoVUMBrgbSZXDiOxqf2OJl84MaFRWepWwanvxUks0ChvoS3Kq0gBUpLaEoiWmgkCElrDABX - mYTsRzPn3nYY/7HbFkAyLhnBgm9/2H689zZkkFdEWGatZaKhlANrAKpaUU56BZr3QlQGGP9bkNza - 8DDMCq3vWLW49Bh6tcYn5C4TMv7tsEfn8xcAAAD//wMAlf3vZFoBAAA= + H4sIAAAAAAAAA5yQUUvDMBDHv0te3bqkS2mXN5ngFJ1Cu5eJSJpcMJompUkHY+y7m+DQ+aa+HXe/ + //2OO6CWe9gMBjH0GkLv2WwmQYEI0r25jAfDvdfcZhYCmiCpfW/4/h98DcNOC5Dg31dg+iXYAMNf + lyydVWYEK+B3yR0MXjsbYYIxyXCGp/X68rFePTTf0/XYtbFC7ClBEzzBz9EJvXH7Ll7Z7PtkWxo3 + yhhqR23kZwSxGMjL8tS84iGBOc6LKSbTvGoIZZSweZVhjC9whGPexz/A0OjuB7tocsJowUiVUUK/ + WNHdWOUiqIqCAJRz0koQVAgOwElbSUUWJVeERoFqC8jPBMEkw60eeHohKD6acOcET+0DMqcKgX3Z + 1Oh4ftjW2TS5vm/Q8QMAAP//AwDv8BdOIAIAAA== headers: + Atl-Request-Id: + - 0142053b-a7dc-4dfd-b59f-77a77e6ddd88 Atl-Traceid: - - b0cc01e650d9c110 + - 0142053ba7dc4dfdb59f77a77e6ddd88 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -36,20 +114,19 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Thu, 18 Aug 2022 22:36:31 GMT - Expect-Ct: - - report-uri="https://web-security-reports.services.atlassian.com/expect-ct-report/atlassian-proxy", - max-age=86400 + - Wed, 29 Jan 2025 20:45:18 GMT Nel: - - '{"report_to": "endpoint-1", "max_age": 600, "include_subdomains": true, "failure_fraction": - 0.001}' + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' Report-To: - - '{"group": "endpoint-1", "max_age": 600, "endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], - "include_subdomains": true}' + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' Server: - - globaledge-envoy + - AtlassianEdge + Server-Timing: + - atl-edge;dur=170,atl-edge-internal;dur=16,atl-edge-upstream;dur=155,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - - max-age=63072000; preload + - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: - '*' Transfer-Encoding: @@ -57,64 +134,62 @@ interactions: Vary: - Accept-Encoding X-Aaccountid: - - 5fa43d1b8405b10077912260 + - 5d3878b170e3c90c952f91f6 X-Arequestid: - - bbf8bde8-75c1-4c79-b8df-4c3ed40008f4 + - fdc0e3fa7a97f7656746ccc3405af67e X-Content-Type-Options: - nosniff - X-Envoy-Upstream-Service-Time: - - '204' X-Xss-Protection: - 1; mode=block status: code: 200 message: OK - request: - body: '{}' + body: null headers: Accept: - - application/json,*.*;q=0.9 + - application/json,*/*;q=0.9 Accept-Encoding: - gzip, deflate Cache-Control: - no-cache Connection: - keep-alive - Content-Length: - - '2' Content-Type: - application/json User-Agent: - - python-requests/2.28.1 + - python-requests/2.32.3 method: GET uri: https://defectdojo.atlassian.net/rest/api/2/issue/createmeta?projectKeys=NTEST&issuetypeNames=Task&expand=projects.issuetypes.fields response: body: string: !!binary | - H4sIAAAAAAAAA+xWS08bMRD+K9EeOEVsEiKKIkUIlR5QEapE6KVCldmdJSZee/EjjyL+e2f2ZQcq - uqEtvbCn9Xge34xnPvshgnXBZBpNokKrO0isifr+d/LtwStwYxzYTQGkYkBkKJtbW5hJHKeQoUGq - 7tQ+s4IZw5ncl2BjDcbGrODxKK69xsMBfuiCk9NmsYANri5mny5nuJIsB1xeSW4tOqCAbMks01da - IKqHaHy0Hh/tGN9JvgRtmPhe+YqXHFYxJdRCqzeGg/HgA8Ycjdej8b+Ncmz4D5ianAmBAYeH6+Hh - WwRcNxEPRuuD0VtEzCHlLo8e+2EfUX+9opNaD2UvjcJeokUKJtG8sFxJlJ70ylz7vZQby2ViewWH - BHoq662UXuyTdaIkdtaOKF4ogwfYFuJgeLRViLbJZ8wscOWk1UwawSykF1s7xt1Y+ptkTBjo+4HM - OIiUZqP+wbkwLs+Z3tCvhnvHNaCi1Q7NTDKHnNEOAUNzYzWXtxRgYyzkJKmtH1twl7WkGdBGox/N - mTmFjDlhvzLhoEWnCtCMSk+ni4dro2t0VzAN0m7Dqg2e4SqLJ7gsc2+g1Q48si+VoAFW7++Kqz2o - 
ThXz2gEyL/TgzkjWm1WaFcDQthPGa+Q8IdQK0lLpfVheGInK8AzzKm2xwhwrqZP55hyWgFgHj3Ta - icoLJbFPyln5fScyrRn1OseTRhNvH55/4NQ3wEcvbBog0OvYARFLU4oEFE9DrpYQ/aIrKDNmLUvm - eecZe5pZYB+kFkh9aiehapXalnW31FpiqC6MTuPX6Ia0UIsCXmiVamJo1zsAe17kV43en714amti - kc8V+arMrpDnqAA8LwRH3vdn/P5A+psB/98DqWSqLVbuMtDPL/PQhR+P00Da9GCoudOYNBcoXdav - pNTA/umdWkk99HNcQ9or71ZPq1sOmLOKqFeAhd3vqNJVXPBkAfo4cZpeFDWdnKXTPTNXq0t3Q1dQ - 9fiYEk+FYlNJatMSKE7uVDoh9u4d6M20c32J+ktiR6S3EqBbdZ0BHdaxtQ7ouxG15I2CnwAAAP// - vJhNT8MwDIb/ytQzayjrLkhoJxCHceU2oQ6FNtO2Vk0H5dD/jh0n/UqHWlJxdRPniev4daINbuHD - xRl5i/ZHziRHBd7ojH9QlW5qGEyafYjyFU4UmcdEop9neB4pv+vgtH028XkS5UIPbpKsPXQK+o2W - 8N/FO8tFmoui17Vf21k9uiOD2tbWwXqYEcLaYG2ABNfmHwD+oxDS2izAXzK+gRSnKOaS4QxpnAgw - JCJOwLsvP7Hk6R0/kxFXUDoLwXCDVb3yPLA2qcG8c8ZczYNJ+tMFfTHtt0JdOaOG86BCRnY5t+mX - gQydIdezQVopulU2g7r2qh1KffskuqH/ZypAqXm/yCI9qdeHN2hrg3BcBYvOWIRosqcuRn72XQrp - xznn5yTNoA75B5FH93Gy5KDHS/0aQFPomgerNcXuEQYtsD/w6tuWhTZQ9q7WbazKx2jPqZcesaOe - 2tjdmPbW6mbIYHj1dzcNDvxbRo6YvMTwa4vNRL0dunFW2JLuqh8AAAD//wMA4ncFkysWAAA= + H4sIAAAAAAAAA+xW32/TMBD+V6I8V03bVWOqhBBiICbQhLRuL9OETHJZb3OcYDttyrT/nXPixO5a + RjpgvCxP9vl+fOf77uK7EKqCiSSchYXMbyDWKhy45ezyzimgUiXodQFGRQFPSbbQulCzKEogJYMk + v8mHTHOmFDIxFKAjCUpHrMBoElmv0XhEH7lA47Td3MKadqfz92dz2gmWAW3PBWpNDkxAtmSayXPJ + CdVdOD2qpkd7xi8FLkEqxr82vqIlwioyCXXQ7MF4NB29opiTaTWZ/tsobxT+gNcqY5xTwPFhNT58 + joBVG/FgUh1MniNiBgmWWXg/8Hlk+PUEJnUeai5NfC6ZTQIqllhozAVJ3wZ1roMgQaVRxDooEGII + 8jRY5fJ2aKzjXBCz9kTxyDU4gN1FHIyPNi6iI/mcqVvalUJLJhRnGpLTjRNVftNmNUsZVzAIFwiS + yXix/gxLINCjgevRFIEnpl3sglpFlVnG5NosJXwvUQIpalmSJxUvIGPmxGAlc6UlimsTc600ZEZi + re87vGdW0vZsq0HAmDqGlJVcXzBeQgc4LwiwqYYpONVbh1c+DXohc9oeOCd08E6MLJg3mg1C37YX + xisaN5znK0hqpReePsLGxvCE8qptd9Dz3lS7YBKE3iy19bC71hxFHa6ttXXgCv2lEbRFtuc9K9yx + MM6zIhdkWffK77ExKZnhOhIqMnH2PlbPqcP7zglbzJ5eX9wsSUwkMPEkZPkSwh1sNZltsKtPatvN + 77twiRx70jYTX7NvKm0J7D+j1xhodX1iWJHHjE7JUqPb7wFs+1KfNAL+7NFjrc00+9QM2zzVK2K6 + uQDMCo40511JX95IfzPg/3sj1RMzxeqCPDSk7NO/D0eTid/0Y9crvk/XLx+wCqyym02+6j6NM7Aj + 6vHhVEjMJeoHr5JfZdZpb7S9lfl936m1jd8JthJoBsw2/h2An9j4TexobErS/8eNGbsGFRkL1TpB + EizwekHeh2ppRrTN+GMj/AkAAP//vJfBboMwDIZfpeqht8K60sskVO2wwyR22rmagFrARoERoq6H + vvviOIFQOo01aMe6cfLFNs5vPEH2FREMO1ipUaaBHZJqzHtrzPU0mPS99UFftOyRqGtrVG8aVFGR + fc6gPGpIzxpyMxnkoEQDadOom/l5h9LE/BLt0P+zFFAvctaUBzldvYlnfOWN62BhgU2InOdS+DnV + 6StjTlIDFGlZiT7kvGd1+JCkS6iyeKn0L7mQvBandc3uSSyaBbRIqckB2pW292Pfxq6chxGQdhhx + o4vXZqge1W4dckAGzav+F/2WNyWq4xwaGJnGthJWzp1LG7mMJyK1zfaTQ33yR1/+mqIWzGHThHF6 + GD20XMbD8DdiYli7uDyaSyk2Pe9x96AbxGV16sZrLKIb82n4G/yG1cir+A37mZy8u/T2NrBJsZqj + XVHwH1BvY17jpKdE/vPeX7C0PL7yCAdUGgp9fNxNMyOLcpWgQk/7Bc/zxU3l0oYZgZMCZBv7Pcic + Qd0rB+1tFIM2taXQGeyiiIe7tFsY5eAywDF9q+SoL8eQv0aDOgcK1t35GwAA//8DAFZhNg1MFgAA headers: + Atl-Request-Id: + - 1f2a65fd-a609-4604-a5b0-57706c020c82 Atl-Traceid: - - 48d321396cda1bba + - 1f2a65fda6094604a5b057706c020c82 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -122,69 +197,154 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Thu, 18 Aug 2022 22:36:32 GMT - Expect-Ct: - - report-uri="https://web-security-reports.services.atlassian.com/expect-ct-report/atlassian-proxy", - max-age=86400 + - Wed, 29 Jan 2025 20:45:18 GMT Nel: - - '{"report_to": "endpoint-1", "max_age": 600, "include_subdomains": true, "failure_fraction": - 0.001}' + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' Report-To: - - '{"group": "endpoint-1", "max_age": 600, "endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], - "include_subdomains": true}' + - 
'{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' Server: - - globaledge-envoy + - AtlassianEdge + Server-Timing: + - atl-edge;dur=288,atl-edge-internal;dur=15,atl-edge-upstream;dur=270,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - - max-age=63072000; preload + - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: - '*' Transfer-Encoding: - chunked Vary: - Accept-Encoding + Warning: + - 'The issue create meta endpoint has been deprecated. (Deprecation start date: + June 03, 2024)' X-Aaccountid: - - 5fa43d1b8405b10077912260 + - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 846df59e-5d73-438d-9345-a85e12ec5b55 + - a2c7004340d79a5385a3da9a87056ab4 X-Content-Type-Options: - nosniff - X-Envoy-Upstream-Service-Time: - - '247' X-Xss-Protection: - 1; mode=block status: code: 200 message: OK - request: - body: '{}' + body: null headers: Accept: - - application/json,*.*;q=0.9 + - application/json,*/*;q=0.9 Accept-Encoding: - gzip, deflate Cache-Control: - no-cache Connection: - keep-alive - Content-Length: - - '2' Content-Type: - application/json User-Agent: - - python-requests/2.28.1 + - python-requests/2.32.3 method: GET uri: https://defectdojo.atlassian.net/rest/api/2/serverInfo response: body: string: !!binary | - H4sIAAAAAAAAA1SPTUvEMBCG/8tcbbPTpE1qbqIHFVmF7Z5kkbRJsJI2pUmFZdn/boqLH7fhneeZ - lzlBq4LZzw4kvMc4BbnZaGNNF7X/8ERFp0Lo1UhGEyGDTzOH3o8JLhALggTz3fbmZXf/3Pxut8vQ - pgnk6wplmOEhA20m54+DGWNznEw6cOv8opPULr3T3wrIJFCsLuGdiitIkdIc67wQDdYShaQVQcSr - RGLyg5lTb9MP/9jrBlEyLhklnFc/bDc8jNYnkJeFsMxay0RNKUdWI5aVorzoFGreCVEaZPxvQXRr - w2M/K1jfsWpx8cl3ao1P4C4TmPFtv4Pz+QsAAP//AwAW6AtAWgEAAA== + H4sIAAAAAAAAA5yQUUvDMBDHv0te3bpc19ItbzLBKTqFdi+KSJpcMJompUkHY+y7m6Cw+aa+HXe/ + //2OO5CWe9wOhjDyFkLv2WwmUaEI0r27jAfDvdfcZhYDmRCpfW/4/h98jcNOC5ToP9Zo+hXagMNf + l6ycVWZEK/B3yR0OXjsbYaAUMprRab25fKzXD81puhm7NlaEPSdoQif0JTqxN27fxSubfZ9sK+NG + GUPtqI38ihAWA3lVfTeveEhgTvNySmGaLxooWAFsvsgopRc0wjHv4x9waHT3g102ObCiZLDMoDqx + oruxykVQlSUgVnNoJYpCCI7IoV1IBcuKKyiiQLUl5meCYJLhVg88vRAVH024c4Kn9oGY74qgfd3W + 5Hh+2JOzaXJ935DjJwAAAP//AwCGwdS4IAIAAA== + headers: + Atl-Request-Id: + - a128a453-82a5-46fe-a051-0d622a2f0da1 + Atl-Traceid: + - a128a45382a546fea0510d622a2f0da1 + Cache-Control: + - no-cache, no-store, no-transform + Content-Encoding: + - gzip + Content-Type: + - application/json;charset=UTF-8 + Date: + - Wed, 29 Jan 2025 20:45:19 GMT + Nel: + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' + Report-To: + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' + Server: + - AtlassianEdge + Server-Timing: + - atl-edge;dur=162,atl-edge-internal;dur=14,atl-edge-upstream;dur=149,atl-edge-pop;desc="aws-us-east-1" + Strict-Transport-Security: + - max-age=63072000; includeSubDomains; preload + Timing-Allow-Origin: + - '*' + Transfer-Encoding: + - chunked + Vary: + - Accept-Encoding + X-Aaccountid: + - 5d3878b170e3c90c952f91f6 + X-Arequestid: + - 4c5701eb45fee93086d2d076d9db96b1 + X-Content-Type-Options: + - nosniff + X-Xss-Protection: + - 1; mode=block + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json,*/*;q=0.9 + Accept-Encoding: + - gzip, deflate + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: GET + uri: 
https://defectdojo.atlassian.net/rest/api/2/issue/createmeta?projectKeys=NTEST&issuetypeNames=Epic&expand=projects.issuetypes.fields + response: + body: + string: !!binary | + H4sIAAAAAAAAA+xWW0/bMBT+K1aeS0JLxVClaZqASWwTmsTlZULIbU5bg2NnttMmQ/3vO6e5OKVs + S9nGXshTfHIu37G/88UPAeQpV3EwClKj72DibNDzr6OvD95BWJuBK1IgFwtyira5c6kdRVEMUwyI + 9Z0OuZPcWsFVqMBFBqyLeCqiQVRljfr7+GAKQUnrxT0UuDq/PL24xJXiCeDySgnnMAEV5AvuuLky + ElE9BMOjfHi0Y/1MiQUYy+VtmStaCFhG1FADrfrQ3x/uv8Gag2E+GP7bKu+s+A5vbcKlxIL9w7x/ + +BIF87riwSA/GLxExQRikSXBqtfmEfHrGUxqMjzNpRjsxIjUCa3Q+p6NxYxlFgyzTpuCuTl3TAHE + ljnNxsDGRt+DYrFeqpAdG+AOYjYu2EdhOLvQU7fkBtgeOjClHcNGHNOGxSDBQUjVJ1ohM7t0IRI+ + AxtRhPV92AhSMQntYubJf4oWXGXKGa6sJFDnG19sNnbc3gejKZcWesFcgOFmMi8+wwIQTL/nZ3cq + QMY0RtULjpDNkoSbgl4NfMuEAXR0JsNMdjKHhNMXAofh1hmhCJstrIOELFX0qsF7UVnqWa49EBi3 + JzDlmXTXXGbQANYpAqZTIiIgD1xw06ZHJ2TeuwXOGz28M7Kxy9KzRNiO7YTxBmVISr2EeO30yt8O + /P0tS1d06BOdpFqBon9O+9SroK1j58ZwopbAA8cQH9+mQSup58GxN9Y8aPl1JELA45gqAdUzkOgF + BE+QgzrbOMwurW3PWjuFb+SkZa07aXt2baWeu0q6O01d7dsCWZs8wC+NUwnOB+0AbHtTnzVxf3b3 + qKJJPD6V2lYNFW2ASFIpUFb9kb5eVf5mwf93VSmVKUPVTda/zVskTL+/4xSX8cFaZcK0yIUNZwZA + zXWKbA/vUKNHs/ke6eee5GOgTsuYM0y/ruhHijSVrW8BjXZtwdtpvFZ0I8ivcYtKc5fWHmsvbXAp + OI0YtHN69B9EzipnL75t112g9yoN/rX6pkZoI9yjW87POmu8N3StsrWFrXGrla0xbDVQKug2/icA + P1PZhP4BAAD//7xYwW6DMAz9lx16XLaWHhHabZOYVKm79kAZAlQGGxBVPfTfZ8cJOINJlKAea7Dz + Yr/afuDZAis/d5DrIDkYsjzNILo9y1/JiCeoxgnJcAO7Xg7sEKmBuXaGuVkGJjUUG+g7NRkNdeMM + 1VsGKjDSxhlWZwPScwa5XQzkgKKhshmo24frAXcv/k90g35PKoyMHW9aB4tKbEI3zZy8PHUuZuR4 + f0ZOSC+Njxxk3qDt/du3sSurOTdz2gzXYx2thxySweDVz6HfyrbC9R9lz8Qydkx4fnwSFEg0MoXS + tsGPTOqLP/nyY5IBMEdtG8XZF0qXWflg/iwnzNrn5YW/SrmxvKfdg24QV9+XXq4jiWbWk/kz/MzK + 6gq/QdkqJd+X1wrgUmKtywUQ/pTUQSzrGvKiVczbp79qsuq8l8cPkLO7CJ/5ONy5uSGLdlVAQTD4 + pSyK1Sy6dGlGwCmI/mlJxq8FFh2MNyODMXVU6A1uWcTDBUWLjkUimgT1fqD3bV/prFuzQZ0DN/LD + 9RcAAP//AwBwEqjntBYAAA== headers: + Atl-Request-Id: + - 73c8b0f9-1d75-4368-acd7-aa0789b267cf Atl-Traceid: - - 1403d316903ec3d1 + - 73c8b0f91d754368acd7aa0789b267cf Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -192,34 +352,34 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Thu, 18 Aug 2022 22:36:32 GMT - Expect-Ct: - - report-uri="https://web-security-reports.services.atlassian.com/expect-ct-report/atlassian-proxy", - max-age=86400 + - Wed, 29 Jan 2025 20:45:19 GMT Nel: - - '{"report_to": "endpoint-1", "max_age": 600, "include_subdomains": true, "failure_fraction": - 0.001}' + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' Report-To: - - '{"group": "endpoint-1", "max_age": 600, "endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], - "include_subdomains": true}' + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' Server: - - globaledge-envoy + - AtlassianEdge + Server-Timing: + - atl-edge;dur=297,atl-edge-internal;dur=15,atl-edge-upstream;dur=282,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - - max-age=63072000; preload + - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: - '*' Transfer-Encoding: - chunked Vary: - Accept-Encoding + Warning: + - 'The issue create meta endpoint has been deprecated. 
(Deprecation start date: + June 03, 2024)' X-Aaccountid: - - 5fa43d1b8405b10077912260 + - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 651b707d-2b09-4d87-9c46-1b43505ab6e0 + - ad710f2110169e1ee2d8b1c8c01707f3 X-Content-Type-Options: - nosniff - X-Envoy-Upstream-Service-Time: - - '123' X-Xss-Protection: - 1; mode=block status: @@ -230,7 +390,7 @@ interactions: "new engagement", "issuetype": {"name": "Epic"}, "customfield_10011": "new engagement"}}' headers: Accept: - - application/json,*.*;q=0.9 + - application/json,*/*;q=0.9 Accept-Encoding: - gzip, deflate Cache-Control: @@ -242,34 +402,35 @@ interactions: Content-Type: - application/json User-Agent: - - python-requests/2.28.1 + - python-requests/2.32.3 method: POST uri: https://defectdojo.atlassian.net/rest/api/2/issue response: body: - string: '{"id":"11823","key":"NTEST-1485","self":"https://defectdojo.atlassian.net/rest/api/2/issue/11823"}' + string: '{"id":"16611","key":"NTEST-1828","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16611"}' headers: + Atl-Request-Id: + - e5d301a0-db46-476e-a680-b2b0eb8a1db6 Atl-Traceid: - - 20f9e5df98a5a95a + - e5d301a0db46476ea680b2b0eb8a1db6 Cache-Control: - no-cache, no-store, no-transform Content-Type: - application/json;charset=UTF-8 Date: - - Thu, 18 Aug 2022 22:36:33 GMT - Expect-Ct: - - report-uri="https://web-security-reports.services.atlassian.com/expect-ct-report/atlassian-proxy", - max-age=86400 + - Wed, 29 Jan 2025 20:45:20 GMT Nel: - - '{"report_to": "endpoint-1", "max_age": 600, "include_subdomains": true, "failure_fraction": - 0.001}' + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' Report-To: - - '{"group": "endpoint-1", "max_age": 600, "endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], - "include_subdomains": true}' + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' Server: - - globaledge-envoy + - AtlassianEdge + Server-Timing: + - atl-edge;dur=750,atl-edge-internal;dur=13,atl-edge-upstream;dur=737,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - - max-age=63072000; preload + - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: - '*' Transfer-Encoding: @@ -277,71 +438,70 @@ interactions: Vary: - Accept-Encoding X-Aaccountid: - - 5fa43d1b8405b10077912260 + - 5d3878b170e3c90c952f91f6 X-Arequestid: - - dfe58a94-2020-4ddf-b61e-62b2adce0472 + - 4476e86ab623cefcadc29c6eea3b1844 X-Content-Type-Options: - nosniff - X-Envoy-Upstream-Service-Time: - - '646' X-Xss-Protection: - 1; mode=block status: code: 201 message: Created - request: - body: '{}' + body: null headers: Accept: - - application/json,*.*;q=0.9 + - application/json,*/*;q=0.9 Accept-Encoding: - gzip, deflate Cache-Control: - no-cache Connection: - keep-alive - Content-Length: - - '2' Content-Type: - application/json User-Agent: - - python-requests/2.28.1 + - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1485 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1828 response: body: string: !!binary | - H4sIAAAAAAAAA7xX224bNxD9FYKvlbQ3WZYFBEFqK4VbxzFsOX0oCoPaHa0YccktybWkuv73Dvem - WJfAVtG8aTnkXM+cGT1RWOVMJnRENcgENCQfOYjEdCTLwHRMPIeMdVQOmlmupOlAwm0GlnXiOZMp - CJV2HkEblEFyC7kGA9LWd+PCWJXNnMKHwPcDv6fhrwKMnaxzuNEstjwG2qHc2Q+CYRjhhwExw8+5 - tbkZeV4CM4htor6qHrOCGcOZ7EmwHlqyHsu5F3rcmAK8RsEC1vj+ejK+m3SD/vAEj0oXDB09UYO+ - 
FSZmFlKl11UMCX7hi9APw64/7AZnE98fRYNRFPWiQfSTH/q+c9IZseh4qeZIJ917D1NRaSzDrj8S - MLHmuUscnn4gU56SwoAmmEK9JnbOLJEAiSFWkSmQqVYLkCRRS9kj5xowhoRM1+RXrhm5UzO7ZBpI - Fy8QqSxxZSNKkwQEWOg567GS91q8JgqesRSM516YTRzGg5zHPfOYojYHF1Q1xhNXw2JqmVnQ0YwJ - Ax0654gfHc/XV/AIaDJ47lDLEWA5goWOZCFEh26hJfIPCYJGkGv1FT0+siD16/3l+AZEm+juJbcW - FRja2nZI/q28a+qcu/B5lguODidtBtgjs0xjvksU9oer/vCN7haSu0Zj4qHS5T1yWHoloJpIakHg - 9/1TdCPsr8L+/2vlveF/wzuTMSHQYDBYBYMfYXDVWIzCVRT+CIsZNlCR0efnXTiGDRxnfPWlokIs - 8h9/YtHTVEOKnbmDdXRHiaJq9r0gD08PCYaHBGe7yitmq06XSi9KEqejblDTmcuK5nHlx9POmcMq - BmzmqhDJBTe5YOsa0Xi8ZBanQ8Wqb+++irM3LO1V6rTrrfLnuSpcvkpXf3cHXKZ0ZHXhbKNS+wXL - 5jqszkZFgXt5POydnfoNj2+n7RDNhC3NbAvagm8Log0xcaW5XR+Zm+a558bZsTxdK+F4UIH3JVN/ - qgBd015Ey6ROwfGTg+52aIMDMbt+3y9AANO80LlwjLgtHLrMzJlx8+KKy8VHJ7mA3O0gMm5AVkJv - WcraE6nkGGcZmwq4BWYq4Or6F725uv/l8vrh6vJ8fH03fhjf3n6+RfPYfQZTgxcmcyA3yNLSEmeX - cEOUFGuCHc+FU+rGazlDbzRkmKFyCJvevs4PsOOo/w/3/aGZjehWv2PyUy6ZwLJiXTZ96GTbZ/UG - Vee+bA2B3jU0ghVOcfY3t4vc9fUroF4tO0eCsHrcTseX+8nbcLlB3c8sXuDK2MCuUV7ZOq+3sv/k - cLPaeWFtJGyGuYSlQ6ISSl9X3kxFAd1UI6m1Dk4UuVBVsVWW41IrbV2F79X0ZXLQEAGZIuAyR/m7 - uDlEOUHwmtfhkfmp9JTd9Ll01dUW+xfhxzARbfCb2qBwD+qR5Wg6X3VLuui6RWP7Rv9brFvc8xcl - d7tZsn315EAq/IOClnAMxEVFsvsvHpqg/qEJ6rcTlFnL4nmZ/91Bvl18U2QZc6jdUzo3lJQ+smCO - ed6zOHaD8DJ5dzJj/SgJpsO+fzJFd09Pz4IwHLj+aS+hhe9cw79yXHxIErSBoKaVD/8CAAD//+xY - TU+DQBT8NT3uuoBgPRhtUj0bDx56W2GtmJYlC7QmDf/d2Q/WFoMxxigHEg7Lx76dPPLmzTyNgTjh - cyP3vCqpVLpCv5asJvuConrMZxTl0smmOAiDcxEwEYXZZZJGaXwRpPM4yxhPngMxv86uTJRZtJiF - d7jsPrLlhUsdIfZRRZuK7JERElJAzGjZPG3gOZAyUnJe6Yxhfw5dnqM9YLm8JwktC42/L3/Hj7iv - n8ePuK+/x44YDJRZHetawNLUAGjvVaLjWzup66mGR7Bq07LYCq0An982SsInrEA86ctH4WkXiLe+ - kvU5zgO77qFEKVUtJir4h58+UcFfIJ6oYJAKvHwAxLWtuIMeILk1Q1xZc0hp9lkhsSEbyoZsKPM2 - tP/C6zJR7HIlCytwnIRt3BzU3n4LqdzaCIdu6cjuBwR3NMI96+LCt/G3B1E1Gx346GxjGFS9qC2O - nax/bwxhg/mgOAs29VEav+UnH1IZF6OP9EBO0YYncN0Gk562bd8BAAD//wMAWs/2JHYXAAA= + H4sIAAAAAAAAA7xX23LbNhD9FQxfK4kXXaxoptNpHaWT1nE8tpw+dDoeiFxRiECABUBd4vrfuyBI + ypbEtlameROxwF7Pnl09erDNqUi8iadAJKAgeceAJ7ojaAa6o+MlZLQjc1DUMCl0BxJmMjC0Ey+p + SIHLtLMGpVEGyS3kCjQIU92NC21ktrAKH8IgCIOegj8L0Ga2y+FG0diwGLyOx6z9cDQKQ/zQwBf4 + uTQm1xPfT2ABsUnkZ9mjhlOtGRU9AcZHS8anOfMjn2ldgF8rWMEO31/PpnezbjiOxnhUuqC9yaOn + 0bdCx9RAKtXOxZDgF76IgmjYDcJu9GYWhZPBcBIFvf5w9B36HVgnrRGDjpdqznTSvvdRn9NYhl19 + JKBjxXKbODz9kcxZSgoNimAK1Y6YJTVEACSaGEnmQOZKrkCQRG5Ej1wqwBgSMt+RX5ii5E4uzIYq + IF28QIQ0xJaNSEUS4GCgZ63HUtwr/l+iYBlNQfv2hd7HoX3IWdzT6xS1Wbigqime2BoWc0P1ypss + KNfQ8ZYM8aPi5e4K1oAmw6eOZxgCLEeweBNRcN7xDtDSD2pBruRndOzMvFevT2f9GVb2QdwLZgwq + 0F5j2wL21/KurlJro2RZzhk6nDSB0jU1VGFaS7ANxtvB+JXuFoLZfqL8weny1ww2fombOpJKEAaD + 4ALdiAbbaPD/WvlBsy/wvc4o52gwHG3D0bcwuK0t9qNtP/oWFjPskyLznp6O4Ri24TSqBQu2/eSo + EKv/+x/HN/v1TZqmClJs2aMmwAAkLxwLnDY3bBOM2gQXLYKoVTBuE7w59tOxpzvdSLUqB4U36Yb4 + SQ3OD8e7r29cx+p7HvedOmXbsvx5KQubuNBy82/2gInUmxhVAJYPlZpPWHHbnFUsjiRbmD4IxjXT + HwbdENGhoA0SUQOJXDGpmNmdmYL6ud//CsKulDA8cPB+SdkfHOQrYux7x9CPGtxyOgdLbSfAHbXB + z1LFaQHCz0uoWj3sgHO5Oc58OLZ5W1Jtx8oVE6t3VvIWcruqiHjX0K5eyk0pa06EFFMceXTO4Rao + tu30iKB1v7ybq/uf318/XL2/nF7fTR+mt7cfb9E89qLGxOGF2RLIDbK8MMTaJUwTKfiOIGMwbpXa + KVyO2hsFGeavnNW6d4o5QmwaL/iLBYFafJl4bvJhjbFI+855wQhYrpQJyg8vVYtWVYCyPzh6V33b + +qe4ItS3i9y2Zhvew/Cixrvbic6EqHvcTNeXa8zrULvH5E80XuFmWYOyVu5sXVbL21c5XG+AflQZ + ieplQECJRMmlunbezHkB3VTR3d7BmSRvpSu2zHLcfYU53RbDNvIYNuTxTxV/mU50jYBIEaKZHRnH + OptGPYRgmxe4MP+72jA6M9VOT9mYH8sYLEyQDxDJFHPa5HFfZhSeaCAMy0uX225JP11bscMbg+fJ + NPjPYlXOgscTytrm56CZbQeCoO1F0DCbhrhwNH/6YtugDdoGbdA4Q42h8bIsjOvy50RxCBddZBm1 + nXGipnb6SXVmJf8GAAD//+xYPW/CMBD9K10Yk4YkNGFAFKFW6sDSoQObYzttpJBE+aBFKP+9z3Zw + 
IYpRVVWIAYnBxPbdceTevXcC3eaEUtFxX9hswrwwCKNx4HCPTh06nbjxdByLf1Yfgoczx6Aqk3TB + GHygcFBFbPd4FAiqStg6y6Zl0rmNwpTHxJ0Do/M5egdhY99/CGgYTLxpRKPAgfuY8DikczaTVkbe + YuQ+46PuWRuSdRmzLPWospvK+kQiLNcGbWR20UQpVA8yZRWEVCJRuJ9AMiToPFguV5ZjF5kArz4z + v/6I+9T++iPuS4NrjxiIxJKqSMmu6y5LvPp3KxLHDaWJLKAawkXxWIVna/QXHHxqyhziZQ2koR8/ + lSakKXZ16QoPnf4ebkm+qRn4Jj7raz7b39ANp+RFXtb8hi+Xe5Nu+HKJiG/4MoAvfRjQ9EuzE0T9 + rmpvL2Zo3dqBw7wmUAPOgBUjzzLikklaOu4w8hl5mIk4C0AY3HD0T+5teKYbnqZ0PNsmZZ4pTqce + saabQquvv8pevlEW9odlB/d/gN+jAfr9wS7kMPl65VWTCsNHvqUOK+tFreLY5vX/jXiUMW0UvqD+ + 33IpY6Wyb9WQSYhD4VIHchqtexJud0Gmp23bbwAAAP//AwA6zCNv9BgAAA== headers: + Atl-Request-Id: + - fc062cd3-da38-4d95-8b00-ecc24bb3dcb2 Atl-Traceid: - - dd000025791e2419 + - fc062cd3da384d958b00ecc24bb3dcb2 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -349,20 +509,19 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Thu, 18 Aug 2022 22:36:33 GMT - Expect-Ct: - - report-uri="https://web-security-reports.services.atlassian.com/expect-ct-report/atlassian-proxy", - max-age=86400 + - Wed, 29 Jan 2025 20:45:20 GMT Nel: - - '{"report_to": "endpoint-1", "max_age": 600, "include_subdomains": true, "failure_fraction": - 0.001}' + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' Report-To: - - '{"group": "endpoint-1", "max_age": 600, "endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], - "include_subdomains": true}' + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' Server: - - globaledge-envoy + - AtlassianEdge + Server-Timing: + - atl-edge;dur=271,atl-edge-internal;dur=14,atl-edge-upstream;dur=258,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - - max-age=63072000; preload + - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: - '*' Transfer-Encoding: @@ -370,13 +529,11 @@ interactions: Vary: - Accept-Encoding X-Aaccountid: - - 5fa43d1b8405b10077912260 + - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 4ccb1d1f-ed6a-4e7a-be67-7c9bed4b8dca + - 446c00b1e72bcc391857513273a2fa6a X-Content-Type-Options: - nosniff - X-Envoy-Upstream-Service-Time: - - '274' X-Xss-Protection: - 1; mode=block status: diff --git a/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_creation.yaml b/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_creation.yaml index 6cb5e1bfea4..9cb484b5a6b 100644 --- a/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_creation.yaml +++ b/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_creation.yaml @@ -19,17 +19,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQUUvDMBDHv0teXbtL1nZb3mSCU3QK7V4UkTS9YDVNSpMOxth3N8Ghe1Tfjrvf - /37HHUgtHG4HTTh58753fDptUKH0jX23qfBaONcKkxr0ZEKa1vVa7P/BlzjsWokNuo816n6FxuPw - 1yUra5Qe0Uj8XXKHg2utCTAFoCmkkJSby8dy/VD9TDdjV4eK8OcITWACL8GJvbb7LlxZ7ftoW2k7 - NiFUj61uviKEhwCbz0/NK+EjyIDlCdAElhVjPJvxLIgBLiDAIe/CH3Co2u6cpVAx4DTnDNLFsvhm - ZXdjlA1gRouFFIA5KklncxSZUpkCqdhyli9qVWTICgXsTOB1NNy2g4gvRCVG7e+sFLF9IPpUETSv - 25Iczw97siZOru8rcvwEAAD//wMAOhBbeiACAAA= + H4sIAAAAAAAAA5yQUUvDMBDHv0te3bqka+iWN6ngFJ1Cu5eJSJpcMJompUkHY+y7m+DQ+aa+HXe/ + //2OO6CWe9gMBjH0GkLv2WwmQYEI0r25jAfDvdfcZhYCmiCpfW/4/h98DcNOC5Dg31dg+gpsgOGv + SypnlRnBCvhdcgeD185GmGBMMpzhab2+fKxXD833dD12bawQe0rQBE/wc3RCb9y+i1c2+z7ZKuNG + GUPtqI38jCAWA3lZnppXPCQwxzmdYjLNFw0pWEHYfJFhjC9whGPexz/A0OjuB7tscsIKygjNCkq+ + 
WNHdWOUiqCglAOWctBJEIQQH4KRdSEWWJVekiALVUsjPBMEkw60eeHohKD6acOcET+0DMqcKgX3Z + 1Oh4ftjW2TS5vm/Q8QMAAP//AwAhfxRWIAIAAA== headers: Atl-Request-Id: - - ad0b1495-8283-47ea-af20-cece1e560387 + - a416e98b-18d8-450f-852c-5a7cdd05e165 Atl-Traceid: - - ad0b1495828347eaaf20cece1e560387 + - a416e98b18d8450f852c5a7cdd05e165 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -37,7 +37,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:20 GMT + - Wed, 29 Jan 2025 20:45:15 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -47,7 +47,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=165,atl-edge-internal;dur=17,atl-edge-upstream;dur=147,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=147,atl-edge-internal;dur=18,atl-edge-upstream;dur=130,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -59,7 +59,94 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 769698bf4fd3ab037cc03f3174493ceb + - 308663ded5c76f207008e263d9d9948f + X-Content-Type-Options: + - nosniff + X-Xss-Protection: + - 1; mode=block + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json,*/*;q=0.9 + Accept-Encoding: + - gzip, deflate + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: GET + uri: https://defectdojo.atlassian.net/rest/api/2/issue/createmeta?projectKeys=NTEST&issuetypeNames=Epic&expand=projects.issuetypes.fields + response: + body: + string: !!binary | + H4sIAAAAAAAAA+xWW0/bMBT+K1aeS0JLxVClaZqASWwTmsTlZULIbU5bg2NnttMmQ/3vO6e5OKVs + S9nGXshTfHIu37G/88UPAeQpV3EwClKj72DibNDzr6OvD95BWJuBK1IgFwtyira5c6kdRVEMUwyI + 9Z0OuZPcWsFVqMBFBqyLeCqiQVRljfr7+GAKQUnrxT0UuDq/PL24xJXiCeDySgnnMAEV5AvuuLky + ElE9BMOjfHi0Y/1MiQUYy+VtmStaCFhG1FADrfrQ3x/uv8Gag2E+GP7bKu+s+A5vbcKlxIL9w7x/ + +BIF87riwSA/GLxExQRikSXBqtfmEfHrGUxqMjzNpRjsxIjUCa3Q+p6NxYxlFgyzTpuCuTl3TAHE + ljnNxsDGRt+DYrFeqpAdG+AOYjYu2EdhOLvQU7fkBtgeOjClHcNGHNOGxSDBQUjVJ1ohM7t0IRI+ + AxtRhPV92AhSMQntYubJf4oWXGXKGa6sJFDnG19sNnbc3gejKZcWesFcgOFmMi8+wwIQTL/nZ3cq + QMY0RtULjpDNkoSbgl4NfMuEAXR0JsNMdjKHhNMXAofh1hmhCJstrIOELFX0qsF7UVnqWa49EBi3 + JzDlmXTXXGbQANYpAqZTIiIgD1xw06ZHJ2TeuwXOGz28M7Kxy9KzRNiO7YTxBmVISr2EeO30yt8O + /P0tS1d06BOdpFqBon9O+9SroK1j58ZwopbAA8cQH9+mQSup58GxN9Y8aPl1JELA45gqAdUzkOgF + BE+QgzrbOMwurW3PWjuFb+SkZa07aXt2baWeu0q6O01d7dsCWZs8wC+NUwnOB+0AbHtTnzVxf3b3 + qKJJPD6V2lYNFW2ASFIpUFb9kb5eVf5mwf93VSmVKUPVTda/zVskTL+/4xSX8cFaZcK0yIUNZwZA + zXWKbA/vUKNHs/ke6eee5GOgTsuYM0y/ruhHijSVrW8BjXZtwdtpvFZ0I8ivcYtKc5fWHmsvbXAp + OI0YtHN69B9EzipnL75t112g9yoN/rX6pkZoI9yjW87POmu8N3StsrWFrXGrla0xbDVQKug2/icA + P1PZhP4BAAD//7xYwW6DMAz9lx16XLaWHhHabZOYVKm79kAZAlQGGxBVPfTfZ8cJOINJlKAea7Dz + Yr/afuDZAis/d5DrIDkYsjzNILo9y1/JiCeoxgnJcAO7Xg7sEKmBuXaGuVkGJjUUG+g7NRkNdeMM + 1VsGKjDSxhlWZwPScwa5XQzkgKKhshmo24frAXcv/k90g35PKoyMHW9aB4tKbEI3zZy8PHUuZuR4 + f0ZOSC+Njxxk3qDt/du3sSurOTdz2gzXYx2thxySweDVz6HfyrbC9R9lz8Qydkx4fnwSFEg0MoXS + tsGPTOqLP/nyY5IBMEdtG8XZF0qXWflg/iwnzNrn5YW/SrmxvKfdg24QV9+XXq4jiWbWk/kz/MzK + 6gq/QdkqJd+X1wrgUmKtywUQ/pTUQSzrGvKiVczbp79qsuq8l8cPkLO7CJ/5ONy5uSGLdlVAQTD4 + pSyK1Sy6dGlGwCmI/mlJxq8FFh2MNyODMXVU6A1uWcTDBUWLjkUimgT1fqD3bV/prFuzQZ0DN/LD + 9RcAAP//AwBwEqjntBYAAA== + headers: + Atl-Request-Id: + - 369f45ce-734a-4a76-8264-4f6e40681459 + Atl-Traceid: + - 369f45ce734a4a7682644f6e40681459 + 
Cache-Control: + - no-cache, no-store, no-transform + Content-Encoding: + - gzip + Content-Type: + - application/json;charset=UTF-8 + Date: + - Wed, 29 Jan 2025 20:45:15 GMT + Nel: + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' + Report-To: + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' + Server: + - AtlassianEdge + Server-Timing: + - atl-edge;dur=279,atl-edge-internal;dur=14,atl-edge-upstream;dur=265,atl-edge-pop;desc="aws-us-east-1" + Strict-Transport-Security: + - max-age=63072000; includeSubDomains; preload + Timing-Allow-Origin: + - '*' + Transfer-Encoding: + - chunked + Vary: + - Accept-Encoding + Warning: + - 'The issue create meta endpoint has been deprecated. (Deprecation start date: + June 03, 2024)' + X-Aaccountid: + - 5d3878b170e3c90c952f91f6 + X-Arequestid: + - 9a47223711d986c096a708b4d5d8fd13 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -90,18 +177,18 @@ interactions: uri: https://defectdojo.atlassian.net/rest/api/2/issue response: body: - string: '{"id":"16078","key":"NTEST-1594","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16078"}' + string: '{"id":"16610","key":"NTEST-1827","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16610"}' headers: Atl-Request-Id: - - 6e92d4a7-a838-4be0-8752-5957888cb29d + - 05bc7a5a-49d2-4e5d-a335-da7fd0a8cd07 Atl-Traceid: - - 6e92d4a7a8384be087525957888cb29d + - 05bc7a5a49d24e5da335da7fd0a8cd07 Cache-Control: - no-cache, no-store, no-transform Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:21 GMT + - Wed, 29 Jan 2025 20:45:16 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -111,7 +198,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=726,atl-edge-internal;dur=15,atl-edge-upstream;dur=712,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=811,atl-edge-internal;dur=15,atl-edge-upstream;dur=797,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -123,7 +210,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 05a2df9725b31c24c5dd4b7622e319c0 + - a127865b86cdb4616e34c4e57c1539cb X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -147,45 +234,44 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1594 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1827 response: body: string: !!binary | - H4sIAAAAAAAAA7xX227jNhD9FUKvta2LL/EaKIo28RbbptnAcbYPRRHQ0ljmmiJVkvKlaf69Q1GS - E9sqNll03ywOOTOcOecM/ejBLqci8SaeApGAguQ9A57ojqAZ6I6OV5DRjsxBUcOk0B1ImMnA0E68 - oiIFLtPOBpRGGyQzyBVoEKbaGxfayGxpHT6EQRAGPQV/FaDNfJ/DraKxYTF4HY/Z+OEouBjjhwa+ - xM+VMbme+H4CS4hNIj/LHjWcas2o6AkwPkYyPs2ZH/lM6wL82sEa9nj+Zj69m3fD4bsBLpUpaG/y - 6GnMrdAxNZBKtXd3SPALT0RBNOwGYTcM5lEwCYeTKOyNRsF3mHdgk7RBDCZeunljkva8j/6cx/La - 1UcCOlYst4XD1R/JgqWk0KAIllDtiVlRQwRAoomRZAFkoeQaBEnkVvTIpQK8Q0IWe/ILU5TcyaXZ - UgWkixuIkIbYthGpSAIcDPRs9FiKe8W/5BYsoylo357Qh3toH3IW9/QmRW8WLuhqiiu2h8XCUL32 - JkvKNXS8FUP8qHi1v4YNYMjwqeMZhgDLESzeRBScd7wjtPSDNkNYG3IlP2PGb2xIdfp8O56B6HC7 - e8GMQQfaa2JbJP9a7tVVze31WZZzhgknTQXohhqqsN4lCgfj3WD8ynQLwSzRKH9wvvwNg61fAqq+ - SWUIg0FwgWlEg100+H+j/KDZ3/C9zijnGDAc7cLRtwi4qyP2o10/+hYRMyRQkXlPT6dwjGo4Ltnu - k5NCbPIff57u7Nc7aZoqSJGyJyTAPCUvnAqcR/+wzTBqM1y0GKJWw7jN8O40T6eebnUr1bocFN6k - 
G1aSaSuvWOyu9HiyZvmARdUrWfDkiumc033FGlzeUoMTyCn36xnu5sJhEvjOnbL8LX9eysKWvkz1 - d7vAROpNjCpsbHRqPiE0LIurajiZbZkV0Xhcz4rjsrVJWdRI2bGhAVWumFTM7N9Ygvq43/8Kya+c - MFxwPHgp+r85blQK2vdOORI1yD82NKDldAFWHM/wxmrK2dMhAtjbA+dye1r0cGxLtqLazqRrJtbv - reUKcvvOEXENshJ629LWrAgppjgv6YLDDKh2wFXVL+/2+v7nDzcP1x8upzd304fpbPZxhuGRyBpr - hhvmKyC3OAmEITYuYZpIwfcEVYVx69SO8HJO3yrIsHTloNe9c+oSIuO84B8WBHm2mXhubGJ7sT8H - 2r2QE+xUygTlx5uqV1pV4pIaHLOrvm3rU3xf1LuL3PK6DeqDqHkWuQfVG9HpDjcT+OUb6HWAPcDx - Jxqv8Vla47F27mJdVi+/r0q4fj76URUkqh8MAkokSi7VjctmwQvopgpFrUlwLsmVdM2WWY4PZ2HO - A3/YphvDRjf+q+Mvy7kFWCMIQaSI0syOnFO3bTQN2xIJwy/zHEZvLLjzU9LzY3kTCxYUBMQzxco2 - 1Tw0G41naIQ389LVrlvKTNeK4fGOwfOSGvxzsi6HgR1Ox1vbRnDQZhg0c/P4RCNtGuLC6fz5jW2z - Omib1UETkxpD41XZGMf153JxDBpdZBm1/HjR1n8BAAD//+xYPW/CMBD9K10Yk5okNGFAFKFW6sDS - oQObYzttJJpE+aCtEP+9z3bi0ihGFaoQAxKDcXznu0vu7r0zr1V2wLw88WXKMjenjMmu+8RnE+5H - YRSPQyJ8NiVsOvGS6TiRL9ccwg1HjoGbppsF57gDGYR04l/3B4YgvaSuo9BbxV24yFB1TMp08C8Q - aCKUj4PgLmRROPGnMYtDgusTKpKIzflMaRn5i5H3iJ+Wc95p1kbMcfRW5TaV84FAOJ4LjMndook3 - 4E6IlFNQWslAQT4Fv0jRgrBcrhziFpmsYn0Yf/kW93nA5Vvc5xGXbjGKEtdYuW0zS3z6NyuaJA1j - qUqgGixHY1ld0tZoNDj40JQ5mM4axYa9/WSa5LF4alJX3tCy+OHeFNhaQmDDtIHBtKUo8rIW1zJy - vg/mWkbOYfG1jAyUkX4ZsIHMwCAwA1DgzqtOyp0cuLVrAkvymoIWkAH1NgxGbHWJeMMFzsY6idUB - K0YznvUlbODNtz4wqE5k27TMMw3r9BZv2lm2/vuX6G3z+v/GK1qZUYqbQL9fcsUju4kOUkCbvOuW - bX852QA197/t9IKI089nUTUbqfjAWcUAy3pRa8flqEqyROm62f8t7P2SbgWUtfv9/hsAAP//AwAA - yG5wOhkAAA== + H4sIAAAAAAAAA7xXbW/bNhD+K4S+zrZerDiJgWLYUnfolqVB4rQfhiGgpbPMmiI1kortZfnvO4qS + 3NjS0KRYv1k88l6fe+786MG2oCL1pp4CkYKC9B0DnuqBoDnogU5WkNOBLEBRw6TQA0iZycHQQbKi + IgMus8EDKI0ySG+gUKBBmPpuUmoj86VVeB8GQRiMFPxVgjbzXQHXiiaGJeANPGbth5NJGOCHBr7E + z5UxhZ76fgpLSEwqP8sRNZxqzagYCTA+WjI+LZgf+UzrEvxGwRp2+P5qPrudD8Oz6BSPKhe0N330 + NPpW6oQayKTauRhS/MIXURCdDINwGJ3Po3Aan0zDyeg0jH5Av63ayohBxys1r3TSvvdRn9NYhV1/ + pKATxQqbODz9iSxYRkoNimAK1Y6YFTVEAKSaGEkWQBZKrkGQVG7EiFwowBhSstiRX5mi5FYuzYYq + IEO8QIQ0xJaNSEVS4GBgZK0nUtwp/jVRsJxmoH37Qu/j0D4ULBnphwy1Wbigqhme2BqWC0P12psu + Kdcw8FYM8aOS1e4SHgBNhk8DzzAEWIFg8aai5HzgHaBlHDSCQsnP6Ngr816/7s76F1jZB3EnmDGo + QHutbQvY36q7uk6tjZLlBWfocNoGSh+ooQrTWoEtPtvGZy90txTM9hPl906X/8Bg41e4aSKpBWEQ + BxbcUbyN4v/Xyo+a/Q1vdE45R4PhZBtOvofBbWNxHG3H0fewmGOflLn39HQMx7APp1EjWLLtR0eF + WP0//jy+OW5u0ixTkGHLHjUBBiB56Vig29xJn2DSJzjtEUS9grM+wfmxn4493elGqnU1KLzpMMRP + anB+ON59eeM6Vt/zuO/UKduW1c8LWdrEhZabP9kDJjJvalQJWD5Uaj5ixW1z1rE4kuxh+nEcN0x/ + GHRLRIeCPkhELSQKxaRiZvfKFDTP/fE3EHathOGBg/dzyv7dQb4mxrF3DP2oxS2nC7DU1gHuqA9+ + liq6BQg/L6Vqfb/gpeXTQ/mZzdqKajtULplYv7OSt1DYRUUku5Z09UpuKll7IqSY4cCjCw43QLVt + pkeErPvlXV/e/fL+6v7y/cXs6nZ2P7u5+XCD5rETNaYNL8xXQK6R44Uh1i5hmkjBdwT5gnGr1M7g + atBeK8gxe9Wk1qMu3gixZbzgHxYEaqmmnpt7WGEs0b5vnvEBFitjgvLDS/WaVae/6g6O3tXftvoZ + LgjN7bKwjdmH9nhy3qDdbUSvBKh73M7W50vMyzC7R+TPNFnjXtlAslHubF3Uq9s3Odzsf35UG4ma + VUDAxiJRcqmunDcWnMNM0d3ewbkkb6UrtswL3HyF6W6Kk5Y6/quwh49aWnmezg3AGkEIIkOU5nZm + HD9tO/UQhX0cFoZfpzmMXplwp6dqzw9VJBYsyAmIZ2rbvsnmvtgo7GgjjMzLVtthRUHDuMPD+Mtc + G/x3sa7mwWOHsr4ZGvQJ4nbwHb5o2U1DUjqq777YN2yDvmEbtDaf0cMheqgxNFlVVasgqMs8p7Y/ + ustqh6BULy3mvwAAAP//7FhNb4JAEP0rvXiEroAFDsYa0yY9eOmhB2/LLlQSBMKHrTH8975lYWsJ + a5qmMR5MPKzs7sw4Mm/em55TAeYWlDHReF/4fMZtz/WCqUtCm/mE+TMr8qeR+HPVIXg4cwziMk6W + nMMHKgjlxA+PJ4GgvISts6S6zXtookLbY+JOT+ycEE2E8qnjPLjMc2e2H7DAJXAf0TDy2ILPWysT + ezmxnvGR94wdTbuMGYZ8VJp1aXwgEYZlgj1yM6+DBOIHmTJySkuRKNyPoRxitCAsV2uDmHkqUGxI + 0K8/4iHDv/6Ihwrh2iMGKPG4zBN66NrMCq/+3ZpGUc1Y3BZQBf0i6ayEtA0aDQ4+1UUGDbMB2LDt + d6UJhYpdVbrCQyfDx3uTo2sJjo7WOorWDjdU2ynCPCuq8IYvl3uTbvhyiYhv+DKCL0MYUAxMERZE + 
/S5r7yhGad2awGFWUcgCMmJFx8GIDpeINQ5wOuFJdCzZ0XI09cuGN3TkzdZuKFYXpvu4yFLJ3OQj + XnfDaPn1V9nLdtLCsV92cP8H+D2Zo9/3dqGL6edrWNaJMHziuxVkRbWsZBz7rPq/SY80pozC15aW + b1mrZ1uJ38hZk1CJwqUK5Ge01o9wuwttepqm+QIAAP//AwA6/KnH+xgAAA== headers: Atl-Request-Id: - - e5e5c14b-a836-4cc1-9a3b-5934dac285ac + - 71ee13bb-d170-48a6-906c-72016de1bb34 Atl-Traceid: - - e5e5c14ba8364cc19a3b5934dac285ac + - 71ee13bbd17048a6906c72016de1bb34 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -193,7 +279,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:22 GMT + - Wed, 29 Jan 2025 20:45:17 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -203,7 +289,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=431,atl-edge-internal;dur=13,atl-edge-upstream;dur=418,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=249,atl-edge-internal;dur=14,atl-edge-upstream;dur=235,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -215,7 +301,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 206c2fe40afcc4f1bc2298436d4073ea + - 99e237ceb0c6f5fc7a7b096b25effccd X-Content-Type-Options: - nosniff X-Xss-Protection: diff --git a/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_mapping_enabled_create_epic_and_push_findings.yaml b/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_mapping_enabled_create_epic_and_push_findings.yaml index e7f5f78a297..0d09d6e3409 100644 --- a/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_mapping_enabled_create_epic_and_push_findings.yaml +++ b/unittests/vcr/jira/JIRAImportAndPushTestApi.test_engagement_epic_mapping_enabled_create_epic_and_push_findings.yaml @@ -19,17 +19,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQTU/DMAyG/0uurJ2bpvvIDQ2JgWAgtbuAEEpTRwTSpGrSSdO0/04qJtgRuFn2 - 8/qxfCC18LjtDeHkLYTO8+m0QYUyNO7dpSIY4b0WNrUYyIQ02ndG7P/Bl9jvtMQG/ccaTbdCG7D/ - 65KVs8oMaCX+LrnD3mtnI5wBZCmkkJSby8dy/VD9TDdDW8eK8OcRmsAEXqITO+P2bbyy2nejbWXc - 0MRQPWjTfEUIjwE6n5+aVyKMIAVaJJAlsKwo5SznLIoBLiDCMe/jH7CvdHvOZlBR4FnBc5bSgn6z - sr2xykWQZbOFFIAFKpnlcxRMKaZAKrrMi0WtZgzpTAE9EwQzGm51L8YXohKDCXdOirF9IOZUEbSv - 25Iczw97cnacXN9X5PgJAAD//wMAVZhjdyACAAA= + H4sIAAAAAAAAA5yQUUvDMBDHv0te3bpL1tKZN5ngFJ1CuxdFJE0uWE2T0qSDMfbdTXDofFPfjrvf + /37H7UkjPG4GQzh5DaH3fDZTqFEG5d5cJoIR3rfCZhYDmRDV+t6I3T/4CodtK1Ghf1+h6ZdoAw5/ + XbJ0VpsRrcTfJbc4+NbZCFMAmkEG02p98VCt7uvv6XrsmlgR/pSgCUzgOTqxN27XxSvrXZ9sS+NG + FUPN2Br1GSE8BlhZHpuXIiSQASumQKdsUdOc55TPFxkAnEGEY97HP+BQt90P9rxmlOcFhyKDkn2x + sru22kVQFwVFLOe0UShzKQWioM1CaXpeCk3zKNBNgexEEEwy3LSDSC9ELUYTbp0Uqb0n5lgRtC+b + ihxOD3t0Nk2u7mpy+AAAAP//AwDjs6gYIAIAAA== headers: Atl-Request-Id: - - cf32c567-6ff5-41f4-ba07-35ef62cc163f + - dc2fdac2-6ca6-4d66-a1fa-7b7a8151f2c8 Atl-Traceid: - - cf32c5676ff541f4ba0735ef62cc163f + - dc2fdac26ca64d66a1fa7b7a8151f2c8 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -37,7 +37,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:34 GMT + - Wed, 29 Jan 2025 20:45:05 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -47,7 +47,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=144,atl-edge-internal;dur=15,atl-edge-upstream;dur=130,atl-edge-pop;desc="aws-us-east-1" + - 
atl-edge;dur=228,atl-edge-internal;dur=29,atl-edge-upstream;dur=200,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -59,7 +59,94 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - e95e0cabf3fed24c769d00719e17d11f + - 18474af2b7f4a69211e1b5cfeeef83a6 + X-Content-Type-Options: + - nosniff + X-Xss-Protection: + - 1; mode=block + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json,*/*;q=0.9 + Accept-Encoding: + - gzip, deflate + Cache-Control: + - no-cache + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - python-requests/2.32.3 + method: GET + uri: https://defectdojo.atlassian.net/rest/api/2/issue/createmeta?projectKeys=NTEST&issuetypeNames=Epic&expand=projects.issuetypes.fields + response: + body: + string: !!binary | + H4sIAAAAAAAAA+xWW0/bMBT+K1aeS0JLxVClaZqASWwTmsTlZULIbU5bg2NnttMmQ/3vO6e5OKVs + S9nGXshTfHIu37G/88UPAeQpV3EwClKj72DibNDzr6OvD95BWJuBK1IgFwtyira5c6kdRVEMUwyI + 9Z0OuZPcWsFVqMBFBqyLeCqiQVRljfr7+GAKQUnrxT0UuDq/PL24xJXiCeDySgnnMAEV5AvuuLky + ElE9BMOjfHi0Y/1MiQUYy+VtmStaCFhG1FADrfrQ3x/uv8Gag2E+GP7bKu+s+A5vbcKlxIL9w7x/ + +BIF87riwSA/GLxExQRikSXBqtfmEfHrGUxqMjzNpRjsxIjUCa3Q+p6NxYxlFgyzTpuCuTl3TAHE + ljnNxsDGRt+DYrFeqpAdG+AOYjYu2EdhOLvQU7fkBtgeOjClHcNGHNOGxSDBQUjVJ1ohM7t0IRI+ + AxtRhPV92AhSMQntYubJf4oWXGXKGa6sJFDnG19sNnbc3gejKZcWesFcgOFmMi8+wwIQTL/nZ3cq + QMY0RtULjpDNkoSbgl4NfMuEAXR0JsNMdjKHhNMXAofh1hmhCJstrIOELFX0qsF7UVnqWa49EBi3 + JzDlmXTXXGbQANYpAqZTIiIgD1xw06ZHJ2TeuwXOGz28M7Kxy9KzRNiO7YTxBmVISr2EeO30yt8O + /P0tS1d06BOdpFqBon9O+9SroK1j58ZwopbAA8cQH9+mQSup58GxN9Y8aPl1JELA45gqAdUzkOgF + BE+QgzrbOMwurW3PWjuFb+SkZa07aXt2baWeu0q6O01d7dsCWZs8wC+NUwnOB+0AbHtTnzVxf3b3 + qKJJPD6V2lYNFW2ASFIpUFb9kb5eVf5mwf93VSmVKUPVTda/zVskTL+/4xSX8cFaZcK0yIUNZwZA + zXWKbA/vUKNHs/ke6eee5GOgTsuYM0y/ruhHijSVrW8BjXZtwdtpvFZ0I8ivcYtKc5fWHmsvbXAp + OI0YtHN69B9EzipnL75t112g9yoN/rX6pkZoI9yjW87POmu8N3StsrWFrXGrla0xbDVQKug2/icA + P1PZhP4BAAD//7xYwW6DMAz9lx16XLaWHhHabZOYVKm79kAZAlQGGxBVPfTfZ8cJOINJlKAea7Dz + Yr/afuDZAis/d5DrIDkYsjzNILo9y1/JiCeoxgnJcAO7Xg7sEKmBuXaGuVkGJjUUG+g7NRkNdeMM + 1VsGKjDSxhlWZwPScwa5XQzkgKKhshmo24frAXcv/k90g35PKoyMHW9aB4tKbEI3zZy8PHUuZuR4 + f0ZOSC+Njxxk3qDt/du3sSurOTdz2gzXYx2thxySweDVz6HfyrbC9R9lz8Qydkx4fnwSFEg0MoXS + tsGPTOqLP/nyY5IBMEdtG8XZF0qXWflg/iwnzNrn5YW/SrmxvKfdg24QV9+XXq4jiWbWk/kz/MzK + 6gq/QdkqJd+X1wrgUmKtywUQ/pTUQSzrGvKiVczbp79qsuq8l8cPkLO7CJ/5ONy5uSGLdlVAQTD4 + pSyK1Sy6dGlGwCmI/mlJxq8FFh2MNyODMXVU6A1uWcTDBUWLjkUimgT1fqD3bV/prFuzQZ0DN/LD + 9RcAAP//AwBwEqjntBYAAA== + headers: + Atl-Request-Id: + - b87ab541-9873-4bcb-b4d0-171962f3a902 + Atl-Traceid: + - b87ab54198734bcbb4d0171962f3a902 + Cache-Control: + - no-cache, no-store, no-transform + Content-Encoding: + - gzip + Content-Type: + - application/json;charset=UTF-8 + Date: + - Wed, 29 Jan 2025 20:45:05 GMT + Nel: + - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": + "endpoint-1"}' + Report-To: + - '{"endpoints": [{"url": "https://dz8aopenkvv6s.cloudfront.net"}], "group": + "endpoint-1", "include_subdomains": true, "max_age": 600}' + Server: + - AtlassianEdge + Server-Timing: + - atl-edge;dur=362,atl-edge-internal;dur=15,atl-edge-upstream;dur=348,atl-edge-pop;desc="aws-us-east-1" + Strict-Transport-Security: + - max-age=63072000; includeSubDomains; preload + Timing-Allow-Origin: + - '*' + Transfer-Encoding: + - chunked + Vary: + - Accept-Encoding + Warning: + - 'The issue create meta endpoint has been 
deprecated. (Deprecation start date: + June 03, 2024)' + X-Aaccountid: + - 5d3878b170e3c90c952f91f6 + X-Arequestid: + - 123eca3c50496b773a888435b9d54f08 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -90,18 +177,18 @@ interactions: uri: https://defectdojo.atlassian.net/rest/api/2/issue response: body: - string: '{"id":"16083","key":"NTEST-1599","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16083"}' + string: '{"id":"16607","key":"NTEST-1824","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16607"}' headers: Atl-Request-Id: - - 81ac8855-a328-4d48-99e2-b6881fa538c0 + - 35a69b3f-045c-438b-bf97-7d1ae935c2bb Atl-Traceid: - - 81ac8855a3284d4899e2b6881fa538c0 + - 35a69b3f045c438bbf977d1ae935c2bb Cache-Control: - no-cache, no-store, no-transform Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:35 GMT + - Wed, 29 Jan 2025 20:45:06 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -111,7 +198,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=747,atl-edge-internal;dur=13,atl-edge-upstream;dur=734,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=884,atl-edge-internal;dur=13,atl-edge-upstream;dur=871,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -123,7 +210,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 55e6aabba5a96be9ab92e89700460e32 + - 8d6e3026c6ae2627c31fe90f2e23886b X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -147,45 +234,44 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1599 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1824 response: body: string: !!binary | - H4sIAAAAAAAAA7xXbXPiNhD+Kxp/LeAXICHMdDptjutcm+YyCbl+6HQywl5sHbLkSnKAS/Pfu7Js - kwDuXOj0vmHtal+ffVY8ebApqEi8qadAJKAgec+AJ7onaA66p+MMctqTBShqmBS6BwkzORjaizMq - UuAy7T2C0iiD5BYKBRqEqXXjUhuZL63BhzAIwmCg4K8StJlvC7hRNDYsBq/nMes/PAsmQ/zQwJf4 - mRlT6KnvJ7CE2CTysxxQw6nWjIqBAOOjJ+PTgvmRz7QuwW8MrGCL96/ns7t5PxxfXOBRFYL2pk+e - xthKHVMDqVRbl0OCX3gjCqJxPwj7YTCPgmk4ng7Hg2A4+Q7jDmyQ1onBwCszJwZp7/toz1ms0q4/ - EtCxYoUtHJ7+SBYsJaUGRbCEaktMRg0RAIkmRpIFkIWSKxAkkWsxIJcKMIeELLbkF6YouZNLs6YK - SB8ViJCG2LYRqUgCHAwMrPdYinvFvyYLltMUtG9v6F0e2oeCxQP9mKI1Cxc0NcMT28NyYaheedMl - 5Rp6XsYQPyrOtlfwCOgyfO55hiHACgSLNxUl5z1vDy3DoBEUSn7GwE6se337eNVfYGWXxL1gxqAB - 7bW+LWB/rXR1XVqbJcsLzjDgpE2UPlJDFZa1AttoshlN3hhuKZidJ8ofnC3/kcHar3DTZFILwmAU - nGMY0WgTjf5fLz9o9gW+1znlHB2GZ5vw7Fs43DQeh9FmGH0LjznOSZl7z8+HcAy7cBo1giXbfHJU - iN3/489DzWGjSdNUQYojezAEmIDkpWOB4+7GXYKzLsF5hyDqFEy6BBeHcTr2dKdrqVbVovCm/bCm - TNsSxWKX0tPBmR0UrLbOZMmTd0wXnG7rccLjNTW4gRxzv3303V7YbQLfmVN2sKufl7K0pa9C/d0e - MJF6U6NK6xuNmk+IGTvedTUczR7dFaPB2XnU7Ir9srVUti/oAlXUgqpQTCpmtieWoLnu2814KuXX - RhgeuAF5Tfq/uaGpqXXoHQ5P1CJ/X9CCltMFWNY8MjeWbI7eDhHAXkLV6mELnMv1YeXDia1bRrVd - TFdMrN5byTso7GNHxA3SKvytK1l7IqSY4dKkCw63QLVDr6p/eTdX9z9/uH64+nA5u76bPcxubz/e - onucZo2FQ4V5BuQG94QwxPolTBMp+JYg5zBujdo9Xi3rGwU51q/a9npwjHtCHDsv+JsFQSG+TD23 - O7HH2KTd7L3iFGxXygTl+0r1U62uczUfHKOrv23/U3xkNNplYYe7C+/nkxbv7lV1IkTd5XY/v34I - vQ21O0z+ROMVvk0bUDbGna/L+vn3nwJu3pB+VDuJmueEgAqJkkt17aJZ8BL6qUJmawOcS/JOumbL - vMDXszDH0T9uyePfGrt/qSWW1+VcA6wQhCBSRGlu987h1a5ZDbtYLAy/znIYnVhwZ6caz49VJhYs - yAqIZ4qVbau5azYKj4wRZual2aZfcU3f9m1fY/Sy1gb/oayqjWA31L5q1x4etTtyTxB03QhaftMQ - l47sjyt2Leyga2EHbTDUGBpnVWPcrL+ki3006TLPqZ2Ppq3/AAAA///sWD1vwjAQ/StdGJOaJDRh - 
QBShVurA0qEDm2M7bSSaRPmgrRD/vc924pYoRhWqEAMSg7F957Ode/eeD69VlsG8PPEyJczNKWOy - 9D7x2YT7URjF45AIn00Jm068ZDpO5OWaSVjhyDQI1HSz4BxrIIOQTvzr/lcgSC/p6ygxV+cuXGSo - miZtOnIYCBQRysdBcBeyKJz405jFIcHyCRVJxOZ8pryM/MXIe8RP2znvNGtPzHF0V+U2lfOBg3A8 - FwyUu0UTbyCgcFJOQWklDwr2KdRHihKE5nLlELfIJIr1Sf7lR9xXCZcfcV9lXHrEACWuCXNbZpb4 - 9G9WNEkaxlKVQDU0kCa0GtLWKDSY+NCUOXTQGmDD3n4yTapcjJrUlSu0Un64NgW2khDYiG1giG1/ - wJSdUhR5WYsrvpzvS7riyzkivuLLAL70YcAwMENQEPWrzr2dfI5r2wQL5jWFLCADXqxUy4pLNo1J - vGHks1IxG32WgDA4QMyWewO+zcI3rE5k27TMM03rdBdv2gdt/fcvp7fN6/97Y9HOjFOsBPn9kisd - 2T3r4EvXIe+6ZltfTg5APf7fdn4hxOnns6iajXT8a7NKAZb1otYbl+9VUiXKrZv+Q2PvwLo1UNHu - 9/tvAAAA//8DAEVZg+E/GQAA + H4sIAAAAAAAAA7xXbW/bNhD+K4S+zrZeLLuOgWHYUnfolqZB4nQfhiGgpbPEmiI1kortZfnvO+rN + iy0NjYv1m8Uj7/W5585PDuxyKmJn7igQMSiI3zHgsR4ImoEe6CiFjA5kDooaJoUeQMxMBoYOopSK + BLhMBo+gNMogvoVcgQZh6rtRoY3M1lbhg+95vjdS8GcB2iz3OdwoGhkWgTNwmLXvT6feG/zQwNf4 + mRqT67nrxrCGyMTysxxRw6nWjIqRAOOiJePSnLmBy7QuwG0UbGCP76+Xi7vl0J8FIR6VLmhn/uRo + 9K3QETWQSLWvYojxC18EXjAZev4wuFgG/jyczL3pKBwH36HfnnXSGjHoeKnmTCftexf1VRrLsOuP + GHSkWG4Th6c/khVLSKFBEUyh2hOTUkMEQKyJkWQFZKXkBgSJ5VaMyKUCjCEmqz35hSlK7uTabKkC + MsQLREhDbNmIVCQGDgZG1nokxb3iXxIFy2gC2rUv9CEO7ULOopF+TFCbhQuqWuCJrWGxMlRvnPma + cg0DJ2WIHxWl+yt4BDTpPw8cwxBgOYLFmYuC84FzhJax1whyJT+jY2fmvX7dnfV/YeUQxL1gxqAC + 7bS2LWB/Le/qOrU2SpblnKHDcRsofaSGKkxrCbZwtgtnr3S3EMz2E+UPlS73kcHWLXHTRFILfC8s + 8R6EOwT5/2rlB83+gu91RjlHg/5050+/hcFdY3Ec7MbBt7CYYZ8UmfP8fApHvw+nQSNYs92nigqx + +r//cXpz3NykSaIgwZY9aQIMQPKiYoFuc5M+wbRP8KZHEPQKZn2Ci1M/K/asTrdSbcpB4cyHPn5S + g/Oj4t3XN27F6gcedyt1yrZl+fNSFjZxvuXm3+wBE4kzN6oALB8qNZ+w4rY561gqkuxk+snoYhY2 + TH8cdEtEx4I+SAQtJHLFpGJmf2YKmufu+CsIu1bC8KCC90vK/lBBvibGsXMK/aDF7bGghRynK7Cc + 14F6SxWdr32En4N9sD9NuT+zCUuptvPkionNOyt5C7ndUUS0b/lWp3JbytoTIcUCZx1dcbgFqm0f + PSFaq1/OzdX9z++vH67eXy6u7xYPi9vbj7doHptQY8bwwjIFcoP0LgyxdgnTRAq+J0gVjFuldvyW + M/ZGQYaJK4e0HnVRho/d4nh/M89T6/HcqUYeFherc2iZF1SAdUqYoPz4Ur1h1QkuG4Ojd/W3LXyC + u0Fzu8htT3YDfTryw0kD9GoZOhOb1eN2rL7cX14H1wMYf6LRBlfKBo2N8srWZb21fZXDzernBrWR + oNkCBGwtEiWX6rryZsULGCaKWoTWDi4leSurYsssx6VXmG7YT/pYY9Kyxn9V/GU6twAbBCGIBFGa + 2XFxqravSf0+R3z/yzT7wZkJr/SU7fmxjMSCBekA8Uwxs202D8VGYUcbYWROku6GJckMfVu44yth + X4zttLTJNviXY1MOiadTM17fYA3boXf8omU2DVFR0Xz3xb5B6/UNWq+1+YIfjlFCjaFRWpatxKAu + sozaBumuqx2AUp1TzX8AAAD//+xYPW+DMBD9K10yQh0gBYYojaJW6pClQ4dsxoYGiQDiI20U8d/7 + bBM3jXBUVVWUIRKDwfbdcXDv3rPgU8C5GWVMNN0XPp1wN/CDaOyT2GUhYeHEScJxIr6uXgQPZ5ZB + WKbZnHP4QAmhnvju8SgQ1JewdZZQy7zHNkpULhN7DqTOi9FFKB973oPPAn/ihhGLfAL3CY2TgM34 + VFoZufOR84xL7bM2NO8zZlnqUW23tfWBRFiODebI7bKNMggfZMoqKa1ForA/hWpI0YMwXCwtYpe5 + gLFTcn79EZ+y++uP+FQdXHvEwB6e1mVGd32fWeDXv1vSJGkZS2UBNdAuisoq5Fqh02DhU1sV0C8r + gA1bf1eaUKeY1aUrPPQSfLg5eaae4JkoracpbRWXRdXENxi53A9zg5FLRHyDkQEYOYUBE8v0NAPT + hAWv866Kci+O0foxQSRFQ6ELyIB5EwcjJlwizjDAmUQnMb6AkaOZuKWAkMEJ1zihWV2cb9OqyBVz + U4942x9Eq9tfZa/YKAv7w7CH+z/g8tEZ+v3BLoQx/XyN6zYTho98S0VWNfNGxbEtmv875VHGtFH4 + WtP6rZCCVmr8Tp0zCZkoXOpAfkbr/Ai33yDT03XdFwAAAP//AwCr0ocf9xgAAA== headers: Atl-Request-Id: - - 4fd4aa1a-2618-4cf5-aaab-156783d4ab8f + - 3d4d84c7-56aa-4efc-9c2c-49e5c17f852b Atl-Traceid: - - 4fd4aa1a26184cf5aaab156783d4ab8f + - 3d4d84c756aa4efc9c2c49e5c17f852b Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -193,7 +279,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:35 GMT + - Wed, 29 Jan 2025 20:45:06 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -203,7 +289,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - 
atl-edge;dur=305,atl-edge-internal;dur=14,atl-edge-upstream;dur=292,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=260,atl-edge-internal;dur=19,atl-edge-upstream;dur=241,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -215,7 +301,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - d28683dfb7669434b9a9dcd3ce8099aa + - b21a099f1b3c205cf3170a7d4e62a792 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -243,17 +329,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQTU/DMAyG/0uurJ2bNvvIDQ2JgWAgtbuAEEpTRwTSpGrSSdO0/04qJtgRuFn2 - 8/qxfCC18LjtDeHkLYTO8+m0QYUyNO7dpSIY4b0WNrUYyIQ02ndG7P/Bl9jvtMQG/ccaTbdCG7D/ - 65KVs8oMaCX+LrnD3mtnI5wBZCmkkJSby8dy/VD9TDdDW8eK8OcRmsAEXqITO+P2bbyy2nejbWXc - 0MRQPWjTfEUIjwE6n5+aVyKMIAXKEsgSWFaU8iLnRRQDXECEY97HP2Bf6faczaCiwDPGc5YuGPtm - ZXtjlYtgkc0WUgAyVDLL5ygKpQoFUtFlzha1mhVIZwromSCY0XCrezG+EJUYTLhzUoztAzGniqB9 - 3ZbkeH7Yk7Pj5Pq+IsdPAAAA//8DAHOVr/UgAgAA + H4sIAAAAAAAAA5yQ30vDMBDH/5e8unVJf9AubzLBKTqFdi8TkTS5YDRNSpMOytj/boJD55v6dtx9 + vvc57oBa5mA7aETRq/e9o4uFAAncC/tmE+Y1c04xkxjwaIaEcr1m0z/4Goa94iDAva9B9yswHoa/ + LllZI/UIhsPvknsYnLImwARjkuAEz+vN5WO9fmi+p5uxa0OF6FOEZniGn4MTem2nLlzZTH20rbQd + RQi1o9LiM4JoCKRleWpeMR/BFKfFHJN5WjUkpzmhWZVgjC9wgEPehT/A0KjuB7tsUkLzguIyyarq + i+XdjZE2gLIoCECZkVYAzzlnAIy0lZBkWTJJ8iCQbQHpmcDraLhVA4svBMlG7e8sZ7F9QPpUITAv + 2xodzw/bWRMn1/cNOn4AAAD//wMA82+DqSACAAA= headers: Atl-Request-Id: - - 18ba0aaa-972e-4fa9-9889-5204fc86b4d7 + - 3dbaf052-0574-4271-99d0-0ec013e2c8dc Atl-Traceid: - - 18ba0aaa972e4fa998895204fc86b4d7 + - 3dbaf0520574427199d00ec013e2c8dc Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -261,7 +347,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:35 GMT + - Wed, 29 Jan 2025 20:45:07 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -271,7 +357,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=172,atl-edge-internal;dur=15,atl-edge-upstream;dur=158,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=152,atl-edge-internal;dur=13,atl-edge-upstream;dur=139,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -283,7 +369,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 78545f24694651553f3ddac549fc9365 + - cd80cd9f60d98728446e60efee24320f X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -334,9 +420,9 @@ interactions: Qd0rB+1tFIM2taXQGeyiiIe7tFsY5eAywDF9q+SoL8eQv0aDOgcK1t35GwAA//8DAFZhNg1MFgAA headers: Atl-Request-Id: - - 537c7e6d-0b1e-4846-ba06-78bbc186c3a9 + - 9ed7915e-d5aa-4323-a982-c6d13885ecce Atl-Traceid: - - 537c7e6d0b1e4846ba0678bbc186c3a9 + - 9ed7915ed5aa4323a982c6d13885ecce Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -344,7 +430,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:36 GMT + - Wed, 29 Jan 2025 20:45:07 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -354,7 +440,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=304,atl-edge-internal;dur=13,atl-edge-upstream;dur=293,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=270,atl-edge-internal;dur=15,atl-edge-upstream;dur=255,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; 
includeSubDomains; preload Timing-Allow-Origin: @@ -369,7 +455,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 925dbceca6905cd0c0cb09d08883e87f + - 95c3692dea6dac5b3ca1f5b2a5424029 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -380,11 +466,11 @@ interactions: - request: body: '{"fields": {"project": {"key": "NTEST"}, "issuetype": {"name": "Task"}, "summary": "Zap1: Cookie Without Secure Flag", "description": "\n\n\n\n\n\n*Title*: - [Zap1: Cookie Without Secure Flag|http://localhost:8080/finding/242]\n\n*Defect - Dojo link:* http://localhost:8080/finding/242 (242)\n\n*Severity:* Low\n\n\n*Due - Date:* May 10, 2025\n\n\n\n*CWE:* [CWE-614|https://cwe.mitre.org/data/definitions/614.html]\n\n\n\n*CVE:* + [Zap1: Cookie Without Secure Flag|http://localhost:8080/finding/232]\n\n*Defect + Dojo link:* http://localhost:8080/finding/232 (232)\n\n*Severity:* Low\n\n\n*Due + Date:* May 29, 2025\n\n\n\n*CWE:* [CWE-614|https://cwe.mitre.org/data/definitions/614.html]\n\n\n\n*CVE:* Unknown\n\n\n\n\n*Product/Engagement/Test:* [Security How-to|http://localhost:8080/product/2] - / [weekly engagement|http://localhost:8080/engagement/3] / [ZAP Scan|http://localhost:8080/test/93]\n\n\n\n\n\n\n\n\n*Systems/Endpoints*:\n\n* + / [weekly engagement|http://localhost:8080/engagement/3] / [ZAP Scan|http://localhost:8080/test/90]\n\n\n\n\n\n\n\n\n*Systems/Endpoints*:\n\n* https://mainsite.com/dashboard\n* https://mainsite.com\n\n\n\n\n\n\n\n*Description*:\nA cookie has been set without the secure flag, which means that the cookie can\nbe accessed via unencrypted connections.\n\n\n\n\n*Mitigation*:\nWhenever a cookie @@ -411,18 +497,18 @@ interactions: uri: https://defectdojo.atlassian.net/rest/api/2/issue response: body: - string: '{"id":"16084","key":"NTEST-1600","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16084"}' + string: '{"id":"16608","key":"NTEST-1825","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16608"}' headers: Atl-Request-Id: - - 73ad8331-fceb-4ba1-adde-c4cea47b3908 + - 77d664d0-36fc-4d13-ad09-dfc1fc543df8 Atl-Traceid: - - 73ad8331fceb4ba1addec4cea47b3908 + - 77d664d036fc4d13ad09dfc1fc543df8 Cache-Control: - no-cache, no-store, no-transform Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:36 GMT + - Wed, 29 Jan 2025 20:45:08 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -432,7 +518,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=540,atl-edge-internal;dur=13,atl-edge-upstream;dur=527,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=643,atl-edge-internal;dur=16,atl-edge-upstream;dur=628,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -444,7 +530,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 5f537c59b9296df8a23ae8d4973bc3b3 + - a782d3a8371f1e6328f557ca39f53571 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -468,53 +554,53 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1600 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1825 response: body: string: !!binary | - H4sIAAAAAAAAA7xWbW/bNhD+K4Q+bZltvdhxXAHDkMXO1i3LgsRpgGZBwEhnmTVFaiQV22v733ek - XtwmUYtkWFEgtXi89+ce3nsPNgUVqRd7CkQKCtJjBjzVPUFz0D2dLCGnPVmAooZJoXuQMpODob1k - SUUGXGa9e1AaZZCeQ6FAgzD13aTURuYLa/A2DIIwGCj4uwRt5tsCzhRNDEvA63nM+g/HwWSEHxr4 - 
Aj+XxhQ69v0UFpCYVL6TA2o41ZpRMRBgfPRkfFowP/KZ1iX4jYEVbFH/dD67mPfxLMAjF4L24vee - xthKnVADmVTbKocUv1AjCqL9fhD2w2AeBXG4Hw/Hg0k4/gHjtjacE4OBOzMvDNLq+2gviNq0648U - dKJYYQuHp4dE55TzHkmZNkwkhhQMEiByQdZSrQZWO5HiUvFnRlEKZttF+S29p4Yq/57B2ndh7QKs - RWEwDCc/afYP/Jhj28scvVpYoMs51Svbq/LO2F/xgnINPa9SfI15Od2et2QIHJUstydwDxhr8LHn - GYbIKhAlXixKzNF7AJNh0AgKJd9hRi8seK3tyu0a2JTbfnwCkl1Wl4IZgwa01/q2SP3d3dVyYdZU - WbxqlhecYcDpg8yxHw5lo8lmNHlmuF/oTJNJ25dRcIBhRKNNNPp/vVTdd1hEh+F4E46/hcNN43EY - bYbRt/BYA/zjx8dwDLtwGjWCBdu8qTgQu3998/jmsLlJs0xBhnzz1SHYbwSYmeRlxQtPXx13CQ46 - BFGnYNIlePU4nIo2q1NLSu6F8OJ+WHOlbYliSRX5+0dndlCw2nopS55OmS443dbjhMdravDpqSj7 - +aNfPQi7J8CvzCk72O7nkSxt6V2oV/aAicyLjSqtbzRq3iBm7HjX1VCAyVr+eOqR2D8ImkfiYdla - Knso6AJV1IKqUEwqZrYvLEGj7tsn8RlvBctpBtq3GroxwvCAy/VA32c7sjyR64ZUR97jsYlazHN6 - B5YWnxgMyyZPliHsQmg4sfVYUj0rWHLCxOrYSqZQ2O1FJA2CHK7WTtaeCClmuLzQOw7nQHWFSlX/ - 8s5OLn95fXp78vpodnoxu52dn/95jvnhlGosCF6YL4GcIf8LQ6xfwjSRgm8Jcgnj1igxkvzGFCVn - CnIkE1JqRNzgKU4JcZy84AMLgkIexF71JmLvsPi7mfqMK7ANGROUP7xU7151eR3uOUZXf9u+ZgLa - 22Vhh7YLx+Nw2OC4WpNeCL1KuX13P99snofGHdx+pskKl80Gco3xytdRvc/9p4CbpdBvdrOoWRME - WKgnkkt1WkVzx0voZwoZa7cSSTKVVbNlXuA6LMzToN/vIoX9lhS+1PHPy/mX2P3bmzPDYS8m129p - EcbkSMoVA3LFDHKsIReQlArIMafZB1sdLA6XCeVLqU08CSaBv2AiRSL0o1F04wxOXfEwr3eSWFjF - e+SrmuQ7/PO9U7/Apc9yEKohW9RBTksgU8wHD/+gWxIGPWLB2CZxdDVD0TX+1x+HIxep7WOyhkHO - jIKBVJmPMKa2tQw3Ngt/H68OlibnLu7Kzhtr51KshFx/WqQzJdMSd4CZyHCwc2yTP8caW5+uQhgv - +VWu+0Z2VKmoDUQ3xCfXa4AVsgC01jq0dhf8oVN8e3hGLhIqOu7bPdR/NWzz+SSDi602kGvMIC0k - Q5jtxe7c9cbWKqdMaGZggEjEUunlnaQq7brxyP50hzBr+ZAk8l8AAAD//+xZbWvbMBD+K6JQaKF2 - ncRpkkHpQulgH8pGCyuUQpBluTFLZOOXumXrf99zkqK5XtyNMko/FPLBsaTT3eXuuecuOpCAuiyS - UrFSVqyxUVUBFksTWQki64A1y1Qs2VpyVWKRmx1WAsy9UZFkXAjgqozZXcpZjVQRxUMOYMI+paSh - B35Lo3P8yre6qSSFrpZSUWQx7uRm6DlhETQhs8D/WKqSrFjrMywrCK05VksCSwD1d6kOSDEITytm - OAjjq4Y/kIks51q7ukRMM65YS0F0jEqufHamSrLZWWh8cKPICXQb+QgaWA3LjYoksKzhoK2atmxu - WX+BLETdgcvIehsuTdP4WcPLXOcD0k/e+/ky17GMSxaQubB3L3gF0hXVCKnF3per+eVX7/LcQ4nW - WeouybOikgXlwR6P16naZ3v7PxEoqyr7gDD8k92MXYnvlrc+hBuEbYSrCmC6Jl5EBLtbHQHuLISO - inYWgr4TgWMZ+kfSnGr7xj7WEfTx4sApAx9zsaT8tqW3Xb27GF7W6zWncrXzN6QmrxP5zIoX1jYi - ISfINyK8n+PjcTyaTqbRYBLIkZgFYjYeJrNBcoR73Cbc8Mw2SSExj2PcgfqGYhc/fGwpAkAhWc+2 - wyZXfNRPvU3DlG3JQgmKx+NBGB5NxHQyHs0iEU0CXJ9wmUzFSXyspeyO5rvDT/iYc96aKwuvnmde - lX5deg0c4Q19gmw/r6NVKshTXs55SY7CeV1BQBDxeIqk8HNFPu+21m9f425v/vY17vb2b11jYFRs - 2lRLAk8R+uAwSVILkeoEIjw3baRBuGvQQGw8q4ssl4fXwB6x/J1pNFvCqktdusFO1LYzx7APV8O+ - djJ07WRh8f0dRl4tYN5h5DU0foeRLTDShYE+phY6Qub4Csy5NUn5g4bg9jmAJlnF7Qi/K6WXefXi - Ut/gJxhuR75eZtZrWS9lcyZ3FkZ9J0aO5El1lxaZMizPvIpr+/+R+fov3rvLqv832TTCnFDchDbt - W6anPJthKlLAqPxj82jry4sV0P+1HW7kHuys+f2FLOsVCW4Zq+czRTWvjOE0JaYZDpnu3j89PHxy - 2h7Q2j4+Pv4CAAD//wMAYqPKDa4cAAA= + H4sIAAAAAAAAA7xW+2/bNhD+Vwj9tGWy9fCjjoBh6BJ365ZlQeK0QLMioKWzxJoiVZKK7aX533fU + y20SdUiGFQVSk8d7f/fpbh3YFlQkTuQoEAkoSF4x4Il2Bc1BuzrOIKeuLEBRw6TQLiTM5GCoG2dU + pMBl6t6A0iiD5BwKBRqEad7GpTYyX1mD14HvB/5QwccStFnsCjhTNDYsBsd1mPUfTKf+DA8a+AqP + mTGFjjwvgRXEJpEf5JAaTrVmVAwFGA89GY8WzAs9pnUJXmtgDTvUP13MLxaDYBZO8KoKQTvRraMx + tlLH1EAq1a7OIcETaoR+OBn4wSA8XIRBNJ5E/mw4Dg5/wLh9G6R1YjDwyswzg7T6Htrzwy7t5pCA + jhUrbOHw9iXROeXcJQnThonYkIJBDESuyEaq9dBqx1JcKv7EKErBbLsov6Y31FDl3TDYeFVY+wAb + UeCPgtlPmv0NP+bY9jJHrxYW6HJB9dr2qlwa+ytaUa7BdWrF15hXpes6GUPgqDjbncANYKz+nesY + hsgqECVOJErM0bkHk5HfCgolP2BGzyx4o12Vu2pgW257+Awk+6wuBTMGDWin822R+nv1VsuV2VBl + 8apZXnCGASf3Msd+VCgbz7bj2RPD/Upn2ky6voz9FxhGON6G4//XS939CovoMJhug+m3cLhtPY7C + 7Sj8Fh4bgN/dPYRj0IfTsBWs2PZNzYHY/av3D1+O2pc0TRWkyDcPhgATkLysx/9xd5M+wbRP8KJH + 
EPYKZn2Cw4dx1rRZ31pSqr4QTjQI8EgNfjhqwn364NZ0vidwrzan7FhWP49kaQsXWFJ+ay+YSJ3I + qBKwfWjUvMGO2+FsclGAodrpf4zi/WnYUvz9pDsiui/og0TYQaJQTCpmds8sQavujZ/G9CynKWjP + aujWCMMLLjdDfZPuqe5EblpKHDsPQR92iOV0CZbUHoG15YJHyxD04SuY2XpkVM8LFp8wsX5lJcdQ + 2N1DxLuOTnUmN5WsuxFSzHH1oEsO50C1HZNbBGP9yzk7ufzl9en1yeuj+enF/Hp+fv7nOeaHM6ax + IPhgkQE5Q/YWhli/hGkiBd8RZALGrVFiJPmNKUrOFORIBaTUiLjhY4wQ4DA4/ifm+2q1jJz6i4a9 + w+LvJ+KLScc2pExQfv9Rszk15a1wzzG65mz7mgroXpeFHbk+HAeTFy2O6yXnmdCrlbuv5pd7ydPQ + uIfbzzRe46rYQq41Xvs6arax/xRwu9J57WYVth95ARbqseRSndbRLHkJg1TR3T7AhSTHsm62zAtc + ZoV5HPSTPlKYdKTwtY5/Wc6/xP7fwYIZDgcRuXpHiyAiR1KuGZC3zGSyNOQC4lIBecVp+slWB4vD + ZUx5JrWJZv7M91ZMJEiEXjgK31cGj6viYV4fJLGwig7Iv2qS7/DP95X6Ba5sloNQDdmiCfK4BHKM + +eDlH3RHwkOXWDB2SRy9naPoCv8bTINxFantY7yBYc6MgqFUqYcwpra1DPctC38Pnw4zk/Mq7trO + G2vnUqyF3HxepDMlkxK/4HOR4mDn2CZvgTW2PqsKYbzkV7kZGNlTpaIxEL4nHrnaAKyRBaCz1qO1 + f+CNKsV3L8/IRUxFz3u7RXqHfpfPZxlc7LSBXGMGSSEZwuwgqu6r3tha5ZQJzQwMEYlYKp0tJVVJ + 34sH9o/3CLOWX5K4BhKyLlkCCKLBkE2DKoO0qGtkrRBZLtlkLM5IDlRoFNL6RSz/AQAA///sWV1L + 40AU/StBEBRMTNvUtgviFnFhH2QXhRVEKNPJxAbTSciHtbj973vuzDgbs4m7yCJ9KPgQMzP3K3PP + PfdWSYC7d3IuHMY5cFWEzmPMnAqpwvN1BmDCPilx4+iTejWLLvGV71VLSAbdLISkm+UwKzdFxwiP + YAm5BfbmxDJK86U646Q5oTXDakFgCaB+EPKIDIPwuHRQK6okdFiyYmty0cmYsq4qcKcdJp2agej3 + pEg850IW5LP1UMfgTlIQSBvFCBYYC4sXE0lgUSFArZbWfK55f4UsRN1ByMh7c11Wq5WXrliRqXxA + +oknL1tk6i5DyQwyZ0b3jJVlHs8rXKnZwbeb6fV39/rSRYlWWWqVZGleipzy4ICFy1geOgeHP3FR + kjL9hGv4J7sZ2hLfLG9dCNcL6ghXon9/UMTruaVEdpHVwBLJxoLfdcLvYhm+ZRnq6ymy1b6xi9X6 + 1hjEmPEF5bcpvfXq3cTwolouGZWrvb8hNUWdyGeav7O2EQk5Q74R4f0ang7DwXg0nvdGvhjwic8n + w3406UUn0GM3QcMb2wRdiWkYQgfqG4pduP5cMwSAQrLebGZ1rnion2qbginTUAUCFI+FvSA4GfHx + aDiYzPl85EN9xEQ05mfhqZKyP5ju97/gT59zl0waeHVd/arwqsJdIRBu3yPI9rJqnsScIuVmjBUU + KJxXFQQEEY/nSAovkxTzZmO8/RY3O+vtt7jZmW+7xcCoMC6yhK0NCTzH1QeHiaKK81glEOG5biM1 + wt2CBmLjRZWnmTi+BcTwxe9Mo8kQVm3qkgYzD2tnjkEXrgZd7WRg28nmgsXu3AD/Dl8+7Cbt8OUj + LN7hSwu+NGHAEjLLV2D1vc69Zxphm2cfCtOSmQF8U0on8+rEpU5K1m9Hvq55kN/FQQkQWhd863Jj + YdB1YmBJnpCPcZ5KzfL0q7Ayv/7of/8leo9p+f8mm1qYFQpNaNN+pGrKowZfeiChTX5+eTT15d0G + qF/Kjl/kHu0t2dOVKKqEBNecVfOZvJyW2nGa8dIMh1y3718f7r86bQ4oazebzS8AAAD//wMATn/G + nGwcAAA= headers: Atl-Request-Id: - - 35e52e6c-ed75-4f23-8ca3-7e161a79ca9c + - bd1888e8-37a6-4dbe-a30e-345772f7ac9e Atl-Traceid: - - 35e52e6ced754f238ca37e161a79ca9c + - bd1888e837a64dbea30e345772f7ac9e Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -522,7 +608,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:37 GMT + - Wed, 29 Jan 2025 20:45:08 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -532,7 +618,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=283,atl-edge-internal;dur=15,atl-edge-upstream;dur=268,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=378,atl-edge-internal;dur=14,atl-edge-upstream;dur=364,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -544,7 +630,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - a2bdf888f262427122a515c798491bdd + - 6d194404d9abd49996e32a1c3405dbb9 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -568,53 +654,53 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/16084 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/16608 response: body: string: !!binary | - 
H4sIAAAAAAAAA7xWbW/bNhD+K4Q+bZltvdhxXAHD0MXu1i3LgsRJgWZFwEhnmTVFqiQV22vz33ek - XtzGUYdkWFEgtXi89+ce3kcPNgUVqRd7CkQKCtJXDHiqe4LmoHs6WUJOe7IARQ2TQvcgZSYHQ3vJ - kooMuMx6d6A0yiA9h0KBBmHqu0mpjcwX1uBNGARhMFDwoQRt5tsCzhRNDEvA63nM+g/HwWSEHxr4 - Aj+XxhQ69v0UFpCYVL6XA2o41ZpRMRBgfPRkfFowP/KZ1iX4jYEVbFH/dD67mPfxLMAjF4L24o+e - xthKnVADmVTbKocUv1AjCqLDfhD2w2AeBXF4GA/Hg0k4/gHjtjacE4OBOzPPDNLq+2gviNq0648U - dKJYYQuHpy+JzinnPZIybZhIDCkYJEDkgqylWg2sdiLFpeJPjKIUzLaL8ht6Rw1V/h2Dte/C2gVY - i8JgGE5+0uxv+DHHtpc5erWwQJdzqle2V+Wtsb/iBeUael6l+Brzcro9b8kQOCpZbk/gDjDW4L7n - GYbIKhAlXixKzNF7AJNh0AgKJd9jRs8seK3tyu0a2JTbfnwGkl1Wl4IZgwa01/q2SP3d3dVyYdZU - WbxqlhecYcDpg8yxHw5lo8lmNHliuF/pTJNJ25dRcIRhRKNNNPp/vVTdd1hEh+F4E46/hcNN43EY - bYbRt/BYA/z+fh+OYRdOo0awYJurigOx+9fv9m8Om5s0yxRkyDd7Q4AJSF5W4/+4u8MuwbhLcNQh - iDoFky7Bi/04K9qsTi0puRfCi/thzZW2JYolVUof987soGC19VKWPJ0yXXC6rccJj9fU4NNTUfbT - R796EHZPgF+ZU3aw3c9jWdrSu1Df2AMmMi82qrS+0ai5QszY8a6roQCTtfzx2CNxeBQ0j8TDsrVU - 9lDQBaqoBVWhmFTMbJ9Zgkbdt0/iE94KltMMtG81dGOE4QGX64G+y3ZkeSLXDamOvP2xiVrMc3oL - lhYfGQzLJo+WIexCaDix9VhSPStYcsLE6pWVTKGw24tIGgQ5XK2drD0RUsxweaG3HM6B6gqVqv7l - nZ1c/vL69Obk9fHs9GJ2Mzs///Mc88Mp1VgQvDBfAjlD/heGWL+EaSIF3xLkEsatUWIk+Y0pSs4U - 5EgmpNSIuMFjnBLiOHnBJxYEhTyKvepNxN5h8Xcz9QVXYBsyJih/eKneveryOtxzjK7+tn3NBLS3 - y8IObReOx+GwwXG1Jj0TepVy++5+udk8DY07uP1MkxUumw3kGuOVr+N6n/tPATdLod/sZlGzJgiw - UE8kl+q0iuaWl9DPFDLWbiWSZCqrZsu8wHVYmMdBf9iSwtca+1CpJYwvy/mX2P07mDPD4SAm129p - EcbkWMoVA/KGGeRYQy4gKRWQV5xmn2x1sDhcJpQvpTbxJJgE/oKJFInQj0bRO2dw6oqHeb2XxMIq - PiD/qkm+wz/fO/ULXPosB6EaskUd5LQEMsVE8fAPuiVh0CMWjG0Sx29mKLrG//rjcOQitX1M1jDI - mVEwkCrzEcbUtpbhxmbh7+PVwdLk3MVd2bmydi7FSsj150U6UzItcQeYiQwHO8c2+XMsvvXpKoTx - kl/lum9kR5WK2kD0jvjkeg2wQhaA1lqH1u6CP3SKb1+ekYuEio77dg/1XwzbfD7L4GKrDeQaM0gL - yRBmB7E7d72xtcopE5oZGCASsVR6eSupSrtu7Nmf7hBmLb8kifwHAAD//+xZbUvjQBD+K4sgKJiY - tqltD8Qr4sF9kDsUThChbJKNDdduQl6Mcud/v2d2t3sx1/UOOcQPgh9qsjtvnXnmmalKJKAui4SQ - rBI1a01W1YDFSmdWisw6YO0yi5dsLbis8JLrE0YC3L2RkWA8joGrImF3GWcNSiUuHwoAE85JKTQ9 - 8DsWneNbvlVDJRl0tRSSMotxKzfHzAmPYAm5Bf7HMpnm5VrdYXlJaM3xtiKwBFB/F/KADIPwrGaa - gzC+avkDucgKrqxrKuQ045J1DMTEKMXKZ2eyIp+thzoGN5KCQNooRrDAWFhtTCSBVYMAbbW043PH - +wtUIfoOQkbem3Rp29bPW14Vqh5QfuLeL5aFymUoWUDmwuhe8BqkK2qQUou9L1fzy6/e5bmHFq2q - 1Cop8rIWJdXBHk/Wmdxne/s/kSirOv+ANPyT3Yxti++3NxftGYRd6KtLYLoiXkQE+0dddDdwvQgt - R+3fsCxDfUmKU20/6GIdgYsXB1YnYszjJdW3ab3d7t0H96pZrzm1q52/ITVFnchnXr6wtxEJOUG9 - EeH9nByPk9F0Mo0Gk0CM4lkQz8bDdDZIj6DHHoKGZ44JSol5kkAH+huaXfLwsWMIAIVkPTsO61rx - 0T/VMQVTZiQLBSgeTwZheDSJp5PxaBbF0SSA+pSLdBqfJMdKyu5ovjv8hD99z1tzaeDV8/Sjym8q - r0UgvKFPkO0XTbTKYoqUV3BeUaBwX3UQEER8PEVR+IWkmPdH67dvcX82f/sW92f7t24xMCrRY6oh - gadIfXCYNG3iOFMFRHiux0iNcNeggTh41pR5IQ6vgT3x8nel0W4Jb23pkgazUdvOHEMXroaucTK0 - 42Rp8P0dRl4tYd5h5DUsfoeRLTDShwEXUwstIbN8Be7c6qL8QUtw8zmAJXnNzQq/L8VFyQIXLgXD - 7QDn2gcFTgeclM161r/h4nIj5wtL8oS8y8pcapanHyWN+f1I//sv0bvL6/+32dTCrFBowpj2LVdb - ns0yFSWgTf6x+Wj6y4sNUL+1HW7kHuys+f2FqJoVCe44q/YzZT2vteO0JaYdDrlunz+9PHxy21xQ - 1j4+Pv4CAAD//wMAkplGsa4cAAA= + H4sIAAAAAAAAA7xWbW/bNhD+K4Q+bZlsvdhxHAHD0CXuli3LgsRJgWZFQEtniTVFqiQV20vz33eU + LLlxog7JsKJAavF47889vHsHVgUViRM5CkQCCpK3DHiiXUFz0K6OM8ipKwtQ1DAptAsJMzkY6sYZ + FSlwmbp3oDTKILmAQoEGYTZ341Ibmc+twdvA9wO/r+BTCdpM1wWcKxobFoPjOsz6D0Yjf4wfGvgc + PzNjCh15XgJziE0iP8o+NZxqzajoCzAeejIeLZgXekzrErzGwALWqH82nVxOe8E43MejKgTtRPeO + xthKHVMDqVTrOocEv1Aj9MP9nh/0wsNpGETD/cgf94fB4Q8Yt2+DtE4MBl6ZeWWQVt9De37Ypr35 + SEDHihW2cHj6huiccu6ShGnDRGxIwSAGIudkKdWib7VjKa4Uf2EUpWC2XZTf0jtqqPLuGCy9Kqxt + gBtR4A+C8U+a/Q0/5tj2MkevFhbockr1wvaqnBn7K5pTrsF1asUTzKvSdZ2MIXBUnK1P4Q4wVv/B + 
dQxDZBWIEicSJebo7MBk4DeCQsmPmNErC77RrspdNbApt/34AiTbrK4EMwYNaKf1bZH6e3VXy7lZ + UmXxqllecIYBJzuZYz8qlA3Hq+H4heF+pTNNJm1fhv4BhhEOV+Hw//VSd7/CIjoMRqtg9C0crhqP + g3A1CL+Fxw3AHx6ewjHowmnYJRg0gjlbXdfkiLC4+YAwSVMFKfLNkyHABCQv6/F/3up+l2DUJTjo + EISdgnGX4PBpnDVt1qeWlKoXwol6AX5Sgw9HTbgvH9yazrcE7tXmlB3L6ueRLG3hAkvK7+wBE6kT + GVUCtg+NmmvsuB3OTS4KMFQ7/c9RvD8KG4rfTbolol1BFyTCFhKFYlIxs35lCRp1b/gypmc5TUF7 + VkM3RhgecLns67t0S3WnctlQ4tB5CvqwhTCnM7CkZtG7+553AS/owlcwtvXIqJ4ULD5lYvHWSo6h + sLuHiNctnepMLitZeyKkmODqQWccLoBqOyb3CMb6l3N+evXLydnt6cnR5Oxycju5uPjzAvPDGdNY + ELwwzYCcI3sLQ6xfwjSRgq8JMgHj1igxkvzGFCXnCnKkAlJqRFz/OUYIcBgc/zPzfTWfRU79omHv + sPjbiXg06diGlAnKdy9tNqdNeSvcc4yuIQvsayqgvV0WduS6cBzsHzQ4rpecV0KvVm5fzcd7ycvQ + uIXbzzRe4KrYQK4xXvs62mxj/yngZqXzms0qbB55ARbqseRSndXRzHgJvVTR9TbAqSTHsm62zAtc + ZoV5HvT7LSl8rbG7Si1hPC7nX2L7b2/KDIe9iNy8p0UQkSMpFwzIO2YyWRpyCXGpgLzlNP1sq4PF + 4TKmPJPaRGN/7HtzJhIkQi8chB8qg8dV8TCvj5JYWEV75F81yXf45/tK/RJXNstBqIZssQnyuARy + jIni4R90TcJDl1gwtkkcvZug6Ab/642CYRWp7WO8hH7OjIK+VKmHMKa2tQz3LQt/D6/2M5PzKu7a + zrW1cyUWQi6/LNK5kkmJL/hEpDjYObbJm2Lxrc+qQhgv+VUue0Z2VKnYGAg/EI/cLAEWyALQWuvQ + 2l7wBpXi+zfn5DKmouO+3SK9Q7/N54sMLtfaQK4xg6SQDGG2F1XnVW9srXLKhGYG+ohELJXOZpKq + pOvGE/vHW4RZy29IXAMJWZfMAATRYMhygyqDtKhrZM0RWS5ZZizOSA5UaBTS+kYs/wEAAP//7Flt + S+NAEP4rQRAUTEzb1LYH4hXx4D7IHQoniFC2ycYGk92QF2vx+t99Zjfdi7HrHXKIH4R+SJvNvGXm + mWemSgLcvRFz7rAwBK7yyLlPmFOjVMJilQOYcE4IZBy9Uq9l0Tne8q0aCcmgqwUXlFkOM3IlJkZ4 + BEvILbA3JxGxLDL1jCMLQmuGuyWBJYD6josDMgzCk8pBr6jTyGHpkq3IRSdnyrq6RE47TDgtAzHv + CZ56zpkoyWfjoY7BjaAgkDaKESxoLCw3JpLAskaAtlra8rnl/QWqEH0HISPvm3RZLpeeXLIyV/WA + 8uMPXr7IVS5DyQwyZ43uGauqIpnXSKnZ3o+r6eVP9/LcRYtWVWqU5LKoeEF1sMeiLBH7zt7+byRK + WskvSMOX7GZoWny3vdloTy+w3TDUlDCxwmB/pxjZ48veGRgi2bnh2+itb1iGekmKU20/aGMdvo3V + +sYYxJiFC6rvLTy9C+5lnWWM2tXO35Caok7kUxZv7G1EQk5Qb0R4v0fHw2gwHo3nvZHPB+HEDyfD + fjzpxUfQYw5BwyvHOKXENIqgA/0NzS5afW0ZAkAhWa8Os7pWPPRPdUzBVDNQBRwUj0W9IDgahePR + cDCZh/ORD/Ux4/E4PImOlZTdwXS3/w0f/ZybMdHAq+vqn0qvLt0lAuH2PYJsL6/naRJSpNycsZIC + hedVBwFBxOUpisLLBcW8Oxh/fIu7k/XHt7g7mX90iwFFUVLmKVs1JPAUqQ8OE8d1GCaqgAjP9Rip + gewaNBAHz+pC5vzwGtgTLv5UGm2GcNeULmlo9mHbmWNgw9XANk4GZpwsGnz/hJF3S5hPGHkPiz9h + ZAuMdGHAxtQCQ8gMX4E7t7ooH2mF3Vz7sERWrFnAd6VYmZcVl2yLH7+/HfmszMzqmZWy2TjowPbE + wJA8Lu6TQgrN8vRPUd38+6O//lP0ZKYlPG4uG7h/Ay63/rg63Mg92MnYwwUv65QEt3SrdUlRTStt + x72s/t+GVQszQqEL4+IvqbZNagG31jte2uGQSmPIc2v7z8xtHlDhWa/XTwAAAP//AwCKUuOjbBwA + AA== headers: Atl-Request-Id: - - cc775fb0-3f66-47cb-8ec9-ae93f20b3486 + - 0f5b7b79-1cd3-4b6d-bf34-0a0e87a8f0c5 Atl-Traceid: - - cc775fb03f6647cb8ec9ae93f20b3486 + - 0f5b7b791cd34b6dbf340a0e87a8f0c5 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -622,7 +708,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:37 GMT + - Wed, 29 Jan 2025 20:45:09 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -632,7 +718,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=268,atl-edge-internal;dur=14,atl-edge-upstream;dur=255,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=247,atl-edge-internal;dur=13,atl-edge-upstream;dur=235,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -644,7 +730,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - e524439e1a7c2af3b7de1348526f4ee0 + - 697fb4618e6c4d62015bdd36a63d4548 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -653,7 +739,7 @@ interactions: code: 200 message: OK - 
request: - body: '{"issues": ["16084"]}' + body: '{"issues": ["16608"]}' headers: Accept: - application/json,*/*;q=0.9 @@ -670,21 +756,21 @@ interactions: User-Agent: - python-requests/2.32.3 method: POST - uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/16083/issue + uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/16607/issue response: body: string: '' headers: Atl-Request-Id: - - 3b990d3b-59fe-4d1f-8d58-518c8e72adc6 + - 4d8d8178-6de0-4e14-85a2-f909d716eb0f Atl-Traceid: - - 3b990d3b59fe4d1f8d58518c8e72adc6 + - 4d8d81786de04e1485a2f909d716eb0f Cache-Control: - no-cache, no-store, no-transform Content-Type: - text/html;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:38 GMT + - Wed, 29 Jan 2025 20:45:09 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -694,7 +780,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=475,atl-edge-internal;dur=12,atl-edge-upstream;dur=462,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=565,atl-edge-internal;dur=15,atl-edge-upstream;dur=551,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -704,7 +790,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - cdc7ff8ecfb25b053782770721534ea3 + - 025a1155b4e5baa6fb4758386409ef52 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -732,17 +818,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQTU/DMAyG/0uubJ2bfmzLDQ2JgWAgtbuAEEpTRwTSpGrSSdO0/04iJtgRuFn2 - 8/qxfCANd7gdNGHkzfvesdmsRYnCt/bdJtxr7pziJjHoyYS0yvWa7//BVzjslMAW3ccadb9C43H4 - 65KVNVKPaAT+LrnDwSlrApwCpAkkMK02l4/V+qH+mW7GrgkVYc8RmsAEXoITe233Xbiy3vfRttJ2 - bEOoGZVuvyKEhQCdz0/NK+4jSIEWU0insKwpZXnG8iAGuIAAh7wLf8ChVt05m0JNgaUFyxZJXtJv - VnQ3RtoA5mm5EBywQCnSbI48lzKXICRdZsWikWWOtJRAzwReR8OtGnh8IUo+an9nBY/tA9GniqB5 - 3VbkeH7YkzVxcn1fk+MnAAAA//8DAOboofMgAgAA + H4sIAAAAAAAAA5yQ30vDMBDH/5e8unVJf9AubzLBKTqFdi8TkTS5YDRNSpMOytj/boJD55v6dtx9 + vvc57oBa5mA7aETRq/e9o4uFAAncC/tmE+Y1c04xkxjwaIaEcr1m0z/4Goa94iDAva9B9yswHoa/ + LllZI/UIhsPvknsYnLImwARjkuAEz+vN5WO9fmi+p5uxa0OF6FOEZniGn4MTem2nLlzZTH20rbQd + RQi1o9LiM4JoCKRleWpeMR/BFKfFHJN5WjUkpzmhWZVgjC9wgEPehT/A0KjuB7tsUkLzIixM8qz6 + Ynl3Y6QNoCwKAlBmpBXAc84ZACNtJSRZlkySPAhkW0B6JvA6Gm7VwOILQbJR+zvLWWwfkD5VCMzL + tkbH88N21sTJ9X2Djh8AAAD//wMAgMpZMSACAAA= headers: Atl-Request-Id: - - 8d0c70f8-0a2e-4736-91e9-121d55a86699 + - d752dd56-7df1-4465-a712-0e3c8eba73cb Atl-Traceid: - - 8d0c70f80a2e473691e9121d55a86699 + - d752dd567df14465a7120e3c8eba73cb Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -750,7 +836,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:38 GMT + - Wed, 29 Jan 2025 20:45:10 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -760,7 +846,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=159,atl-edge-internal;dur=15,atl-edge-upstream;dur=145,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=182,atl-edge-internal;dur=14,atl-edge-upstream;dur=169,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -772,7 +858,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 5f6c2fb5aaf4ecec9f224eafe697b533 + - 820dc70ae834b2bdda75d5ebd71e7c56 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -823,9 +909,9 @@ interactions: 
Qd0rB+1tFIM2taXQGeyiiIe7tFsY5eAywDF9q+SoL8eQv0aDOgcK1t35GwAA//8DAFZhNg1MFgAA headers: Atl-Request-Id: - - f723eba0-d9db-4625-b8c6-c6b59f3d45c5 + - e7295481-3fe6-463d-9954-d68587c44c11 Atl-Traceid: - - f723eba0d9db4625b8c6c6b59f3d45c5 + - e72954813fe6463d9954d68587c44c11 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -833,7 +919,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:38 GMT + - Wed, 29 Jan 2025 20:45:10 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -843,7 +929,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=310,atl-edge-internal;dur=17,atl-edge-upstream;dur=293,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=255,atl-edge-internal;dur=17,atl-edge-upstream;dur=238,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -858,7 +944,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - b4ed380b258248c60d5856fbfdc325d6 + - 646b22644e43607aaf975a6bd07be075 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -869,11 +955,11 @@ interactions: - request: body: '{"fields": {"project": {"key": "NTEST"}, "issuetype": {"name": "Task"}, "summary": "Zap2: Cookie Without Secure Flag", "description": "\n\n\n\n\n\n*Title*: - [Zap2: Cookie Without Secure Flag|http://localhost:8080/finding/243]\n\n*Defect - Dojo link:* http://localhost:8080/finding/243 (243)\n\n*Severity:* Low\n\n\n*Due - Date:* May 10, 2025\n\n\n\n*CWE:* [CWE-614|https://cwe.mitre.org/data/definitions/614.html]\n\n\n\n*CVE:* + [Zap2: Cookie Without Secure Flag|http://localhost:8080/finding/233]\n\n*Defect + Dojo link:* http://localhost:8080/finding/233 (233)\n\n*Severity:* Low\n\n\n*Due + Date:* May 29, 2025\n\n\n\n*CWE:* [CWE-614|https://cwe.mitre.org/data/definitions/614.html]\n\n\n\n*CVE:* Unknown\n\n\n\n\n*Product/Engagement/Test:* [Security How-to|http://localhost:8080/product/2] - / [weekly engagement|http://localhost:8080/engagement/3] / [ZAP Scan|http://localhost:8080/test/93]\n\n\n\n\n\n\n\n\n*Systems/Endpoints*:\n\n* + / [weekly engagement|http://localhost:8080/engagement/3] / [ZAP Scan|http://localhost:8080/test/90]\n\n\n\n\n\n\n\n\n*Systems/Endpoints*:\n\n* https://mainsite.com/dashboard\n* https://mainsite.com\n\n\n\n\n\n\n\n*Description*:\nA cookie has been set without the secure flag, which means that the cookie can\nbe accessed via unencrypted connections.\n\n\n\n\n*Mitigation*:\nWhenever a cookie @@ -900,18 +986,18 @@ interactions: uri: https://defectdojo.atlassian.net/rest/api/2/issue response: body: - string: '{"id":"16085","key":"NTEST-1601","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16085"}' + string: '{"id":"16609","key":"NTEST-1826","self":"https://defectdojo.atlassian.net/rest/api/2/issue/16609"}' headers: Atl-Request-Id: - - b60554b4-448a-46a1-88df-cb7fa0ccdedc + - 02ef8b2c-291a-465c-9a3b-f2f2109609c7 Atl-Traceid: - - b60554b4448a46a188dfcb7fa0ccdedc + - 02ef8b2c291a465c9a3bf2f2109609c7 Cache-Control: - no-cache, no-store, no-transform Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:39 GMT + - Wed, 29 Jan 2025 20:45:11 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -921,7 +1007,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - 
atl-edge;dur=798,atl-edge-internal;dur=12,atl-edge-upstream;dur=785,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=778,atl-edge-internal;dur=12,atl-edge-upstream;dur=766,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -933,7 +1019,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 1bd187e9b37f649272ef4658b8a37b6a + - e82e2ff5a18a14506c7e6f3ac6c6353d X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -957,53 +1043,53 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1601 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/NTEST-1826 response: body: string: !!binary | - H4sIAAAAAAAAA7xWbU/rNhT+K1Y+baxtXvpCiTRNjJaNjTFEC0iXIWSS09S3jp3ZDm3H5b/vOGnS - SyH3CqZdVYLYx+f98ePz6MAqoyJ2QkeBiEFBfMyAx7olaAq6paM5pLQlM1DUMCl0C2JmUjC0Fc2p - SIDLpPUASqMM4gvIFGgQZnM2yrWR6cwavPM9z/c6Cv7OQZvpOoNzRSPDInBaDrP+/YE37ONCA5/h - cm5MpkPXjWEGkYnlR9mhhlOtGRUdAcZFT8alGXMDl2mdg1sZWMAa9c+m48m0jXs+bhUhaCd8dDTG - luuIGkikWpc5xLhCjcAL+m3Pb/veNPBCvx92DzqDwf4PGLdng7RODAZemHlnkFbfRXteUKe9WcSg - I8UyWzjcPSQ6pZy3SMy0YSIyJGMQAZEzspRq0bHakRSXir8xilww2y7K7+gDNVS5DwyWbhHWNsCN - yPe6/vAnzf6BH1Nse56iVwsLdDmlemF7ld8b+xXOKNfQckrFE8yr0G05c4bAUdF8fQoPgLF6Ty3H - MERWhihxQpFjjs4OTLpeJciU/IgZvbPgG+2i3EUDq3LbxWcg2WZ1KZgxaEA7tW+L1N+Ls1rOzJIq - i1fN0owzDDjeyRz7UaCsN1z1hm8M9wudqTKp+9Lz9jGMoLcKev+vl7L7BRbRoT9Y+YNv4XBVeewG - q27wLTxuAP709BKOfhNOgyZBtxLM2OqqJEeExc0twiRJFCTIN1+9BP1KgJlJnpe88PrRQZNgv0EQ - NAqGTYKDl+GUtFnuWlIqXggnbPu4pAYfjpJw335xSzrfErhbmlP2WhafRzK3hfMtKV/bDSYSJzQq - B2wfGjVX2HF7OcvgCnvWvmJRWcfHF3s2VlTWc5nzeMR0xul6c7ktJBRgspY/Xnskuv2D6pHYLVtN - ZbuCJlAFNagyxaRiZv3OIlbqbu9tbwVLaQLatRq6MsJwg8tlRz8kW7I8lcuKVHvOy2sT1JeA03uw - tGjxvzsRNEHXb0KoP7T1mFM9zlh0ysTi2EpGkNnpRURVz4pOLgtZvSOkGOPwQu85XADVJQ7U5ss5 - P7385eTs7vTkaHw2Gd+NLy7+vMD88JZqLAgemM6BnCP/C0OsX8I0kYKvCXIJ49YoMZL8xhQl5wpS - JBOSa8Rs5zVO8fE6Od4n5nmZnIVO+SZi77D42zv1jCuwDQkTlO8e2sxem/IWqOYYXUU32NdEQH06 - z+ylbcJxr9uvcFyOSe+EXqlcv7vPJ5u3oXELt59ptMBhs4JcZbz0dbSZ5/5TwNVQ6FazWVCNCQIs - 1CPJpToro7nnObQThRyxHYkkGcmy2TLNcBwW5nXQ95tIoV+Twpc6/rycf4ntb2/KDIe9kNx8oFkQ - kiMpFwzINTPIaoZMIMoVkGNOk0+2OlgcLiPK51KbcOgNPXfGRIxU6ga97m1hcFQUD/P6KImFVbhH - vqpJvsM/3xfqExz6LAehGrLFJshRDmSE+eDmH3RNfK9FLBjrJI6uxyi6wX/tgd8rIrV9jJbQSZlR - 0JEqcRHG1LaW4cRm4e/i0c7cpLyIu7RzZe1cioWQy8+LdK5knOMMMBYJXuwU2+ROscbWZ1EhjJf8 - KpdtIxuqlG0MBLfEJTdLgAWyANTWGrS2B9xuofjh8JxMIioazts51D0o+/D8tzdZawOpxgziTDKE - 2V5Y7Be9sbVKKROaGeggErFUen4vqYqbTrywP9oizFo+JJH8FwAA///sWVFr2zAQ/iuiUGihdp3E - aZJB6ULpYA9lo4UVSiHIttyYxbKx7Lpl63/fd5KipV7cjTJKHwp5cCLp7nS+++67iw4koC6LhJBM - iZq1NqpqwKIykZUisg5Yu8ziJcsFlwqL3OywEnDdGxkJxuMYuCoSdpdx1iBV4uqhBDBhn5TCFGR/ - w6JzvOVb3VSSQVdLISmyGHdyC/ScuBEsoWuB/7FMpkWV6zOsqAitOVYVgSWA+ruQB2QYhGc1M1Wf - 8VXLH+iKrOTaukYhphmXbMNAdIxSrHx2JhXd2d3Q+OBGkhNIG/kIFlgL1dpEEqgaOGirpRt33rj9 - BbIQdQcuo9vbcGnb1i9arkqdD0g/ce+Xy1LHMpQsIHNhdS94DZoTNQipxd6Xq/nlV+/y3EOJ1lnq - lJRFVYuK8mCPJ3km99ne/k8EyqouPiAM/2Q3Y1fiu+WtD+EGYd+Co7sEfXUFsNecjjhZZ2voqGhn - IXAyugt9LCNwLEO/PU22tm/s48WBM+ZJte5iNl4Aj5eU/KYiqCbPOZWrnb8hNXmdyGdRvbC2EQk5 - Qb4RZf6cHI+T0XQyjQaTQIziWRDPxsN0NkiPoMdtgoZntgkKiXmSQAfqG4pd8vBxwxAACsl6th02 - ueKjfuptGqZsSxYKUDyeDMLwaBJPJ+PRLIqjSQD1KRfpND5JjrWU3dF8d/gJH3POy7m08Op55ifl - N8pr4Qhv6BNk+2UTrbKYPOWVnCtyFM7rCgKCiMdTJIVfSvJ5t7V++xZ3e/O3b3G3t3/rFgOKEtMY - WhJ4itAHh0nTJo4znUCE56YRNUB2DRqIjWdNVZTi8BoQEy9/ZxrNlrDqUpc02InaduYY9uFq2NdO - hn0zitBhd2WB/x1fXi2S3vHlNSx+x5ct+NKFAUfIHH+B1bcm937QENw+B1BY1NyO8LtSeplXLy71 - 
UrLhduTrmwcFfRyUAGHrQtDHQUd9J0aO5Al5l1WFNETO/JQ09v8j8/WfvFfkRsKP9aOF+xfA78Zf - X4druQc7Ob+/EKpZkeAN3XpcUtXz2thxV9T/b0ZrhDmh0IV28Vuhp03rMSpNiWmGQyqdIU+tHT4x - 1x7Q7nl8fPwFAAD//wMA+gwBq64cAAA= + H4sIAAAAAAAAA7xWbU/jRhD+Kyt/aqkTvyTkgqWqopBraSlFEA7pKEKLPbGXrHd9u2uS9O7+e2f9 + Fg7wVVD1dBKX3dl5f+bxfHRgXVCROJGjQCSgIHnLgCfaFTQH7eo4g5y6sgBFDZNCu5Awk4OhbpxR + kQKXqXsPSqMMkjMoFGgQpnkbl9rIfGEN3gS+H/hDBR9K0Ga+KeBU0diwGBzXYdZ/MJn4e3jQwBd4 + zIwpdOR5CSwgNom8k0NqONWaUTEUYDz0ZDxaMC/0mNYleK2BJWxQ/2Q+O58Pgmk4wasqBO1EHx2N + sZU6pgZSqTZ1DgmeUCP0w92BHwzCvXkYROPdKAiGk9HuDxi3b4O0TgwGXpl5ZZBW30N7ftil3RwS + 0LFihS0c3u4TnVPOXZIwbZiIDSkYxEDkgqykWg6tdizFheIvjKIUzLaL8ht6Tw1V3j2DlVeFtQ2w + EQX+KJj+pNnf8GOObS9z9GphgS7nVC9tr8pbY39FC8o1uE6teIR5VbqukzEEjoqzzTHcA8bqf3Yd + wxBZBaLEiUSJOTqPYDLyW0Gh5B1m9MqCN9pVuasGtuW2hwcg2WZ1IZgxaEA7nW+L1N+rt1ouzIoq + i1fN8oIzDDh5lDn2o0LZeLoeT18Y7lc602bS9WXsv8EwwvE6HP+/XuruV1hEh8FkHUy+hcN163EU + rkfht/DYAPzz56dwDPpwGraCBVu/qzkQu391/fTlqH1J01RBinzzZAgwAcnLevyfd7fbJ5j0Cd70 + CMJewbRPsPc0zpo261tLStUXwokGAR6pwQ9HTbgvH9yazrcE7tXmlB3L6ueBLG3hAkvKl/aCidSJ + jCoB24dGzTvsuB3OJhcFGKqd/ucoPhyPW4p/nHRHRI8FfZAIO0gUiknFzOaVJWjVvfHLmJ7lNAXt + WQ3dGmF4weVqqO/TLdUdy1VLiWPnKejDDrGc3oIltWdgbbng2TIEffgKprYeGdWzgsXHTCzfWskh + FHb3EPGmo1OdyVUl626EFDNcPegthzOg2o7JRwRj/cs5Pb745ejk5vjoYHZyPruZnZ39eYb54Yxp + LAg+mGdATpG9hSHWL2GaSME3BJmAcWuUGEl+Y4qSUwU5UgEpNSJu+BwjBDgMjv+J+b5a3EVO/UXD + 3mHxtxPxxaRjG1ImKH/8qNmcmvJWuOcYXXO2fU0FdK/Lwo5cH45Ho6DFcb3kvBJ6tXL31fxyL3kZ + Grdw+5nGS1wVW8i1xmtfB8029p8Cblc6r92swvYjL8BCPZZcqpM6mltewiBVdLMNcC7JoaybLfMC + l1lhngf9bkcKX2vsY6WOML4s519i+29nzgyHnYhcvadFGJEDKZcMyCUzmSwNOYe4VEDecpp+stXB + 4nAZU55JbaKpP/W9BRMJEqEXjkbXlcHDqniY150kFlbRDvlXTfId/vm+Uj/Hlc1yEKohWzRBHpZA + DjFRvPyDbki45xILxi6Jg8sZiq7wv8EkGFeR2j7GKxjmzCgYSpV6CGNqW8tw37Lw9/DpMDM5r+Ku + 7byzdi7EUsjVwyKdKpmU+AWfiRQHO8c2eXMsvvVZVQjjJb/K1cDInioVjYHwmnjkagWwRBaAzlqP + 1vaBN6oU3++fkvOYip73dov09vwunwcZnG+0gVxjBkkhGcJsJ6ruq97YWuWUCc0MDBGJWCqd3Uqq + kr4XT+wfbhFmLe+TuAYSsi65BRBEgyGrBlUGaVHXyFogslyyylickRyo0Cik9YtY/gMAAP//7Flt + S+NAEP4rQRAUTEzb9NoeiFfEg/sgdyicIELZbjY2mGxCXqzF63+/Z3bjXhq73iGH+KHQD2mzmbfM + PPPMVEmAu7dyLhzGOXBVhM5DzJwapcKLVQ5gwjkpkXH0Sr2WRRd4y3dqJCSDrhdCUmY5zMjNMDHC + I1hCboG9ObGMsiJVzzhZQWjNcLcksARQ3wt5RIZBeFw56BV1EjosWbIVuejkTFlXl8hph0mnZSDm + PSkSzzmXJflsPNQxuJUUBNJGMYIFjYXls4kksKwRoK2WtnxueX+JKkTfQcjI+yZdlsully1Zmat6 + QPmJRy9f5CqXoWQGmbNG94xVVRHPa6TU7OD79fTqh3t14aJFqyo1SvKsqERBdXDAwjSWh87B4S8k + SlJln5GGL9nN0LT4bnuz0Z5eYLthqClhYoXB/l4xsqeXvdO3sdjAMMzuE4ZlqJekONX2gzbW4dtY + rW90bnTrLpjjBTC+oOLXHaGs05RRu9r7G1JT1Il8ZsUbexuRkFPUGxHeb+HJMByMR+N5b+SLAZ/4 + fDLsR5NeRCsOcwgaXjkmKCWmYQgd6G9oduHqS8sQAArJenWY1bXioX+qYwqmmoEqEKB4LOwFwacR + H4+Gg8mcz0c+1EdMRGN+Gp4oKfuD6X7/Kz76OTdlsoFX19U/lV5duksEwu17BNleXs+TmFOk3Jyx + kgKF51UHAUHE5RmKwsslxbw7GH98i7uT9ce3uDuZf3SLAUVhXOYJWzUk8AypDw4TRTXnsSogwnM9 + RmoguwENxMHzushycXwD7OGLP5VGmyHcNaVLGpp92HbmGNhwNbCNk4EZJ4sG33cw8m4Js4OR97B4 + ByNbYKQLAzamFhhCZvgL3LnTRflEK+zm2oclWcWaBXxXio2S+TZc8vvbAc62D/KtDlgpm41qEoRs + vTGw3jAkT8iHuMikJnL6p7Bu/v3RX/8pelmqJTw9XzZw/wZcbv1xdfws92gvZY+XoqwTEtzSrdYl + RTWttB0PWfX/NqxamBEKXRgXf2Zq26QWcGu946UdDqk0hmxa298wt3lAhWe9Xv8GAAD//wMA3LOe + qmwcAAA= headers: Atl-Request-Id: - - 8327f36e-ae57-4b74-9f11-389bdeed0e8e + - 1a809b0b-bf28-4392-94a0-d0a6dec81d9a Atl-Traceid: - - 8327f36eae574b749f11389bdeed0e8e + - 1a809b0bbf28439294a0d0a6dec81d9a Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -1011,7 +1097,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:40 GMT + - Wed, 29 Jan 2025 20:45:12 GMT Nel: - 
'{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1021,7 +1107,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=308,atl-edge-internal;dur=14,atl-edge-upstream;dur=294,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=276,atl-edge-internal;dur=15,atl-edge-upstream;dur=261,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1033,7 +1119,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - c26821749be615c24813df157fd1d96f + - 9fb4fed285f3e04d08489bccc85ade8d X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1057,53 +1143,53 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/2/issue/16085 + uri: https://defectdojo.atlassian.net/rest/api/2/issue/16609 response: body: string: !!binary | - H4sIAAAAAAAAA7xWa08jNxT9K9Z8ammSeeSxYaSqoiS0tJRGEFhp6QqZmZuJNx571vaQpLv8917P - KwthtoKqq0gwftz3ucf3kwObjIrYCR0FIgYF8QkDHuuOoCnojo6WkNKOzEBRw6TQHYiZScHQTrSk - IgEuk849KI1nEF9ApkCDMNXdKNdGpgur8Nb3PN/rKfiYgzbzbQYzRSPDInA6DrP2/ZE3HuJCA1/g - cmlMpkPXjWEBkYnlB9mjhlOtGRU9AcZFS8alGXMDl2mdg1srWMEW5c/n08t5F/d83Cpc0E74ydHo - W64jaiCRalvGEOMKJQIvGHY9v+t788AL/WHYP+yNRm9+QL8966Q1YtDxQs0rnbTyLurzgibsahGD - jhTLbOJw94jolHLeITHThonIkIxBBEQuyFqqVc9KR1JcKf5CL3LBbLkov6X31FDl3jNYu4VbOwer - I9/r++OfNPsbfkyx7HmKVi0s0OSc6pWtVX5n7Fe4oFxDxykFTzGuQrbjLBkCR0XL7RncA/rqPXQc - wxBZGaLECUWOMTpPYNL36oNMyQ8Y0SsTXkkX6S4KWKfbLr4AyS6qK8GMQQXaaWxbpP5e3NVyYdZU - WbxqlmacocPxk8ixHgXKBuPNYPxCd79SmTqSpi4D7w26EQw2weD/tVJWv8AiGvRHG3/0LQxuaov9 - YNMPvoXFCuAPD/tw9NtwGtQHC7a5LjkQq3/zfv9mv75Jk0RBgnyz1wQYgOR52f7Pmxu2HYzaDt60 - HAStB+O2g8N9P0vaLHctKRUvhBN2/YorbUkUi8qQPu3t2UbBbOulzHk8YTrjdFu1E26vqcGnp6Ts - l7d++SDsngC3VKdsYxefxzK3qS9cfWs3mEic0Kjc2kal5hoxY9u7yoYCDNbyx3OPRH94WD8ST9PW - UNnTgzZQBQ2oMsWkYmb7yhTU4u7gZW8FS2kC2rUSulbCcIPLdU/fJzuyPJPrmlQHzn7bBA3mOb0D - S4vPNIZlk2fT4Lch1B/bfCypnmYsOmNidWJPJpDZ6UVENYIKXK2Ls2ZHSDHF4YXecbgAqktUqurL - mZ1d/XJ6fnt2ejw9v5zeTi8u/rzA+LBLNSYEL8yXQGbI/8IQa5cwTaTgW4JcwrhVSowkvzFFyUxB - imRCco2I6z3HKT62k+N9Zp6XyUXolG8i1g6Tv+upR1yBZUiYoPzppWr2qtJb4J6jd9Xa1jUR0NzO - M9u0bTge9Ic1jssx6ZXQK4Wbd/fxZPMyNO7g9jONVjhs1pCrlZe2jqt57j85XA+Fbj2bBfWYIMBC - PZJcqvPSmzueQzdRyFi7kUiSiSyLLdMMx2Fhngf9sCGFrxX2qVBDGI/T+ZfY/Q7mzHA4CMnNO5oF - ITmWcsWAvGUGOdaQS4hyBeSE0+SzzQ4mh8uI8qXUJhx7Y89dMBEjEbrBoP++UDgpkodxfZDEwio8 - IP8qSb7DP98X4pc49FkOQjFki8rJSQ5kgoHi5h90S3yvQywYmyCO307x6Ab/dUf+oPDU1jFaQy9l - RkFPqsRFGFNbWoYTm4W/i1d7S5Pywu9Sz7XVcyVWQq6/TNJMyTjHGWAqEmzsFMvkzjH51maRIfSX - /CrXXSNbspRVCoL3xCU3a4AVsgA02lqkdhfcfiH47mhGLiMqWu7bOdQ9LOvw+HdwudUGUo0RxJlk - CLODsNgvamNzlVImNDPQQyRiqvTyTlIVt93Y0z/ZIcxqPiKR/AcAAP//7Flda9swFP0rolBooXad - xGmSQelC6WAPZaOFFUohyLbcmCWy8UfdsvW/71xJ0Vwv6kYZpQ+FPDiWdL9y77nnKiqRgLosEkKy - StSsNVlVAxYrnVkpMuuAtcssXrK14LLCItc7jAS4eyMjwXgcA1dFwu4yzhqUSlw+FAAm7JNSaHrg - dyw6x698q4ZKMuhqKSRlFuNWbo6ZEx7BEnIL/I9lMs3LtTrD8pLQmmO1IrAEUH8X8oAMg/CsZpqD - ML5q+QO5yAqurGsq5DTjknUMxMQoxcpnZ7Iin62HOgY3koJA2ihGsMBYWG1MJIFVgwBttbTjc8f7 - C1Qh+g5CRt6bdGnb1s9bXhWqHlB+4t4vloXKZShZQObC6F7wGqQrapBSi70vV/PLr97luYcWrarU - KinyshYl1cEeT9aZ3Gd7+z+RKKs6/4A0/JPdjG2L77c3F+0ZhK4FS24JE+sSYK8YGTHE3tbAxYND - y1H7JyzLUD+S4lTbN7pYR+DixYHViRjzeEn1bVpvt3v3wb1q1mtO7Wrnb0hNUSfymZcv7G1EQk5Q - b0R4PyfH42Q0nUyjwSQQo3gWxLPxMJ0N0iPosZug4ZltglJiniTQgf6GZpc8fOwYAkAhWc+Ow7pW - fPRPtU3BlBnJQgGKx5NBGB5N4ulkPJpFcTQJoD7lIp3GJ8mxkrI7mu8OP+Gjz3lrLg28ep5+VflN - 5bUIhDf0CbL9oolWWUyR8grOKwoUzqsOAoKIx1MUhV9Iinl/tH77Fvdn87dvcX+2f+sWA4oSPaYa 
- EniK1AeHSdMmjjNVQITneozUQHYNGoiNZ02ZF+LwGtgTL39XGt0tYdWWLmkwN2rbmWPowtXQNU6G - dpwsDb6/w8irJcw7jLyGxe8wsgVG+jDgYmqhJWSWr8CdW12UP+gS3DwHsCSvubnC70txUbLAhUvB - cDvAue6DAqcDTsrmopoEIVsXRs4FS/KEvMvKXGqWp18ljfn/SH/9l+jd5fX/u9nUwqxQaMKY9i1X - tzyby1SUgDb5x+bR9JcXG6D+azvcyD3YWfP7C1E1KxLccVbdz5T1vNaO0y0x3eGQ6/b908PDJ6fN - AWXt4+PjLwAAAP//AwBSgNGgrhwAAA== + H4sIAAAAAAAAA7xWbU/jRhD+Kyt/aqkTvyTkgqWqopBraSlFEDjpKEKLPbGXrHfd3TVJyvHfO2vH + DhfwnaDq6SQu3tl5f+bZeXBgWVCROJGjQCSgIHnPgCfaFTQH7eo4g5y6sgBFDZNCu5Awk4OhbpxR + kQKXqXsPSqMMkjMoFGgQZn03LrWR+cwavAl8P/D7Cv4uQZvpqoBTRWPDYnBch1n/wWjk7+GHBj7D + z8yYQkeel8AMYpPIO9mnhlOtGRV9AcZDT8ajBfNCj2ldgtcYmMMK9U+mk/NpLxiHIzyqQtBO9OBo + jK3UMTWQSrWqc0jwCzVCP9zt+UEv3JuGQTTcjYKgPxrs/oBx+zZI68Rg4JWZNwZp9T2054dt2uuP + BHSsWGELh6f7ROeUc5ckTBsmYkMKBjEQOSMLqeZ9qx1LcaH4K6MoBbPtovyG3lNDlXfPYOFVYW0C + XIsCfxCMf9LsH/gxx7aXOXq1sECXU6rntlflrbG/ohnlGlynVjzCvCpd18kYAkfF2eoY7gFj9R9d + xzBEVoEocSJRYo7OFkwGfpcgaASFkneY6hs7sdau+lB1tumD/XiCnk26F4IZgwa00/q2EP69uqvl + zCyoskDWLC84w4CTrZJgoyr4DcfL4fiV4X6hZU0mbcOG/jsMIxwuw+H/66WGRQVSdBiMlsHoWzhc + Nh4H4XIQfguPa+Q/Pj6HY9iF00EjmLHlZU2O2P2ra0RDmipIkW++OgS7jQATkLyseeHlq6MuwbsO + QdgpGHcJ9p6HU9NmfWpJqXohnKgX4Cc1+HDUhPv6+azpfEPgXm1O2emrfh7I0hYusKT8wR4wkTqR + USVgl9CoucTG2hlc56IAQ7VD/hLFh8NhQ/HbSXcRUdgS0baghUShmFTMrN5YgkbdG76O6VlOU9Ce + 1dCNEYYHXC76+j7dMNqxXDTMN3SeYztsIczpLVjusujdfs+7gBd04SsY23pkVE8KFh8zMX9vJYdQ + 2N1DxKuWNXUmF5WsPRFSTHD1oLcczoBqOw0PCMb6l3N6fPHL0cnN8dHB5OR8cjM5O/vzDPPDGdNY + ELwwzYCcIkkLQ6xfwjSRgq8IDjzj1igxkvzGFCWnCnKceFJqRFz/pcEPcBgc/xPzfTW7i5z6RcPe + YfE3E/HZpGMbUiYo37603pzW5a1wzzG6hiywr6mA9nZZ2JHrwvFgEDQ4rpecN0KvVm4fx8/3kteh + cQO3n2k8x1WxgVxjvPZ1sN7G/lPAzUrnNZtV2LzlAizUY8mlOqmjueUl9FJFV5sAp5IcyrrZMi9w + mRXmZdDvtqTwpcZuK7WE8Xk5/xKbfztTZjjsROTqIy3CiBxIOWdAPjCTydKQc4hLBeQ9p+knWx0s + Dpcx5ZnUJhr7Y9+bMZEgEXrhYHBdGTysiod53UliYRXtkK9qku/wz/eV+jmubJaDUA3ZYh3kYQnk + EBPFwz/oioR7LrFgbJM4+DBB0RX+1xsFwypS28d4Af2cGQV9qVIPYUxtaxmuVRb+Hl7tZybnVdy1 + nUtr50LMhVw8LdKpkkmJD/VEpDjYObbJm2Lxrc+qQhgv+VUuekZ2VKlYGwiviUeuFgBzZAForXVo + bS54g0rx4/4pOY+p6Lhvl0Vvz2/zeZLB+UobyDVmkBSSIcx2ouq86o2tVU6Z0MxAH5GIpdLZraQq + 6brxzP7hBmHW8j6JayAh65JbAEE0GLJYo8ogLeoaWTNElksWGYszkgMVGoW0vhHLfwEAAP//7Flt + S+NAEP4rQRAUTEzb9NoeiFfEg/sgdyicIELZbjY2mG5CXqzF63+/Z3bXvRqz3iGH+EHwQ8zuzs5M + Zp55ZqokwNxrORce4xy4KmLvLmVeg1Th5boAMGGflIg4+qTBlkZn+Mo3qiUkhS4XQlJkeczKzdEx + wiJoQmaBpHmpTPJyqc54eUlozbBaEVgCqG+FPCDFIDytPdSKJos9lq3Ymkz0Cqa0ayrEtMekt6Ug + +j0pssA7lRXZbC3UPriW5AS6jXwEDYyG1aOKJLBq4KBOTbds3rL+HFmIugOXkfUmXFarVZCvWFWo + fED6ifugWBQqlnHJDDJn5u4Zq+synTcIqdne98vpxQ//4sxHiVZZai8p8rIWJeXBHouXqdz39vZ/ + IVCyOv+MMHzOboa2xLfLm4v29CLXgiWrhIk1GvtbxcgentfOyBLJ1kJoZbQXXCwjtCxDfT1Ftro3 + ulhtaJWBjxlfUH538PQ2uFfNcsmoXO38DanJ60Q+8/KVtY1IyDHyjQjvt/hoGA/Go/G8NwrFgE9C + Phn2k0kvoRGH3YQbXtgmKCSmcYw7UN9Q7OL1ly1FACgk68WeVedKgPqptimYMn1TJEDxWNyLok8j + Ph4NB5M5n49CXJ8wkYz5cXykpOwOprv9r/jT5/wlkwZefV+/qoKm8ldwhN8PCLKDoplnKSdP+QVj + FTkK51UFAUHE4wmSIigk+bzd/75/jdsN9PvXuN2Av3eNAUVxWhUZWxsSeILQB4dJkobzVCUQ4blu + IzWQXYEGYuNpU+aFOLwCxPDFn0yjARBWberSDWYe1s0cIxeuRq52MnJNGCKL3aUB/g98ebNI+sCX + t9D4A1868KUNA5aQWb4CrW907j3QpNo8h7gwr5kZwLelOJmXE5eclKzfjXyueVDo4qAECJ0LoYuD + DlwnBpbkCXmXlrnULE+/ihvz64/+91+8d5fX/2+yqYVZobgJbdrPXE151OBLDyS0yg+Pj6a+vFoB + 9UvZ4aPcg50luz8XVZOR4C1j1XymrKe1NpxmvDTDIdPt+6eH+09OmwNK281m8xsAAP//AwB8FoVT + bBwAAA== headers: Atl-Request-Id: - - 5a5e0fe9-f715-4b70-b2dc-13c9fe76cd3f + - 8d665a4d-ac63-432b-b5ef-bca6b1880049 Atl-Traceid: - - 5a5e0fe9f7154b70b2dc13c9fe76cd3f + - 8d665a4dac63432bb5efbca6b1880049 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ 
-1111,7 +1197,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:40 GMT + - Wed, 29 Jan 2025 20:45:12 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1121,7 +1207,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=270,atl-edge-internal;dur=14,atl-edge-upstream;dur=256,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=290,atl-edge-internal;dur=14,atl-edge-upstream;dur=277,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1133,7 +1219,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - aa8de8f48550350c210f0edb3b2cec44 + - ccf764aeb6daa09afe8d5e767a89806c X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1142,7 +1228,7 @@ interactions: code: 200 message: OK - request: - body: '{"issues": ["16085"]}' + body: '{"issues": ["16609"]}' headers: Accept: - application/json,*/*;q=0.9 @@ -1159,21 +1245,21 @@ interactions: User-Agent: - python-requests/2.32.3 method: POST - uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/16083/issue + uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/16607/issue response: body: string: '' headers: Atl-Request-Id: - - 1df7a1d0-005f-40e7-a3c4-69ec938ecb4e + - f775a80a-f906-49d9-87a2-4e95b9927b64 Atl-Traceid: - - 1df7a1d0005f40e7a3c469ec938ecb4e + - f775a80af90649d987a24e95b9927b64 Cache-Control: - no-cache, no-store, no-transform Content-Type: - text/html;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:41 GMT + - Wed, 29 Jan 2025 20:45:13 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1183,7 +1269,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=470,atl-edge-internal;dur=15,atl-edge-upstream;dur=454,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=461,atl-edge-internal;dur=15,atl-edge-upstream;dur=444,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1193,7 +1279,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 95c3f0f1637d7fca3cb0f89edbd4d866 + - d1b63777dda0ac9ac859eccdef696c77 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1203,14 +1289,14 @@ interactions: message: No Content - request: body: '{"description": "Event test_added has occurred.", "title": "Test created - for Security How-to: weekly engagement: ZAP Scan", "user": null, "url_ui": "http://localhost:8080/test/93", - "url_api": "http://localhost:8080/api/v2/tests/93/", "product_type": {"name": + for Security How-to: weekly engagement: ZAP Scan", "user": null, "url_ui": "http://localhost:8080/test/90", + "url_api": "http://localhost:8080/api/v2/tests/90/", "product_type": {"name": "ebooks", "id": 2, "url_ui": "http://localhost:8080/product/type/2", "url_api": "http://localhost:8080/api/v2/product_types/2/"}, "product": {"name": "Security How-to", "id": 2, "url_ui": "http://localhost:8080/product/2", "url_api": "http://localhost:8080/api/v2/products/2/"}, "engagement": {"name": "weekly engagement", "id": 3, "url_ui": "http://localhost:8080/engagement/3", "url_api": "http://localhost:8080/api/v2/engagements/3/"}, "test": {"title": - null, "id": 93, "url_ui": "http://localhost:8080/test/93", "url_api": "http://localhost:8080/api/v2/tests/93/"}}' + null, "id": 90, "url_ui": "http://localhost:8080/test/90", "url_api": 
"http://localhost:8080/api/v2/tests/90/"}}' headers: Accept: - application/json @@ -1225,7 +1311,7 @@ interactions: Content-Type: - application/json User-Agent: - - DefectDojo-2.42.0 + - DefectDojo-2.42.3 X-DefectDojo-Event: - test_added X-DefectDojo-Instance: @@ -1239,13 +1325,13 @@ interactions: [\n \"Token xxx\"\n ],\n \"Connection\": [\n \"keep-alive\"\n \ ],\n \"Content-Length\": [\n \"828\"\n ],\n \"Content-Type\": [\n \"application/json\"\n ],\n \"Host\": [\n \"webhook.endpoint:8080\"\n - \ ],\n \"User-Agent\": [\n \"DefectDojo-2.42.0\"\n ],\n \"X-Defectdojo-Event\": + \ ],\n \"User-Agent\": [\n \"DefectDojo-2.42.3\"\n ],\n \"X-Defectdojo-Event\": [\n \"test_added\"\n ],\n \"X-Defectdojo-Instance\": [\n \"http://localhost:8080\"\n - \ ]\n },\n \"method\": \"POST\",\n \"origin\": \"172.18.0.2:56242\",\n + \ ]\n },\n \"method\": \"POST\",\n \"origin\": \"172.18.0.2:48442\",\n \ \"url\": \"http://webhook.endpoint:8080/post\",\n \"data\": \"{\\\"description\\\": \\\"Event test_added has occurred.\\\", \\\"title\\\": \\\"Test created for Security How-to: weekly engagement: ZAP Scan\\\", \\\"user\\\": null, \\\"url_ui\\\": - \\\"http://localhost:8080/test/93\\\", \\\"url_api\\\": \\\"http://localhost:8080/api/v2/tests/93/\\\", + \\\"http://localhost:8080/test/90\\\", \\\"url_api\\\": \\\"http://localhost:8080/api/v2/tests/90/\\\", \\\"product_type\\\": {\\\"name\\\": \\\"ebooks\\\", \\\"id\\\": 2, \\\"url_ui\\\": \\\"http://localhost:8080/product/type/2\\\", \\\"url_api\\\": \\\"http://localhost:8080/api/v2/product_types/2/\\\"}, \\\"product\\\": {\\\"name\\\": \\\"Security How-to\\\", \\\"id\\\": 2, \\\"url_ui\\\": @@ -1253,8 +1339,8 @@ interactions: \\\"engagement\\\": {\\\"name\\\": \\\"weekly engagement\\\", \\\"id\\\": 3, \\\"url_ui\\\": \\\"http://localhost:8080/engagement/3\\\", \\\"url_api\\\": \\\"http://localhost:8080/api/v2/engagements/3/\\\"}, \\\"test\\\": {\\\"title\\\": - null, \\\"id\\\": 93, \\\"url_ui\\\": \\\"http://localhost:8080/test/93\\\", - \\\"url_api\\\": \\\"http://localhost:8080/api/v2/tests/93/\\\"}}\",\n \"files\": + null, \\\"id\\\": 90, \\\"url_ui\\\": \\\"http://localhost:8080/test/90\\\", + \\\"url_api\\\": \\\"http://localhost:8080/api/v2/tests/90/\\\"}}\",\n \"files\": {},\n \"form\": {},\n \"json\": {\n \"description\": \"Event test_added has occurred.\",\n \"engagement\": {\n \"id\": 3,\n \"name\": \"weekly engagement\",\n \"url_api\": \"http://localhost:8080/api/v2/engagements/3/\",\n @@ -1264,10 +1350,10 @@ interactions: \ },\n \"product_type\": {\n \"id\": 2,\n \"name\": \"ebooks\",\n \ \"url_api\": \"http://localhost:8080/api/v2/product_types/2/\",\n \"url_ui\": \"http://localhost:8080/product/type/2\"\n },\n \"test\": {\n \"id\": - 93,\n \"title\": null,\n \"url_api\": \"http://localhost:8080/api/v2/tests/93/\",\n - \ \"url_ui\": \"http://localhost:8080/test/93\"\n },\n \"title\": + 90,\n \"title\": null,\n \"url_api\": \"http://localhost:8080/api/v2/tests/90/\",\n + \ \"url_ui\": \"http://localhost:8080/test/90\"\n },\n \"title\": \"Test created for Security How-to: weekly engagement: ZAP Scan\",\n \"url_api\": - \"http://localhost:8080/api/v2/tests/93/\",\n \"url_ui\": \"http://localhost:8080/test/93\",\n + \"http://localhost:8080/api/v2/tests/90/\",\n \"url_ui\": \"http://localhost:8080/test/90\",\n \ \"user\": null\n }\n}\n" headers: Access-Control-Allow-Credentials: @@ -1277,7 +1363,7 @@ interactions: Content-Type: - application/json; charset=utf-8 Date: - - Fri, 10 Jan 2025 19:15:41 GMT + - Wed, 29 Jan 2025 20:45:13 GMT Transfer-Encoding: - 
chunked status: @@ -1303,17 +1389,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQTU/DMAyG/0uubJ2TtfvIDQ2JgWAgtbuAEEpTRwTSpGrSSdO0/04iJtgRuFn2 - 8/qxfCC18LjtDeHkLYTO88mkQYUyNO7dZSIY4b0WNrMYyIg02ndG7P/Bl9jvtMQG/ccaTbdCG7D/ - 65KVs8oMaCX+LrnD3mtnI0wBaAYZjMvN5WO5fqh+ppuhrWNF+HOCRjCCl+jEzrh9G6+s9l2yrYwb - mhiqB22arwjhMcDm81PzSoQEMmDFGOgYlhVjPJ/yPIoBLiDCMe/jH7CvdHvOUqgYcFoktgD6zcr2 - xioXwZzOFlIAFqgknc5R5ErlCqRiy2mxqNUsRzZTwM4EwSTDre5FeiEqMZhw56RI7QMxp4qgfd2W - 5Hh+2JOzaXJ9X5HjJwAAAP//AwBnGWXmIAIAAA== + H4sIAAAAAAAAA5yQUUvDMBDHv0te3boka+mWN6ngFJ1Cu5eJSJpcMJompUkHY+y7m+Bg8019O+5+ + //sdd0At97AZDGLoPYTes9lMggIRpPtwGQ+Ge6+5zSwENEFS+97w/T/4GoadFiDBf67A9BXYAMNf + l1TOKjOCFfC75A4Gr52NMMGYZDjD03p9/VyvnprzdD12bawQe0nQBE/wa3RCb9y+i1c2+z7ZKuNG + GUPtqI38jiAWA7QsT80bHhJIMS2mmEzpoiE5ywmbLzKM8RWOcMz7+AcYGt39YJcNJSwvGJlnBT2z + oruzykVQFQUBKOeklSByITgAJ+1CKrIsuSJ5FKi2AHohCCYZ7vXA0wtB8dGEByd4ah+QOVUI7Num + RsfLw7bOpsntY4OOXwAAAP//AwBukVcqIAIAAA== headers: Atl-Request-Id: - - a6ccec44-fb6c-447a-9667-0063b8584791 + - 63c87549-8e2c-4cbf-8ecf-d90285decde7 Atl-Traceid: - - a6ccec44fb6c447a96670063b8584791 + - 63c875498e2c4cbf8ecfd90285decde7 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -1321,7 +1407,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:41 GMT + - Wed, 29 Jan 2025 20:45:13 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1331,7 +1417,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=161,atl-edge-internal;dur=19,atl-edge-upstream;dur=141,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=167,atl-edge-internal;dur=15,atl-edge-upstream;dur=152,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1343,7 +1429,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - fb437c4f1514de63a4c0cae70893f4ef + - 33a508a0dfc8263889715abe736ba6b0 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1367,63 +1453,63 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/NTEST-1599/issue + uri: https://defectdojo.atlassian.net/rest/agile/1.0/epic/NTEST-1824/issue response: body: string: !!binary | - H4sIAAAAAAAAA7xXWW/jNhD+K4Rea1uHj3UEFMU28RZp03ThONuHoghoaSxzLYkCScV2g/z3zlCH - HcdewGm6b+IMOfPNPXpyYFPwPHZCR0dLyHgn5xlop+Now5X5aJzQ6zgZ30xBl6nRTjjEs5GGp04Y - dByhdYnXw7+edoJkAYobIXPdeQSl8QPiKRQKNOSmZkAsTAaGd6IlzxNIZdJRkMegIP4kII0JgSBh - /sgbDwgOpAs8Lo0pdOi6MSwgMrH8KnvcpFxrwfNeDsZFLcbliUjB9Xuea/G5jZAVbFHG7WxyN+si - zUPSotIWPpHBptQRN5BIta1wxXjCF4EXDLue3/W9WeCF/jDsj3pjf/SD51sZVonZFmDFnAW0EG5Q - gaT3Lsrzgtb0+hCDjpQoyHFI/ch0xtO0w2KhjcgjwwoBETC5YGupVj16Hcn8XqVnoihzQeHi6QN/ - 5Bh891HA2rWwdgBrlu/1/fFPWvwDP2YYyjJDrZQ4qHLG9YriVc4NfYULnmroONXDa7TLvu04S4FZ - oqLl9gYeAbF6zx2n4JgEhrzYxr5/GLbhxcXZ6bDzsk0FkrkX9zLLuCINa4BVumWQJzyBjJDYMsCs - eGNgq8c2qpQnLwN5XqB2Dv6ZRyusl/0sIeGVrss6ff8T4KYG3CYVgyYGOayRFMlUqtsKzTwtoZso - vt3LAMmupPNM8VRCKmHeiqZ57lLAzvCVyDB+2qUXuhEikFClak8/kvNqsL836Wud2Xee37ucbYPY - D9RhOc9FwkoNimmDPmdmyQ3LAWLNjGRzYHMlV5CzWK7zHrtUgMGJ2XzLfhWKszu5MGusGtbFCyyX - hlFnZVKxGFIwcGY7eOG51g7tQiGil36bIIWy7qDMD6vaf6Y8MAJnSmFLOy+xdzkao7I7RSWantmK - fEAv9b2GUSj5FYG+MQ716+NR2OspO6Puc2EMCqDpU7+eof2/2bu6djVZLbIiFQg4Puhv6GbbKwbj - zWB8Jtxv9N/Gkrb7DrwPCCMYbILB/6ul6vF24qBCf7TxR99D4abR2A82/eB7aKzHGGXrYTr6p/I0 - aBgLsflSbTq0Cv39+ma/ucmTREGCJfyqJNAAmZZVVziubniKMTrF+HCCEZxkjE8xLl7jrJajikqr - 
h136nLDr45Eb3CbfOjarSb1b09xKnKKytJ+XsiTH+dSr/ySCyBMnNKqEpn2TNCWiyp1Pr2iEDK/q - pSzT+EroIsUBVpUykhGW+YI5Q+Vde6Nqu87xVXD4wWtWwUO3ta3skNEmFbXWZuOxy8k77Dvt+kvC - 25WnbnJHt5xv7kB23hPGCpg9PgTtqLx8yS5KVaRA3Bg3/51XDz3QVs97bQm04b911u1tCalcvxx1 - N5KWHjs9BmTWoSFtcad8DtT/j3QAaptHE8E/VYr+mPyx5Jom7Y3IV5+IcwUF/SblUZOuNonXltdS - cplPcAvg8xSmwHVVAqr+cj7f3P9yfftwc305ub2bPEym0z+maB+2I40OwQuzJbDPdhNnN/8CAAD/ - /+xYa0/bSBT9KyOkSoCw47zIQ6raCKh2P6BFTbtIQIUm43Hs1p6xPPa6kfjxPXdsnODGdLUrVXxA - REDsmTv3ee65g3NZZJhWyAmAZhSTUCImln1cZTIBalr6Ytx94NkHbhx4D5HnpXoyP6ioAGIH52/B - 4wkoIgzrSPG4vageJWv32sSLoV39neK6Bmt6XF2khE77C3biTqeTx4J95ff/g98LnaSocUX3AnuS - ftyg33OBbW/qarfjps6e+vlObX+OP0V5LI/n7PaGp/05O9P6WyTZdZQD6XO2lKIAV/4Q8/UDuQ1e - i7XgcahNPp96U68XRMpHM+kNRoMvVuC59SoM/qoZ5dv8mP1yJzvEryO7fQkaTOCEbYCRWsnzQrJz - eAAPL/mG9b0TRlnaGHF2fYFXt/jjnPZHVlMKsCilm0R5Jl2drXvIb04xj8BZqS56WOqGeRJbvSs5 - f5Ocz+qbwuyw46SrTPsFWNBFA/K9T4gKnWk9BH3ZH7p0ct3hpbQWMPjCeuz2p5bRsWu7oDe0G28W - V2wpuOpYT0y8Nxs29uxYsNyYXCYGFvipxiRhjuf2uY0N+SrhkTIR5h+kKFxlwpXmmd+14if559sM - I8kLJqpEAhxjJMM0ZmTOyjqrcuClqTIrQGadsDKMRMgSyZWpxjlaUUuAuXcKUx0XAoCLSe6fiLMC - NSSyTUqTHSBDyYqkuDsaXSLKa3t5Rgpdh1JRZjHeyNUqJ4ugCZkFBswiFegssXtoIASMc7w1hKJA - cMyUJ6QYhGNgrJgQ43HJN2QiS7nVrjDIacYV21Ew5NAwdtmFMmRzY2HlgztFTqDTyEfQoNbQPKpI - Ak0BB+3VdMfmHes/ogrRkOAysr5Ol7IsXV1yk9p6QPnJ724apjaXccg9ZN7XZ9/zHNRvVSCl7g// - ul4sr5zlpYPebau0OSTVWS4zqoND7ieROmKHRw9IlDjXc6QhQWOrvXXxu3EXWR93EgGa5J5wPgLN - PEM3sOyW+Gp7S9chXteLUUPj2zsatWwULRvbv7CLr3hdo4PXnPmkz7fbACLERWgZp+0llEXrnQF7 - S1F/Be0UJuLrFSn9D12S6Mw7FChNGX/6b8f+cDqZrvoTTw7FzBOz8SCY9YNTnNMswgnPLJOUQwvf - xxnolGib/ub9jiJAIJL17A1CVVwuOrFdZnGtnmJHEmSR+/3R6HQippPxcLYSq4mH4wMug6l457+1 - Ut4MF28GH/Cp9jkJVzUeO071yLiFcUo4whm4hPFuWqziSJCnnJRzQ47CfttyEBX8e4YqclNFPm/f - Rrx8jdvXGS9f4/Z1yEvXmIbAarqu6eQZUh+kJwgKISJbQNQAqtm9QrwbOzQeXBSZTmXvBlgkwm2l - 0XUc3jalSyfUd5D7OeioC6FHXTxz1AymWd0QXmHktyXMK4z8Do3/PYz8AAAA//8aHC6mSzGCXgzA - u5voEiYY7Rmgd9IhmbIaNG8AZSMmykFzm+im4GqiGeAsl3C1HA2MsJd8OFtqSD4DAAAA//9C1YGz - CQf3MpqEMS4dxvBGX2peWWZRfh6kYQcRSimFTqxDuMSEXll+CfWGkyGGwQ0F2gTs14Xlg8eL4KOV - +bkQJ1fDmND6hWwHgBch6MPMRV1UgeRZ8EgPdOVFLWRgHTQaBPI6XBxVsxGKbqgGsGtBk2+0XJhh - qkRqeGBZmAEyBG1hhqES2QszLPXMzOCDe9SdyR1dmGE1ujADw1REAA+hgVtqTbmMLswYXZhBejzA - ZuGxxsLowgyibBldmDG6MAMsAW9oI9yJZ2EG+kIIcJVO4eIIqi72MCRtsQe+hRmWesamlqMLM+DS - pCzMAAAAAP//gpby1GkljC7MGNwLM9IGx8IMEwM9c3N4hh1t31PQvh/cCzOM8M3e4VgMgFheAVkQ - QM7CDGPQwgzj0YUZ6LpGF2aMLswYXZiBsyEA6smNLswA94gILszAW7SDoml0YQad584wRyMGv4vR - hzMGv4vRh0MGu4tBnUCaz6iOLswYssUIAAAA///CSDCjxQg9XEz3YgQAAAD//6LQxXQpRtCLAXh3 - E10CvkphdGHGUFyYYTiACzNM6b4wI7YWAAAA//8DAAnZ2XF8QwAA + H4sIAAAAAAAAA7xXbW8iNxD+K9Z+LbAvkBxZqaquCVelTdMTIdcPVRWZ3QF87Nor2xugUf57Z7wv + EAInkab3DXvWM8/MM288ebAuuEy92DPJAnLekTwH43U8Y7m2H60XBx0v5+sxmDKzxovP8GyV5ZkX + Rx1PGFPi5/FfT1tFqgDNrVDSdB5BG/wB6RgKDQakrQWQCpuD5Z1kweUcMjXvaJApaEg/CchSQiBI + WXh+HgwJDmQzPC6sLUzs+ynMILGp+qp63GbcGMFlT4L10Yr1+Vxk4Ie9wHf4/EbJEjao43Yyupt0 + w2F0hlezylr8RA7b0iTcwlzpTYUrxRO+iILorBuE3ehiEoXx4CwOhr1BePFDEAYBASUjdlOAU3MS + 0EL4UQWS3vuoL4ha1+tDCibRoqDA4e1HZnKeZR2WCmOFTCwrBCTA1IytlF726HWi5L3OTkRRSkF0 + 8eyBP3Ik338UsPIdrC3AWhQG/XD4kxH/wI85UlnmaJUSB01OuFkSX+XU0q94xjMDHa96eI1+ubcd + byEwS3Sy2NzAIyDW4LnjFRyTwFIUW+4/vKZtcHI6bKPsUoF07vBe5jnXZGEFsMw2DOSczyEnJK4M + MCveSGz12LFKefKSyNOI2gb4Z54ssV52s4SUV7Yu6/T9T4CbGvCbVIwaDiSs8CpRmdK3FZppVkJ3 + rvlmJwMUu1LeM/GphdLCvhVN89zvnxYrkSN/xqcXplEi8KJK1Z55pODVYH9v0tcFs+89v3c5uwax + S9R+OU/FnJUGNDMWY87sglsmAVLDrGJTYFOtliBZqlayxy41IDkpm27Yr0JzdqdmdoVVw7r4AZPK + MuqsTGmWQgYWTmwHLyLX+mF8KETyMm4jvKGs2yvz/aoOnykPrMCZUrjSliX2Ls8gK9tTUqLruavI + 
B4xSP2gEhVZfEegbeahfH2Zhp6dsnbqXwlpUQNOnfj1B/39z35o61OS1yItMIOB0r79hmF2vGAzX + g+GJcL/RfxtP2u47cD0sGqyxGf6vVqoe7yYOGgzP1+H59zC4biz2o3U/+h4W6zFG2bqfjuGxPI0a + wUysv1SbDq1Cf7/+st98yedzDXMs4VclgQ6orKy6wmFzZ8cE58cEH44IoqOC4THBxWuc1XJU3dLq + 4ZY+L+6GdQslSrRIKpeeXt1RoWC0zUKVWXolTJHhEKnKCa+RW/sFeaMSq01wixvqW0dxNf23q59f + qdNU6u7npSqJDAf+T7oQcu7FVpeEJqnarnd4FQzOo2YV3A9b28r2BW1SUWttNh63nLzDvtOuv6S8 + XXnqJndwy/nmDuTmPWGsgLnjQxi1s/LypbwodZEBSVNc/beU7oegLZ/3WhMoUG8ddjtrQqZWL2fd + jaKtx42PAbm170hb3RmfAg2AAy2A+ubBTAiP1WI4pHgsuKFReyPk8hNJrqCg/0kyaWrFVdDKydob + qeQI1wA+zWAM3FT1p+tf3ueb+1+ubx9uri9Ht3ejh9F4/McY/cN+ZDAg+MFkAeyzW8XZvwAAAP// + 7FhrT9tIFP0rI6RKgLDjvMhDqtoIqHY/oEVNu0hAhSbjcezWnrE89rqR+PE9d2yc4MZ0tStVfEBE + QOyZ+5p7zz13SC+LDNMKSQHUjGISSszE0o+rTCaATctfjLsPPfsAjgPvIfK8LFjNDyougLND8Lfo + 8QQVcQzrSPG4vaieJevw2sSLYV39nc51Ddr0uLpICZ72V+zMPZ1OHiv2leD/D4IvdJKixhVdDOxJ + +nEDf88dbHtTA41Pw3mntj/Hn6I8lsdzdnvD0/6cnWn9LZLsOsrRTXK2lKIAJ/4Q8/UDRQfBibXg + cahNPp96U68XRMoHwPcGw8EXK/DcBg9+fdWM0mp+zH65kx3i15HdvgTdJQzCNqBFbeR5Idk5HMXD + S75hg9kJo2RsnDi7vsCrW/xxTvsjaymdoyilm0R5Jl2drXtIY05HG4GbUvr3sNQN8yS2dldy/iY5 + n9U3hRlhJ0hXmfYLsJ2LBsx7nxB80mkjBHvZH7p0ct0RpbQWMPjCeuz2p9bQsWu7oDe0G28WV2wp + uOpYT4y7N/Maf3Y8WG5MLhMDD/xUY2Iwx3P73J4NxSrhkTIR5hxkIkJlwpXmmd+14if559sMI8kL + JqpEAupi9MLUZWTOyjqrcsCiqTIrQGadsDKMRMgSyZWpxjZaUUuAu3cK0xsXAriKie2fiLMCpSKy + TUoTHJBByYoIuTsWXeKU1/aSjAy6DqWizGK8katVTh7BEnILTJdFKtBZYvfQ4Ae05nhrCCwB1Jgd + T8gwCMdgWLEtxuOSb8hFlnJrXWGQ04wrtmNgyGFh7LILZcjnxsMqBneKgkDaKEawoLbQPJpIAk2B + AO21dMfnHe8/ogrRdxAy8r5Ol7IsXV1yk9p6QPnJ724apjaXoeQeMu9r3fc8B71cFUip+8O/rhfL + K2d56aBF2yptlKQ6y2VGdXDI/SRSR+zw6AGJEud6jjQkBGxhUtPi2+2ti+CNu9j6uJMI0Cj3hPQR + aOYZuoGlokSW21u6lHhdL0YNj2/vaMyyx2vZ2P6FXXzF65odvEbnkz7fbgM4Oi5CSzltL6H0Wu9M + 2FuO+ivMp/Mjwl6R0v/QJYnOvEPl0kjwp/927A+nk+mqP/HkUMw8MRsPglk/OIWeZhE0PLNMUnIt + fB860CnRNv3N+x1DAE0k69krhKrqXHRiu8wCXj3GjiTIIvf7o9HpREwn4+FsJVYTD+oDLoOpeOe/ + tVLeDBdvBh/wqfY5CVc1UDtO9ci4hXFKBMIZuAT+blqs4khQpJyUc0OBwn7bi3Aq+PcM5eWmimLe + vo54+Ra37zNevsXt+5CXbjENgdVoX9PJM6Q+2FAQFEJEtoCoM1SDdoV4N3ZoPLgoMp3K3g2wSITb + SqP7OLxtSpc01JeQ+znoqAuhR133OqNmMM3qTvEKI78tYV5h5HdY/K9h5AcAAAD//xokLqZLMYJe + DOBq85mYYLRngN5Jh2TKatDEAZSNmCkHTW6im4KriWaAs1zC1XI0MMJe8uFsqSF8BgAAAP//QtOB + swkH9zKahDEuHcbwRl9qXllmUX4epGEHEUophc6sQ7jEhF5Zfgn1xn4hhsENBdoE7PCF5YPHi+Cj + lfm5ECdXw5jQ+oVsB4BXIejDzEVdVYHkWfBID3TpRS1kZB00GgTyOlwcVbMRim6oBrBrQbNvtFyZ + YalEanhgWZkBMgRtyBtU2YGTEiTKSViZYWioZ2ZsOroyY3RlBl4rR/rALbWmXEZXZoyuzCA9HmDT + 8FhjYXRlBlG2jK7MGF2ZAZaAN7QR7hzKKzPMqLYyA9gUNDIxGV2ZgZAnfmUGAAAA//+CF/PUaSaM + rswY3CszsgbHygxDIz0LY4vRlRlUaOAPipUZRvhm6XCsBkCsrzAme2WGMWhlhvHoygx0XaMrM0ZX + ZoyuzMDVWh9dmQGt59GrAZJWZuAt80HxN7oyg86TZ5jDEYPfxejjGYPfxejjIYPdxaBOIM2nVEdX + ZgzRYgQAAAD//8KSYEaLEXq4mM7FCAAAAP//otjFdClG0IsBXG2+0ZUZCAm6rcygzUIJS5IXSlBx + hYgZUStEKF2ZEVsLAAAA//8DAB+M+iV+QwAA headers: Atl-Request-Id: - - b024bf78-55af-4311-9eec-06e433041949 + - 1ab0c683-0f7f-4e43-8ad5-da93b3a539b1 Atl-Traceid: - - b024bf7855af43119eec06e433041949 + - 1ab0c6830f7f4e438ad5da93b3a539b1 Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -1431,7 +1517,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:42 GMT + - Wed, 29 Jan 2025 20:45:14 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1441,7 +1527,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=409,atl-edge-internal;dur=13,atl-edge-upstream;dur=396,atl-edge-pop;desc="aws-us-east-1" + - 
atl-edge;dur=499,atl-edge-internal;dur=16,atl-edge-upstream;dur=483,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1453,7 +1539,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - 988788be61269a4b2bfb9852d324f6fc + - f8670a958f7cb0e80a132e5b4738e9e4 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1481,17 +1567,17 @@ interactions: response: body: string: !!binary | - H4sIAAAAAAAAA5yQTU/DMAyG/0uubJ2TpfvIDQ2JgWAgtbuAEEpTRwTSpGrSSdO0/04qJtgRuFn2 - 8/qxfCCVDLjtLBHkLcY2iMmkRo0q1v7dZzJaGYKRLnMYyYjUJrRW7v/BF9jtjMIaw8cabbtCF7H7 - 65KVd9r26BT+LrnDLhjvEkwBaAYZjIvN5WOxfih/ppu+qVJFxPMAjWAEL8mJrfX7Jl1Z7tvBtrK+ - r1Oo6o2tvyJEpACbz0/NKxkHkAHLx0DHsCwZE3wqeBIDXECCUz6kP2BXmuacpVAyEDQXnGVswb9Z - 1dw47RPI6WyhJGCOWtHpHCXXmmtQmi2n+aLSM45spoGdCaIdDLemk8MLUcvexjuv5NA+EHuqCLrX - bUGO54c9eTdMru9LcvwEAAD//wMA3LYyZyACAAA= + H4sIAAAAAAAAA5yQwUoDMRCG3yVX222SZtmam1SwilZhtxdFJJtMMJpNlk22UErf3QSL1pt6G2a+ + f75h9qgVATaDRRy9xtgHPpsp0CCj8m++ENGKEIxwhYOIJkiZ0Fux+wdfw7A1EhSE9xXYfgkuwvDX + JUvvtB3BSfhdcgtDMN4lmGBMClzgab2+eKhX9833dD12baoQf8rQBE/wc3JCb/2uS1c2uz7bltaP + KoXa0Vj1GUE8BWhVHZuXImaQYlpOMZnSRUMYZ4TPFwXG+AwnOOVD+gMMjel+sOcNJZyVnLCCYfrF + yu7aaZ9AXZYEoJqTVoFkUgoAQdqF0uS8EpqwJNBtCfREEG023JhB5BeCFqONt16K3N4je6wQuJdN + jQ6nhz16lydXdw06fAAAAP//AwBRCcFyIAIAAA== headers: Atl-Request-Id: - - 56a92ebb-0d80-4a14-be44-24d868aa033f + - 841216f7-032d-4cd3-ae33-004340c3ab0d Atl-Traceid: - - 56a92ebb0d804a14be4424d868aa033f + - 841216f7032d4cd3ae33004340c3ab0d Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -1499,7 +1585,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:42 GMT + - Wed, 29 Jan 2025 20:45:14 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1509,7 +1595,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=164,atl-edge-internal;dur=22,atl-edge-upstream;dur=152,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=151,atl-edge-internal;dur=14,atl-edge-upstream;dur=138,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1521,7 +1607,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: - - b77ef7609747894811d0ac7ba230183c + - 353f3bf5f2ae590f0c1aee49961249e2 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1628,9 +1714,9 @@ interactions: NNXWxgIAAAD//wMADdrZt2VyAAA= headers: Atl-Request-Id: - - e9d6aa9b-29d2-42a0-bbea-f5bfd047a96d + - d6ed07fd-4a02-4f87-b6aa-be078e2352fe Atl-Traceid: - - e9d6aa9b29d242a0bbeaf5bfd047a96d + - d6ed07fd4a024f87b6aabe078e2352fe Cache-Control: - no-cache, no-store, no-transform Content-Encoding: @@ -1638,7 +1724,7 @@ interactions: Content-Type: - application/json;charset=UTF-8 Date: - - Fri, 10 Jan 2025 19:15:42 GMT + - Wed, 29 Jan 2025 20:45:14 GMT Nel: - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600, "report_to": "endpoint-1"}' @@ -1648,7 +1734,7 @@ interactions: Server: - AtlassianEdge Server-Timing: - - atl-edge;dur=216,atl-edge-internal;dur=13,atl-edge-upstream;dur=203,atl-edge-pop;desc="aws-us-east-1" + - atl-edge;dur=240,atl-edge-internal;dur=15,atl-edge-upstream;dur=226,atl-edge-pop;desc="aws-us-east-1" Strict-Transport-Security: - max-age=63072000; includeSubDomains; preload Timing-Allow-Origin: @@ -1660,7 +1746,7 @@ interactions: X-Aaccountid: - 5d3878b170e3c90c952f91f6 X-Arequestid: 
- - 29b795f14b715cfc907233c2771709e4 + - 6d01592ec71d5dd2e52ff8c9ee34d256 X-Content-Type-Options: - nosniff X-Xss-Protection: @@ -1684,57 +1770,56 @@ interactions: User-Agent: - python-requests/2.32.3 method: GET - uri: https://defectdojo.atlassian.net/rest/api/latest/issue/NTEST-1600 + uri: https://defectdojo.atlassian.net/rest/api/latest/issue/NTEST-1825 response: body: string: !!binary | - H4sIAAAAAAAAA7xXbW/jNgz+K4K/zolfkiZpgGHY2txwW68r2vT2YRgKxWYcXWTJk+QmWdf/PtIv - aZprNiS73TdLlEiKfPiQfvJgXXCVemPPgErBQPpOgEytr3gO1rfJAnLu6wIMd0Ir60MqXA6O+8mC - qwykzvxHMBZlkN5CYcCCcs3ZpLRO53NS+BCFYRR2DfxRgnXTTQE3hidOJOD5niD70SAc9XFhQc5x - uXCusOMgSGEOiUv1J93lTnJrBVddBS5ASy7ghQgkd/QprC0haLUsYYNKrqeTu2kH90Lcqvyw3vjJ - s+hgaRO8l2mzqR+S4gpvxGF81gmjThRO43AcnY17g+4oGnyDzpOOyohD7ys1x3oa107S/QD1hfH2 - 7c0iBZsYUVD0cPd7ZnMupc9SYZ1QiWOFgASYnrOVNssu3U60ujfySC9KJShnXD7wR+64CR4FrILK - rRcHG1EU9qLRd1b8Cd/mmPsyR6uEDTQ55XZJCStnjr7Gcy4t+F598T2+q7rrewuB6DHJYnMFj4C+ - hs++V3DEm6MobpPf20/b2fn5KXhoolxBgXTu5L3Mc27IwgpgKTcMVMYzyMkTv0HFiYmtL1dZJZy8 - TuRxiXoJ8A88WWKB7aKElNe2Lhr4/ieH2xoIWijGbQ4UrHAr0VKb69qbmSyhkxm+2UGAZpfae6Z8 - GqGNcKd6014PKGFHxErkmD8b0A3bKhG4UUO1ax8peI2zH1r4VsHsec9fupwrgthN1H45z0TGSguG - IS2aDXML7pgCSC1zms2AzYxegmKpXqkuuzCAyUnZbMN+EoazOz13K6wa1sEDTGnHiIqZNiwFCQ6O - pINXkdu+wwZQiOR13Ca4Q6jbK/P9qo6eCQdOYNsoqtJWJXKXt9cDemErKIz+hK6dGPnm9ttx32GR - l2fcK+GoUVhva5va0M/VWdsEl94p8kIKdDjdYzQMbMUO/dG6PzrS3X9g3PYlW77th0N0I+6v4/7/ - a6Vm9arHoMFosI4GX8PgurXYi9e9+GtYbBoX4XMfjtEhnMaHBL1WMBfrj/Xkg7D47XeESZYZyLBo - /7UIzloBvkzLsiaIt48ODgmGBwTxQcHokOD8c3fqcajepWGjGv+8cSfCJXc4FZ7aKOve/DKYBbU6 - Q2VZfV7okgIXETv/ShtCZd7YmRIwfajUfcSMU3HWzlX6SL8RSR3Hp8/2yFe8bBe6lOmlsIXEJlYX - N0GiJlrv7eHvbBi2w99+2LZUti84BKp4C6ov1S5p1D2V9HfapdSr15x/pan7V6Tap0a5/5BtEUg+ - A6JFwv/+uH8IutEhhEYjiseCW2o5V0It35HkEgr6NVFJm7Mqk6tKtt1RWk2wHfKZhFvgtsaBab68 - m6v7H99fP1y9v5hc300eJre3v9zi+7BKLQYED0wXwG6qkZSRXSYs0woHROQSIUkpdeiqDd8YyJFM - qj5uu29xSoTl5IV/iTAs9HDs1T0Rc4fBf6mpV1yBaciE4nL/UPNj1YS3QrVE71q6wbxmOD60p8uC - ivZtHA+7o9GwxfFxg+7fAAAA///sWW1P20gQ/iv7pRIg7DiJQ16kqhcBVfsBHbq0RQIqtFmvsa/2 - ruW164uOH99n1q4TTEyl+1AhHQKRyJ6dnZmdeeaZZS9v/P8SXaHTDLOuKvYn/aQFhecOtruoBYzH - 4bxV25+jT3GRyKMFu7nm2XDBTrX+Fkt2FRdAtYKtpCjBDd8n/P6BooPgJFrwJNKmWMy8mTcIYxUA - Sgcjf/TVKjyzwYNff2tGabU4Yr9cyQ7w59AuX4H2EQZhGdCiMfKslOwMjuLhBd+woXfMKBlbJ06v - zvHqBh/OydC3ltI5ikq6aVzk0tX5/QBpzOloYzA2Sv8BRN2oSBNrd63nC+n5rL4pcOWdIF3mOijB - Ac7bwW7wCcGnPW2EYC/7oCun0D1RyhoFo69swG6ejIk9q7YCg7FdeL28ZCvBVY+8vbCYj1t/djxY - bUwhUwMPgkzHSLOjhX1uz4ZilfJYmRh8H5mIUJlorXke9Ek80X+2zTDSvGSiTiSgLkYQTB9GFqxq - sqoALJo6s0Jk1jGrolhELJVcmXp8IYlGA9y9VZhiuBDAVUwu32POSpSKyDcZTTJABiXrhuzuWHSB - U763N0Zk0FUkFWUW461erQryCJaQW+B/LFahzlO7hgYgoDXHW0NgCaDGDHVMhkE5BqS66zOeVHxD - LrKMW+tKg5xmXLEdAyMOCxOXnStDPrce1jG4VRQE2o1iBAsaC81PE0mhKRGgvZbu+Lzj/V+oQvQd - hIy8b9KlqipXV9xkth5QfvIfN4sym8vY5A4675q973gBmrMukVJ3B39eLVeXzurCQYu2Vdpukum8 - kDnVwQEP0lgdsoPDByRKUugF0vApu5m0Lb7b3vpoz5DGlUdXN12BlvYSNhY5QN9yO+JmHVG/paSd - F16ro/uij214Lduwp2hJ137BPn7stcY86tpdUMdBcBHZyyTbGba3Tb9CbAoVkVCd/8ceR2TkHeqO - qPPH4O0kGM+ms/Vw6smxmHtiPhmF82F4gn1aIezwjJik1FgGAfZAn0PTCzZ/7BgCYCFdz47Fdc24 - 6KNWzMJVM5r5ElSPB0PfP5mK2XQynq/Feuph+5DLcCbeBW+tljfj5ZvRe/zW65yUqwZmHad+ZNzS - OBUC4Yxcgm43K9dJLChSTsa5oUBhve0kIIr4eoricDNFMe+O2C/f4u6M/vIt7s74L91iQFFQD4gN - GTxF6oPLhGEpRGwLiHC9HkhrILsGHYTgeZnrTA6uATEi2lYa3THhbVu6tENzlbafQfp9+Or3jZV+ - 312F32J43jSAV3z5bZn0ii+/w+JXfNmDL10Y8J/wF1h9X9fev3QZ3nz3sKEuePMvukbLDwAAAP// - gpuCs+WFs1zC2SQzwl7y4RoXMsDVFgUVCFglDOBeRpMwxqXDGN7IS80ryyzKz4M05CBCKaXQ+WEI - l6jQy8+FmFANY0KLezKKX6SpbX2YuTpKuYkVQanFpTkgg5HsBg+bFJU4lkDcUZZfgmesFgAAAP// 
- IsZepLFaiGFwQ4F2AbuNYfngUSfYcCpotBg0lgOyEu4QVNcaoTgXqgEcPLW1tQAAAAD//wMAm8Wf - eZMgAAA= + H4sIAAAAAAAAA7xXW3PqNhD+Kxq/1uALhEOY6XTaHE4nbZpmEnL60OlkhL0YHWTJleQATfPfu+sL + IQQ6Az3Nm6WVtLdvv10/ebAquEq9kWdApWAg/SRAptZXPAfr22QOOfd1AYY7oZX1IRUuB8f9ZM5V + BlJn/iMYizJIb6EwYEG55mxSWqfzGT34EIVhFHYN/FmCdZN1ATeGJ04k4PmeIP3RYBAOcWFBznA5 + d66woyBIYQaJS/UX3eVOcmsFV10FLkBNLuCFCCR39CmsLSFoX1nAGh+5nozvJp1oGJ/hVmWH9UZP + nkUDS5vgvUybde1Iiiu8EYfxWSeMOvH5JI5G/bNROOz2o/Nv0PiQLCUlDq2vnjnW0rg2ku4H+F4Y + b3xvFinYxIiCooe73zObcyl9lgrrhEocKwQkwPSMLbVZdOl2otW9kUdaUSpBOePygT9yx03wKGAZ + VGa9GNiIorAXDb+z4i/4NsfclzlqJWygygm3C0pYOXX0NZpxacH36ouX6Fd11/fmAtFjkvn6Ch4B + bQ2ffa/giDdHUdwk/8PbtPVPwUMT5QoK9OZW3ss854Y0LAEWcs1AZTyDnCzxG1ScmNj6cpVVwsnr + RB6XqJcA/8CTBRbYNkro8VrXRQPf/2RwWwNBC8W4zYGCJW4lWmpzXVszlSV0MsPXWwjQ7KP2nimf + Rmgj3KnWtNeD3nGxEjnmzwZ0w7aPCNyoodq1jxS8xthfWvhWwex5z1+7nCuC2E7UbjlPRcZKC4Yh + LZo1c3PumAJILXOaTYFNjV6AYqleqi67MIDJSdl0zX4ShrM7PXNLrBrWwQNMaceIipk2LAUJDo6k + g1eR2/hhAyhE8jpuY9wh1O2U+W5VR8+EAyewbRRVaasSucvb6QG9sBUURn9B006MfHN7f9y3WOTF + jXslHDUK6210Uxv6uTprm+CSnyIvpECD0x1Gw8BW7NAfrvrDI839F8ZtPdnwbb9irbi/Qvr7X7XU + rF71GFQYDVbR4D0UrlqNvXjVi99DY9O4CJ+7cIwO4TRuBTOx+lwPOJj93/94e7LXnuRZZiDDon1T + BOiAlmXNA/vVnR0SDA4JPhwQxAcFw0OC87d21uNQvUvDRjX+eaNOhEvucCo8tVHWvfllMAvq5wyV + ZfV5oUsKXETs/BttCJV5I2dKwPTho+4zZpyKs/Glpklv/+gWDuJ2dNt1ekNEu4JDkIg3kPhazY7G + m1Mpe6vZSb18zdhXmnp3RYl9anO7jmwQK/kUiNT2wJq4YG8YokP4ioYUjzm31DCuhFp8IslHKOjH + QiXrDZ3auV5Wss2O0mqMzYxPJdwCt1QmTwjG+su7ubr/8fL64eryYnx9N34Y397+eov+YY1ZDAge + mMyB3VQDJSO9TFimFY53yARC0qPUX6smemMgRyqourDt7mOECIvBC/8WYWhm05FXdzTMHQb/pSJe + VTqmIROKy91DzW9RE94K9xKta9aU1wybf3u6LKjk9uP4vDsYfmhx/N5j6j8AAAD//+xZbWvbSBD+ + K/ulkIRIlm05foHSmiTl7kNoOPcukKSE9UqK1Eq7QruqzrQ//p5Z6WRHtlK4DyUHISE20uzszOzM + M89s+rRu0+1/RFOFynJMqtIcTvpJCwrPHWx3UQsYT8N5L7c/J58Sk4YnC3Z3y/Phgp0r9TUJ2U1i + YlUatgpFCWb3IeWPPyg6CE6qBE9jpc1i5s28QZTIAEA4GI1Hn63CCxs8+PVFMUqrxQn76Up2hD/H + dvkKpI0wCMuAFo2RF2XILuAoHl7xDRvNTxklY+vE+c0lXt3hwzkb+tZSOkdRhW6WmCJ0VfE4QBpz + OtoEfIvSfwBRNzZZau2u9fxFev6UXyWY7k6QrgsVlOjgl+1YNviE4NOeNkKwl/2mKseonijljYLR + ZzZgd3tDXs+qrcBgbBfeLq/ZSnDZI2+vG+Ze68+OB6uNNmGm4UGQqwRpdrKwz+3ZUKwynkidgK0j + ExEqHa8VL4I+iT39F9sMI81LJupEAupigMDsoEPDqiarDGBR15kVIbNOWRUnImZZyKWuhw+SaDTA + 3XuJGYQLAVzF3PEt4axEqYhik9McAmSQyDg6UnfHoiuc8qO97yGDbuJQUmYx3upV0pBHsITcAntj + iYxUkdk1NL4ArTneagJLADUmoFMyDMox3qBXlGnAeFrxDbnIcm6tKzVymnHJdgyMOSxMXXYpNfnc + eljH4F5SEGg3ihEsaCzU/5pICnWJAB20dMfnHe//QBWi7yBk5H2TLlVVuariOrf1gPIL/3bzOLe5 + jE0eoPOh2fuBG1Mk6xIp9XD08Wa5unZWVw5atK3SdpNcFSYsqA6OeJAl8pgdHf9AoqRGLZCG++xm + 0rb4bnvroz1DGjaeXLx0BVqKSthoCoC+ZWbf93uo18dm/ZZpdle0bMMeluVWhwX72IfXx269ds8n + XbsL6jgILmJ7FWQ7w/au6GeITaEiEqqK/9jjiIy8Q90R8f09eDsJxrPpbD2ceuFYzD0xn4yi+TA6 + wz6tEHZ4Riyk1FgGAfZAn0PTCzbvdwwBsJCuZ4faumZc9FErZuGqGaz8EFSPB0PfP5uK2XQynq/F + euph+4iH0Uy8C95aLW/GyzejD/it1zkZlw3MOk79SLuldioEwhm5BN1uXq7TRFCknJxzTYHCettJ + QBTx9RzF4eaSYt4dkF++xd0J++Vb3J3QX7rFgKIg0XnKNw0ZPEfqg8tEUSlEYguIcL0eJ2sguwUd + hOBlWag8HNwCe0S8rTS6IcLbtnRph+Yi7DCD9Pvw1e8bK/12rCwanH+FkV+WMK8w8issfoWRAzDS + hYE+xub7e/wF7jzWRfmdrrKb7x4sUYY3/2DraumjZF4fLnmjGuD+AQAA//9CNwjXuJABTg/gbLLB + fYauA1dbzhinBLyRl5pXllmUnwdpyEGEUkqhs7sQLlGhl58LMaEaxoQW92SUy0gT0/owc3WUchMr + glKLS3NABiPZDR42KSpxLIG4oyy/hHojrRDD4IYC7coAAAAA//9KLA7LB486gQfiaiFjvaCxHJCV + cIegutYIxblQDeDgqa2tBQAAAP//AwDenZgOUSAAAA== headers: Atl-Request-Id: - - 04d520f6-2752-459b-bbcc-6de212d90a32 + - bab75e9e-a5fe-47ea-a26d-f5ab3c9c24d4 Atl-Traceid: - - 04d520f62752459bbbcc6de212d90a32 + - bab75e9ea5fe47eaa26df5ab3c9c24d4 Cache-Control: - no-cache, no-store, no-transform 
      Content-Encoding:
@@ -1742,7 +1827,7 @@ interactions:
      Content-Type:
      - application/json;charset=UTF-8
      Date:
-      - Fri, 10 Jan 2025 19:15:42 GMT
+      - Wed, 29 Jan 2025 20:45:15 GMT
      Nel:
      - '{"failure_fraction": 0.001, "include_subdomains": true, "max_age": 600,
        "report_to": "endpoint-1"}'
@@ -1752,7 +1837,7 @@ interactions:
      Server:
      - AtlassianEdge
      Server-Timing:
-      - atl-edge;dur=290,atl-edge-internal;dur=22,atl-edge-upstream;dur=271,atl-edge-pop;desc="aws-us-east-1"
+      - atl-edge;dur=332,atl-edge-internal;dur=12,atl-edge-upstream;dur=320,atl-edge-pop;desc="aws-us-east-1"
      Strict-Transport-Security:
      - max-age=63072000; includeSubDomains; preload
      Timing-Allow-Origin:
@@ -1764,7 +1849,7 @@ interactions:
      X-Aaccountid:
      - 5d3878b170e3c90c952f91f6
      X-Arequestid:
-      - a6f8d1a3f97ed13f9ab06f6304e94ca8
+      - f01a7f6b4b22ae7ed6cf29061a83ba29
      X-Content-Type-Options:
      - nosniff
      X-Xss-Protection:

From 89e17c71a5d5411820b5bc35b46564a651f231e1 Mon Sep 17 00:00:00 2001
From: kiblik <5609770+kiblik@users.noreply.github.com>
Date: Fri, 31 Jan 2025 02:55:24 +0100
Subject: [PATCH 92/99] feat(helm-local_settings): Add option to add local_settings.py (#10803)

---
 .../en/open_source/installation/configuration.md |  4 +++-
 .../templates/celery-beat-deployment.yaml        | 11 +++++++++++
 .../templates/celery-worker-deployment.yaml      | 11 +++++++++++
 .../templates/configmap-local-settings-py.yaml   | 15 +++++++++++++++
 helm/defectdojo/templates/django-deployment.yaml | 11 +++++++++++
 helm/defectdojo/templates/initializer-job.yaml   | 11 +++++++++++
 helm/defectdojo/values.yaml                      | 12 ++++++++++++
 7 files changed, 74 insertions(+), 1 deletion(-)
 create mode 100644 helm/defectdojo/templates/configmap-local-settings-py.yaml

diff --git a/docs/content/en/open_source/installation/configuration.md b/docs/content/en/open_source/installation/configuration.md
index 402689a2164..aa97ec84b80 100644
--- a/docs/content/en/open_source/installation/configuration.md
+++ b/docs/content/en/open_source/installation/configuration.md
@@ -23,7 +23,7 @@ When you deploy DefectDojo in a **Kubernetes** cluster, you can set environment
 
 An example can be found in [`template_env`](https://github.com/DefectDojo/django-DefectDojo/blob/master/dojo/settings/template-env).
 
-### local_settings.py (not with Kubernetes)
+### local_settings.py
 
 `local_settings.py` can contain more complex customizations such as adding MIDDLEWARE or INSTALLED_APP entries.
 This file is processed *after* settings.dist.py is processed, so you can modify settings delivered by DefectDojo out of the box.
@@ -34,6 +34,8 @@ An example can be found in [`dojo/settings/template-local_settings`](https://git
 
 In Docker Compose release mode, files in `docker/extra_settings/` (relative to the file `docker-compose.yml`) will be copied into `dojo/settings/` in the docker container on startup.
 
+`local_settings.py` can be used in Kubernetes as well. The Helm variable `localsettingspy` is stored as a ConfigMap and mounted as `dojo/settings/local_settings.py` in the containers.
+
 ## Configuration in the UI
 
 Users with the superuser status can configure more options via the UI under `Configuration` / `System Settings`.
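
The new `localsettingspy` value takes the file content inline, so a single values override is enough to ship custom settings to every pod. A minimal sketch of such a snippet follows; it mirrors the commented example this patch adds to `values.yaml`, and the `debug_toolbar` entries are illustrative only, not something the chart installs:

```python
# Sketch of a local_settings.py supplied via the `localsettingspy` Helm value.
# It is processed after settings.dist.py, so INSTALLED_APPS and MIDDLEWARE
# already exist in scope; debug_toolbar is an illustrative package, not part
# of this patch.
INSTALLED_APPS += (
    "debug_toolbar",
)
MIDDLEWARE = [
    "debug_toolbar.middleware.DebugToolbarMiddleware",
] + MIDDLEWARE
```

Because each template below mounts the ConfigMap read-only at `/app/dojo/settings/local_settings.py`, the same snippet reaches the Django, Celery, and initializer pods without any per-deployment wiring.
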
diff --git a/helm/defectdojo/templates/celery-beat-deployment.yaml b/helm/defectdojo/templates/celery-beat-deployment.yaml index 676067238c9..d27adfa6b9b 100644 --- a/helm/defectdojo/templates/celery-beat-deployment.yaml +++ b/helm/defectdojo/templates/celery-beat-deployment.yaml @@ -61,6 +61,11 @@ spec: volumes: - name: run emptyDir: {} + {{- if .Values.localsettingspy }} + - name: localsettingspy + configMap: + name: {{ $fullName }}-localsettingspy + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount configMap: @@ -119,6 +124,12 @@ spec: volumeMounts: - name: run mountPath: /run/defectdojo + {{- if .Values.localsettingspy }} + - name: localsettingspy + readOnly: true + mountPath: /app/dojo/settings/local_settings.py + subPath: file + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount mountPath: {{ .Values.django.uwsgi.certificates.certMountPath }} diff --git a/helm/defectdojo/templates/celery-worker-deployment.yaml b/helm/defectdojo/templates/celery-worker-deployment.yaml index f612b9dcecb..2b615b5accf 100644 --- a/helm/defectdojo/templates/celery-worker-deployment.yaml +++ b/helm/defectdojo/templates/celery-worker-deployment.yaml @@ -59,6 +59,11 @@ spec: - name: {{ .Values.imagePullSecrets }} {{- end }} volumes: + {{- if .Values.localsettingspy }} + - name: localsettingspy + configMap: + name: {{ $fullName }}-localsettingspy + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount configMap: @@ -114,6 +119,12 @@ spec: {{- end }} command: ['/entrypoint-celery-worker.sh'] volumeMounts: + {{- if .Values.localsettingspy }} + - name: localsettingspy + readOnly: true + mountPath: /app/dojo/settings/local_settings.py + subPath: file + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount mountPath: {{ .Values.django.uwsgi.certificates.certMountPath }} diff --git a/helm/defectdojo/templates/configmap-local-settings-py.yaml b/helm/defectdojo/templates/configmap-local-settings-py.yaml new file mode 100644 index 00000000000..dc75942fbc0 --- /dev/null +++ b/helm/defectdojo/templates/configmap-local-settings-py.yaml @@ -0,0 +1,15 @@ +{{- if .Values.localsettingspy }} +{{- $fullName := include "defectdojo.fullname" . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $fullName }}-localsettingspy + labels: + app.kubernetes.io/name: {{ include "defectdojo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . 
}} +data: + file: + {{ toYaml .Values.localsettingspy | indent 4 }} +{{- end }} diff --git a/helm/defectdojo/templates/django-deployment.yaml b/helm/defectdojo/templates/django-deployment.yaml index c0fb1d7518c..63f4a2a9452 100644 --- a/helm/defectdojo/templates/django-deployment.yaml +++ b/helm/defectdojo/templates/django-deployment.yaml @@ -68,6 +68,11 @@ spec: volumes: - name: run emptyDir: {} + {{- if .Values.localsettingspy }} + - name: localsettingspy + configMap: + name: {{ $fullName }}-localsettingspy + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount configMap: @@ -150,6 +155,12 @@ spec: volumeMounts: - name: run mountPath: /run/defectdojo + {{- if .Values.localsettingspy }} + - name: localsettingspy + readOnly: true + mountPath: /app/dojo/settings/local_settings.py + subPath: file + {{- end }} {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount mountPath: {{ .Values.django.uwsgi.certificates.certMountPath }} diff --git a/helm/defectdojo/templates/initializer-job.yaml b/helm/defectdojo/templates/initializer-job.yaml index 60d82b9f2d7..248970a4b75 100644 --- a/helm/defectdojo/templates/initializer-job.yaml +++ b/helm/defectdojo/templates/initializer-job.yaml @@ -44,6 +44,11 @@ spec: - name: {{ .Values.imagePullSecrets }} {{- end }} volumes: + {{- if .Values.localsettingspy }} + - name: localsettingspy + configMap: + name: {{ $fullName }}-localsettingspy + {{- end }} {{- range .Values.initializer.extraVolumes }} - name: userconfig-{{ .name }} {{ .type }}: @@ -107,6 +112,12 @@ spec: {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} {{- end }} volumeMounts: + {{- if .Values.localsettingspy }} + - name: localsettingspy + readOnly: true + mountPath: /app/dojo/settings/local_settings.py + subPath: file + {{- end }} {{- range .Values.initializer.extraVolumes }} - name: userconfig-{{ .name }} readOnly: true diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index 09f5f525e41..865fc4849ea 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -543,3 +543,15 @@ extraConfigs: {} # configMapKeyRef: # name: my-other-postgres-configmap # key: cluster_endpoint + +# To add code snippet which would extend setting functionality, you might add it here +# It will be stored as ConfigMap and mounted `dojo/settings/local_settings.py`. 
+# For more see: https://documentation.defectdojo.com/getting_started/configuration/ +# For example: +# localsettingspy: | +# INSTALLED_APPS += ( +# 'debug_toolbar', +# ) +# MIDDLEWARE = [ +# 'debug_toolbar.middleware.DebugToolbarMiddleware', +# ] + MIDDLEWARE From d3b3c261336057f8334c85ce4133540b4f9d4616 Mon Sep 17 00:00:00 2001 From: Paul Osinski <42211303+paulOsinski@users.noreply.github.com> Date: Thu, 30 Jan 2025 21:08:09 -0500 Subject: [PATCH 93/99] Edit defectdojo.com/pricing link (#11678) * Edit defectdojo.com/pricing link * Update entrypoint-initializer.sh --- docker/entrypoint-initializer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/entrypoint-initializer.sh b/docker/entrypoint-initializer.sh index 08e77dc46ca..fa1f4647823 100755 --- a/docker/entrypoint-initializer.sh +++ b/docker/entrypoint-initializer.sh @@ -24,7 +24,7 @@ echo "Creating Announcement Banner" cat < Date: Thu, 30 Jan 2025 20:48:45 -0600 Subject: [PATCH 94/99] Django AuditLog: Upgrade to 3.x (#11592) * Django AuditLog: Upgrade to 3.x * Accommodate auditlog copy restrictions * ruff fixes * Ruff Autofix Bite * Take Kiblik's advice * Fixing ruff * Try with running tests with no deps --- .github/workflows/fetch-oas.yml | 2 +- .github/workflows/integration-tests.yml | 6 +-- .github/workflows/rest-framework-tests.yml | 4 +- docs/content/en/open_source/upgrading/2.43.md | 26 +++++++++-- dojo/models.py | 45 +++++++------------ dojo/settings/settings.dist.py | 3 ++ dojo/templatetags/display_tags.py | 4 +- requirements.txt | 2 +- unittests/test_copy_model.py | 18 ++++---- unittests/test_deduplication_logic.py | 21 ++++++--- unittests/test_duplication_loops.py | 19 +++----- .../test_false_positive_history_logic.py | 24 ++++++---- unittests/test_utils_deduplication_reopen.py | 20 +++------ 13 files changed, 104 insertions(+), 90 deletions(-) diff --git a/.github/workflows/fetch-oas.yml b/.github/workflows/fetch-oas.yml index 15720f1f31b..8f2b5514436 100644 --- a/.github/workflows/fetch-oas.yml +++ b/.github/workflows/fetch-oas.yml @@ -33,7 +33,7 @@ jobs: docker images - name: Start Dojo - run: docker compose up -d postgres nginx uwsgi + run: docker compose up --no-deps -d postgres nginx uwsgi env: DJANGO_VERSION: ${{ env.release_version }}-alpine NGINX_VERSION: ${{ env.release_version }}-alpine diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 11a5d69d6b2..c60cb6f3403 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -63,21 +63,21 @@ jobs: run: ln -s docker-compose.override.integration_tests.yml docker-compose.override.yml - name: Start Dojo - run: docker compose up -d postgres nginx celerybeat celeryworker mailhog uwsgi redis + run: docker compose up --no-deps -d postgres nginx celerybeat celeryworker mailhog uwsgi redis env: DJANGO_VERSION: ${{ matrix.os }} NGINX_VERSION: ${{ matrix.os }} - name: Initialize timeout-minutes: 10 - run: docker compose up --exit-code-from initializer initializer + run: docker compose up --no-deps --exit-code-from initializer initializer env: DJANGO_VERSION: ${{ matrix.os }} NGINX_VERSION: ${{ matrix.os }} - name: Integration tests timeout-minutes: 10 - run: docker compose up --exit-code-from integration-tests integration-tests + run: docker compose up --no-deps --exit-code-from integration-tests integration-tests env: DD_INTEGRATION_TEST_FILENAME: ${{ matrix.test-case }} INTEGRATION_TESTS_VERSION: debian diff --git a/.github/workflows/rest-framework-tests.yml 
b/.github/workflows/rest-framework-tests.yml index 309eee1240d..e97cef47b40 100644 --- a/.github/workflows/rest-framework-tests.yml +++ b/.github/workflows/rest-framework-tests.yml @@ -39,12 +39,12 @@ jobs: # phased startup so we can use the exit code from unit test container - name: Start Postgres and webhook.endpoint - run: docker compose up -d postgres webhook.endpoint + run: docker compose up --no-deps -d postgres webhook.endpoint # no celery or initializer needed for unit tests - name: Unit tests timeout-minutes: 10 - run: docker compose up --exit-code-from uwsgi uwsgi + run: docker compose up --no-deps --exit-code-from uwsgi uwsgi env: DJANGO_VERSION: ${{ matrix.os }} diff --git a/docs/content/en/open_source/upgrading/2.43.md b/docs/content/en/open_source/upgrading/2.43.md index 59a4a1faeea..f9111964d42 100644 --- a/docs/content/en/open_source/upgrading/2.43.md +++ b/docs/content/en/open_source/upgrading/2.43.md @@ -2,14 +2,34 @@ title: 'Upgrading to DefectDojo Version 2.43.x' toc_hide: true weight: -20250106 -description: Disclaimer field renamed/split and removal of `dc-` scripts. +description: Disclaimer field renamed/split, removal of `dc-` scripts, audit log updates, and hash code updates. --- +### Audit log migration + +As part of the upgrade to django-auditlog 3.x, existing LogEntry records are +migrated from text-encoded JSON to a native JSON field. Depending on the number +of LogEntry objects in your database, this migration could take a long time +to fully execute. If this period of time would be disruptive +to your operations, please consult the [migration guide](https://django-auditlog.readthedocs.io/en/latest/upgrade.html#upgrading-to-version-3) +for making this migration a two-step process.
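+
+If you opt for the two-step approach, a minimal sketch (an illustration only,
+assuming you apply overrides through `local_settings.py`, which DefectDojo
+loads after `settings.dist.py` when present) could look like this:
+
+```python
+# Step one: leave existing LogEntry records untouched during the schema
+# migration and keep reading their legacy text-based changes meanwhile.
+# Both flags are added (defaulting to False) in settings.dist.py by this upgrade.
+AUDITLOG_TWO_STEP_MIGRATION = True
+AUDITLOG_USE_TEXT_CHANGES_IF_JSON_IS_NOT_PRESENT = True
+```
+
+Step two is then migrating the old records in the background, as described in
+the django-auditlog documentation, and setting both flags back to `False`.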
+ +--- + +### Removal of "dc" helper scripts + +In the past, when DefectDojo supported multiple databases and message brokers, `dc-` scripts were added to simplify starting the Dojo stack. As those backends are no longer supported, the scripts are no longer needed. From now on, we recommend using the standard `docker compose` (or `docker-compose`) commands as described in the [README.md](https://github.com/DefectDojo/django-DefectDojo/blob/master/README.md). + +--- + +### Diversification of Disclaimers + [Pull request #10902](https://github.com/DefectDojo/django-DefectDojo/pull/10902) introduced different kinds of disclaimers within the DefectDojo instance. The original content of the disclaimer was copied to all new fields where it had been used until now (so this change does not require any action on the user's side). However, if users were managing the original disclaimer via API (endpoint `/api/v2/system_settings/1/`, field `disclaimer`), be aware that the fields are now called `disclaimer_notifications` and `disclaimer_reports` (plus there is one additional, previously unused field called `disclaimer_notes`). -In the past, when DefectDojo supported different database and message brokers, `dc-` scripts have been added to simplify start of Dojo stack. As these backends are not supported, mentioned scripts are not needed anymore. From now we recommend to use standard `docker compose` (or `docker-compose`) commands as they are described on [README.md](https://github.com/DefectDojo/django-DefectDojo/blob/master/README.md) +--- + +### Hash Code changes -**Hash Code changes** The Rusty Hog parser has been [updated](https://github.com/DefectDojo/django-DefectDojo/pull/11433) to populate more fields. Some of these fields are part of the hash code calculation. To recalculate the hash code and deduplicate existing Rusty Hog findings, please execute the following command: `docker compose exec uwsgi /bin/bash -c 'python manage.py dedupe --parser "Essex Hog Scan (Rusty Hog Scan)" --hash_code_only'` diff --git a/dojo/models.py b/dojo/models.py index ddb03290194..e75f35ec777 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -123,6 +123,14 @@ def _manage_inherited_tags(obj, incoming_inherited_tags, potentially_existing_ta obj.tags.set(cleaned_tag_list) +def _copy_model_util(model_in_database, exclude_fields: list[str] = []): + new_model_instance = model_in_database.__class__() + for field in model_in_database._meta.fields: + if field.name not in ["id", *exclude_fields]: + setattr(new_model_instance, field.name, getattr(model_in_database, field.name)) + return new_model_instance + + @deconstructible class UniqueUploadNameProvider: @@ -703,9 +711,7 @@ class NoteHistory(models.Model): current_editor = models.ForeignKey(Dojo_User, editable=False, null=True, on_delete=models.CASCADE) def copy(self): - copy = self - copy.pk = None - copy.id = None + copy = _copy_model_util(self) copy.save() return copy @@ -731,12 +737,9 @@ def __str__(self): return self.entry def copy(self): - copy = self + copy = _copy_model_util(self) # Save the necessary ManyToMany relationships old_history = list(self.history.all()) - # Wipe the IDs of the new object - copy.pk = None - copy.id = None # Save the object before setting any ManyToMany relationships copy.save() # Copy the history @@ -751,10 +754,7 @@ class FileUpload(models.Model): file = models.FileField(upload_to=UniqueUploadNameProvider("uploaded_files")) def copy(self): - copy = self - # Wipe the IDs of the new object - copy.pk = None - copy.id = None + copy = _copy_model_util(self) # Add unique modifier to file name copy.title = f"{self.title} - clone-{str(uuid4())[:8]}" # Create new unique file name @@ -1538,16 +1538,13 @@ def get_absolute_url(self): return reverse("view_engagement", args=[str(self.id)]) def copy(self): - copy = self + copy = _copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_files = list(self.files.all()) old_tags = list(self.tags.all()) old_risk_acceptances = list(self.risk_acceptance.all()) old_tests = list(Test.objects.filter(engagement=self)) - # Wipe the IDs of the new object - copy.pk = None - copy.id = None # Save the object before setting any ManyToMany relationships copy.save() # Copy the notes @@ -1658,10 +1655,8 @@ def __str__(self): return f"'{self.finding}' on '{self.endpoint}'" def copy(self, finding=None): - copy = self + copy = _copy_model_util(self) current_endpoint = self.endpoint - copy.pk = None - copy.id = None if finding: copy.finding = finding copy.endpoint = current_endpoint @@ -2127,15 +2122,12 @@ def get_breadcrumbs(self): return bc def copy(self, engagement=None): - copy = self + copy = _copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_files = list(self.files.all()) old_tags = list(self.tags.all()) old_findings = list(Finding.objects.filter(test=self)) - # Wipe the IDs of the new object - copy.pk = None - copy.id = None if engagement: copy.engagement = engagement # Save the object before setting any ManyToMany relationships @@ -2748,7 +2740,7 @@ def get_absolute_url(self): return reverse("view_finding", args=[str(self.id)]) def copy(self, test=None): - copy = self + copy = _copy_model_util(self) # Save the necessary ManyToMany relationships old_notes =
list(self.notes.all()) old_files = list(self.files.all()) @@ -2757,8 +2749,6 @@ def copy(self, test=None): old_found_by = list(self.found_by.all()) old_tags = list(self.tags.all()) # Wipe the IDs of the new object - copy.pk = None - copy.id = None if test: copy.test = test # Save the object before setting any ManyToMany relationships @@ -3744,13 +3734,10 @@ def engagement(self): return None def copy(self, engagement=None): - copy = self + copy = _copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_accepted_findings_hash_codes = [finding.hash_code for finding in self.accepted_findings.all()] - # Wipe the IDs of the new object - copy.pk = None - copy.id = None # Save the object before setting any ManyToMany relationships copy.save() # Copy the notes diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index f959cbf605a..7d5cd549803 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1798,6 +1798,9 @@ def saml2_attrib_map_format(dict): # ------------------------------------------------------------------------------ AUDITLOG_FLUSH_RETENTION_PERIOD = env("DD_AUDITLOG_FLUSH_RETENTION_PERIOD") ENABLE_AUDITLOG = env("DD_ENABLE_AUDITLOG") +AUDITLOG_TWO_STEP_MIGRATION = False +AUDITLOG_USE_TEXT_CHANGES_IF_JSON_IS_NOT_PRESENT = False + USE_FIRST_SEEN = env("DD_USE_FIRST_SEEN") USE_QUALYS_LEGACY_SEVERITY_PARSING = env("DD_QUALYS_LEGACY_SEVERITY_PARSING") diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 94bcf80d6e5..048cefe9352 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -3,6 +3,7 @@ import datetime import logging import mimetypes +from ast import literal_eval from itertools import chain import bleach @@ -323,8 +324,7 @@ def display_index(data, index): @register.filter(is_safe=True, needs_autoescape=False) @stringfilter def action_log_entry(value, autoescape=None): - import json - history = json.loads(value) + history = literal_eval(value) text = "" for k in history: if isinstance(history[k], dict): diff --git a/requirements.txt b/requirements.txt index e8ffd6bc2a0..c4691ca4637 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ bleach[css] celery==5.4.0 defusedxml==0.7.1 django_celery_results==2.5.1 -django-auditlog==2.3.0 +django-auditlog==3.0.0 django-dbbackup==4.2.1 django-environ==0.12.0 django-filter==24.3 diff --git a/unittests/test_copy_model.py b/unittests/test_copy_model.py index 2f20c6b6db3..5466528390a 100644 --- a/unittests/test_copy_model.py +++ b/unittests/test_copy_model.py @@ -78,7 +78,7 @@ def test_duplicate_finding_with_notes(self): # Make sure the copy was made without error self.assertEqual(current_finding_count + 1, Finding.objects.filter(test=test).count()) # Do the notes match - self.assertEqual(finding.notes, finding_copy.notes) + self.assertQuerySetEqual(finding.notes.all(), finding_copy.notes.all()) def test_duplicate_finding_with_tags_and_notes(self): # Set the scene @@ -98,9 +98,9 @@ def test_duplicate_finding_with_tags_and_notes(self): # Make sure the copy was made without error self.assertEqual(current_finding_count + 1, Finding.objects.filter(test=test).count()) # Do the tags match - self.assertEqual(finding.notes, finding_copy.notes) + self.assertEqual(finding.tags, finding_copy.tags) # Do the notes match - self.assertEqual(finding.notes, finding_copy.notes) + self.assertQuerySetEqual(finding.notes.all(), finding_copy.notes.all()) def 
test_duplicate_finding_with_endpoints(self): # Set the scene @@ -173,7 +173,9 @@ def test_duplicate_tests_different_engagements(self): # Do the engagements have the same number of findings self.assertEqual(Finding.objects.filter(test__engagement=engagement1).count(), Finding.objects.filter(test__engagement=engagement2).count()) # Are the tests equal - self.assertEqual(test, test_copy) + self.assertEqual(test.title, test_copy.title) + self.assertEqual(test.scan_type, test_copy.scan_type) + self.assertEqual(test.test_type, test_copy.test_type) # Does the product have more findings self.assertEqual(product_finding_count + 1, Finding.objects.filter(test__engagement__product=product).count()) @@ -213,7 +215,7 @@ def test_duplicate_test_with_notes(self): # Make sure the copy was made without error self.assertEqual(current_test_count + 1, Test.objects.filter(engagement=engagement).count()) # Do the notes match - self.assertEqual(test.notes, test_copy.notes) + self.assertQuerySetEqual(test.notes.all(), test_copy.notes.all()) def test_duplicate_test_with_tags_and_notes(self): # Set the scene @@ -233,7 +235,7 @@ def test_duplicate_test_with_tags_and_notes(self): # Make sure the copy was made without error self.assertEqual(current_test_count + 1, Test.objects.filter(engagement=engagement).count()) # Do the notes match - self.assertEqual(test.notes, test_copy.notes) + self.assertQuerySetEqual(test.notes.all(), test_copy.notes.all()) # Do the tags match self.assertEqual(test.tags, test_copy.tags) @@ -297,7 +299,7 @@ def test_duplicate_engagement_with_notes(self): # Make sure the copy was made without error self.assertEqual(current_engagement_count + 1, Engagement.objects.filter(product=product).count()) # Do the notes match - self.assertEqual(engagement.notes, engagement_copy.notes) + self.assertQuerySetEqual(engagement.notes.all(), engagement_copy.notes.all()) def test_duplicate_engagement_with_tags_and_notes(self): # Set the scene @@ -317,6 +319,6 @@ def test_duplicate_engagement_with_tags_and_notes(self): # Make sure the copy was made without error self.assertEqual(current_engagement_count + 1, Engagement.objects.filter(product=product).count()) # Do the notes match - self.assertEqual(engagement.notes, engagement_copy.notes) + self.assertQuerySetEqual(engagement.notes.all(), engagement_copy.notes.all()) # Do the tags match self.assertEqual(engagement.tags, engagement_copy.tags) diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py index 319c0761312..b3e9372a54a 100644 --- a/unittests/test_deduplication_logic.py +++ b/unittests/test_deduplication_logic.py @@ -4,7 +4,17 @@ from crum import impersonate from django.conf import settings -from dojo.models import Endpoint, Endpoint_Status, Engagement, Finding, Product, System_Settings, Test, User +from dojo.models import ( + Endpoint, + Endpoint_Status, + Engagement, + Finding, + Product, + System_Settings, + Test, + User, + _copy_model_util, +) from .dojo_test_case import DojoTestCase @@ -1189,8 +1199,7 @@ def log_summary(self, product=None, engagement=None, test=None): def copy_and_reset_finding(self, id): org = Finding.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) new.duplicate = False new.duplicate_finding = None new.active = True @@ -1227,15 +1236,13 @@ def copy_and_reset_finding_add_endpoints(self, id, static=False, dynamic=True): def copy_and_reset_test(self, id): org = Test.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) # return unsaved new finding and
reloaded existing finding return new, Test.objects.get(id=id) def copy_and_reset_engagement(self, id): org = Engagement.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) # return unsaved new finding and reloaded existing finding return new, Engagement.objects.get(id=id) diff --git a/unittests/test_duplication_loops.py b/unittests/test_duplication_loops.py index 6d97524ff4c..cc0d250774e 100644 --- a/unittests/test_duplication_loops.py +++ b/unittests/test_duplication_loops.py @@ -4,7 +4,7 @@ from django.test.utils import override_settings from dojo.management.commands.fix_loop_duplicates import fix_loop_duplicates -from dojo.models import Engagement, Finding, Product, User +from dojo.models import Engagement, Finding, Product, User, _copy_model_util from dojo.utils import set_duplicate from .dojo_test_case import DojoTestCase @@ -27,25 +27,22 @@ def run(self, result=None): super().run(result) def setUp(self): - self.finding_a = Finding.objects.get(id=2) - self.finding_a.pk = None + self.finding_a = _copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) self.finding_a.title = "A: " + self.finding_a.title self.finding_a.duplicate = False - self.finding_a.duplicate_finding = None self.finding_a.hash_code = None self.finding_a.save() - self.finding_b = Finding.objects.get(id=3) - self.finding_b.pk = None + + self.finding_b = _copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) self.finding_b.title = "B: " + self.finding_b.title self.finding_b.duplicate = False - self.finding_b.duplicate_finding = None self.finding_b.hash_code = None self.finding_b.save() - self.finding_c = Finding.objects.get(id=4) + + self.finding_c = _copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_c.pk = None self.finding_c.title = "C: " + self.finding_c.title self.finding_c.duplicate = False - self.finding_c.duplicate_finding = None self.finding_c.hash_code = None self.finding_c.save() @@ -265,10 +262,8 @@ def test_loop_relations_for_three(self): # Another loop-test for 4 findings def test_loop_relations_for_four(self): - self.finding_d = Finding.objects.get(id=4) - self.finding_d.pk = None + self.finding_d = _copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_d.duplicate = False - self.finding_d.duplicate_finding = None self.finding_d.save() # A -> B, B -> C, C -> D, D -> A diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py index 04fca655b58..6af91445fa6 100644 --- a/unittests/test_false_positive_history_logic.py +++ b/unittests/test_false_positive_history_logic.py @@ -3,7 +3,17 @@ from crum import impersonate -from dojo.models import Endpoint, Endpoint_Status, Engagement, Finding, Product, System_Settings, Test, User +from dojo.models import ( + Endpoint, + Endpoint_Status, + Engagement, + Finding, + Product, + System_Settings, + Test, + User, + _copy_model_util, +) from .dojo_test_case import DojoTestCase @@ -1709,8 +1719,7 @@ def log_summary(self, product=None, engagement=None, test=None): def copy_and_reset_finding(self, id): org = Finding.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) new.duplicate = False new.duplicate_finding = None new.false_p = False @@ -1721,22 +1730,19 @@ def copy_and_reset_finding(self, id): def copy_and_reset_test(self, id): org = Test.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) # return unsaved new test 
and reloaded existing test return new, Test.objects.get(id=id) def copy_and_reset_engagement(self, id): org = Engagement.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) # return unsaved new engagement and reloaded existing engagement return new, Engagement.objects.get(id=id) def copy_and_reset_product(self, id): org = Product.objects.get(id=id) - new = org - new.pk = None + new = _copy_model_util(org) new.name = f"{org.name} (Copy {datetime.now()})" # return unsaved new product and reloaded existing product return new, Product.objects.get(id=id) diff --git a/unittests/test_utils_deduplication_reopen.py b/unittests/test_utils_deduplication_reopen.py index a5f8fcf54d5..91ba2c49d12 100644 --- a/unittests/test_utils_deduplication_reopen.py +++ b/unittests/test_utils_deduplication_reopen.py @@ -2,7 +2,7 @@ import logging from dojo.management.commands.fix_loop_duplicates import fix_loop_duplicates -from dojo.models import Finding +from dojo.models import Finding, _copy_model_util from dojo.utils import set_duplicate from .dojo_test_case import DojoTestCase @@ -14,34 +14,28 @@ class TestDuplicationReopen(DojoTestCase): fixtures = ["dojo_testdata.json"] def setUp(self): - self.finding_a = Finding.objects.get(id=2) - self.finding_a.pk = None + self.finding_a = _copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) self.finding_a.duplicate = False self.finding_a.mitigated = datetime.datetime(1970, 1, 1, tzinfo=datetime.UTC) self.finding_a.is_mitigated = True self.finding_a.false_p = True self.finding_a.active = False - self.finding_a.duplicate_finding = None self.finding_a.save() - self.finding_b = Finding.objects.get(id=3) - self.finding_b.pk = None + + self.finding_b = _copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) self.finding_a.active = True self.finding_b.duplicate = False - self.finding_b.duplicate_finding = None self.finding_b.save() - self.finding_c = Finding.objects.get(id=4) + self.finding_c = _copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_c.duplicate = False self.finding_c.out_of_scope = True self.finding_c.active = False - self.finding_c.duplicate_finding = None - self.finding_c.pk = None logger.debug("creating finding_c") self.finding_c.save() - self.finding_d = Finding.objects.get(id=5) + + self.finding_d = _copy_model_util(Finding.objects.get(id=5), exclude_fields=["duplicate_finding"]) self.finding_d.duplicate = False - self.finding_d.duplicate_finding = None - self.finding_d.pk = None logger.debug("creating finding_d") self.finding_d.save() From 67ec0e09a14ac2f9ea40aeae7271c1245c69784f Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Fri, 31 Jan 2025 00:00:20 -0600 Subject: [PATCH 95/99] Jira Template: Do not HTML encode before shipping to jira (#11640) --- .../issue-trackers/jira_full/jira-description.tpl | 10 +++++----- .../jira_full/jira-finding-group-description.tpl | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/dojo/templates/issue-trackers/jira_full/jira-description.tpl b/dojo/templates/issue-trackers/jira_full/jira-description.tpl index 6fd326efb5f..3c3fd22faac 100644 --- a/dojo/templates/issue-trackers/jira_full/jira-description.tpl +++ b/dojo/templates/issue-trackers/jira_full/jira-description.tpl @@ -72,26 +72,26 @@ {% endif %} *Description*: -{{ finding.description }} +{{ finding.description|safe }} {% if finding.mitigation %} *Mitigation*: -{{ 
finding.mitigation }} +{{ finding.mitigation|safe }} {% endif %} {% if finding.impact %} *Impact*: -{{ finding.impact }} +{{ finding.impact|safe }} {% endif %} {% if finding.steps_to_reproduce %} *Steps to reproduce*: -{{ finding.steps_to_reproduce }} +{{ finding.steps_to_reproduce|safe }} {% endif %} {% if finding.references %} *References*: -{{ finding.references }} +{{ finding.references|safe }} {% endif %} *Reporter:* [{{ finding.reporter|full_name}} ({{ finding.reporter.email }})|mailto:{{ finding.reporter.email }}] diff --git a/dojo/templates/issue-trackers/jira_full/jira-finding-group-description.tpl b/dojo/templates/issue-trackers/jira_full/jira-finding-group-description.tpl index 258aa78ead9..92a8997af27 100644 --- a/dojo/templates/issue-trackers/jira_full/jira-finding-group-description.tpl +++ b/dojo/templates/issue-trackers/jira_full/jira-finding-group-description.tpl @@ -66,26 +66,26 @@ h3. [{{ finding.title|jiraencode}}|{{ finding_url|full_url }}] {% endif %} *Description*: -{{ finding.description }} +{{ finding.description|safe }} {% if finding.mitigation %} *Mitigation*: -{{ finding.mitigation }} +{{ finding.mitigation|safe }} {% endif %} {% if finding.impact %} *Impact*: -{{ finding.impact }} +{{ finding.impact|safe }} {% endif %} {% if finding.steps_to_reproduce %} *Steps to reproduce*: -{{ finding.steps_to_reproduce }} +{{ finding.steps_to_reproduce|safe }} {% endif %} {% if finding.references %} *References*: -{{ finding.references }} +{{ finding.references|safe }} {% endif %} *Reporter:* [{{ finding.reporter|full_name}} ({{ finding.reporter.email }})|mailto:{{ finding.reporter.email }}] From 8b2de9eaa0347e98dc7cfbcb794e63f164b893e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 13:18:48 -0600 Subject: [PATCH 96/99] Bump boto3 from 1.36.9 to 1.36.10 (#11700) Bumps [boto3](https://github.com/boto/boto3) from 1.36.9 to 1.36.10. - [Release notes](https://github.com/boto/boto3/releases) - [Commits](https://github.com/boto/boto3/compare/1.36.9...1.36.10) --- updated-dependencies: - dependency-name: boto3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c4691ca4637..7ffe39caee7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -69,7 +69,7 @@ django-ratelimit==4.1.0 argon2-cffi==23.1.0 blackduck==1.1.3 pycurl==7.45.4 # Required for Celery Broker AWS (SQS) support -boto3==1.36.9 # Required for Celery Broker AWS (SQS) support +boto3==1.36.10 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 vulners==2.2.3 fontawesomefree==6.6.0 From 93a0678f06fbb38d85da00d44b919e1ff7cc93d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 13:21:13 -0600 Subject: [PATCH 97/99] Bump pytz from 2024.2 to 2025.1 (#11698) Bumps [pytz](https://github.com/stub42/pytz) from 2024.2 to 2025.1. - [Release notes](https://github.com/stub42/pytz/releases) - [Commits](https://github.com/stub42/pytz/compare/release_2024.2...release_2025.1) --- updated-dependencies: - dependency-name: pytz dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7ffe39caee7..48b89b429e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,7 +34,7 @@ Pillow==11.1.0 # required by django-imagekit psycopg[c]==3.2.4 cryptography==44.0.0 python-dateutil==2.9.0.post0 -pytz==2024.2 +pytz==2025.1 redis==5.2.1 requests==2.32.3 sqlalchemy==2.0.37 # Required by Celery broker transport From e0de9534315742f1a71c73e9da9a342fdbdea919 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 13:59:01 -0600 Subject: [PATCH 98/99] Bump ruff from 0.9.2 to 0.9.4 (#11699) Bumps [ruff](https://github.com/astral-sh/ruff) from 0.9.2 to 0.9.4. - [Release notes](https://github.com/astral-sh/ruff/releases) - [Changelog](https://github.com/astral-sh/ruff/blob/main/CHANGELOG.md) - [Commits](https://github.com/astral-sh/ruff/compare/0.9.2...0.9.4) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements-lint.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-lint.txt b/requirements-lint.txt index e3b98ed7307..24e7f8cc518 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1 +1 @@ -ruff==0.9.2 \ No newline at end of file +ruff==0.9.4 \ No newline at end of file From 0b450db18a05e0ffdc696500772b4c0a4033cc75 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 3 Feb 2025 15:21:09 +0000 Subject: [PATCH 99/99] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 15ddb09f1bb..ce9f1de104e 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.43.0-dev", + "version": "2.43.0", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 3a2e4a630a2..2346195ed21 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.43.0-dev" +__version__ = "2.43.0" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 3712c36f9af..7f2793f05ea 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.43.0-dev" +appVersion: "2.43.0" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.171-dev +version: 1.6.171 icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap