diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ae0f0db498f..6a68f9ca170 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -26,7 +26,7 @@ This checklist is for your information. - [ ] Bugfixes should be submitted against the `bugfix` branch. - [ ] Give a meaningful name to your PR, as it may end up being used in the release notes. - [ ] Your code is flake8 compliant. -- [ ] Your code is python 3.12 compliant. +- [ ] Your code is python 3.13 compliant. - [ ] If this is a new feature and not a bug fix, you've included the proper documentation in the docs at https://github.com/DefectDojo/django-DefectDojo/tree/dev/docs as part of this PR. - [ ] Model changes must include the necessary migrations in the dojo/db_migrations folder. - [ ] Add applicable tests to the unit tests. diff --git a/.github/workflows/close-stale.yml b/.github/workflows/close-stale.yml index 0b371f1cb60..857f619c78b 100644 --- a/.github/workflows/close-stale.yml +++ b/.github/workflows/close-stale.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Close issues and PRs that are pending closure - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 with: # Disable automatic stale marking - only close manually labeled items days-before-stale: -1 diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index dbe202e1c0c..5d2ef4314d1 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -19,7 +19,7 @@ jobs: extended: true - name: Setup Node - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 + uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 with: node-version: '22.20.0' # TODO: Renovate helper might not be needed here - needs to be fully tested diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml 
index 475a0e1715a..c6252e5a533 100644 --- a/.github/workflows/k8s-tests.yml +++ b/.github/workflows/k8s-tests.yml @@ -5,15 +5,6 @@ on: env: DD_HOSTNAME: defectdojo.default.minikube.local - HELM_REDIS_BROKER_SETTINGS: " \ - --set redis.enabled=true \ - --set celery.broker=redis \ - --set createRedisSecret=true \ - " - HELM_PG_DATABASE_SETTINGS: " \ - --set postgresql.enabled=true \ - --set createPostgresqlSecret=true \ - " jobs: setting_minikube_cluster: name: Kubernetes Deployment @@ -23,11 +14,11 @@ jobs: matrix: include: # databases, broker and k8s are independent, so we don't need to test each combination - # lastest k8s version (https://kubernetes.io/releases/) and oldest supported version from aws - # are tested (https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions) - - databases: pgsql - brokers: redis - k8s: 'v1.34.0' # renovate: datasource=github-releases depName=kubernetes/kubernetes versioning=loose + # lastest k8s version (https://kubernetes.io/releases/) and the oldest officially supported version + # are tested (https://kubernetes.io/releases/) + - k8s: 'v1.34.1' # renovate: datasource=github-releases depName=kubernetes/kubernetes versioning=loose + os: debian + - k8s: 'v1.31.13' # Do not track with renovate as we likely want to rev this manually os: debian steps: - name: Checkout @@ -68,12 +59,6 @@ jobs: helm dependency list ./helm/defectdojo helm dependency update ./helm/defectdojo - - name: Set confings into Outputs - id: set - run: |- - echo "pgsql=${{ env.HELM_PG_DATABASE_SETTINGS }}" >> $GITHUB_ENV - echo "redis=${{ env.HELM_REDIS_BROKER_SETTINGS }}" >> $GITHUB_ENV - - name: Deploying Django application with ${{ matrix.databases }} ${{ matrix.brokers }} timeout-minutes: 15 run: |- @@ -84,10 +69,14 @@ jobs: defectdojo \ ./helm/defectdojo \ --set django.ingress.enabled=true \ + --set images.django.image.tag=latest \ + --set images.nginx.image.tag=latest \ --set imagePullPolicy=Never \ --set 
initializer.keepSeconds="-1" \ - ${{ env[matrix.databases] }} \ - ${{ env[matrix.brokers] }} \ + --set redis.enabled=true \ + --set createRedisSecret=true \ + --set postgresql.enabled=true \ + --set createPostgresqlSecret=true \ --set createSecret=true - name: Check deployment status diff --git a/.github/workflows/release-1-create-pr.yml b/.github/workflows/release-1-create-pr.yml index 4e4b710400f..7d3f9bb64a0 100644 --- a/.github/workflows/release-1-create-pr.yml +++ b/.github/workflows/release-1-create-pr.yml @@ -98,7 +98,7 @@ jobs: chart-search-root: "helm/defectdojo" - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v6.0.1 + uses: stefanzweifel/git-auto-commit-action@28e16e81777b558cc906c8750092100bbb34c5e3 # v7.0.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" diff --git a/.github/workflows/release-3-master-into-dev.yml b/.github/workflows/release-3-master-into-dev.yml index d13ce0a9323..15674b5af40 100644 --- a/.github/workflows/release-3-master-into-dev.yml +++ b/.github/workflows/release-3-master-into-dev.yml @@ -86,7 +86,7 @@ jobs: chart-search-root: "helm/defectdojo" - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v6.0.1 + uses: stefanzweifel/git-auto-commit-action@28e16e81777b558cc906c8750092100bbb34c5e3 # v7.0.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" @@ -162,7 +162,7 @@ jobs: chart-search-root: "helm/defectdojo" - name: Push version changes - uses: stefanzweifel/git-auto-commit-action@778341af668090896ca464160c2def5d1d1a3eb0 # v6.0.1 + uses: stefanzweifel/git-auto-commit-action@28e16e81777b558cc906c8750092100bbb34c5e3 # v7.0.0 with: commit_user_name: "${{ env.GIT_USERNAME }}" commit_user_email: "${{ env.GIT_EMAIL }}" diff --git a/.github/workflows/release-x-manual-helm-chart.yml 
b/.github/workflows/release-x-manual-helm-chart.yml index b6b88edc1c2..a1105697c7d 100644 --- a/.github/workflows/release-x-manual-helm-chart.yml +++ b/.github/workflows/release-x-manual-helm-chart.yml @@ -69,16 +69,6 @@ jobs: helm dependency list ./helm/defectdojo helm dependency update ./helm/defectdojo - - name: Add yq - uses: mikefarah/yq@6251e95af8df3505def48c71f3119836701495d6 # v4.47.2 - - - name: Pin version docker version - id: pin_image - run: |- - yq --version - yq -i '.tag="${{ inputs.release_number }}"' helm/defectdojo/values.yaml - echo "Current image tag:`yq -r '.tag' helm/defectdojo/values.yaml`" - - name: Package Helm chart id: package-helm-chart run: | @@ -87,7 +77,7 @@ jobs: echo "chart_version=$(ls build | cut -d '-' -f 2,3 | sed 's|\.tgz||')" >> $GITHUB_ENV - name: Create release ${{ inputs.release_number }} - uses: softprops/action-gh-release@62c96d0c4e8a889135c1f3a25910db8dbe0e85f7 # v2.3.4 + uses: softprops/action-gh-release@6da8fa9354ddfdc4aeace5fc48d7f679b5214090 # v2.4.1 with: name: '${{ inputs.release_number }} 🌈' tag_name: ${{ inputs.release_number }} diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index f7e9199ab67..57d58f019c1 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: 3.13 # Renovate helper is not needed here + python-version: 3.14 # Renovate helper is not needed here - name: Configure Helm repos run: |- @@ -34,8 +34,8 @@ jobs: - name: Set up chart-testing uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0 with: - yamale_version: 4.0.4 # renovate: datasource=pypi depName=yamale versioning=semver - yamllint_version: 1.35.1 # renovate: datasource=pypi depName=yamllint versioning=semver + yamale_version: 6.0.0 # renovate: datasource=pypi depName=yamale versioning=semver + 
yamllint_version: 1.37.1 # renovate: datasource=pypi depName=yamllint versioning=semver - name: Determine target branch id: ct-branch-target @@ -68,15 +68,23 @@ jobs: - name: Check update of "artifacthub.io/changes" HELM annotation if: env.changed == 'true' run: | + # fast fail if `git show` fails + set -e + set -o pipefail + target_branch=${{ env.ct-branch }} echo "Checking Chart.yaml annotation changes" # Get current branch annotation current_annotation=$(yq e '.annotations."artifacthub.io/changes"' "helm/defectdojo/Chart.yaml") + echo "Current annotation: " + echo $current_annotation # Get target branch version of Chart.yaml annotation - target_annotation=$(git show "${{ env.ct-branch }}:helm/defectdojo/Chart.yaml" | yq e '.annotations."artifacthub.io/changes"' -) + target_annotation=$(git show "origin/${{ env.ct-branch }}:helm/defectdojo/Chart.yaml" | yq e '.annotations."artifacthub.io/changes"' -) + echo "Target annotation: " + echo $target_annotation if [[ "$current_annotation" == "$target_annotation" ]]; then echo "::error file=helm/defectdojo/Chart.yaml::The 'artifacthub.io/changes' annotation has not been updated compared to ${{ env.ct-branch }}. For more, check the hint in 'helm/defectdojo/Chart.yaml'" @@ -121,7 +129,7 @@ jobs: # If this step fails, install https://github.com/losisin/helm-values-schema-json and run locally `helm schema --use-helm-docs` in `helm/defectdojo` before committing your changes. # The helm schema will be generated for you. 
- name: Generate values schema json - uses: losisin/helm-values-schema-json-action@d5847286fa04322702c4f8d45031974798c83ac7 # v2.3.0 + uses: losisin/helm-values-schema-json-action@660c441a4a507436a294fc55227e1df54aca5407 # v2.3.1 with: fail-on-diff: true working-directory: "helm/defectdojo" diff --git a/.github/workflows/validate_docs_build.yml b/.github/workflows/validate_docs_build.yml index c64f2a8f41c..fcece5635a6 100644 --- a/.github/workflows/validate_docs_build.yml +++ b/.github/workflows/validate_docs_build.yml @@ -16,7 +16,7 @@ jobs: extended: true - name: Setup Node - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 + uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 with: node-version: '22.20.0' # TODO: Renovate helper might not be needed here - needs to be fully tested diff --git a/Dockerfile.django-alpine b/Dockerfile.django-alpine index 010017b0f50..bcca856298a 100644 --- a/Dockerfile.django-alpine +++ b/Dockerfile.django-alpine @@ -5,7 +5,7 @@ # Dockerfile.nginx to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.12.11-alpine3.22@sha256:02a73ead8397e904cea6d17e18516f1df3590e05dc8823bd5b1c7f849227d272 AS base +FROM python:3.13.7-alpine3.22@sha256:9ba6d8cbebf0fb6546ae71f2a1c14f6ffd2fdab83af7fa5669734ef30ad48844 AS base FROM base AS build WORKDIR /app RUN \ diff --git a/Dockerfile.django-debian b/Dockerfile.django-debian index b8077bb0b77..e816d204e05 100644 --- a/Dockerfile.django-debian +++ b/Dockerfile.django-debian @@ -5,7 +5,7 @@ # Dockerfile.nginx to use the caching mechanism of Docker. 
# Ref: https://devguide.python.org/#branchstatus -FROM python:3.12.11-slim-trixie@sha256:d67a7b66b989ad6b6d6b10d428dcc5e0bfc3e5f88906e67d490c4d3daac57047 AS base +FROM python:3.13.7-slim-trixie@sha256:5f55cdf0c5d9dc1a415637a5ccc4a9e18663ad203673173b8cda8f8dcacef689 AS base FROM base AS build WORKDIR /app RUN \ diff --git a/Dockerfile.integration-tests-debian b/Dockerfile.integration-tests-debian index 95398cb6e8e..06cf3b7c435 100644 --- a/Dockerfile.integration-tests-debian +++ b/Dockerfile.integration-tests-debian @@ -3,7 +3,7 @@ FROM openapitools/openapi-generator-cli:v7.16.0@sha256:e56372add5e038753fb91aa1bbb470724ef58382fdfc35082bf1b3e079ce353c AS openapitools # currently only supports x64, no arm yet due to chrome and selenium dependencies -FROM python:3.12.11-slim-trixie@sha256:d67a7b66b989ad6b6d6b10d428dcc5e0bfc3e5f88906e67d490c4d3daac57047 AS build +FROM python:3.13.7-slim-trixie@sha256:5f55cdf0c5d9dc1a415637a5ccc4a9e18663ad203673173b8cda8f8dcacef689 AS build WORKDIR /app RUN \ apt-get -y update && \ diff --git a/Dockerfile.nginx-alpine b/Dockerfile.nginx-alpine index fd50cb9e472..7c608d08444 100644 --- a/Dockerfile.nginx-alpine +++ b/Dockerfile.nginx-alpine @@ -5,7 +5,7 @@ # Dockerfile.django-alpine to use the caching mechanism of Docker. # Ref: https://devguide.python.org/#branchstatus -FROM python:3.12.11-alpine3.22@sha256:02a73ead8397e904cea6d17e18516f1df3590e05dc8823bd5b1c7f849227d272 AS base +FROM python:3.13.7-alpine3.22@sha256:9ba6d8cbebf0fb6546ae71f2a1c14f6ffd2fdab83af7fa5669734ef30ad48844 AS base FROM base AS build WORKDIR /app RUN \ @@ -63,7 +63,7 @@ COPY dojo/ ./dojo/ # always collect static for debug toolbar as we can't make it dependant on env variables or build arguments without breaking docker layer caching RUN env DD_SECRET_KEY='.' 
DD_DJANGO_DEBUG_TOOLBAR_ENABLED=True python3 manage.py collectstatic --noinput --verbosity=2 && true -FROM nginx:1.29.1-alpine3.22@sha256:42a516af16b852e33b7682d5ef8acbd5d13fe08fecadc7ed98605ba5e3b26ab8 +FROM nginx:1.29.2-alpine3.22@sha256:61e01287e546aac28a3f56839c136b31f590273f3b41187a36f46f6a03bbfe22 ARG uid=1001 ARG appuser=defectdojo COPY --from=collectstatic /app/static/ /usr/share/nginx/html/static/ diff --git a/components/package.json b/components/package.json index e5898ca8f4d..9b3c0a01c58 100644 --- a/components/package.json +++ b/components/package.json @@ -14,7 +14,7 @@ "clipboard": "^2.0.11", "datatables.net": "^2.3.4", "datatables.net-buttons-bs": "^3.2.5", - "datatables.net-colreorder": "^2.1.1", + "datatables.net-colreorder": "^2.1.2", "drmonty-datatables-plugins": "^1.0.0", "drmonty-datatables-responsive": "^1.0.0", "easymde": "^2.20.0", diff --git a/components/yarn.lock b/components/yarn.lock index 78aa6e5e86e..9df054d62d4 100644 --- a/components/yarn.lock +++ b/components/yarn.lock @@ -204,10 +204,10 @@ datatables.net-buttons@3.2.5: datatables.net "^2" jquery ">=1.7" -datatables.net-colreorder@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/datatables.net-colreorder/-/datatables.net-colreorder-2.1.1.tgz#ddcbfb27d5e2b97fe8ce4acdb8ca35442a801fe5" - integrity sha512-alhSZYEYmxsXujl43nIHh2+Ym8o/CBm/2kPIExcUz7sOB8FOw2Q614KztqRYh46V5IA+RUuGSxzodjakZ63wAQ== +datatables.net-colreorder@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/datatables.net-colreorder/-/datatables.net-colreorder-2.1.2.tgz#cf45eae93f4afd0bbe2f34d47105b312defa8cc7" + integrity sha512-lIsUyOt2nBm4sD2cSzDKZcIVrGgrZkh90Z2f03s8p7DYcZSfXMHAhFBrDYf9/eAK6wJnODN8EDMsrtPHfgoSXA== dependencies: datatables.net "^2" jquery ">=1.7" diff --git a/docker-compose.yml b/docker-compose.yml index f18651fa52e..c6838267169 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -120,7 +120,7 @@ services: source: ./docker/extra_settings target: 
/app/docker/extra_settings postgres: - image: postgres:18.0-alpine@sha256:70b32afe0c274b4d93098fd724fcdaab3aba47270a4f1e63cbf9cc69d7bf1be4 + image: postgres:18.0-alpine@sha256:f898ac406e1a9e05115cc2efcb3c3abb3a92a4c0263f3b6f6aaae354cbb1953a environment: POSTGRES_DB: ${DD_DATABASE_NAME:-defectdojo} POSTGRES_USER: ${DD_DATABASE_USER:-defectdojo} @@ -129,7 +129,7 @@ services: - defectdojo_postgres:/var/lib/postgresql/data redis: # Pinning to this version due to licensing constraints - image: redis:7.2.11-alpine@sha256:7632e82373929f39cdbead93f2e45d8b3cd295072c4755e00e7e6b19d56cc512 + image: redis:7.2.11-alpine@sha256:1a34bdba051ecd8a58ec8a3cc460acef697a1605e918149cc53d920673c1a0a7 volumes: - defectdojo_redis:/data volumes: diff --git a/docs/content/en/connecting_your_tools/parsers/file/mobsf.md b/docs/content/en/connecting_your_tools/parsers/file/mobsf.md index 7bbbf564a0c..caac14fbf14 100644 --- a/docs/content/en/connecting_your_tools/parsers/file/mobsf.md +++ b/docs/content/en/connecting_your_tools/parsers/file/mobsf.md @@ -2,7 +2,9 @@ title: "MobSF Scanner" toc_hide: true --- -Export a JSON file using the API, api/v1/report\_json. +"Mobsfscan Scan" has been merged into the "MobSF Scan" parser. The "Mobsfscan Scan" scan_type has been retained to keep deduplication working for existing Tests, but users are encouraged to move to the "MobSF Scan" scan_type. + +Export a JSON file using the API, api/v1/report\_json and import it to Defectdojo or import a JSON report from ### Sample Scan Data Sample MobSF Scanner scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/mobsf). 
diff --git a/docs/content/en/connecting_your_tools/parsers/file/mobsfscan.md b/docs/content/en/connecting_your_tools/parsers/file/mobsfscan.md deleted file mode 100644 index 2c39d114287..00000000000 --- a/docs/content/en/connecting_your_tools/parsers/file/mobsfscan.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Mobsfscan" -toc_hide: true ---- -Import JSON report from - -### Sample Scan Data -Sample Mobsfscan scans can be found [here](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/mobsfscan). - -### Default Deduplication Hashcode Fields -By default, DefectDojo identifies duplicate Findings using these [hashcode fields](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/about_deduplication/): - -- title -- severity -- cwe -- file path -- description diff --git a/docs/content/en/open_source/upgrading/2.51.md b/docs/content/en/open_source/upgrading/2.51.md index e3cf71186cc..3ce5c95a6f5 100644 --- a/docs/content/en/open_source/upgrading/2.51.md +++ b/docs/content/en/open_source/upgrading/2.51.md @@ -68,6 +68,8 @@ Sometimes it's easier to just perform the upgrade manually, which would look som It may need some tuning to your specific needs and docker compose setup. The guide is loosely based on https://simplebackups.com/blog/docker-postgres-backup-restore-guide-with-examples. If you already have a valid backup of the postgres 16 database, you can start at step 4. +_Note: If you are using a bound volume, the path has changed for Postgres18. It is now `/var/lib/postgresql/` instead of `/var/lib/postgresql/data`. Failure to change the path may result in errors about failure to create a shim task. See the discussion in [docker-library/postgres](https://github.com/docker-library/postgres/issues/1370)._ + ### 0. Backup Always back up your data before starting and save it somewhere. 
diff --git a/docs/content/en/open_source/upgrading/2.52.md b/docs/content/en/open_source/upgrading/2.52.md new file mode 100644 index 00000000000..c9f6b38418f --- /dev/null +++ b/docs/content/en/open_source/upgrading/2.52.md @@ -0,0 +1,46 @@ +--- +title: 'Upgrading to DefectDojo Version 2.52.x' +toc_hide: true +weight: -20251006 +description: MobSF parsers & Helm chart changes. +--- + +## Merge of MobSF parsers + +Mobsfscan Scan" has been merged into the "MobSF Scan" parser. The "Mobsfscan Scan" scan_type has been retained to keep deduplication working for existing Tests, but users are encouraged to move to the "MobSF Scan" scan_type. + +## Helm Chart Changes + +This release introduces more important changes to the Helm chart configuration: + +### Breaking changes + +#### Tags + +`tag` and `repositoryPrefix` fields have been deprecated. Currently, image tags used in containers are derived by default from the `appVersion` defined in the Chart. +This behavior can be overridden by setting the `tag` value in `images.django` and `images.nginx`. +If fine-tuning is necessary, each container’s image value can also be customized individually (`celery.beat.image`, `celery.worker.image`, `django.nginx.image`, `django.uwsgi.image`, `initializer.image`, and `dbMigrationChecker.image`). +Digest pinning is now supported as well. + +#### Security context + +This Helm chart extends security context capabilities to all deployed pods and containers. +You can define a default pod and container security context globally using `securityContext.podSecurityContext` and `securityContext.containerSecurityContext` keys. +Additionally, each deployment can specify its own pod and container security contexts, which will override or merge with the global ones. + +#### Fine-grained resources + +Now each container can specify the resource requests and limits. 
+ +#### Moved values + +The following Helm chart values have been modified in this release: + +- `securityContext.djangoSecurityContext` → deprecated in favor of container-specific security contexts (`celery.beat.containerSecurityContext`, `celery.worker.containerSecurityContext`, `django.uwsgi.containerSecurityContext` and `dbMigrationChecker.containerSecurityContext`) +- `securityContext.nginxSecurityContext` → deprecated in favor of container-specific security contexts (`django.nginx.containerSecurityContext`) + +### Other changes + +- **Extra annotations**: Now we can add common annotations to all resources. + +There are other instructions for upgrading to 2.52.x. Check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.52.0) for the contents of the release. diff --git a/docs/package-lock.json b/docs/package-lock.json index cdd0561b267..1947f324dce 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10,7 +10,7 @@ "license": "MIT", "dependencies": { "@docsearch/css": "4.1.0", - "@docsearch/js": "4.1.0", + "@docsearch/js": "4.2.0", "@tabler/icons": "3.35.0", "@thulite/doks-core": "1.8.0", "@thulite/images": "3.3.0", @@ -1513,9 +1513,9 @@ "license": "MIT" }, "node_modules/@docsearch/js": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/@docsearch/js/-/js-4.1.0.tgz", - "integrity": "sha512-49+CzeGfOiwG85k+dDvKfOsXLd9PQACoY/FLrZfFOKmpWv166u7bAHmBLdzvxlk8nJ289UgpGf0k6GQZtC85Fg==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@docsearch/js/-/js-4.2.0.tgz", + "integrity": "sha512-KBHVPO29QiGUFJYeAqxW0oXtGf/aghNmRrIRPT4/28JAefqoCkNn/ZM/jeQ7fHjl0KNM6C+KlLVYjwyz6lNZnA==", "license": "MIT" }, "node_modules/@esbuild/aix-ppc64": { diff --git a/docs/package.json b/docs/package.json index 007c3374468..9bbc1be19b0 100644 --- a/docs/package.json +++ b/docs/package.json @@ -13,7 +13,7 @@ }, "dependencies": { "@docsearch/css": "4.1.0", - "@docsearch/js": "4.1.0", + "@docsearch/js": "4.2.0", 
"@thulite/doks-core": "1.8.0", "@thulite/images": "3.3.0", "@thulite/inline-svg": "1.2.1", diff --git a/dojo/__init__.py b/dojo/__init__.py index 2e2f6c6c559..0a21544849b 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa: F401 -__version__ = "2.51.2" +__version__ = "2.52.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/dojo/api_v2/prefetch/schema.py b/dojo/api_v2/prefetch/schema.py index 21791f8daab..86078a86317 100644 --- a/dojo/api_v2/prefetch/schema.py +++ b/dojo/api_v2/prefetch/schema.py @@ -1,5 +1,5 @@ from .prefetcher import _Prefetcher -from .utils import _get_prefetchable_fields +from .utils import get_prefetchable_fields def _get_path_to_GET_serializer_map(generator): @@ -53,7 +53,7 @@ def prefetch_postprocessing_hook(result, generator, request, public): if parameter["name"] == "prefetch": prefetcher = _Prefetcher() - fields = _get_prefetchable_fields( + fields = get_prefetchable_fields( serializer_classes[path](), ) diff --git a/dojo/api_v2/prefetch/utils.py b/dojo/api_v2/prefetch/utils.py index eefb1b642ec..2c2546f9e03 100644 --- a/dojo/api_v2/prefetch/utils.py +++ b/dojo/api_v2/prefetch/utils.py @@ -33,7 +33,7 @@ def _is_one_to_one_relation(field): return isinstance(field, related.ForwardManyToOneDescriptor) -def _get_prefetchable_fields(serializer): +def get_prefetchable_fields(serializer): """ Get the fields that are prefetchable according to the serializer description. Method mainly used by for automatic schema generation. 
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 126ac2dee56..bff663173d3 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -399,7 +399,8 @@ def get_queryset(self): # @extend_schema_view(**schema_with_prefetch()) # Nested models with prefetch make the response schema too long for Swagger UI class EngagementViewSet( - PrefetchDojoModelViewSet, + # PrefetchDojoModelViewSet, + DojoModelViewSet, ra_api.AcceptedRisksMixin, ): serializer_class = serializers.EngagementSerializer diff --git a/dojo/apps.py b/dojo/apps.py index f47eb5184f2..f1b2769f760 100644 --- a/dojo/apps.py +++ b/dojo/apps.py @@ -72,21 +72,21 @@ def ready(self): # Load any signals here that will be ready for runtime # Importing the signals file is good enough if using the reciever decorator - import dojo.announcement.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.benchmark.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.cred.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.endpoint.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.engagement.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.file_uploads.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.finding_group.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.notes.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.product.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.product_type.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.risk_acceptance.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.sla_config.helpers # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.tags_signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.test.signals # noqa: PLC0415 raised: AppRegistryNotReady - import dojo.tool_product.signals # noqa: F401,PLC0415 raised: AppRegistryNotReady + import dojo.announcement.signals # noqa: PLC0415, F401 
raised: AppRegistryNotReady + import dojo.benchmark.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.cred.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.endpoint.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.engagement.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.file_uploads.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.finding_group.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.notes.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.product.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.product_type.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.risk_acceptance.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.sla_config.helpers # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.tags_signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.test.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady + import dojo.tool_product.signals # noqa: PLC0415, F401 raised: AppRegistryNotReady # Configure audit system after all models are loaded # This must be done in ready() to avoid "Models aren't loaded yet" errors diff --git a/dojo/decorators.py b/dojo/decorators.py index b7b84d59430..bba9efe234c 100644 --- a/dojo/decorators.py +++ b/dojo/decorators.py @@ -222,7 +222,7 @@ def _wrapped(request, *args, **kw): if username: dojo_user = Dojo_User.objects.filter(username=username).first() if dojo_user: - Dojo_User.enable_force_password_reset(dojo_user) + dojo_user.enable_force_password_reset() raise Ratelimited return fn(request, *args, **kw) return _wrapped diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py index 10646ba265c..2cc835aa974 100644 --- a/dojo/endpoint/utils.py +++ b/dojo/endpoint/utils.py @@ -10,7 +10,7 @@ from django.db.models import Count, Q from django.http import 
HttpResponseRedirect from django.urls import reverse -from hyperlink._url import SCHEME_PORT_MAP +from hyperlink._url import SCHEME_PORT_MAP # noqa: PLC2701 from dojo.models import DojoMeta, Endpoint diff --git a/dojo/engagement/urls.py b/dojo/engagement/urls.py index c70bb56a95e..0f33c3aa697 100644 --- a/dojo/engagement/urls.py +++ b/dojo/engagement/urls.py @@ -30,6 +30,8 @@ name="close_engagement"), re_path(r"^engagement/(?P\d+)/reopen$", views.reopen_eng, name="reopen_engagement"), + re_path(r"^engagement/(?P\d+)/jira/unlink$", views.unlink_jira, + name="engagement_unlink_jira"), re_path(r"^engagement/(?P\d+)/complete_checklist$", views.complete_checklist, name="complete_checklist"), re_path(r"^engagement/(?P\d+)/risk_acceptance/add$", diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 7ae3e758ead..a02ff45f6aa 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -19,13 +19,14 @@ from django.db.models import OuterRef, Q, Value from django.db.models.functions import Coalesce from django.db.models.query import Prefetch, QuerySet -from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, QueryDict, StreamingHttpResponse +from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict, StreamingHttpResponse from django.shortcuts import get_object_or_404, render from django.urls import Resolver404, reverse from django.utils import timezone from django.utils.translation import gettext as _ from django.views import View from django.views.decorators.cache import cache_page +from django.views.decorators.http import require_POST from django.views.decorators.vary import vary_on_cookie from openpyxl import Workbook from openpyxl.styles import Font @@ -974,7 +975,7 @@ def process_form( "apply_tags_to_endpoints": form.cleaned_data.get("apply_tags_to_endpoints", False), "close_old_findings_product_scope": form.cleaned_data.get("close_old_findings_product_scope", None), "group_by": 
form.cleaned_data.get("group_by", None), - "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings"), + "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings", None), "environment": self.get_development_environment(environment_name=form.cleaned_data.get("environment")), }) # Create the engagement if necessary @@ -1134,6 +1135,40 @@ def close_eng(request, eid): return HttpResponseRedirect(reverse("view_engagements", args=(eng.product.id, ))) +@user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") +@require_POST +def unlink_jira(request, eid): + eng = get_object_or_404(Engagement, id=eid) + logger.info("trying to unlink a linked jira epic from engagement %d:%s", eng.id, eng.name) + if eng.has_jira_issue: + try: + jira_helper.unlink_jira(request, eng) + messages.add_message( + request, + messages.SUCCESS, + "Link to JIRA epic successfully deleted", + extra_tags="alert-success", + ) + return JsonResponse({"result": "OK"}) + except Exception: + logger.exception("Link to JIRA epic could not be deleted") + messages.add_message( + request, + messages.ERROR, + "Link to JIRA epic could not be deleted, see alerts for details", + extra_tags="alert-danger", + ) + return HttpResponse(status=500) + else: + messages.add_message( + request, + messages.ERROR, + "Link to JIRA epic not found", + extra_tags="alert-danger", + ) + return HttpResponse(status=400) + + @user_is_authorized(Engagement, Permissions.Engagement_Edit, "eid") def reopen_eng(request, eid): eng = Engagement.objects.get(id=eid) diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py index bf4d16cee92..26d37ae65b0 100644 --- a/dojo/importers/auto_create_context.py +++ b/dojo/importers/auto_create_context.py @@ -318,10 +318,16 @@ def get_or_create_engagement( target_end = (timezone.now() + timedelta(days=365)).date() # Create the engagement with transaction.atomic(): - return 
Engagement.objects.select_for_update().create( + # Lock the parent product row to serialize engagement creation per product + locked_product = Product.objects.select_for_update().get(pk=product.pk) + # Re-check for an existing engagement now that we hold the lock + existing = get_last_object_or_none(Engagement, product=locked_product, name=engagement_name) + if existing: + return existing + return Engagement.objects.create( engagement_type="CI/CD", name=engagement_name, - product=product, + product=locked_product, lead=get_current_user(), target_start=target_start, target_end=target_end, diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py index f6d754ba929..212c976dc33 100644 --- a/dojo/importers/base_importer.py +++ b/dojo/importers/base_importer.py @@ -49,6 +49,7 @@ class Parser: and is purely for the sake of type hinting """ + @staticmethod def get_findings(scan_type: str, test: Test) -> list[Finding]: """ Stub function to make the hinting happier. The actual class diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py index d127ed33f6a..726e55717eb 100644 --- a/dojo/importers/default_importer.py +++ b/dojo/importers/default_importer.py @@ -108,7 +108,7 @@ def process_scan( parser = self.get_parser() # Get the findings from the parser based on what methods the parser supplies # This could either mean traditional file parsing, or API pull parsing - parsed_findings = self.parse_findings(scan, parser) + parsed_findings = self.parse_findings(scan, parser) or [] # process the findings in the foreground or background new_findings = self.determine_process_method(parsed_findings, **kwargs) # Close any old findings in the processed list if the the user specified for that diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py index 7adb2c65c48..17775eb22ae 100644 --- a/dojo/importers/default_reimporter.py +++ b/dojo/importers/default_reimporter.py @@ -93,7 +93,7 @@ def process_scan( 
parser = self.get_parser() # Get the findings from the parser based on what methods the parser supplies # This could either mean traditional file parsing, or API pull parsing - parsed_findings = self.parse_findings(scan, parser) + parsed_findings = self.parse_findings(scan, parser) or [] # process the findings in the foreground or background ( new_findings, diff --git a/dojo/importers/options.py b/dojo/importers/options.py index b83a8b8597c..3b7c624235d 100644 --- a/dojo/importers/options.py +++ b/dojo/importers/options.py @@ -96,6 +96,7 @@ def log_translation( for field in self.field_names: logger.debug(f"{field}: {getattr(self, field)}") + @staticmethod def _compress_decorator(function): @wraps(function) def inner_compress_function(*args, **kwargs): @@ -103,6 +104,7 @@ def inner_compress_function(*args, **kwargs): return function(*args, **kwargs) return inner_compress_function + @staticmethod def _decompress_decorator(function): @wraps(function) def inner_decompress_function(*args, **kwargs): diff --git a/dojo/models.py b/dojo/models.py index 8eb0f45f719..d308ff42fb1 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -129,7 +129,7 @@ def _manage_inherited_tags(obj, incoming_inherited_tags, potentially_existing_ta obj.tags.set(cleaned_tag_list) -def _copy_model_util(model_in_database, exclude_fields: list[str] | None = None): +def copy_model_util(model_in_database, exclude_fields: list[str] | None = None): if exclude_fields is None: exclude_fields = [] new_model_instance = model_in_database.__class__() @@ -231,15 +231,15 @@ def wants_block_execution(user): def force_password_reset(user): return hasattr(user, "usercontactinfo") and user.usercontactinfo.force_password_reset - def disable_force_password_reset(user): - if hasattr(user, "usercontactinfo"): - user.usercontactinfo.force_password_reset = False - user.usercontactinfo.save() + def disable_force_password_reset(self): + if hasattr(self, "usercontactinfo"): + self.usercontactinfo.force_password_reset = False 
+ self.usercontactinfo.save() - def enable_force_password_reset(user): - if hasattr(user, "usercontactinfo"): - user.usercontactinfo.force_password_reset = True - user.usercontactinfo.save() + def enable_force_password_reset(self): + if hasattr(self, "usercontactinfo"): + self.usercontactinfo.force_password_reset = True + self.usercontactinfo.save() @staticmethod def generate_full_name(user): @@ -750,7 +750,7 @@ class NoteHistory(models.Model): current_editor = models.ForeignKey(Dojo_User, editable=False, null=True, on_delete=models.CASCADE) def copy(self): - copy = _copy_model_util(self) + copy = copy_model_util(self) copy.save() return copy @@ -776,7 +776,7 @@ def __str__(self): return self.entry def copy(self): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Save the necessary ManyToMany relationships old_history = list(self.history.all()) # Save the object before setting any ManyToMany relationships @@ -801,7 +801,7 @@ def delete(self, *args, **kwargs): storage.delete(path) def copy(self): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Add unique modifier to file name copy.title = f"{self.title} - clone-{str(uuid4())[:8]}" # Create new unique file name @@ -1581,7 +1581,7 @@ def get_absolute_url(self): return reverse("view_engagement", args=[str(self.id)]) def copy(self): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_files = list(self.files.all()) @@ -1699,7 +1699,7 @@ def __str__(self): return f"'{self.finding}' on '{self.endpoint}'" def copy(self, finding=None): - copy = _copy_model_util(self) + copy = copy_model_util(self) current_endpoint = self.endpoint if finding: copy.finding = finding @@ -2161,7 +2161,7 @@ def get_breadcrumbs(self): return bc def copy(self, engagement=None): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) 
old_files = list(self.files.all()) @@ -2829,7 +2829,7 @@ def get_absolute_url(self): return reverse("view_finding", args=[str(self.id)]) def copy(self, test=None): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_files = list(self.files.all()) @@ -3813,7 +3813,7 @@ def engagement(self): return None def copy(self, engagement=None): - copy = _copy_model_util(self) + copy = copy_model_util(self) # Save the necessary ManyToMany relationships old_notes = list(self.notes.all()) old_accepted_findings_hash_codes = [finding.hash_code for finding in self.accepted_findings.all()] diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index baf28ec2b38..d67d289d929 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -214,6 +214,8 @@ # `RemoteUser` is usually used behind AuthN proxy and users should not know about this mechanism from Swagger because it is not usable by users. # It should be hidden by default. DD_AUTH_REMOTEUSER_VISIBLE_IN_SWAGGER=(bool, False), + # Some security policies require allowing users to have only one active session + DD_SINGLE_USER_SESSION=(bool, False), # if somebody is using own documentation how to use DefectDojo in his own company DD_DOCUMENTATION_URL=(str, "https://documentation.defectdojo.com"), # merging findings doesn't always work well with dedupe and reimport etc. 
@@ -622,6 +624,8 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param SOCIAL_AUTH_OIDC_KEY = env("DD_SOCIAL_AUTH_OIDC_KEY") SOCIAL_AUTH_OIDC_SECRET = env("DD_SOCIAL_AUTH_OIDC_SECRET") # Optional settings +if value := env("DD_LOGIN_REDIRECT_URL"): + SOCIAL_AUTH_LOGIN_REDIRECT_URL = value if value := env("DD_SOCIAL_AUTH_OIDC_ID_KEY"): SOCIAL_AUTH_OIDC_ID_KEY = value if value := env("DD_SOCIAL_AUTH_OIDC_USERNAME_KEY"): @@ -919,6 +923,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param "auditlog", "pgtrigger", "pghistory", + "single_session", ) # ------------------------------------------------------------------------------ @@ -1149,6 +1154,13 @@ def saml2_attrib_map_format(din): ("dojo.remote_user.RemoteUserAuthentication",) + \ REST_FRAMEWORK["DEFAULT_AUTHENTICATION_CLASSES"] +# ------------------------------------------------------------------------------ +# SINGLE_USER_SESSION +# ------------------------------------------------------------------------------ + +SESSION_ENGINE = "django.contrib.sessions.backends.db" +SINGLE_USER_SESSION = env("DD_SINGLE_USER_SESSION") + # ------------------------------------------------------------------------------ # CELERY # ------------------------------------------------------------------------------ @@ -1357,7 +1369,7 @@ def saml2_attrib_map_format(din): "HCLAppScan XML": ["title", "description"], "HCL AppScan on Cloud SAST XML": ["title", "file_path", "line", "severity"], "KICS Scan": ["file_path", "line", "severity", "description", "title"], - "MobSF Scan": ["title", "description", "severity"], + "MobSF Scan": ["title", "description", "severity", "file_path"], "MobSF Scorecard Scan": ["title", "description", "severity"], "OSV Scan": ["title", "description", "severity"], "Snyk Code Scan": ["vuln_id_from_tool", "file_path"], diff --git a/dojo/templates/dojo/view_eng.html b/dojo/templates/dojo/view_eng.html index 728b8867f7d..ab09dadb7c5 100644 --- 
a/dojo/templates/dojo/view_eng.html +++ b/dojo/templates/dojo/view_eng.html @@ -826,13 +826,18 @@

{% if jissue and jira_project %} - - Jira - {{ eng | jira_key }} + + Jira + + {{ eng | jira_key }} (epic) - - - + + {% if eng|has_object_permission:"Engagement_Edit" %} +   + + {% endif %} + + {% elif jira_project %} JIRA @@ -1088,6 +1093,28 @@

var host = slashes.concat(window.location.host); modal.find('p#questionnaireURL').text('Questionnaire URL: ' + host + path) }) + + function jira_action(elem, url) { + $(elem).removeClass().addClass('fa-solid fa-spin fa-spinner') + + $.ajax({ + type: "post", + dataType:'json', + data: '', + context: this, + url: url, + beforeSend: function (jqXHR, settings) { + jqXHR.setRequestHeader('X-CSRFToken', '{{ csrf_token }}'); + }, + complete: function(e) { + location.reload() + } + }); + } + + $("#unlink_eng_jira").on('click', function(e) { + jira_action(this,'{% url 'engagement_unlink_jira' eng.id %}') + }); }); {% include 'dojo/snippets/risk_acceptance_actions_snippet_js.html' %} diff --git a/dojo/templates/notifications/alert/user_mentioned.tpl b/dojo/templates/notifications/alert/user_mentioned.tpl index 1fc741ee2d7..9a0b35c0470 100644 --- a/dojo/templates/notifications/alert/user_mentioned.tpl +++ b/dojo/templates/notifications/alert/user_mentioned.tpl @@ -1,4 +1,4 @@ {% load i18n %}{% blocktranslate trimmed %} -User {{ user }} jotted a note on {{ section }}{% endblocktranslate %}: +User {{ requested_by }} jotted a note on {{ section }}{% endblocktranslate %}: {{ note }} \ No newline at end of file diff --git a/dojo/templates/notifications/mail/user_mentioned.tpl b/dojo/templates/notifications/mail/user_mentioned.tpl index 9601da3c9a5..d828940400d 100644 --- a/dojo/templates/notifications/mail/user_mentioned.tpl +++ b/dojo/templates/notifications/mail/user_mentioned.tpl @@ -9,7 +9,7 @@

{% blocktranslate trimmed %} - User {{ user }} jotted a note on {{ section }}:
+ User {{ requested_by }} jotted a note on {{ section }}:

{{ note }}

diff --git a/dojo/templates/notifications/msteams/user_mentioned.tpl b/dojo/templates/notifications/msteams/user_mentioned.tpl index ed8f38ee80c..aba4d11c089 100644 --- a/dojo/templates/notifications/msteams/user_mentioned.tpl +++ b/dojo/templates/notifications/msteams/user_mentioned.tpl @@ -54,7 +54,7 @@ NOTE: This template is currently NOT USED in practice because: }, { "type": "TextBlock", - "text": "{% trans 'User' %} {{ user }} {% trans 'mentioned you in' %} {{ section }}.", + "text": "{% trans 'User' %} {{ requested_by }} {% trans 'mentioned you in' %} {{ section }}.", "wrap": true, "spacing": "Medium" }, @@ -63,7 +63,7 @@ NOTE: This template is currently NOT USED in practice because: "facts": [ { "title": "{% trans 'User' %}:", - "value": "{{ user }}" + "value": "{{ requested_by }}" }, { "title": "{% trans 'Section' %}:", diff --git a/dojo/templates/notifications/slack/user_mentioned.tpl b/dojo/templates/notifications/slack/user_mentioned.tpl index aba6c9aed6a..9131de845a8 100644 --- a/dojo/templates/notifications/slack/user_mentioned.tpl +++ b/dojo/templates/notifications/slack/user_mentioned.tpl @@ -1,12 +1,12 @@ {% load i18n %}{% blocktranslate trimmed %} -User {{ user }} jotted a note on {{ section }}: +User {{ requested_by }} jotted a note on {{ section }}: {{ note }} Full details of the note can be reviewed at {{ url }} {% endblocktranslate %} {% if system_settings.disclaimer_notifications and system_settings.disclaimer_notifications.strip %} - + {% trans "Disclaimer" %}: {{ system_settings.disclaimer_notifications }} {% endif %} diff --git a/dojo/test/views.py b/dojo/test/views.py index 06301d20813..ad98b4e17a9 100644 --- a/dojo/test/views.py +++ b/dojo/test/views.py @@ -915,7 +915,7 @@ def process_form( "apply_tags_to_endpoints": form.cleaned_data.get("apply_tags_to_endpoints", False), "group_by": form.cleaned_data.get("group_by", None), "close_old_findings": form.cleaned_data.get("close_old_findings", None), - 
"create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings"), + "create_finding_groups_for_all_findings": form.cleaned_data.get("create_finding_groups_for_all_findings", None), }) # Override the form values of active and verified if activeChoice := form.cleaned_data.get("active", None): diff --git a/dojo/tools/mobsf/api_report_json.py b/dojo/tools/mobsf/api_report_json.py new file mode 100644 index 00000000000..6f5bd1c6c75 --- /dev/null +++ b/dojo/tools/mobsf/api_report_json.py @@ -0,0 +1,388 @@ +from datetime import datetime + +from html2text import html2text + +from dojo.models import Finding + + +class MobSFapireport: + def get_findings(self, data, test): + dupes = {} + find_date = datetime.now() + + test_description = "" + if "name" in data: + test_description = "**Info:**\n" + if "packagename" in data: + test_description = "{} **Package Name:** {}\n".format(test_description, data["packagename"]) + + if "mainactivity" in data: + test_description = "{} **Main Activity:** {}\n".format(test_description, data["mainactivity"]) + + if "pltfm" in data: + test_description = "{} **Platform:** {}\n".format(test_description, data["pltfm"]) + + if "sdk" in data: + test_description = "{} **SDK:** {}\n".format(test_description, data["sdk"]) + + if "min" in data: + test_description = "{} **Min SDK:** {}\n".format(test_description, data["min"]) + + if "targetsdk" in data: + test_description = "{} **Target SDK:** {}\n".format(test_description, data["targetsdk"]) + + if "minsdk" in data: + test_description = "{} **Min SDK:** {}\n".format(test_description, data["minsdk"]) + + if "maxsdk" in data: + test_description = "{} **Max SDK:** {}\n".format(test_description, data["maxsdk"]) + + test_description = f"{test_description}\n**File Information:**\n" + + if "name" in data: + test_description = "{} **Name:** {}\n".format(test_description, data["name"]) + + if "md5" in data: + test_description = "{} **MD5:** {}\n".format(test_description, 
data["md5"]) + + if "sha1" in data: + test_description = "{} **SHA-1:** {}\n".format(test_description, data["sha1"]) + + if "sha256" in data: + test_description = "{} **SHA-256:** {}\n".format(test_description, data["sha256"]) + + if "size" in data: + test_description = "{} **Size:** {}\n".format(test_description, data["size"]) + + if "urls" in data: + curl = "" + for url in data["urls"]: + for durl in url["urls"]: + curl = f"{durl}\n" + + if curl: + test_description = f"{test_description}\n**URL's:**\n {curl}\n" + + if "bin_anal" in data: + test_description = "{} \n**Binary Analysis:** {}\n".format(test_description, data["bin_anal"]) + + test.description = html2text(test_description) + + mobsf_findings = [] + # Mobile Permissions + if "permissions" in data: + # for permission, details in data["permissions"].items(): + if isinstance(data["permissions"], list): + for details in data["permissions"]: + mobsf_item = { + "category": "Mobile Permissions", + "title": details.get("name", ""), + "severity": self.getSeverityForPermission(details.get("status")), + "description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""), + "file_path": None, + } + mobsf_findings.append(mobsf_item) + else: + for permission, details in list(data["permissions"].items()): + mobsf_item = { + "category": "Mobile Permissions", + "title": permission, + "severity": self.getSeverityForPermission(details.get("status", "")), + "description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""), + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Insecure Connections + if "insecure_connections" in data: + for details in data["insecure_connections"]: + insecure_urls = "" + for url in details.split(","): + insecure_urls = insecure_urls + url + "\n" + + mobsf_item = { + "category": None, + "title": 
"Insecure Connections", + "severity": "Low", + "description": insecure_urls, + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Certificate Analysis + if "certificate_analysis" in data: + if data["certificate_analysis"] != {}: + certificate_info = data["certificate_analysis"]["certificate_info"] + for details in data["certificate_analysis"]["certificate_findings"]: + if len(details) == 3: + mobsf_item = { + "category": "Certificate Analysis", + "title": details[2], + "severity": details[0].title(), + "description": details[1] + "\n\n**Certificate Info:** " + certificate_info, + "file_path": None, + } + mobsf_findings.append(mobsf_item) + elif len(details) == 2: + mobsf_item = { + "category": "Certificate Analysis", + "title": details[1], + "severity": details[0].title(), + "description": details[1] + "\n\n**Certificate Info:** " + certificate_info, + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Manifest Analysis + if "manifest_analysis" in data: + if data["manifest_analysis"] != {} and isinstance(data["manifest_analysis"], dict): + if data["manifest_analysis"]["manifest_findings"]: + for details in data["manifest_analysis"]["manifest_findings"]: + mobsf_item = { + "category": "Manifest Analysis", + "title": details["title"], + "severity": details["severity"].title(), + "description": details["description"] + "\n\n " + details["name"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + else: + for details in data["manifest_analysis"]: + mobsf_item = { + "category": "Manifest Analysis", + "title": details["title"], + "severity": details["stat"].title(), + "description": details["desc"] + "\n\n " + details["name"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Code Analysis + if "code_analysis" in data: + if data["code_analysis"] != {}: + if data["code_analysis"].get("findings"): + for details in data["code_analysis"]["findings"]: + metadata = data["code_analysis"]["findings"][details] + mobsf_item = 
{ + "category": "Code Analysis", + "title": details, + "severity": metadata["metadata"]["severity"].title(), + "description": metadata["metadata"]["description"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + else: + for details in data["code_analysis"]: + metadata = data["code_analysis"][details] + if metadata.get("metadata"): + mobsf_item = { + "category": "Code Analysis", + "title": details, + "severity": metadata["metadata"]["severity"].title(), + "description": metadata["metadata"]["description"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Binary Analysis + if "binary_analysis" in data: + if isinstance(data["binary_analysis"], list): + for details in data["binary_analysis"]: + for binary_analysis_type in details: + if binary_analysis_type != "name": + mobsf_item = { + "category": "Binary Analysis", + "title": details[binary_analysis_type]["description"].split(".")[0], + "severity": details[binary_analysis_type]["severity"].title(), + "description": details[binary_analysis_type]["description"], + "file_path": details["name"], + } + mobsf_findings.append(mobsf_item) + elif data["binary_analysis"].get("findings"): + for details in data["binary_analysis"]["findings"].values(): + # "findings":{ + # "Binary makes use of insecure API(s)":{ + # "detailed_desc":"The binary may contain the following insecure API(s) _memcpy\n, _strlen\n", + # "severity":"high", + # "cvss":6, + # "cwe":"CWE-676: Use of Potentially Dangerous Function", + # "owasp-mobile":"M7: Client Code Quality", + # "masvs":"MSTG-CODE-8" + # }, + mobsf_item = { + "category": "Binary Analysis", + "title": details["detailed_desc"], + "severity": details["severity"].title(), + "description": details["detailed_desc"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + else: + for details in data["binary_analysis"].values(): + # "Binary makes use of insecure API(s)":{ + # "detailed_desc":"The binary may contain the following insecure API(s) _vsprintf.", + # 
"severity":"high", + # "cvss":6, + # "cwe":"CWE-676 - Use of Potentially Dangerous Function", + # "owasp-mobile":"M7: Client Code Quality", + # "masvs":"MSTG-CODE-8" + # } + mobsf_item = { + "category": "Binary Analysis", + "title": details["detailed_desc"], + "severity": details["severity"].title(), + "description": details["detailed_desc"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # specific node for Android reports + if "android_api" in data: + # "android_insecure_random": { + # "files": { + # "u/c/a/b/a/c.java": "9", + # "kotlinx/coroutines/repackaged/net/bytebuddy/utility/RandomString.java": "3", + # ... + # "hu/mycompany/vbnmqweq/gateway/msg/Response.java": "13" + # }, + # "metadata": { + # "id": "android_insecure_random", + # "description": "The App uses an insecure Random Number Generator.", + # "type": "Regex", + # "pattern": "java\\.util\\.Random;", + # "severity": "high", + # "input_case": "exact", + # "cvss": 7.5, + # "cwe": "CWE-330 Use of Insufficiently Random Values", + # "owasp-mobile": "M5: Insufficient Cryptography", + # "masvs": "MSTG-CRYPTO-6" + # } + # }, + for api, details in list(data["android_api"].items()): + mobsf_item = { + "category": "Android API", + "title": details["metadata"]["description"], + "severity": details["metadata"]["severity"].title(), + "description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # Manifest + if "manifest" in data: + for details in data["manifest"]: + mobsf_item = { + "category": "Manifest", + "title": details["title"], + "severity": details["stat"], + "description": details["desc"], + "file_path": None, + } + mobsf_findings.append(mobsf_item) + + # MobSF Findings + if "findings" in data: + for title, finding in list(data["findings"].items()): + description = title + file_path = None + + if "path" in finding: + description += "\n\n**Files:**\n" + for path in finding["path"]: + if 
file_path is None: + file_path = path + description = description + " * " + path + "\n" + + mobsf_item = { + "category": "Findings", + "title": title, + "severity": finding["level"], + "description": description, + "file_path": file_path, + } + + mobsf_findings.append(mobsf_item) + if isinstance(data, list): + for finding in data: + mobsf_item = { + "category": finding["category"], + "title": finding["name"], + "severity": finding["severity"], + "description": finding["description"] + "\n" + "**apk_exploit_dict:** " + str(finding["apk_exploit_dict"]) + "\n" + "**line_number:** " + str(finding["line_number"]), + "file_path": finding["file_object"], + } + mobsf_findings.append(mobsf_item) + for mobsf_finding in mobsf_findings: + title = mobsf_finding["title"] + sev = self.getCriticalityRating(mobsf_finding["severity"]) + description = "" + file_path = None + if mobsf_finding["category"]: + description += "**Category:** " + mobsf_finding["category"] + "\n\n" + description += html2text(mobsf_finding["description"]) + finding = Finding( + title=title, + cwe=919, # Weaknesses in Mobile Applications + test=test, + description=description, + severity=sev, + references=None, + date=find_date, + static_finding=True, + dynamic_finding=False, + nb_occurences=1, + ) + if mobsf_finding["file_path"]: + finding.file_path = mobsf_finding["file_path"] + dupe_key = sev + title + description + mobsf_finding["file_path"] + else: + dupe_key = sev + title + description + if mobsf_finding["category"]: + dupe_key += mobsf_finding["category"] + if dupe_key in dupes: + find = dupes[dupe_key] + if description is not None: + find.description += description + find.nb_occurences += 1 + else: + dupes[dupe_key] = finding + return list(dupes.values()) + + def getSeverityForPermission(self, status): + """ + Convert status for permission detection to severity + + In MobSF there is only 4 know values for permission, + we map them as this: + dangerous => High (Critical?) 
+ normal => Info + signature => Info (it's positive so... Info) + signatureOrSystem => Info (it's positive so... Info) + """ + if status == "dangerous": + return "High" + return "Info" + + # Criticality rating + def getCriticalityRating(self, rating): + criticality = "Info" + if rating.lower() == "good": + criticality = "Info" + elif rating.lower() == "warning": + criticality = "Low" + elif rating.lower() == "vulnerability": + criticality = "Medium" + else: + criticality = rating.lower().capitalize() + return criticality + + def suite_data(self, suites): + suite_info = "" + suite_info += suites["name"] + "\n" + suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n" + if "ecdhBits" in suites: + suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n" + if "ecdhStrength" in suites: + suite_info += "ecdhStrength: " + str(suites["ecdhStrength"]) + suite_info += "\n\n" + return suite_info diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py index c61065ea892..ff5a7122655 100644 --- a/dojo/tools/mobsf/parser.py +++ b/dojo/tools/mobsf/parser.py @@ -1,22 +1,20 @@ import json -from datetime import datetime -from html2text import html2text - -from dojo.models import Finding +from dojo.tools.mobsf.api_report_json import MobSFapireport +from dojo.tools.mobsf.report import MobSFjsonreport class MobSFParser: def get_scan_types(self): - return ["MobSF Scan"] + return ["MobSF Scan", "Mobsfscan Scan"] def get_label_for_scan_types(self, scan_type): return "MobSF Scan" def get_description_for_scan_types(self, scan_type): - return "Export a JSON file using the API, api/v1/report_json." 
+ return "Import JSON report from mobsfscan report file or from api/v1/report_json" def get_findings(self, filename, test): tree = filename.read() @@ -24,381 +22,8 @@ def get_findings(self, filename, test): data = json.loads(str(tree, "utf-8")) except: data = json.loads(tree) - find_date = datetime.now() - dupes = {} - test_description = "" - if "name" in data: - test_description = "**Info:**\n" - if "packagename" in data: - test_description = "{} **Package Name:** {}\n".format(test_description, data["packagename"]) - - if "mainactivity" in data: - test_description = "{} **Main Activity:** {}\n".format(test_description, data["mainactivity"]) - - if "pltfm" in data: - test_description = "{} **Platform:** {}\n".format(test_description, data["pltfm"]) - - if "sdk" in data: - test_description = "{} **SDK:** {}\n".format(test_description, data["sdk"]) - - if "min" in data: - test_description = "{} **Min SDK:** {}\n".format(test_description, data["min"]) - - if "targetsdk" in data: - test_description = "{} **Target SDK:** {}\n".format(test_description, data["targetsdk"]) - - if "minsdk" in data: - test_description = "{} **Min SDK:** {}\n".format(test_description, data["minsdk"]) - - if "maxsdk" in data: - test_description = "{} **Max SDK:** {}\n".format(test_description, data["maxsdk"]) - - test_description = f"{test_description}\n**File Information:**\n" - - if "name" in data: - test_description = "{} **Name:** {}\n".format(test_description, data["name"]) - - if "md5" in data: - test_description = "{} **MD5:** {}\n".format(test_description, data["md5"]) - - if "sha1" in data: - test_description = "{} **SHA-1:** {}\n".format(test_description, data["sha1"]) - - if "sha256" in data: - test_description = "{} **SHA-256:** {}\n".format(test_description, data["sha256"]) - - if "size" in data: - test_description = "{} **Size:** {}\n".format(test_description, data["size"]) - - if "urls" in data: - curl = "" - for url in data["urls"]: - for durl in url["urls"]: - curl = 
f"{durl}\n" - - if curl: - test_description = f"{test_description}\n**URL's:**\n {curl}\n" - - if "bin_anal" in data: - test_description = "{} \n**Binary Analysis:** {}\n".format(test_description, data["bin_anal"]) - - test.description = html2text(test_description) - - mobsf_findings = [] - # Mobile Permissions - if "permissions" in data: - # for permission, details in data["permissions"].items(): - if isinstance(data["permissions"], list): - for details in data["permissions"]: - mobsf_item = { - "category": "Mobile Permissions", - "title": details.get("name", ""), - "severity": self.getSeverityForPermission(details.get("status")), - "description": "**Permission Type:** " + details.get("name", "") + " (" + details.get("status", "") + ")\n\n**Description:** " + details.get("description", "") + "\n\n**Reason:** " + details.get("reason", ""), - "file_path": None, - } - mobsf_findings.append(mobsf_item) - else: - for permission, details in list(data["permissions"].items()): - mobsf_item = { - "category": "Mobile Permissions", - "title": permission, - "severity": self.getSeverityForPermission(details.get("status", "")), - "description": "**Permission Type:** " + permission + "\n\n**Description:** " + details.get("description", ""), - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Insecure Connections - if "insecure_connections" in data: - for details in data["insecure_connections"]: - insecure_urls = "" - for url in details.split(","): - insecure_urls = insecure_urls + url + "\n" - - mobsf_item = { - "category": None, - "title": "Insecure Connections", - "severity": "Low", - "description": insecure_urls, - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Certificate Analysis - if "certificate_analysis" in data: - if data["certificate_analysis"] != {}: - certificate_info = data["certificate_analysis"]["certificate_info"] - for details in data["certificate_analysis"]["certificate_findings"]: - if len(details) == 3: - mobsf_item = { - 
"category": "Certificate Analysis", - "title": details[2], - "severity": details[0].title(), - "description": details[1] + "\n\n**Certificate Info:** " + certificate_info, - "file_path": None, - } - mobsf_findings.append(mobsf_item) - elif len(details) == 2: - mobsf_item = { - "category": "Certificate Analysis", - "title": details[1], - "severity": details[0].title(), - "description": details[1] + "\n\n**Certificate Info:** " + certificate_info, - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Manifest Analysis - if "manifest_analysis" in data: - if data["manifest_analysis"] != {} and isinstance(data["manifest_analysis"], dict): - if data["manifest_analysis"]["manifest_findings"]: - for details in data["manifest_analysis"]["manifest_findings"]: - mobsf_item = { - "category": "Manifest Analysis", - "title": details["title"], - "severity": details["severity"].title(), - "description": details["description"] + "\n\n " + details["name"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - else: - for details in data["manifest_analysis"]: - mobsf_item = { - "category": "Manifest Analysis", - "title": details["title"], - "severity": details["stat"].title(), - "description": details["desc"] + "\n\n " + details["name"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Code Analysis - if "code_analysis" in data: - if data["code_analysis"] != {}: - if data["code_analysis"].get("findings"): - for details in data["code_analysis"]["findings"]: - metadata = data["code_analysis"]["findings"][details] - mobsf_item = { - "category": "Code Analysis", - "title": details, - "severity": metadata["metadata"]["severity"].title(), - "description": metadata["metadata"]["description"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - else: - for details in data["code_analysis"]: - metadata = data["code_analysis"][details] - if metadata.get("metadata"): - mobsf_item = { - "category": "Code Analysis", - "title": details, - 
"severity": metadata["metadata"]["severity"].title(), - "description": metadata["metadata"]["description"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Binary Analysis - if "binary_analysis" in data: - if isinstance(data["binary_analysis"], list): - for details in data["binary_analysis"]: - for binary_analysis_type in details: - if binary_analysis_type != "name": - mobsf_item = { - "category": "Binary Analysis", - "title": details[binary_analysis_type]["description"].split(".")[0], - "severity": details[binary_analysis_type]["severity"].title(), - "description": details[binary_analysis_type]["description"], - "file_path": details["name"], - } - mobsf_findings.append(mobsf_item) - elif data["binary_analysis"].get("findings"): - for details in data["binary_analysis"]["findings"].values(): - # "findings":{ - # "Binary makes use of insecure API(s)":{ - # "detailed_desc":"The binary may contain the following insecure API(s) _memcpy\n, _strlen\n", - # "severity":"high", - # "cvss":6, - # "cwe":"CWE-676: Use of Potentially Dangerous Function", - # "owasp-mobile":"M7: Client Code Quality", - # "masvs":"MSTG-CODE-8" - # }, - mobsf_item = { - "category": "Binary Analysis", - "title": details["detailed_desc"], - "severity": details["severity"].title(), - "description": details["detailed_desc"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - else: - for details in data["binary_analysis"].values(): - # "Binary makes use of insecure API(s)":{ - # "detailed_desc":"The binary may contain the following insecure API(s) _vsprintf.", - # "severity":"high", - # "cvss":6, - # "cwe":"CWE-676 - Use of Potentially Dangerous Function", - # "owasp-mobile":"M7: Client Code Quality", - # "masvs":"MSTG-CODE-8" - # } - mobsf_item = { - "category": "Binary Analysis", - "title": details["detailed_desc"], - "severity": details["severity"].title(), - "description": details["detailed_desc"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # 
specific node for Android reports - if "android_api" in data: - # "android_insecure_random": { - # "files": { - # "u/c/a/b/a/c.java": "9", - # "kotlinx/coroutines/repackaged/net/bytebuddy/utility/RandomString.java": "3", - # ... - # "hu/mycompany/vbnmqweq/gateway/msg/Response.java": "13" - # }, - # "metadata": { - # "id": "android_insecure_random", - # "description": "The App uses an insecure Random Number Generator.", - # "type": "Regex", - # "pattern": "java\\.util\\.Random;", - # "severity": "high", - # "input_case": "exact", - # "cvss": 7.5, - # "cwe": "CWE-330 Use of Insufficiently Random Values", - # "owasp-mobile": "M5: Insufficient Cryptography", - # "masvs": "MSTG-CRYPTO-6" - # } - # }, - for api, details in list(data["android_api"].items()): - mobsf_item = { - "category": "Android API", - "title": details["metadata"]["description"], - "severity": details["metadata"]["severity"].title(), - "description": "**API:** " + api + "\n\n**Description:** " + details["metadata"]["description"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # Manifest - if "manifest" in data: - for details in data["manifest"]: - mobsf_item = { - "category": "Manifest", - "title": details["title"], - "severity": details["stat"], - "description": details["desc"], - "file_path": None, - } - mobsf_findings.append(mobsf_item) - - # MobSF Findings - if "findings" in data: - for title, finding in list(data["findings"].items()): - description = title - file_path = None - - if "path" in finding: - description += "\n\n**Files:**\n" - for path in finding["path"]: - if file_path is None: - file_path = path - description = description + " * " + path + "\n" - - mobsf_item = { - "category": "Findings", - "title": title, - "severity": finding["level"], - "description": description, - "file_path": file_path, - } - - mobsf_findings.append(mobsf_item) - if isinstance(data, list): - for finding in data: - mobsf_item = { - "category": finding["category"], - "title": finding["name"], - 
"severity": finding["severity"], - "description": finding["description"] + "\n" + "**apk_exploit_dict:** " + str(finding["apk_exploit_dict"]) + "\n" + "**line_number:** " + str(finding["line_number"]), - "file_path": finding["file_object"], - } - mobsf_findings.append(mobsf_item) - for mobsf_finding in mobsf_findings: - title = mobsf_finding["title"] - sev = self.getCriticalityRating(mobsf_finding["severity"]) - description = "" - file_path = None - if mobsf_finding["category"]: - description += "**Category:** " + mobsf_finding["category"] + "\n\n" - description += html2text(mobsf_finding["description"]) - finding = Finding( - title=title, - cwe=919, # Weaknesses in Mobile Applications - test=test, - description=description, - severity=sev, - references=None, - date=find_date, - static_finding=True, - dynamic_finding=False, - nb_occurences=1, - ) - if mobsf_finding["file_path"]: - finding.file_path = mobsf_finding["file_path"] - dupe_key = sev + title + description + mobsf_finding["file_path"] - else: - dupe_key = sev + title + description - if mobsf_finding["category"]: - dupe_key += mobsf_finding["category"] - if dupe_key in dupes: - find = dupes[dupe_key] - if description is not None: - find.description += description - find.nb_occurences += 1 - else: - dupes[dupe_key] = finding - return list(dupes.values()) - - def getSeverityForPermission(self, status): - """ - Convert status for permission detection to severity - - In MobSF there is only 4 know values for permission, - we map them as this: - dangerous => High (Critical?) - normal => Info - signature => Info (it's positive so... Info) - signatureOrSystem => Info (it's positive so... 
Info) - """ - if status == "dangerous": - return "High" - return "Info" - - # Criticality rating - def getCriticalityRating(self, rating): - criticality = "Info" - if rating.lower() == "good": - criticality = "Info" - elif rating.lower() == "warning": - criticality = "Low" - elif rating.lower() == "vulnerability": - criticality = "Medium" - else: - criticality = rating.lower().capitalize() - return criticality - - def suite_data(self, suites): - suite_info = "" - suite_info += suites["name"] + "\n" - suite_info += "Cipher Strength: " + str(suites["cipherStrength"]) + "\n" - if "ecdhBits" in suites: - suite_info += "ecdhBits: " + str(suites["ecdhBits"]) + "\n" - if "ecdhStrength" in suites: - suite_info += "ecdhStrength: " + str(suites["ecdhStrength"]) - suite_info += "\n\n" - return suite_info + if isinstance(data, list) or data.get("results") is None: + return MobSFapireport().get_findings(data, test) + if len(data.get("results")) == 0: + return [] + return MobSFjsonreport().get_findings(data, test) diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsf/report.py similarity index 84% rename from dojo/tools/mobsfscan/parser.py rename to dojo/tools/mobsf/report.py index 49995720acb..3f076e2f8a5 100644 --- a/dojo/tools/mobsfscan/parser.py +++ b/dojo/tools/mobsf/report.py @@ -1,11 +1,10 @@ import hashlib -import json import re from dojo.models import Finding -class MobsfscanParser: +class MobSFjsonreport: """A class that can be used to parse the mobsfscan (https://github.com/MobSF/mobsfscan) JSON report file.""" @@ -15,19 +14,7 @@ class MobsfscanParser: "INFO": "Low", } - def get_scan_types(self): - return ["Mobsfscan Scan"] - - def get_label_for_scan_types(self, scan_type): - return "Mobsfscan Scan" - - def get_description_for_scan_types(self, scan_type): - return "Import JSON report for mobsfscan report file." 
- - def get_findings(self, filename, test): - data = json.load(filename) - if len(data.get("results")) == 0: - return [] + def get_findings(self, data, test): dupes = {} for key, item in data.get("results").items(): metadata = item.get("metadata") diff --git a/dojo/tools/mobsfscan/__init__.py b/dojo/tools/mobsfscan/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/dojo/tools/nexpose/parser.py b/dojo/tools/nexpose/parser.py index d7f197d2b21..9c03ba8f277 100644 --- a/dojo/tools/nexpose/parser.py +++ b/dojo/tools/nexpose/parser.py @@ -4,7 +4,7 @@ import html2text from defusedxml import ElementTree from django.conf import settings -from hyperlink._url import SCHEME_PORT_MAP +from hyperlink._url import SCHEME_PORT_MAP # noqa: PLC2701 from dojo.models import Endpoint, Finding diff --git a/dojo/tools/tenable/xml_format.py b/dojo/tools/tenable/xml_format.py index 53f82a440ac..7bbf36baa66 100644 --- a/dojo/tools/tenable/xml_format.py +++ b/dojo/tools/tenable/xml_format.py @@ -3,7 +3,7 @@ from cvss import CVSS3 from defusedxml import ElementTree -from hyperlink._url import SCHEME_PORT_MAP +from hyperlink._url import SCHEME_PORT_MAP # noqa: PLC2701 from dojo.models import Endpoint, Finding, Test diff --git a/dojo/user/views.py b/dojo/user/views.py index 603eb2e0db4..f4e2539d659 100644 --- a/dojo/user/views.py +++ b/dojo/user/views.py @@ -287,7 +287,7 @@ def change_password(request): new_password = form.cleaned_data["new_password"] user.set_password(new_password) - Dojo_User.disable_force_password_reset(user) + user.disable_force_password_reset() user.save() messages.add_message(request, diff --git a/dojo/utils.py b/dojo/utils.py index 414a8600a6f..07709c4bbbf 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -1461,7 +1461,8 @@ def process_tag_notifications(request, note, parent_url, parent_title): title=f"{request.user} jotted a note", url=parent_url, icon="commenting", - recipients=users_to_notify) + recipients=users_to_notify, + 
requested_by=get_current_user()) def encrypt(key, iv, plaintext): diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md index f968334e020..699a17d954d 100644 --- a/helm/defectdojo/README.md +++ b/helm/defectdojo/README.md @@ -11,7 +11,7 @@ this [guide](https://helm.sh/docs/using_helm/#installing-helm). ## Supported Kubernetes Versions -The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and the oldest supported [version from AWS](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). +The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and [the oldest officially supported version](https://kubernetes.io/releases/). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). ## Helm chart @@ -524,85 +524,97 @@ A Helm chart for Kubernetes to install DefectDojo | admin.password | string | `""` | | | admin.secretKey | string | `""` | | | admin.user | string | `"admin"` | | -| annotations | object | `{}` | | -| celery.annotations | object | `{}` | | +| alternativeHosts | list | `[]` | optional list of alternative hostnames to use that gets appended to DD_ALLOWED_HOSTS. This is necessary when your local hostname does not match the global hostname. | +| celery.annotations | object | `{}` | Common annotations to worker and beat deployments and pods. 
| | celery.beat.affinity | object | `{}` | | -| celery.beat.annotations | object | `{}` | | +| celery.beat.annotations | object | `{}` | Annotations for the Celery beat deployment. | | celery.beat.automountServiceAccountToken | bool | `false` | | -| celery.beat.extraEnv | list | `[]` | | -| celery.beat.extraInitContainers | list | `[]` | | -| celery.beat.extraVolumeMounts | list | `[]` | | -| celery.beat.extraVolumes | list | `[]` | | -| celery.beat.livenessProbe | object | `{}` | | +| celery.beat.containerSecurityContext | object | `{}` | Container security context for the Celery beat containers. | +| celery.beat.extraEnv | list | `[]` | Additional environment variables injected to Celery beat containers. | +| celery.beat.extraInitContainers | list | `[]` | A list of additional initContainers to run before celery beat containers. | +| celery.beat.extraVolumeMounts | list | `[]` | Array of additional volume mount points for the celery beat containers. | +| celery.beat.extraVolumes | list | `[]` | A list of extra volumes to mount @type: array | +| celery.beat.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | +| celery.beat.livenessProbe | object | `{}` | Enable liveness probe for Celery beat container. ``` exec: command: - bash - -c - celery -A dojo inspect ping -t 5 initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 10 ``` | | celery.beat.nodeSelector | object | `{}` | | -| celery.beat.podAnnotations | object | `{}` | | -| celery.beat.readinessProbe | object | `{}` | | +| celery.beat.podAnnotations | object | `{}` | Annotations for the Celery beat pods. | +| celery.beat.podSecurityContext | object | `{}` | Pod security context for the Celery beat pods. | +| celery.beat.readinessProbe | object | `{}` | Enable readiness probe for Celery beat container. 
| | celery.beat.replicas | int | `1` | | | celery.beat.resources.limits.cpu | string | `"2000m"` | | | celery.beat.resources.limits.memory | string | `"256Mi"` | | | celery.beat.resources.requests.cpu | string | `"100m"` | | | celery.beat.resources.requests.memory | string | `"128Mi"` | | -| celery.beat.startupProbe | object | `{}` | | +| celery.beat.startupProbe | object | `{}` | Enable startup probe for Celery beat container. | | celery.beat.tolerations | list | `[]` | | | celery.broker | string | `"redis"` | | | celery.logLevel | string | `"INFO"` | | | celery.worker.affinity | object | `{}` | | -| celery.worker.annotations | object | `{}` | | -| celery.worker.appSettings.poolType | string | `"solo"` | | +| celery.worker.annotations | object | `{}` | Annotations for the Celery worker deployment. | +| celery.worker.appSettings.poolType | string | `"solo"` | Performance improved celery worker config when needing to deal with a lot of findings (e.g deduplication ops) poolType: prefork autoscaleMin: 2 autoscaleMax: 8 concurrency: 8 prefetchMultiplier: 128 | | celery.worker.automountServiceAccountToken | bool | `false` | | -| celery.worker.extraEnv | list | `[]` | | -| celery.worker.extraInitContainers | list | `[]` | | -| celery.worker.extraVolumeMounts | list | `[]` | | -| celery.worker.extraVolumes | list | `[]` | | -| celery.worker.livenessProbe | object | `{}` | | +| celery.worker.containerSecurityContext | object | `{}` | Container security context for the Celery worker containers. | +| celery.worker.extraEnv | list | `[]` | Additional environment variables injected to Celery worker containers. | +| celery.worker.extraInitContainers | list | `[]` | A list of additional initContainers to run before celery worker containers. | +| celery.worker.extraVolumeMounts | list | `[]` | Array of additional volume mount points for the celery worker containers. | +| celery.worker.extraVolumes | list | `[]` | A list of extra volumes to mount. 
@type: array | +| celery.worker.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | +| celery.worker.livenessProbe | object | `{}` | Enable liveness probe for Celery worker containers. ``` exec: command: - bash - -c - celery -A dojo inspect ping -t 5 initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 10 ``` | | celery.worker.nodeSelector | object | `{}` | | -| celery.worker.podAnnotations | object | `{}` | | -| celery.worker.readinessProbe | object | `{}` | | +| celery.worker.podAnnotations | object | `{}` | Annotations for the Celery beat pods. | +| celery.worker.podSecurityContext | object | `{}` | Pod security context for the Celery worker pods. | +| celery.worker.readinessProbe | object | `{}` | Enable readiness probe for Celery worker container. | | celery.worker.replicas | int | `1` | | | celery.worker.resources.limits.cpu | string | `"2000m"` | | | celery.worker.resources.limits.memory | string | `"512Mi"` | | | celery.worker.resources.requests.cpu | string | `"100m"` | | | celery.worker.resources.requests.memory | string | `"128Mi"` | | -| celery.worker.startupProbe | object | `{}` | | +| celery.worker.startupProbe | object | `{}` | Enable startup probe for Celery worker container. 
| | celery.worker.tolerations | list | `[]` | | -| cloudsql.enable_iam_login | bool | `false` | | -| cloudsql.enabled | bool | `false` | | -| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | | -| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | | -| cloudsql.image.tag | string | `"1.37.9"` | | -| cloudsql.instance | string | `""` | | -| cloudsql.use_private_ip | bool | `false` | | -| cloudsql.verbose | bool | `true` | | -| createPostgresqlSecret | bool | `false` | | -| createRedisSecret | bool | `false` | | -| createSecret | bool | `false` | | -| dbMigrationChecker.enabled | bool | `true` | | -| dbMigrationChecker.resources.limits.cpu | string | `"200m"` | | -| dbMigrationChecker.resources.limits.memory | string | `"200Mi"` | | -| dbMigrationChecker.resources.requests.cpu | string | `"100m"` | | -| dbMigrationChecker.resources.requests.memory | string | `"100Mi"` | | -| disableHooks | bool | `false` | | +| cloudsql | object | `{"containerSecurityContext":{},"enable_iam_login":false,"enabled":false,"extraEnv":[],"extraVolumeMounts":[],"image":{"pullPolicy":"IfNotPresent","repository":"gcr.io/cloudsql-docker/gce-proxy","tag":"1.37.9"},"instance":"","resources":{},"use_private_ip":false,"verbose":true}` | Google CloudSQL support in GKE via gce-proxy | +| cloudsql.containerSecurityContext | object | `{}` | Optional: security context for the CloudSQL proxy container. | +| cloudsql.enable_iam_login | bool | `false` | use IAM database authentication | +| cloudsql.enabled | bool | `false` | To use CloudSQL in GKE set 'enable: true' | +| cloudsql.extraEnv | list | `[]` | Additional environment variables for the CloudSQL proxy container. 
| +| cloudsql.extraVolumeMounts | list | `[]` | Array of additional volume mount points for the CloudSQL proxy container | +| cloudsql.image | object | `{"pullPolicy":"IfNotPresent","repository":"gcr.io/cloudsql-docker/gce-proxy","tag":"1.37.9"}` | set repo and image tag of gce-proxy | +| cloudsql.instance | string | `""` | set CloudSQL instance: 'project:zone:instancename' | +| cloudsql.resources | object | `{}` | Optional: add resource requests/limits for the CloudSQL proxy container. | +| cloudsql.use_private_ip | bool | `false` | whether to use a private IP to connect to the database | +| cloudsql.verbose | bool | `true` | By default, the proxy has verbose logging. Set this to false to make it less verbose | +| createPostgresqlSecret | bool | `false` | create postgresql secret in defectdojo chart, outside of postgresql chart | +| createRedisSecret | bool | `false` | create redis secret in defectdojo chart, outside of redis chart | +| createSecret | bool | `false` | create defectdojo specific secret | +| dbMigrationChecker.containerSecurityContext | object | `{}` | Container security context for the DB migration checker. | +| dbMigrationChecker.enabled | bool | `true` | Enable/disable the DB migration checker. | +| dbMigrationChecker.extraEnv | list | `[]` | Additional environment variables for DB migration checker. | +| dbMigrationChecker.extraVolumeMounts | list | `[]` | Array of additional volume mount points for DB migration checker. | +| dbMigrationChecker.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | +| dbMigrationChecker.resources | object | `{"limits":{"cpu":"200m","memory":"200Mi"},"requests":{"cpu":"100m","memory":"100Mi"}}` | Resource requests/limits for the DB migration checker. 
| +| disableHooks | bool | `false` | Avoid using pre-install hooks, which might cause issues with ArgoCD | | django.affinity | object | `{}` | | | django.annotations | object | `{}` | | | django.automountServiceAccountToken | bool | `false` | | -| django.extraInitContainers | list | `[]` | | -| django.extraVolumes | list | `[]` | | +| django.extraEnv | list | `[]` | Additional environment variables injected to all Django containers and initContainers. | +| django.extraInitContainers | list | `[]` | A list of additional initContainers to run before the uwsgi and nginx containers. | +| django.extraVolumeMounts | list | `[]` | Array of additional volume mount points common to all containers and initContainers. | +| django.extraVolumes | list | `[]` | A list of extra volumes to mount. | | django.ingress.activateTLS | bool | `true` | | -| django.ingress.annotations | object | `{}` | | +| django.ingress.annotations | object | `{}` | Restricts the type of ingress controller that can interact with our chart (nginx, traefik, ...) 
`kubernetes.io/ingress.class: nginx` Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts `nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"` `nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"` | | django.ingress.enabled | bool | `true` | | | django.ingress.ingressClassName | string | `""` | | | django.ingress.secretName | string | `"defectdojo-tls"` | | -| django.mediaPersistentVolume.enabled | bool | `true` | | -| django.mediaPersistentVolume.fsGroup | int | `1001` | | -| django.mediaPersistentVolume.name | string | `"media"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.accessModes[0] | string | `"ReadWriteMany"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.create | bool | `false` | | -| django.mediaPersistentVolume.persistentVolumeClaim.name | string | `""` | | -| django.mediaPersistentVolume.persistentVolumeClaim.size | string | `"5Gi"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.storageClassName | string | `""` | | -| django.mediaPersistentVolume.type | string | `"emptyDir"` | | -| django.nginx.extraEnv | list | `[]` | | -| django.nginx.extraVolumeMounts | list | `[]` | | +| django.mediaPersistentVolume | object | `{"enabled":true,"fsGroup":1001,"name":"media","persistentVolumeClaim":{"accessModes":["ReadWriteMany"],"create":false,"name":"","size":"5Gi","storageClassName":""},"type":"emptyDir"}` | This feature needs more preparation before can be enabled, please visit KUBERNETES.md#media-persistent-volume | +| django.mediaPersistentVolume.name | string | `"media"` | any name | +| django.mediaPersistentVolume.persistentVolumeClaim | object | `{"accessModes":["ReadWriteMany"],"create":false,"name":"","size":"5Gi","storageClassName":""}` | in case if pvc specified, should point to the already existing pvc | +| django.mediaPersistentVolume.persistentVolumeClaim.accessModes | list | `["ReadWriteMany"]` | check 
KUBERNETES.md doc first for option to choose | +| django.mediaPersistentVolume.persistentVolumeClaim.create | bool | `false` | set to true to create a new pvc and if django.mediaPersistentVolume.type is set to pvc | +| django.mediaPersistentVolume.type | string | `"emptyDir"` | could be emptyDir (not for production) or pvc | +| django.nginx.containerSecurityContext | object | `{"runAsUser":1001}` | Container security context for the nginx containers. | +| django.nginx.containerSecurityContext.runAsUser | int | `1001` | nginx dockerfile sets USER=1001 | +| django.nginx.extraEnv | list | `[]` | To extra environment variables to the nginx container, you can use extraEnv. For example: extraEnv: - name: FOO valueFrom: configMapKeyRef: name: foo key: bar | +| django.nginx.extraVolumeMounts | list | `[]` | Array of additional volume mount points for nginx containers. | +| django.nginx.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.nginx.image | | django.nginx.resources.limits.cpu | string | `"2000m"` | | | django.nginx.resources.limits.memory | string | `"256Mi"` | | | django.nginx.resources.requests.cpu | string | `"100m"` | | @@ -610,28 +622,31 @@ A Helm chart for Kubernetes to install DefectDojo | django.nginx.tls.enabled | bool | `false` | | | django.nginx.tls.generateCertificate | bool | `false` | | | django.nodeSelector | object | `{}` | | +| django.podSecurityContext | object | `{"fsGroup":1001}` | Pod security context for the Django pods. | | django.replicas | int | `1` | | | django.service.annotations | object | `{}` | | | django.service.type | string | `""` | | | django.strategy | object | `{}` | | | django.tolerations | list | `[]` | | -| django.uwsgi.appSettings.maxFd | int | `0` | | +| django.uwsgi.appSettings.maxFd | int | `0` | Use this value to set the maximum number of file descriptors. If set to 0 will be detected by uwsgi e.g. 
102400 | | django.uwsgi.appSettings.processes | int | `4` | | | django.uwsgi.appSettings.threads | int | `4` | | | django.uwsgi.certificates.certFileName | string | `"ca.crt"` | | | django.uwsgi.certificates.certMountPath | string | `"/certs/"` | | | django.uwsgi.certificates.configName | string | `"defectdojo-ca-certs"` | | -| django.uwsgi.certificates.enabled | bool | `false` | | -| django.uwsgi.enableDebug | bool | `false` | | -| django.uwsgi.extraEnv | list | `[]` | | -| django.uwsgi.extraVolumeMounts | list | `[]` | | -| django.uwsgi.livenessProbe.enabled | bool | `true` | | +| django.uwsgi.certificates.enabled | bool | `false` | includes additional CA certificate as volume, it refrences REQUESTS_CA_BUNDLE env varible to create configMap `kubectl create cm defectdojo-ca-certs --from-file=ca.crt` NOTE: it reflects REQUESTS_CA_BUNDLE for celery workers, beats as well | +| django.uwsgi.containerSecurityContext.runAsUser | int | `1001` | django dockerfile sets USER=1001 | +| django.uwsgi.enableDebug | bool | `false` | this also requires DD_DEBUG to be set to True | +| django.uwsgi.extraEnv | list | `[]` | To add (or override) extra variables which need to be pulled from another configMap, you can use extraEnv. For example: extraEnv: - name: DD_DATABASE_HOST valueFrom: configMapKeyRef: name: my-other-postgres-configmap key: cluster_endpoint | +| django.uwsgi.extraVolumeMounts | list | `[]` | Array of additional volume mount points for uwsgi containers. | +| django.uwsgi.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | +| django.uwsgi.livenessProbe.enabled | bool | `true` | Enable liveness checks on uwsgi container. 
| | django.uwsgi.livenessProbe.failureThreshold | int | `6` | | | django.uwsgi.livenessProbe.initialDelaySeconds | int | `0` | | | django.uwsgi.livenessProbe.periodSeconds | int | `10` | | | django.uwsgi.livenessProbe.successThreshold | int | `1` | | | django.uwsgi.livenessProbe.timeoutSeconds | int | `5` | | -| django.uwsgi.readinessProbe.enabled | bool | `true` | | +| django.uwsgi.readinessProbe.enabled | bool | `true` | Enable readiness checks on uwsgi container. | | django.uwsgi.readinessProbe.failureThreshold | int | `6` | | | django.uwsgi.readinessProbe.initialDelaySeconds | int | `0` | | | django.uwsgi.readinessProbe.periodSeconds | int | `10` | | @@ -641,97 +656,97 @@ A Helm chart for Kubernetes to install DefectDojo | django.uwsgi.resources.limits.memory | string | `"512Mi"` | | | django.uwsgi.resources.requests.cpu | string | `"100m"` | | | django.uwsgi.resources.requests.memory | string | `"256Mi"` | | -| django.uwsgi.startupProbe.enabled | bool | `true` | | +| django.uwsgi.startupProbe.enabled | bool | `true` | Enable startup checks on uwsgi container. | | django.uwsgi.startupProbe.failureThreshold | int | `30` | | | django.uwsgi.startupProbe.initialDelaySeconds | int | `0` | | | django.uwsgi.startupProbe.periodSeconds | int | `5` | | | django.uwsgi.startupProbe.successThreshold | int | `1` | | | django.uwsgi.startupProbe.timeoutSeconds | int | `1` | | -| extraConfigs | object | `{}` | | -| extraEnv | list | `[]` | | -| extraLabels | object | `{}` | | -| extraSecrets | object | `{}` | | -| gke.useGKEIngress | bool | `false` | | -| gke.useManagedCertificate | bool | `false` | | -| gke.workloadIdentityEmail | string | `""` | | -| host | string | `"defectdojo.default.minikube.local"` | | +| extraAnnotations | object | `{}` | Annotations globally added to all resources | +| extraConfigs | object | `{}` | To add extra variables not predefined by helm config it is possible to define in extraConfigs block, e.g. 
below: NOTE Do not store any kind of sensitive information inside of it ``` DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED: 'true' DD_SOCIAL_AUTH_AUTH0_KEY: 'dev' DD_SOCIAL_AUTH_AUTH0_DOMAIN: 'xxxxx' ``` | +| extraEnv | list | `[]` | To add (or override) extra variables which need to be pulled from another configMap, you can use extraEnv. For example: ``` - name: DD_DATABASE_HOST valueFrom: configMapKeyRef: name: my-other-postgres-configmap key: cluster_endpoint ``` | +| extraLabels | object | `{}` | Labels globally added to all resources | +| extraSecrets | object | `{}` | Extra secrets can be created inside of extraSecrets block: NOTE This is just an exmaple, do not store sensitive data in plain text form, better inject it during the deployment/upgrade by --set extraSecrets.secret=someSecret ``` DD_SOCIAL_AUTH_AUTH0_SECRET: 'xxx' ``` | +| gke | object | `{"useGKEIngress":false,"useManagedCertificate":false,"workloadIdentityEmail":""}` | Settings to make running the chart on GKE simpler | +| gke.useGKEIngress | bool | `false` | Set to true to configure the Ingress to use the GKE provided ingress controller | +| gke.useManagedCertificate | bool | `false` | Set to true to have GKE automatically provision a TLS certificate for the host specified Requires useGKEIngress to be set to true When using this option, be sure to set django.ingress.activateTLS to false | +| gke.workloadIdentityEmail | string | `""` | Workload Identity allows the K8s service account to assume the IAM access of a GCP service account to interact with other GCP services Only works with serviceAccount.create = true | +| host | string | `"defectdojo.default.minikube.local"` | Primary hostname of instance | | imagePullPolicy | string | `"Always"` | | -| imagePullSecrets | string | `nil` | | +| imagePullSecrets | string | `nil` | When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project) Create secrets as: kubectl create secret docker-registry 
defectdojoregistrykey --docker-username=registry_username --docker-password=registry_password --docker-server='https://index.docker.io/v1/' | +| images.django.image.digest | string | `""` | Prefix "sha@" is expected in this place | +| images.django.image.registry | string | `""` | | +| images.django.image.repository | string | `"defectdojo/defectdojo-django"` | | +| images.django.image.tag | string | `""` | If empty, use appVersion. Another possible values are: latest, X.X.X, X.X.X-debian, X.X.X-alpine (where X.X.X is version of DD). For dev builds (only for testing purposes): nightly-dev, nightly-dev-debian, nightly-dev-alpine. To see all, check https://hub.docker.com/r/defectdojo/defectdojo-django/tags. | +| images.nginx.image.digest | string | `""` | Prefix "sha@" is expected in this place | +| images.nginx.image.registry | string | `""` | | +| images.nginx.image.repository | string | `"defectdojo/defectdojo-nginx"` | | +| images.nginx.image.tag | string | `""` | If empty, use appVersion. Another possible values are: latest, X.X.X, X.X.X-alpine (where X.X.X is version of DD). For dev builds (only for testing purposes): nightly-dev, nightly-dev-alpine. To see all, check https://hub.docker.com/r/defectdojo/defectdojo-nginx/tags. | | initializer.affinity | object | `{}` | | | initializer.annotations | object | `{}` | | | initializer.automountServiceAccountToken | bool | `false` | | -| initializer.extraEnv | list | `[]` | | -| initializer.extraVolumeMounts | list | `[]` | | -| initializer.extraVolumes | list | `[]` | | +| initializer.containerSecurityContext | object | `{}` | Container security context for the initializer Job container | +| initializer.extraEnv | list | `[]` | Additional environment variables injected to the initializer job pods. | +| initializer.extraVolumeMounts | list | `[]` | Array of additional volume mount points for the initializer job (init)containers. 
| +| initializer.extraVolumes | list | `[]` | A list of extra volumes to attach to the initializer job pods. | +| initializer.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | | initializer.jobAnnotations | object | `{}` | | -| initializer.keepSeconds | int | `60` | | +| initializer.keepSeconds | int | `60` | A positive integer will keep this Job and Pod deployed for the specified number of seconds, after which they will be removed. For all other values, the Job and Pod will remain deployed. | | initializer.labels | object | `{}` | | | initializer.nodeSelector | object | `{}` | | +| initializer.podSecurityContext | object | `{}` | Pod security context for the initializer Job | | initializer.resources.limits.cpu | string | `"2000m"` | | | initializer.resources.limits.memory | string | `"512Mi"` | | | initializer.resources.requests.cpu | string | `"100m"` | | | initializer.resources.requests.memory | string | `"256Mi"` | | | initializer.run | bool | `true` | | -| initializer.staticName | bool | `false` | | +| initializer.staticName | bool | `false` | staticName defines whether name of the job will be the same (e.g., "defectdojo-initializer") or different every time - generated based on current time (e.g., "defectdojo-initializer-2024-11-11-18-57") This might be handy for ArgoCD deployments | | initializer.tolerations | list | `[]` | | -| localsettingspy | string | `""` | | +| localsettingspy | string | `""` | To add code snippet which would extend setting functionality, you might add it here It will be stored as ConfigMap and mounted `dojo/settings/local_settings.py`. 
For more see: https://documentation.defectdojo.com/getting_started/configuration/ For example: ``` localsettingspy: | INSTALLED_APPS += ( 'debug_toolbar', ) MIDDLEWARE = [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] + MIDDLEWARE ``` | | monitoring.enabled | bool | `false` | | -| monitoring.prometheus.enabled | bool | `false` | | -| monitoring.prometheus.image | string | `"nginx/nginx-prometheus-exporter:1.4.2"` | | +| monitoring.prometheus.containerSecurityContext | object | `{}` | Optional: container security context for nginx prometheus exporter | +| monitoring.prometheus.enabled | bool | `false` | Add the nginx prometheus exporter sidecar | +| monitoring.prometheus.extraEnv | list | `[]` | Optional: additional environment variables injected to the nginx prometheus exporter container | +| monitoring.prometheus.extraVolumeMounts | list | `[]` | Array of additional volume mount points for the nginx prometheus exporter | +| monitoring.prometheus.image.digest | string | `""` | | +| monitoring.prometheus.image.registry | string | `""` | | +| monitoring.prometheus.image.repository | string | `"nginx/nginx-prometheus-exporter"` | | +| monitoring.prometheus.image.tag | string | `"1.4.2"` | | | monitoring.prometheus.imagePullPolicy | string | `"IfNotPresent"` | | -| networkPolicy.annotations | object | `{}` | | -| networkPolicy.egress | list | `[]` | | -| networkPolicy.enabled | bool | `false` | | -| networkPolicy.ingress | list | `[]` | | -| networkPolicy.ingressExtend | list | `[]` | | -| podLabels | object | `{}` | | -| postgresServer | string | `nil` | | -| postgresql.architecture | string | `"standalone"` | | -| postgresql.auth.database | string | `"defectdojo"` | | -| postgresql.auth.existingSecret | string | `"defectdojo-postgresql-specific"` | | -| postgresql.auth.password | string | `""` | | -| postgresql.auth.secretKeys.adminPasswordKey | string | `"postgresql-postgres-password"` | | -| postgresql.auth.secretKeys.replicationPasswordKey | string | 
`"postgresql-replication-password"` | | -| postgresql.auth.secretKeys.userPasswordKey | string | `"postgresql-password"` | | -| postgresql.auth.username | string | `"defectdojo"` | | -| postgresql.enabled | bool | `true` | | -| postgresql.primary.affinity | object | `{}` | | -| postgresql.primary.containerSecurityContext.enabled | bool | `true` | | -| postgresql.primary.containerSecurityContext.runAsUser | int | `1001` | | -| postgresql.primary.name | string | `"primary"` | | -| postgresql.primary.nodeSelector | object | `{}` | | -| postgresql.primary.persistence.enabled | bool | `true` | | -| postgresql.primary.podSecurityContext.enabled | bool | `true` | | -| postgresql.primary.podSecurityContext.fsGroup | int | `1001` | | -| postgresql.primary.service.ports.postgresql | int | `5432` | | -| postgresql.shmVolume.chmod.enabled | bool | `false` | | -| postgresql.volumePermissions.containerSecurityContext.runAsUser | int | `1001` | | -| postgresql.volumePermissions.enabled | bool | `false` | | -| redis.architecture | string | `"standalone"` | | -| redis.auth.existingSecret | string | `"defectdojo-redis-specific"` | | -| redis.auth.existingSecretPasswordKey | string | `"redis-password"` | | -| redis.auth.password | string | `""` | | -| redis.enabled | bool | `true` | | -| redis.sentinel.enabled | bool | `false` | | -| redis.tls.enabled | bool | `false` | | -| redisParams | string | `""` | | -| redisServer | string | `nil` | | -| repositoryPrefix | string | `"defectdojo"` | | -| revisionHistoryLimit | int | `10` | | -| secrets.annotations | object | `{}` | | -| securityContext.djangoSecurityContext.runAsUser | int | `1001` | | -| securityContext.enabled | bool | `true` | | -| securityContext.nginxSecurityContext.runAsUser | int | `1001` | | -| serviceAccount.annotations | object | `{}` | | -| serviceAccount.create | bool | `true` | | -| serviceAccount.labels | object | `{}` | | -| tag | string | `"latest"` | | +| monitoring.prometheus.resources | object | `{}` | 
Optional: add resource requests/limits for the nginx prometheus exporter container | +| networkPolicy | object | `{"annotations":{},"egress":[],"enabled":false,"ingress":[],"ingressExtend":[]}` | Enables application network policy For more info follow https://kubernetes.io/docs/concepts/services-networking/network-policies/ | +| networkPolicy.egress | list | `[]` | ``` egress: - to: - ipBlock: cidr: 10.0.0.0/24 ports: - protocol: TCP port: 443 ``` | +| networkPolicy.ingress | list | `[]` | For more detailed configuration with ports and peers. It will ignore ingressExtend ``` ingress: - from: - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo-prometheus ports: - protocol: TCP port: 8443 ``` | +| networkPolicy.ingressExtend | list | `[]` | if additional labels need to be allowed (e.g. prometheus scraper) ``` ingressExtend: - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo-prometheus ``` | +| podLabels | object | `{}` | Additional labels to add to the pods: ``` podLabels: key: value ``` | +| postgresServer | string | `nil` | To use an external PostgreSQL instance (like CloudSQL), set `postgresql.enabled` to false, set items in `postgresql.auth` part for authentication, and set the address here: | +| postgresql | object | 
`{"architecture":"standalone","auth":{"database":"defectdojo","existingSecret":"defectdojo-postgresql-specific","password":"","secretKeys":{"adminPasswordKey":"postgresql-postgres-password","replicationPasswordKey":"postgresql-replication-password","userPasswordKey":"postgresql-password"},"username":"defectdojo"},"enabled":true,"primary":{"affinity":{},"containerSecurityContext":{"enabled":true,"runAsUser":1001},"name":"primary","nodeSelector":{},"persistence":{"enabled":true},"podSecurityContext":{"enabled":true,"fsGroup":1001},"service":{"ports":{"postgresql":5432}}},"shmVolume":{"chmod":{"enabled":false}},"volumePermissions":{"containerSecurityContext":{"runAsUser":1001},"enabled":false}}` | For more advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/postgresql | +| postgresql.enabled | bool | `true` | To use an external instance, switch enabled to `false` and set the address in `postgresServer` below | +| postgresql.primary.containerSecurityContext.enabled | bool | `true` | Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC | +| postgresql.primary.containerSecurityContext.runAsUser | int | `1001` | runAsUser specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. | +| postgresql.primary.podSecurityContext.enabled | bool | `true` | Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC | +| postgresql.primary.podSecurityContext.fsGroup | int | `1001` | fsGroup specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. 
| +| postgresql.volumePermissions.containerSecurityContext | object | `{"runAsUser":1001}` | if using restricted SCC set runAsUser: "auto" and if running under anyuid SCC - runAsUser needs to match the line above | +| redis | object | `{"architecture":"standalone","auth":{"existingSecret":"defectdojo-redis-specific","existingSecretPasswordKey":"redis-password","password":""},"enabled":true,"sentinel":{"enabled":false},"tls":{"enabled":false}}` | For more advanced options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/redis | +| redis.enabled | bool | `true` | To use an external instance, switch enabled to `false` and set the address in `redisServer` below | +| redis.tls.enabled | bool | `false` | If TLS is enabled, the Redis broker will use the redis:// and optionally mount the certificates from an existing secret. | +| redisParams | string | `""` | Parameters attached to the redis connection string, defaults to "ssl_cert_reqs=optional" if `redis.tls.enabled` | +| redisServer | string | `nil` | To use an external Redis instance, set `redis.enabled` to false and set the address here: | +| revisionHistoryLimit | int | `10` | Allow overriding of revisionHistoryLimit across all deployments. | +| secrets.annotations | object | `{}` | Add annotations for secret resources | +| securityContext | object | `{"containerSecurityContext":{"runAsNonRoot":true},"enabled":true,"podSecurityContext":{"runAsNonRoot":true}}` | Security context settings | +| serviceAccount.annotations | object | `{}` | Optional additional annotations to add to the DefectDojo's Service Account. | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | +| serviceAccount.labels | object | `{}` | Optional additional labels to add to the DefectDojo's Service Account. | +| serviceAccount.name | string | `""` | The name of the service account to use.
If not set and create is true, a name is generated using the fullname template | +| siteUrl | string | `""` | The full URL to your defectdojo instance, depends on the domain where DD is deployed, it also affects links in Jira. Use syntax: `siteUrl: 'https://'` | | tests.unitTests.automountServiceAccountToken | bool | `false` | | +| tests.unitTests.image | object | `{"digest":"","registry":"","repository":"","tag":""}` | If empty, uses values from images.django.image | | tests.unitTests.resources.limits.cpu | string | `"500m"` | | | tests.unitTests.resources.limits.memory | string | `"512Mi"` | | | tests.unitTests.resources.requests.cpu | string | `"100m"` | | | tests.unitTests.resources.requests.memory | string | `"128Mi"` | | -| trackConfig | string | `"disabled"` | | +| trackConfig | string | `"disabled"` | Track configuration (trackConfig): will automatically respin application pods in case of config changes detection can be: 1. disabled (default) 2. enabled, enables tracking configuration changes based on SHA256 | ---------------------------------------------- Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/helm/defectdojo/README.md.gotmpl b/helm/defectdojo/README.md.gotmpl index 9583a95d167..e4ab067a647 100644 --- a/helm/defectdojo/README.md.gotmpl +++ b/helm/defectdojo/README.md.gotmpl @@ -11,7 +11,7 @@ this [guide](https://helm.sh/docs/using_helm/#installing-helm). ## Supported Kubernetes Versions -The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and the oldest supported [version from AWS](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). 
+The tests cover the deployment on the latest [kubernetes version](https://kubernetes.io/releases/) and [the oldest officially supported version](https://kubernetes.io/releases/). The assumption is that versions in between do not have significant differences. Current tested versions can be looked up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). ## Helm chart diff --git a/helm/defectdojo/templates/_helpers.tpl b/helm/defectdojo/templates/_helpers.tpl index 025b35078db..b6243d6ac19 100644 --- a/helm/defectdojo/templates/_helpers.tpl +++ b/helm/defectdojo/templates/_helpers.tpl @@ -1,15 +1,15 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. +{{- /* vim: set filetype=mustache: */}} +{{- /* + Expand the name of the chart. */}} {{- define "defectdojo.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. +{{- /* + Create a default fully qualified app name. + We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). + If release name contains chart name it will be used as a full name. */}} {{- define "defectdojo.fullname" -}} {{- if .Values.fullnameOverride -}} @@ -24,15 +24,15 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{- end -}} -{{/* -Create chart name and version as used by the chart label. +{{- /* + Create chart name and version as used by the chart label.
*/}} {{- define "defectdojo.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} -{{/* -Create the name of the service account to use +{{- /* + Create the name of the service account to use */}} {{- define "defectdojo.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} @@ -42,7 +42,7 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Determine the hostname to use for PostgreSQL/Redis. */}} {{- define "postgresql.hostname" -}} @@ -67,7 +67,7 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Determine the protocol to use for Redis. */}} {{- define "redis.scheme" -}} @@ -82,23 +82,67 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Builds the repository names for use with local or private registries */}} -{{- define "celery.repository" -}} -{{- printf "%s" .Values.repositoryPrefix -}}/defectdojo-django +{{- define "celery.beat.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.celery.beat.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} {{- end -}} -{{- define "django.nginx.repository" -}} -{{- printf "%s" .Values.repositoryPrefix -}}/defectdojo-nginx +{{- define "celery.worker.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.celery.worker.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} {{- end -}} -{{- define "django.uwsgi.repository" -}} -{{- printf "%s" .Values.repositoryPrefix -}}/defectdojo-django +{{- define "django.nginx.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.django.nginx.image .Values.images.nginx.image) "global" .Values.global "chart" .Chart ) }} {{- end -}} -{{- define "initializer.repository" -}} -{{- printf "%s" .Values.repositoryPrefix -}}/defectdojo-django +{{- define "django.uwsgi.image" -}} +{{ include "images.image" (dict 
"imageRoot" (merge .Values.django.uwsgi.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} +{{- end -}} + +{{- define "initializer.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.initializer.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} +{{- end -}} + +{{- define "dbMigrationChecker.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.dbMigrationChecker.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} +{{- end -}} + +{{- define "unitTests.image" -}} +{{ include "images.image" (dict "imageRoot" (merge .Values.tests.unitTests.image .Values.images.django.image) "global" .Values.global "chart" .Chart ) }} +{{- end -}} + +{{- define "monitoring.prometheus.image" -}} +{{ include "images.image" (dict "imageRoot" .Values.monitoring.prometheus.image "global" .Values.global ) }} +{{- end -}} + +{{- /* +Return the proper image name. +If image tag and digest are not defined, termination fallbacks to chart appVersion. 
+{{ include "images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global "chart" .Chart ) }} +Inspired by Bitnami Common Chart v2.31.7 +*/}} +{{- define "images.image" -}} +{{- $registryName := default .imageRoot.registry ((.global).imageRegistry) -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} + +{{- if not .imageRoot.tag }} + {{- if .chart }} + {{- $termination = .chart.AppVersion | toString -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} {{- end -}} {{- define "initializer.jobname" -}} @@ -109,7 +153,7 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Creates the array for DD_ALLOWED_HOSTS in configmap */}} {{- define "django.allowed_hosts" -}} @@ -121,7 +165,7 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Creates the persistentVolumeName */}} {{- define "django.pvc_name" -}} @@ -132,7 +176,7 @@ Create the name of the service account to use {{- end -}} {{- end -}} -{{/* +{{- /* Define db-migration-checker */}} {{- define "dbMigrationChecker" -}} @@ -141,11 +185,15 @@ Create the name of the service account to use - sh - -c - while ! /app/manage.py migrate --check; do echo "Database is not migrated to the latest state yet"; sleep 5; done; echo "Database is migrated to the latest state"; - image: '{{ template "django.uwsgi.repository" . }}:{{ .Values.tag }}' + image: '{{ template "dbMigrationChecker.image" . 
}}' imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 4 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "dbMigrationChecker.containerSecurityContext" + ) | nindent 4 }} {{- end }} envFrom: - configMapRef: @@ -163,9 +211,101 @@ Create the name of the service account to use secretKeyRef: name: {{ .Values.postgresql.auth.existingSecret | default "defectdojo-postgresql-specific" }} key: {{ .Values.postgresql.auth.secretKeys.userPasswordKey | default "postgresql-password" }} - {{- if .Values.extraEnv }} - {{- toYaml .Values.extraEnv | nindent 2 }} + {{- with .Values.extraEnv }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with.Values.dbMigrationChecker.extraEnv }} + {{- toYaml . | nindent 2 }} {{- end }} resources: {{- toYaml .Values.dbMigrationChecker.resources | nindent 4 }} + {{- with .Values.dbMigrationChecker.extraVolumeMounts }} + volumeMounts: + {{- . | toYaml | nindent 4 }} + {{- end }} +{{- end -}} + +{{- /* + Define cloudsql-proxy +*/}} +{{- define "cloudsqlProxy" -}} +- name: cloudsql-proxy + image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} + {{- with .Values.cloudsql.extraEnv }} + env: {{- . | toYaml | nindent 4 }} + {{- end }} + {{- with .Values.cloudsql.resources }} + resources: {{- . 
| toYaml | nindent 4 }} + {{- end }} + restartPolicy: Always + {{- if .Values.securityContext.enabled }} + securityContext: + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "cloudsql.containerSecurityContext" + ) | nindent 4 }} + {{- end }} + command: ["/cloud_sql_proxy"] + args: + - "-verbose={{ .Values.cloudsql.verbose }}" + - "-instances={{ .Values.cloudsql.instance }}=tcp:{{ .Values.postgresql.primary.service.ports.postgresql }}" + {{- if .Values.cloudsql.enable_iam_login }} + - "-enable_iam_login" + {{- end }} + {{- if .Values.cloudsql.use_private_ip }} + - "-ip_address_types=PRIVATE" + {{- end }} + {{- with .Values.cloudsql.extraVolumeMounts }} + volumeMounts: {{ . | toYaml | nindent 4 }} + {{- end }} +{{- end -}} + +{{- /* +Returns the JSON representation of the value for a dot-notation path +from a given context. + Args: + 0: context (e.g., .Values) + 1: path (e.g., "foo.bar") +*/}} +{{- define "helpers.getValue" -}} + {{- $ctx := merge dict (index . 0) -}} + {{- $path := index . 1 -}} + {{- $parts := splitList "." $path -}} + {{- $value := $ctx -}} + {{- range $idx, $part := $parts -}} + {{- if kindIs "map" $value -}} + {{- $value = index $value $part -}} + {{- else -}} + {{- $value = "" -}} + {{- /* Exit early by setting to last iteration */}} + {{- $idx = sub (len $parts) 1 -}} + {{- end -}} + {{- end -}} + {{- toJson $value -}} +{{- end -}} + +{{- /* + Build the security context. + Args: + 0: values context (.Values) + 1: the default security context key (e.g. "securityContext.containerSecurityContext") + 2: the key under the context with security context (e.g., "foo.bar") +*/}} +{{- define "helpers.securityContext" -}} +{{- $values := merge dict (index . 0) -}} +{{- $defaultSecurityContextKey := index . 1 -}} +{{- $securityContextKey := index . 
2 -}} +{{- $securityContext := dict -}} +{{- with $values }} + {{- $securityContext = (merge + $securityContext + (include "helpers.getValue" (list $values $defaultSecurityContextKey) | fromJson) + (include "helpers.getValue" (list $values $securityContextKey) | fromJson) + ) -}} +{{- end -}} +{{- with $securityContext -}} +{{- . | toYaml | nindent 2 -}} +{{- end -}} {{- end -}} diff --git a/helm/defectdojo/templates/celery-beat-deployment.yaml b/helm/defectdojo/templates/celery-beat-deployment.yaml index 4e5b4833331..b1832f71e29 100644 --- a/helm/defectdojo/templates/celery-beat-deployment.yaml +++ b/helm/defectdojo/templates/celery-beat-deployment.yaml @@ -2,7 +2,12 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ $fullName }}-celery-beat + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.celery.annotations .Values.celery.beat.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: defectdojo.org/component: celery defectdojo.org/subcomponent: beat @@ -10,13 +15,11 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with mergeOverwrite .Values.celery.annotations .Values.celery.beat.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} + name: {{ $fullName }}-celery-beat + namespace: {{ .Release.Namespace }} spec: replicas: {{ .Values.celery.beat.replicas }} {{- with .Values.revisionHistoryLimit }} @@ -35,15 +38,12 @@ spec: defectdojo.org/subcomponent: beat app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- with .Values.extraLabels }} - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraLabels .Values.podLabels }} + {{ $key }}: {{ quote $value }} {{- end }} annotations: - {{- with mergeOverwrite .Values.celery.annotations .Values.celery.beat.podAnnotations }} - {{- toYaml . | nindent 8 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.celery.annotations .Values.celery.beat.podAnnotations }} + {{ $key }}: {{ quote $value }} {{- end }} {{- if eq (.Values.trackConfig | default "disabled") "enabled" }} checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} @@ -51,6 +51,14 @@ spec: checksum/esecret: {{ include (print $.Template.BasePath "/extra-secret.yaml") . | sha256sum }} {{- end }} spec: + {{- if .Values.securityContext.enabled }} + securityContext: + {{- include "helpers.securityContext" (list + .Values + "securityContext.podSecurityContext" + "celery.beat.podSecurityContext" + ) | nindent 8 }} + {{- end }} serviceAccountName: {{ include "defectdojo.serviceAccountName" . 
}} automountServiceAccountToken: {{ .Values.celery.beat.automountServiceAccountToken }} {{- with .Values.imagePullSecrets }} @@ -60,12 +68,12 @@ spec: volumes: - name: run emptyDir: {} - {{- if .Values.localsettingspy }} + {{- if .Values.localsettingspy }} - name: localsettingspy configMap: name: {{ $fullName }}-localsettingspy {{- end }} - {{- if .Values.django.uwsgi.certificates.enabled }} + {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount configMap: name: {{ .Values.django.uwsgi.certificates.configName }} @@ -80,22 +88,7 @@ spec: {{- end }} {{- end }} {{- if .Values.cloudsql.enabled }} - - name: cloudsql-proxy - image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} - imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} - restartPolicy: Always - securityContext: - runAsNonRoot: true - command: ["/cloud_sql_proxy"] - args: - - "-verbose={{ .Values.cloudsql.verbose }}" - - "-instances={{ .Values.cloudsql.instance }}=tcp:{{ .Values.postgresql.primary.service.ports.postgresql }}" - {{- if .Values.cloudsql.enable_iam_login }} - - "-enable_iam_login" - {{- end }} - {{- if .Values.cloudsql.use_private_ip }} - - "-ip_address_types=PRIVATE" - {{- end }} + {{- include "cloudsqlProxy" . | nindent 6 }} {{- end }} {{- if .Values.dbMigrationChecker.enabled }} {{$data := dict "fullName" $fullName }} @@ -106,7 +99,7 @@ spec: - command: - /entrypoint-celery-beat.sh name: celery - image: "{{ template "celery.repository" . }}:{{ .Values.tag }}" + image: "{{ template "celery.beat.image" . }}" imagePullPolicy: {{ .Values.imagePullPolicy }} {{- with .Values.celery.beat.livenessProbe }} livenessProbe: {{ toYaml . 
| nindent 10 }} @@ -119,12 +112,16 @@ spec: {{- end }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "celery.beat.containerSecurityContext" + ) | nindent 10 }} {{- end }} volumeMounts: - name: run mountPath: /run/defectdojo - {{- if .Values.localsettingspy }} + {{- if .Values.localsettingspy }} - name: localsettingspy readOnly: true mountPath: /app/dojo/settings/local_settings.py diff --git a/helm/defectdojo/templates/celery-worker-deployment.yaml b/helm/defectdojo/templates/celery-worker-deployment.yaml index 68a9cfdf077..14ddcf79f4b 100644 --- a/helm/defectdojo/templates/celery-worker-deployment.yaml +++ b/helm/defectdojo/templates/celery-worker-deployment.yaml @@ -2,7 +2,12 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ $fullName }}-celery-worker + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.celery.annotations .Values.celery.worker.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: defectdojo.org/component: celery defectdojo.org/subcomponent: worker @@ -10,13 +15,11 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with mergeOverwrite .Values.celery.annotations .Values.celery.worker.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} + name: {{ $fullName }}-celery-worker + namespace: {{ .Release.Namespace }} spec: replicas: {{ .Values.celery.worker.replicas }} {{- with .Values.revisionHistoryLimit }} @@ -35,15 +38,12 @@ spec: defectdojo.org/subcomponent: worker app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraLabels .Values.podLabels }} + {{ $key }}: {{ quote $value }} {{- end }} annotations: - {{- with mergeOverwrite .Values.celery.annotations .Values.celery.worker.podAnnotations }} - {{- toYaml . | nindent 8 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.celery.annotations .Values.celery.worker.podAnnotations }} + {{ $key }}: {{ quote $value }} {{- end }} {{- if eq (.Values.trackConfig | default "disabled") "enabled" }} checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} @@ -51,6 +51,14 @@ spec: checksum/esecret: {{ include (print $.Template.BasePath "/extra-secret.yaml") . | sha256sum }} {{- end }} spec: + {{- if .Values.securityContext.enabled }} + securityContext: + {{- include "helpers.securityContext" (list + .Values + "securityContext.podSecurityContext" + "celery.worker.podSecurityContext" + ) | nindent 8 }} + {{- end }} serviceAccountName: {{ include "defectdojo.serviceAccountName" . }} automountServiceAccountToken: {{ .Values.celery.worker.automountServiceAccountToken }} {{- with .Values.imagePullSecrets }} @@ -58,12 +66,12 @@ spec: - name: {{ . 
}} {{- end }} volumes: - {{- if .Values.localsettingspy }} + {{- if .Values.localsettingspy }} - name: localsettingspy configMap: name: {{ $fullName }}-localsettingspy {{- end }} - {{- if .Values.django.uwsgi.certificates.enabled }} + {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount configMap: name: {{ .Values.django.uwsgi.certificates.configName }} @@ -78,22 +86,7 @@ spec: {{- end }} {{- end }} {{- if .Values.cloudsql.enabled }} - - name: cloudsql-proxy - image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} - imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} - restartPolicy: Always - securityContext: - runAsNonRoot: true - command: ["/cloud_sql_proxy"] - args: - - "-verbose={{ .Values.cloudsql.verbose }}" - - "-instances={{ .Values.cloudsql.instance }}=tcp:{{ .Values.postgresql.primary.service.ports.postgresql }}" - {{- if .Values.cloudsql.enable_iam_login }} - - "-enable_iam_login" - {{- end }} - {{- if .Values.cloudsql.use_private_ip }} - - "-ip_address_types=PRIVATE" - {{- end }} + {{- include "cloudsqlProxy" . | nindent 6 }} {{- end }} {{- if .Values.dbMigrationChecker.enabled }} {{$data := dict "fullName" $fullName }} @@ -102,7 +95,7 @@ spec: {{- end }} containers: - name: celery - image: "{{ template "celery.repository" . }}:{{ .Values.tag }}" + image: "{{ template "celery.worker.image" . }}" imagePullPolicy: {{ .Values.imagePullPolicy }} {{- with .Values.celery.worker.livenessProbe }} livenessProbe: {{ toYaml . 
| nindent 10 }} @@ -115,7 +108,11 @@ spec: {{- end }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "celery.worker.containerSecurityContext" + ) | nindent 10 }} {{- end }} command: ['/entrypoint-celery-worker.sh'] volumeMounts: @@ -125,7 +122,7 @@ spec: mountPath: /app/dojo/settings/local_settings.py subPath: file {{- end }} - {{- if .Values.django.uwsgi.certificates.enabled }} + {{- if .Values.django.uwsgi.certificates.enabled }} - name: cert-mount mountPath: {{ .Values.django.uwsgi.certificates.certMountPath }} {{- end }} diff --git a/helm/defectdojo/templates/configmap-local-settings-py.yaml b/helm/defectdojo/templates/configmap-local-settings-py.yaml index dc75942fbc0..30c42244251 100644 --- a/helm/defectdojo/templates/configmap-local-settings-py.yaml +++ b/helm/defectdojo/templates/configmap-local-settings-py.yaml @@ -1,14 +1,24 @@ -{{- if .Values.localsettingspy }} +{{- if .Values.localsettingspy }} {{- $fullName := include "defectdojo.fullname" . -}} apiVersion: v1 kind: ConfigMap metadata: - name: {{ $fullName }}-localsettingspy + {{- with .Values.extraAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} + {{- with .Values.extraLabels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ $fullName }}-localsettingspy + namespace: {{ .Release.Namespace }} data: file: {{ toYaml .Values.localsettingspy | indent 4 }} diff --git a/helm/defectdojo/templates/configmap.yaml b/helm/defectdojo/templates/configmap.yaml index e5078f57903..d25926c2c3f 100644 --- a/helm/defectdojo/templates/configmap.yaml +++ b/helm/defectdojo/templates/configmap.yaml @@ -3,21 +3,22 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ $fullName }} + {{- with .Values.extraAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} -{{- if .Values.annotations }} - annotations: -{{- with .Values.annotations }} - {{- toYaml . | nindent 4 }} -{{- end }} -{{- end }} + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} data: DD_ADMIN_USER: {{ .Values.admin.user | default "admin" }} DD_ADMIN_MAIL: {{ .Values.admin.Mail | default "admin@defectdojo.local" }} diff --git a/helm/defectdojo/templates/django-deployment.yaml b/helm/defectdojo/templates/django-deployment.yaml index 986c8898fc9..b4eee529383 100644 --- a/helm/defectdojo/templates/django-deployment.yaml +++ b/helm/defectdojo/templates/django-deployment.yaml @@ -2,20 +2,23 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ $fullName }}-django + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.django.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: defectdojo.org/component: django app.kubernetes.io/name: {{ include "defectdojo.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with .Values.django.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} + name: {{ $fullName }}-django + namespace: {{ .Release.Namespace }} spec: replicas: {{ .Values.django.replicas }} {{- with .Values.django.strategy }} @@ -36,15 +39,12 @@ spec: defectdojo.org/component: django app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} + {{- range $key, $value := mergeOverwrite dict .Values.extraLabels .Values.podLabels }} + {{ $key }}: {{ quote $value }} + {{- end }} annotations: - {{- with .Values.django.annotations }} - {{- toYaml . | nindent 8 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.django.annotations }} + {{ $key }}: {{ quote $value }} {{- end }} {{- if and .Values.monitoring.enabled .Values.monitoring.prometheus.enabled }} prometheus.io/path: /metrics @@ -65,8 +65,14 @@ spec: - name: {{ quote . }} {{- end }} {{- if .Values.django.mediaPersistentVolume.enabled }} + {{- if .Values.securityContext.enabled }} securityContext: - fsGroup: {{ .Values.django.mediaPersistentVolume.fsGroup | default 1001 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.podSecurityContext" + "django.podSecurityContext" + ) | nindent 8 }} + {{- end }} {{- end }} volumes: - name: run @@ -99,25 +105,10 @@ spec: - {{- . 
| toYaml | nindent 8 }} {{- end }} {{- if .Values.cloudsql.enabled }} - - name: cloudsql-proxy - image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} - imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} - restartPolicy: Always - securityContext: - runAsNonRoot: true - command: ["/cloud_sql_proxy"] - args: - - "-verbose={{ .Values.cloudsql.verbose }}" - - "-instances={{ .Values.cloudsql.instance }}=tcp:{{ .Values.postgresql.primary.service.ports.postgresql }}" - {{- if .Values.cloudsql.enable_iam_login }} - - "-enable_iam_login" - {{- end }} - {{- if .Values.cloudsql.use_private_ip }} - - "-ip_address_types=PRIVATE" - {{- end }} + {{- include "cloudsqlProxy" . | nindent 6 }} {{- end }} {{- if .Values.dbMigrationChecker.enabled }} - {{$data := dict "fullName" $fullName }} + {{- $data := dict "fullName" $fullName }} {{- $newContext := merge . (dict "fullName" $fullName) }} {{- include "dbMigrationChecker" $newContext | nindent 6 }} {{- end }} @@ -125,9 +116,15 @@ spec: containers: {{- if and .Values.monitoring.enabled .Values.monitoring.prometheus.enabled }} - name: metrics - image: {{ .Values.monitoring.prometheus.image }} + image: '{{ template "monitoring.prometheus.image" . }}' imagePullPolicy: {{ .Values.monitoring.prometheus.imagePullPolicy }} - command: [ '/usr/bin/nginx-prometheus-exporter', '--nginx.scrape-uri', 'http://127.0.0.1:8080/nginx_status'] + command: + - /usr/bin/nginx-prometheus-exporter + - --nginx.scrape-uri + - http://127.0.0.1:8080/nginx_status + {{- with .Values.monitoring.prometheus.extraEnv }} + env: {{- . | toYaml | nindent 8 }} + {{- end }} ports: - name: http-metrics protocol: TCP @@ -139,13 +136,31 @@ spec: periodSeconds: 20 initialDelaySeconds: 15 timeoutSeconds: 5 + {{- with .Values.monitoring.prometheus.resources }} + resources: {{- . 
| toYaml | nindent 10 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "monitoring.prometheus.containerSecurityContext" + ) | nindent 10 }} + {{- end }} + {{- with .Values.monitoring.prometheus.extraVolumeMounts }} + volumeMounts: {{ . | toYaml | nindent 10 }} + {{- end }} {{- end }} - name: uwsgi - image: '{{ template "django.uwsgi.repository" . }}:{{ .Values.tag }}' + image: '{{ template "django.uwsgi.image" . }}' imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "django.uwsgi.containerSecurityContext" + ) | nindent 10 }} {{- end }} volumeMounts: - name: run @@ -160,6 +175,9 @@ spec: - name: cert-mount mountPath: {{ .Values.django.uwsgi.certificates.certMountPath }} {{- end }} + {{- with .Values.django.extraVolumeMounts }} + {{- . | toYaml | nindent 8 }} + {{- end }} {{- with .Values.django.uwsgi.extraVolumeMounts }} {{- . | toYaml | nindent 8 }} {{- end }} @@ -213,6 +231,9 @@ spec: {{- with .Values.extraEnv }} {{- . | toYaml | nindent 8 }} {{- end }} + {{- with .Values.django.extraEnv }} + {{- . | toYaml | nindent 8 }} + {{- end }} {{- with .Values.django.uwsgi.extraEnv }} {{- . | toYaml | nindent 8 }} {{- end }} @@ -235,15 +256,22 @@ spec: resources: {{- toYaml .Values.django.uwsgi.resources | nindent 10 }} - name: nginx - image: '{{ template "django.nginx.repository" . }}:{{ .Values.tag }}' + image: '{{ template "django.nginx.image" . 
}}' imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.nginxSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "django.nginx.containerSecurityContext" + ) | nindent 10 }} {{- end }} volumeMounts: - name: run mountPath: /run/defectdojo + {{- with .Values.django.extraVolumeMounts }} + {{- . | toYaml | nindent 8 }} + {{- end }} {{- with .Values.django.nginx.extraVolumeMounts }} {{- . | toYaml | nindent 8 }} {{- end }} @@ -271,6 +299,9 @@ spec: {{- with .Values.extraEnv }} {{- . | toYaml | nindent 8 }} {{- end }} + {{- with .Values.django.extraEnv }} + {{- . | toYaml | nindent 8 }} + {{- end }} {{- with .Values.django.nginx.extraEnv }} {{- . | toYaml | nindent 8 }} {{- end }} diff --git a/helm/defectdojo/templates/django-ingress.yaml b/helm/defectdojo/templates/django-ingress.yaml index 4a0209d15a2..aee880f23d9 100644 --- a/helm/defectdojo/templates/django-ingress.yaml +++ b/helm/defectdojo/templates/django-ingress.yaml @@ -3,28 +3,32 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ $fullName }} - labels: - defectdojo.org/component: django - app.kubernetes.io/name: {{ include "defectdojo.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if or .Values.django.ingress.annotations .Values.gke.useGKEIngress }} + {{- if or .Values.extraAnnotations .Values.django.ingress.annotations .Values.gke.useGKEIngress }} annotations: -{{- with .Values.django.ingress.annotations }} - {{- toYaml . 
| nindent 4 }} -{{- end }} + {{- range $key, $value := .Values.extraAnnotations }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- range $key, $value := .Values.django.ingress.annotations }} + {{ $key }}: {{ quote $value }} + {{- end }} {{- if .Values.gke.useGKEIngress }} {{- if .Values.gke.useManagedCertificate }} kubernetes.io/ingress.allow-http: "false" networking.gke.io/managed-certificates: {{ $fullName }}-django {{- end }} {{- end }} -{{- end }} + {{- end }} + labels: + defectdojo.org/component: django + app.kubernetes.io/name: {{ include "defectdojo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} + {{- end }} + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} spec: {{- if .Values.django.ingress.ingressClassName }} ingressClassName: {{ .Values.django.ingress.ingressClassName }} diff --git a/helm/defectdojo/templates/django-service.yaml b/helm/defectdojo/templates/django-service.yaml index f8c20aa092f..5f966c15edc 100644 --- a/helm/defectdojo/templates/django-service.yaml +++ b/helm/defectdojo/templates/django-service.yaml @@ -2,22 +2,23 @@ apiVersion: v1 kind: Service metadata: - name: {{ $fullName }}-django + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.django.service.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} labels: defectdojo.org/component: django app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . 
| nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} -{{- if .Values.django.service.annotations }} - annotations: - {{- range $key, $value := .Values.django.service.annotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} -{{- end }} + name: {{ $fullName }}-django + namespace: {{ .Release.Namespace }} spec: selector: defectdojo.org/component: django diff --git a/helm/defectdojo/templates/extra-secret.yaml b/helm/defectdojo/templates/extra-secret.yaml index d97800283a6..caa5b1fcbfa 100644 --- a/helm/defectdojo/templates/extra-secret.yaml +++ b/helm/defectdojo/templates/extra-secret.yaml @@ -3,24 +3,22 @@ apiVersion: v1 kind: Secret metadata: - name: {{ $fullName }}-extrasecrets + {{- with mergeOverwrite dict .Values.secrets.annotations .Values.extraAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} labels: app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- if or .Values.secrets.annotations .Values.annotations }} - annotations: - {{- with .Values.secrets.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} - {{- end }} + name: {{ $fullName }}-extrasecrets + namespace: {{ .Release.Namespace }} type: Opaque data: {{- range $key, $value := .Values.extraSecrets }} diff --git a/helm/defectdojo/templates/gke-managed-certificate.yaml b/helm/defectdojo/templates/gke-managed-certificate.yaml index 43399626310..14dc539e6b7 100644 --- a/helm/defectdojo/templates/gke-managed-certificate.yaml +++ b/helm/defectdojo/templates/gke-managed-certificate.yaml @@ -1,9 +1,22 @@ -{{- if .Values.gke.useManagedCertificate }} +{{- if .Values.gke.useManagedCertificate | and (.Capabilities.APIVersions.Has "networking.gke.io/v1") }} {{- $fullName := include "defectdojo.fullname" . -}} apiVersion: networking.gke.io/v1 kind: ManagedCertificate metadata: + {{- with .Values.extraAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} + {{- with .Values.extraLabels }} + labels: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} name: {{ $fullName }}-django + namespace: {{ .Release.Namespace }} spec: domains: - {{ .Values.host }} diff --git a/helm/defectdojo/templates/initializer-job.yaml b/helm/defectdojo/templates/initializer-job.yaml index aa4bff0cbd7..43dcd269d8f 100644 --- a/helm/defectdojo/templates/initializer-job.yaml +++ b/helm/defectdojo/templates/initializer-job.yaml @@ -3,20 +3,23 @@ apiVersion: batch/v1 kind: Job metadata: - name: {{ template "initializer.jobname" . }} + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.initializer.jobAnnotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: defectdojo.org/component: initializer app.kubernetes.io/name: {{ include "defectdojo.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . 
}} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - annotations: - {{- with .Values.initializer.jobAnnotations }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} + name: {{ template "initializer.jobname" . }} + namespace: {{ .Release.Namespace }} spec: {{- if and (int .Values.initializer.keepSeconds) (gt (int .Values.initializer.keepSeconds) 0) }} ttlSecondsAfterFinished: {{ .Values.initializer.keepSeconds }} @@ -38,6 +41,14 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} spec: + {{- if .Values.securityContext.enabled }} + securityContext: + {{- include "helpers.securityContext" (list + .Values + "securityContext.podSecurityContext" + "initializer.podSecurityContext" + ) | nindent 8 }} + {{- end }} serviceAccountName: {{ include "defectdojo.serviceAccountName" . }} automountServiceAccountToken: {{ .Values.initializer.automountServiceAccountToken }} {{- with .Values.imagePullSecrets }} @@ -64,40 +75,22 @@ spec: {{- end }} initContainers: {{- if .Values.cloudsql.enabled }} - - name: cloudsql-proxy - image: {{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }} - imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy }} - restartPolicy: Always - securityContext: - runAsNonRoot: true - command: ["/cloud_sql_proxy"] - args: - - "-verbose={{ .Values.cloudsql.verbose }}" - - "-instances={{ .Values.cloudsql.instance }}=tcp:{{ .Values.postgresql.primary.service.ports.postgresql }}" - {{- if .Values.cloudsql.enable_iam_login }} - - "-enable_iam_login" - {{- end }} - {{- if .Values.cloudsql.use_private_ip }} - - "-ip_address_types=PRIVATE" - {{- end }} - volumeMounts: - {{- range .Values.initializer.extraVolumes }} - - name: userconfig-{{ .name }} - readOnly: true - mountPath: {{ .path }} - subPath: {{ .subPath }} - {{- end }} + {{- include "cloudsqlProxy" . 
| nindent 6 }} {{- end }} - name: wait-for-db command: - '/bin/bash' - '-c' - '/wait-for-it.sh ${DD_DATABASE_HOST:-postgres}:${DD_DATABASE_PORT:-5432} -t 300 -s -- /bin/echo Database is up' - image: '{{ template "django.uwsgi.repository" . }}:{{ .Values.tag }}' + image: "{{ template "initializer.image" . }}" imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "django.uwsgi.containerSecurityContext" + ) | nindent 10 }} {{- end }} envFrom: - configMapRef: @@ -120,11 +113,15 @@ spec: {{- end }} containers: - name: initializer - image: "{{ template "initializer.repository" . }}:{{ .Values.tag }}" + image: "{{ template "initializer.image" . }}" imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: - {{- toYaml .Values.securityContext.djangoSecurityContext | nindent 10 }} + {{- include "helpers.securityContext" (list + .Values + "securityContext.containerSecurityContext" + "initializer.containerSecurityContext" + ) | nindent 10 }} {{- end }} volumeMounts: {{- if .Values.localsettingspy }} diff --git a/helm/defectdojo/templates/media-pvc.yaml b/helm/defectdojo/templates/media-pvc.yaml index d31d3251b44..57fcae8e0c7 100644 --- a/helm/defectdojo/templates/media-pvc.yaml +++ b/helm/defectdojo/templates/media-pvc.yaml @@ -1,22 +1,29 @@ {{- $fullName := include "django.pvc_name" $ -}} {{ with .Values.django.mediaPersistentVolume }} -{{- if and .enabled (eq .type "pvc") .persistentVolumeClaim.create }} +{{- if and .enabled (eq .type "pvc") .persistentVolumeClaim.create }} apiVersion: v1 kind: PersistentVolumeClaim metadata: + {{- with .Values.extraAnnotations }} + annotations: + {{- range $key, $value := . 
}} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: defectdojo.org/component: django app.kubernetes.io/name: {{ include "defectdojo.name" $ }} app.kubernetes.io/instance: {{ $.Release.Name }} app.kubernetes.io/managed-by: {{ $.Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" $ }} - {{- with $.Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} name: {{ $fullName }} + namespace: {{ .Release.Namespace }} spec: accessModes: - {{- toYaml .persistentVolumeClaim.accessModes |nindent 4 }} + {{- toYaml .persistentVolumeClaim.accessModes | nindent 4 }} resources: requests: storage: {{ .persistentVolumeClaim.size }} diff --git a/helm/defectdojo/templates/network-policy.yaml b/helm/defectdojo/templates/network-policy.yaml index e580a0df80c..333b58da3e6 100644 --- a/helm/defectdojo/templates/network-policy.yaml +++ b/helm/defectdojo/templates/network-policy.yaml @@ -3,21 +3,22 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: {{ $fullName }}-networkpolicy + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.networkPolicy.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} app.kubernetes.io/name: {{ include "defectdojo.name" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} -{{- if .Values.networkPolicy.annotations }} - annotations: -{{- with .Values.networkPolicy.annotations }} - {{- toYaml . 
| nindent 4 }} -{{- end }} -{{- end }} + name: {{ $fullName }}-networkpolicy + namespace: {{ .Release.Namespace }} spec: podSelector: matchLabels: @@ -43,15 +44,22 @@ spec: apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: {{ $fullName }}-networkpolicy-django + {{- with mergeOverwrite dict .Values.extraAnnotations .Values.networkPolicy.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ quote $value }} + {{- end }} + {{- end }} labels: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} helm.sh/chart: {{ include "defectdojo.chart" . }} app.kubernetes.io/name: {{ include "defectdojo.name" . }} -{{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} -{{- end }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} + {{- end }} + name: {{ $fullName }}-networkpolicy-django + namespace: {{ .Release.Namespace }} spec: podSelector: matchLabels: diff --git a/helm/defectdojo/templates/sa.yaml b/helm/defectdojo/templates/sa.yaml index 4345da6360a..1394f077945 100644 --- a/helm/defectdojo/templates/sa.yaml +++ b/helm/defectdojo/templates/sa.yaml @@ -2,31 +2,26 @@ kind: ServiceAccount apiVersion: v1 metadata: - name: {{ include "defectdojo.serviceAccountName" . }} - labels: - app.kubernetes.io/name: {{ include "defectdojo.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccount.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} annotations: {{- if (not .Values.disableHooks) }} helm.sh/resource-policy: keep helm.sh/hook: "pre-install" helm.sh/hook-delete-policy: "before-hook-creation" {{- end }} - {{- with .Values.annotations }} - {{ toYaml . 
| nindent 4 }} - {{- end }} - {{- with .Values.serviceAccount.annotations }} - {{ toYaml . | nindent 4 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.serviceAccount.annotations }} + {{ $key }}: {{ quote $value }} {{- end }} {{- if ne .Values.gke.workloadIdentityEmail "" }} iam.gke.io/gcp-service-account: {{ .Values.gke.workloadIdentityEmail }} {{- end }} + labels: + app.kubernetes.io/name: {{ include "defectdojo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . }} + {{- range $key, $value := mergeOverwrite dict .Values.extraLabels .Values.serviceAccount.labels }} + {{ $key }}: {{ quote $value }} + {{- end }} + name: {{ include "defectdojo.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} {{- end }} \ No newline at end of file diff --git a/helm/defectdojo/templates/secret-postgresql.yaml b/helm/defectdojo/templates/secret-postgresql.yaml index 12924bb29c5..57f38a0e883 100644 --- a/helm/defectdojo/templates/secret-postgresql.yaml +++ b/helm/defectdojo/templates/secret-postgresql.yaml @@ -2,27 +2,25 @@ apiVersion: v1 kind: Secret metadata: - name: {{ .Values.postgresql.auth.existingSecret }} - labels: - app.kubernetes.io/name: {{ include "defectdojo.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} annotations: {{- if (not .Values.disableHooks) }} helm.sh/resource-policy: keep helm.sh/hook: "pre-install" helm.sh/hook-delete-policy: "before-hook-creation" {{- end }} - {{- with .Values.secrets.annotations }} - {{- toYaml . 
| nindent 4 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.secrets.annotations }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with .Values.annotations }} - {{- toYaml . | nindent 4 }} + labels: + app.kubernetes.io/name: {{ include "defectdojo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} + name: {{ .Values.postgresql.auth.existingSecret }} + namespace: {{ .Release.Namespace }} type: Opaque data: {{- if .Values.postgresql.auth.password }} diff --git a/helm/defectdojo/templates/secret-redis.yaml b/helm/defectdojo/templates/secret-redis.yaml index f6d102c2513..b2a5a3a84c2 100644 --- a/helm/defectdojo/templates/secret-redis.yaml +++ b/helm/defectdojo/templates/secret-redis.yaml @@ -2,27 +2,25 @@ apiVersion: v1 kind: Secret metadata: - name: {{ .Values.redis.auth.existingSecret }} - labels: - app.kubernetes.io/name: {{ include "defectdojo.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} annotations: {{- if (not .Values.disableHooks) }} helm.sh/resource-policy: keep helm.sh/hook: "pre-install" helm.sh/hook-delete-policy: "before-hook-creation" {{- end }} - {{- with .Values.secrets.annotations }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.secrets.annotations }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with .Values.annotations }} - {{- toYaml . | nindent 4 }} + labels: + app.kubernetes.io/name: {{ include "defectdojo.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . }} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} + name: {{ .Values.redis.auth.existingSecret }} + namespace: {{ .Release.Namespace }} type: Opaque data: {{- if .Values.redis.auth.password }} diff --git a/helm/defectdojo/templates/secret.yaml b/helm/defectdojo/templates/secret.yaml index c3a3c56f6c4..3a4a5299d64 100644 --- a/helm/defectdojo/templates/secret.yaml +++ b/helm/defectdojo/templates/secret.yaml @@ -3,47 +3,45 @@ apiVersion: v1 kind: Secret metadata: - name: {{ $fullName }} - labels: - app.kubernetes.io/name: {{ include "defectdojo.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - helm.sh/chart: {{ include "defectdojo.chart" . }} - {{- with .Values.extraLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} annotations: {{- if (not .Values.disableHooks) }} helm.sh/resource-policy: keep helm.sh/hook: "pre-install" helm.sh/hook-delete-policy: "before-hook-creation" {{- end }} - {{- with .Values.secrets.annotations }} - {{- toYaml . | nindent 4 }} + {{- range $key, $value := mergeOverwrite dict .Values.extraAnnotations .Values.secrets.annotations }} + {{ $key }}: {{ quote $value }} {{- end }} - {{- with .Values.annotations }} - {{- toYaml . | nindent 4 }} + labels: + app.kubernetes.io/name: {{ include "defectdojo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "defectdojo.chart" . 
}} + {{- range $key, $value := .Values.extraLabels }} + {{ $key }}: {{ quote $value }} {{- end }} + name: {{ $fullName }} + namespace: {{ .Release.Namespace }} type: Opaque data: {{- if .Values.admin.password }} DD_ADMIN_PASSWORD: {{ .Values.admin.password | b64enc | quote }} -{{- else}} +{{- else }} DD_ADMIN_PASSWORD: {{ randAlphaNum 22 | b64enc | quote }} -{{- end}} +{{- end }} {{- if .Values.admin.secretKey }} DD_SECRET_KEY: {{ .Values.admin.secretKey | b64enc | quote }} -{{- else}} +{{- else }} DD_SECRET_KEY: {{ randAlphaNum 128 | b64enc | quote }} -{{- end}} +{{- end }} {{- if .Values.admin.credentialAes256Key }} DD_CREDENTIAL_AES_256_KEY: {{ .Values.admin.credentialAes256Key | b64enc | quote }} -{{- else}} +{{- else }} DD_CREDENTIAL_AES_256_KEY: {{ randAlphaNum 128 | b64enc | quote }} -{{- end}} +{{- end }} {{- if .Values.admin.metricsHttpAuthPassword }} METRICS_HTTP_AUTH_PASSWORD: {{ .Values.admin.metricsHttpAuthPassword | b64enc | quote }} -{{- else}} +{{- else }} METRICS_HTTP_AUTH_PASSWORD: {{ randAlphaNum 32 | b64enc | quote }} -{{- end}} +{{- end }} {{- end }} diff --git a/helm/defectdojo/templates/tests/unit-tests.yaml b/helm/defectdojo/templates/tests/unit-tests.yaml index 08939429008..01fa4cf1041 100644 --- a/helm/defectdojo/templates/tests/unit-tests.yaml +++ b/helm/defectdojo/templates/tests/unit-tests.yaml @@ -19,7 +19,7 @@ spec: {{- end }} containers: - name: unit-tests - image: '{{ .Values.repositoryPrefix }}/defectdojo-django:{{ .Values.tag }}' + image: '{{ template "unitTests.image" . 
}}' imagePullPolicy: {{ .Values.imagePullPolicy }} {{- if .Values.securityContext.enabled }} securityContext: diff --git a/helm/defectdojo/values.schema.json b/helm/defectdojo/values.schema.json index 3d899e176e0..d091be4e1a2 100644 --- a/helm/defectdojo/values.schema.json +++ b/helm/defectdojo/values.schema.json @@ -31,13 +31,15 @@ } } }, - "annotations": { - "type": "object" + "alternativeHosts": { + "description": "optional list of alternative hostnames to use that gets appended to DD_ALLOWED_HOSTS. This is necessary when your local hostname does not match the global hostname.", + "type": "array" }, "celery": { "type": "object", "properties": { "annotations": { + "description": "Common annotations to worker and beat deployments and pods.", "type": "object" }, "beat": { @@ -47,33 +49,67 @@ "type": "object" }, "annotations": { + "description": "Annotations for the Celery beat deployment.", "type": "object" }, "automountServiceAccountToken": { "type": "boolean" }, + "containerSecurityContext": { + "description": "Container security context for the Celery beat containers.", + "type": "object" + }, "extraEnv": { + "description": "Additional environment variables injected to Celery beat containers.", "type": "array" }, "extraInitContainers": { + "description": "A list of additional initContainers to run before celery beat containers.", "type": "array" }, "extraVolumeMounts": { + "description": "Array of additional volume mount points for the celery beat containers.", "type": "array" }, "extraVolumes": { + "description": "A list of extra volumes to mount @type: array\u003cmap\u003e", "type": "array" }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "livenessProbe": { + "description": "Enable liveness probe for Celery beat container. 
``` exec: command: - bash - -c - celery -A dojo inspect ping -t 5 initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 10 ```", "type": "object" }, "nodeSelector": { "type": "object" }, "podAnnotations": { + "description": "Annotations for the Celery beat pods.", + "type": "object" + }, + "podSecurityContext": { + "description": "Pod security context for the Celery beat pods.", "type": "object" }, "readinessProbe": { + "description": "Enable readiness probe for Celery beat container.", "type": "object" }, "replicas": { @@ -107,6 +143,7 @@ } }, "startupProbe": { + "description": "Enable startup probe for Celery beat container.", "type": "object" }, "tolerations": { @@ -127,12 +164,14 @@ "type": "object" }, "annotations": { + "description": "Annotations for the Celery worker deployment.", "type": "object" }, "appSettings": { "type": "object", "properties": { "poolType": { + "description": "Performance improved celery worker config when needing to deal with a lot of findings (e.g deduplication ops) poolType: prefork autoscaleMin: 2 autoscaleMax: 8 concurrency: 8 prefetchMultiplier: 128", "type": "string" } } @@ -140,28 +179,61 @@ "automountServiceAccountToken": { "type": "boolean" }, + "containerSecurityContext": { + "description": "Container security context for the Celery worker containers.", + "type": "object" + }, "extraEnv": { + "description": "Additional environment variables injected to Celery worker containers.", "type": "array" }, "extraInitContainers": { + "description": "A list of additional initContainers to run before celery worker containers.", "type": "array" }, "extraVolumeMounts": { + "description": "Array of additional volume mount points for the celery worker containers.", "type": "array" }, "extraVolumes": { + "description": "A list of extra volumes to mount. 
@type: array\u003cmap\u003e", "type": "array" }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "livenessProbe": { + "description": "Enable liveness probe for Celery worker containers. ``` exec: command: - bash - -c - celery -A dojo inspect ping -t 5 initialDelaySeconds: 30 periodSeconds: 60 timeoutSeconds: 10 ```", "type": "object" }, "nodeSelector": { "type": "object" }, "podAnnotations": { + "description": "Annotations for the Celery beat pods.", + "type": "object" + }, + "podSecurityContext": { + "description": "Pod security context for the Celery worker pods.", "type": "object" }, "readinessProbe": { + "description": "Enable readiness probe for Celery worker container.", "type": "object" }, "replicas": { @@ -195,6 +267,7 @@ } }, "startupProbe": { + "description": "Enable startup probe for Celery worker container.", "type": "object" }, "tolerations": { @@ -205,15 +278,31 @@ } }, "cloudsql": { + "description": "Google CloudSQL support in GKE via gce-proxy", "type": "object", "properties": { + "containerSecurityContext": { + "description": "Optional: security context for the CloudSQL proxy container.", + "type": "object" + }, "enable_iam_login": { + "description": "use IAM database authentication", "type": "boolean" }, "enabled": { + "description": "To use CloudSQL in GKE set 'enable: true'", "type": "boolean" }, + "extraEnv": { + "description": "Additional environment variables for the CloudSQL proxy container.", + "type": "array" + }, + "extraVolumeMounts": { + "description": "Array of additional volume mount points for the CloudSQL proxy container", + "type": "array" + }, "image": { + "description": "set repo and image tag of gce-proxy", "type": "object", "properties": { "pullPolicy": { @@ -228,32 +317,74 @@ } }, "instance": { + 
"description": "set CloudSQL instance: 'project:zone:instancename'", "type": "string" }, + "resources": { + "description": "Optional: add resource requests/limits for the CloudSQL proxy container.", + "type": "object" + }, "use_private_ip": { + "description": "whether to use a private IP to connect to the database", "type": "boolean" }, "verbose": { + "description": "By default, the proxy has verbose logging. Set this to false to make it less verbose", "type": "boolean" } } }, "createPostgresqlSecret": { + "description": "create postgresql secret in defectdojo chart, outside of postgresql chart", "type": "boolean" }, "createRedisSecret": { + "description": "create redis secret in defectdojo chart, outside of redis chart", "type": "boolean" }, "createSecret": { + "description": "create defectdojo specific secret", "type": "boolean" }, "dbMigrationChecker": { "type": "object", "properties": { + "containerSecurityContext": { + "description": "Container security context for the DB migration checker.", + "type": "object" + }, "enabled": { + "description": "Enable/disable the DB migration checker.", "type": "boolean" }, + "extraEnv": { + "description": "Additional environment variables for DB migration checker.", + "type": "array" + }, + "extraVolumeMounts": { + "description": "Array of additional volume mount points for DB migration checker.", + "type": "array" + }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "resources": { + "description": "Resource requests/limits for the DB migration checker.", "type": "object", "properties": { "limits": { @@ -283,6 +414,7 @@ } }, "disableHooks": { + "description": "Avoid using pre-install hooks, which might cause issues with ArgoCD", "type": "boolean" }, "django": { @@ -297,10 +429,20 @@ 
"automountServiceAccountToken": { "type": "boolean" }, + "extraEnv": { + "description": "Additional environment variables injected to all Django containers and initContainers.", + "type": "array" + }, "extraInitContainers": { + "description": "A list of additional initContainers to run before the uwsgi and nginx containers.", + "type": "array" + }, + "extraVolumeMounts": { + "description": "Array of additional volume mount points common to all containers and initContainers.", "type": "array" }, "extraVolumes": { + "description": "A list of extra volumes to mount.", "type": "array" }, "ingress": { @@ -310,6 +452,7 @@ "type": "boolean" }, "annotations": { + "description": "Restricts the type of ingress controller that can interact with our chart (nginx, traefik, ...) `kubernetes.io/ingress.class: nginx` Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts `nginx.ingress.kubernetes.io/proxy-read-timeout: \"1800\"` `nginx.ingress.kubernetes.io/proxy-send-timeout: \"1800\"`", "type": "object" }, "enabled": { @@ -324,6 +467,7 @@ } }, "mediaPersistentVolume": { + "description": "This feature needs more preparation before can be enabled, please visit KUBERNETES.md#media-persistent-volume", "type": "object", "properties": { "enabled": { @@ -333,18 +477,22 @@ "type": "integer" }, "name": { + "description": "any name", "type": "string" }, "persistentVolumeClaim": { + "description": "in case if pvc specified, should point to the already existing pvc", "type": "object", "properties": { "accessModes": { + "description": "check KUBERNETES.md doc first for option to choose", "type": "array", "items": { "type": "string" } }, "create": { + "description": "set to true to create a new pvc and if django.mediaPersistentVolume.type is set to pvc", "type": "boolean" }, "name": { @@ -359,6 +507,7 @@ } }, "type": { + "description": "could be emptyDir (not for production) or pvc", "type": "string" 
} } @@ -366,12 +515,42 @@ "nginx": { "type": "object", "properties": { + "containerSecurityContext": { + "description": "Container security context for the nginx containers.", + "type": "object", + "properties": { + "runAsUser": { + "description": "nginx dockerfile sets USER=1001", + "type": "integer" + } + } + }, "extraEnv": { + "description": "To extra environment variables to the nginx container, you can use extraEnv. For example: extraEnv: - name: FOO valueFrom: configMapKeyRef: name: foo key: bar", "type": "array" }, "extraVolumeMounts": { + "description": "Array of additional volume mount points for nginx containers.", "type": "array" }, + "image": { + "description": "If empty, uses values from images.nginx.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "resources": { "type": "object", "properties": { @@ -415,6 +594,15 @@ "nodeSelector": { "type": "object" }, + "podSecurityContext": { + "description": "Pod security context for the Django pods.", + "type": "object", + "properties": { + "fsGroup": { + "type": "integer" + } + } + }, "replicas": { "type": "integer" }, @@ -442,6 +630,7 @@ "type": "object", "properties": { "maxFd": { + "description": "Use this value to set the maximum number of file descriptors. If set to 0 will be detected by uwsgi e.g. 
102400", "type": "integer" }, "processes": { @@ -465,23 +654,55 @@ "type": "string" }, "enabled": { + "description": "includes additional CA certificate as volume, it refrences REQUESTS_CA_BUNDLE env varible NOTE: it reflects REQUESTS_CA_BUNDLE for celery workers, beats as well", "type": "boolean" } } }, + "containerSecurityContext": { + "type": "object", + "properties": { + "runAsUser": { + "description": "django dockerfile sets USER=1001", + "type": "integer" + } + } + }, "enableDebug": { + "description": "this also requires DD_DEBUG to be set to True", "type": "boolean" }, "extraEnv": { + "description": "To add (or override) extra variables which need to be pulled from another configMap, you can use extraEnv. For example: extraEnv: - name: DD_DATABASE_HOST valueFrom: configMapKeyRef: name: my-other-postgres-configmap key: cluster_endpoint", "type": "array" }, "extraVolumeMounts": { + "description": "Array of additional volume mount points for uwsgi containers.", "type": "array" }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "livenessProbe": { "type": "object", "properties": { "enabled": { + "description": "Enable liveness checks on uwsgi container.", "type": "boolean" }, "failureThreshold": { @@ -505,6 +726,7 @@ "type": "object", "properties": { "enabled": { + "description": "Enable readiness checks on uwsgi container.", "type": "boolean" }, "failureThreshold": { @@ -555,6 +777,7 @@ "type": "object", "properties": { "enabled": { + "description": "Enable startup checks on uwsgi container.", "type": "boolean" }, "failureThreshold": { @@ -578,44 +801,111 @@ } } }, + "extraAnnotations": { + "description": "Annotations globally added to all resources", + "type": "object" + }, "extraConfigs": { + "description": "To add extra variables 
not predefined by helm config it is possible to define in extraConfigs block, e.g. below: NOTE Do not store any kind of sensitive information inside of it ``` DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED: 'true' DD_SOCIAL_AUTH_AUTH0_KEY: 'dev' DD_SOCIAL_AUTH_AUTH0_DOMAIN: 'xxxxx' ```", "type": "object" }, "extraEnv": { + "description": "To add (or override) extra variables which need to be pulled from another configMap, you can use extraEnv. For example: ``` - name: DD_DATABASE_HOST valueFrom: configMapKeyRef: name: my-other-postgres-configmap key: cluster_endpoint ```", "type": "array" }, "extraLabels": { + "description": "Labels globally added to all resources", "type": "object" }, "extraSecrets": { + "description": "Extra secrets can be created inside of extraSecrets block: ``` DD_SOCIAL_AUTH_AUTH0_SECRET: 'xxx' ```", "type": "object" }, "gke": { + "description": "Settings to make running the chart on GKE simpler", "type": "object", "properties": { "useGKEIngress": { + "description": "Set to true to configure the Ingress to use the GKE provided ingress controller", "type": "boolean" }, "useManagedCertificate": { + "description": "Set to true to have GKE automatically provision a TLS certificate for the host specified Requires useGKEIngress to be set to true When using this option, be sure to set django.ingress.activateTLS to false", "type": "boolean" }, "workloadIdentityEmail": { + "description": "Workload Identity allows the K8s service account to assume the IAM access of a GCP service account to interact with other GCP services Only works with serviceAccount.create = true", "type": "string" } } }, "host": { + "description": "Primary hostname of instance", "type": "string" }, "imagePullPolicy": { "type": "string" }, "imagePullSecrets": { + "description": "When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project)", "type": [ "string", "null" ] }, + "images": { + "type": "object", + "properties": { + 
"django": { + "type": "object", + "properties": { + "image": { + "type": "object", + "properties": { + "digest": { + "description": "Prefix \"sha@\" is expected in this place", + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "description": "If empty, use appVersion. Another possible values are: latest, X.X.X, X.X.X-debian, X.X.X-alpine (where X.X.X is version of DD). For dev builds (only for testing purposes): nightly-dev, nightly-dev-debian, nightly-dev-alpine. To see all, check https://hub.docker.com/r/defectdojo/defectdojo-django/tags.", + "type": "string" + } + } + } + } + }, + "nginx": { + "type": "object", + "properties": { + "image": { + "type": "object", + "properties": { + "digest": { + "description": "Prefix \"sha@\" is expected in this place", + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "description": "If empty, use appVersion. Another possible values are: latest, X.X.X, X.X.X-alpine (where X.X.X is version of DD). For dev builds (only for testing purposes): nightly-dev, nightly-dev-alpine. 
To see all, check https://hub.docker.com/r/defectdojo/defectdojo-nginx/tags.", + "type": "string" + } + } + } + } + } + } + }, "initializer": { "type": "object", "properties": { @@ -628,19 +918,45 @@ "automountServiceAccountToken": { "type": "boolean" }, + "containerSecurityContext": { + "description": "Container security context for the initializer Job container", + "type": "object" + }, "extraEnv": { + "description": "Additional environment variables injected to the initializer job pods.", "type": "array" }, "extraVolumeMounts": { + "description": "Array of additional volume mount points for the initializer job (init)containers.", "type": "array" }, "extraVolumes": { + "description": "A list of extra volumes to attach to the initializer job pods.", "type": "array" }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "jobAnnotations": { "type": "object" }, "keepSeconds": { + "description": "A positive integer will keep this Job and Pod deployed for the specified number of seconds, after which they will be removed. 
For all other values, the Job and Pod will remain deployed.", "type": "integer" }, "labels": { @@ -649,6 +965,10 @@ "nodeSelector": { "type": "object" }, + "podSecurityContext": { + "description": "Pod security context for the initializer Job", + "type": "object" + }, "resources": { "type": "object", "properties": { @@ -680,6 +1000,7 @@ "type": "boolean" }, "staticName": { + "description": "staticName defines whether name of the job will be the same (e.g., \"defectdojo-initializer\") or different every time - generated based on current time (e.g., \"defectdojo-initializer-2024-11-11-18-57\") This might be handy for ArgoCD deployments", "type": "boolean" }, "tolerations": { @@ -688,6 +1009,7 @@ } }, "localsettingspy": { + "description": "To add code snippet which would extend setting functionality, you might add it here It will be stored as ConfigMap and mounted `dojo/settings/local_settings.py`. For more see: https://documentation.defectdojo.com/getting_started/configuration/ For example: ``` localsettingspy: | INSTALLED_APPS += ( 'debug_toolbar', ) MIDDLEWARE = [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] + MIDDLEWARE ```", "type": "string" }, "monitoring": { @@ -699,49 +1021,87 @@ "prometheus": { "type": "object", "properties": { + "containerSecurityContext": { + "description": "Optional: container security context for nginx prometheus exporter", + "type": "object" + }, "enabled": { + "description": "Add the nginx prometheus exporter sidecar", "type": "boolean" }, + "extraEnv": { + "description": "Optional: additional environment variables injected to the nginx prometheus exporter container", + "type": "array" + }, + "extraVolumeMounts": { + "description": "Array of additional volume mount points for the nginx prometheus exporter", + "type": "array" + }, "image": { - "type": "string" + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + 
"type": "string" + } + } }, "imagePullPolicy": { "type": "string" + }, + "resources": { + "description": "Optional: add resource requests/limits for the nginx prometheus exporter container", + "type": "object" } } } } }, "networkPolicy": { + "description": "Enables application network policy For more info follow https://kubernetes.io/docs/concepts/services-networking/network-policies/", "type": "object", "properties": { "annotations": { "type": "object" }, "egress": { + "description": " ``` egress: - to: - ipBlock: cidr: 10.0.0.0/24 ports: - protocol: TCP port: 443 ```", "type": "array" }, "enabled": { "type": "boolean" }, "ingress": { + "description": "For more detailed configuration with ports and peers. It will ignore ingressExtend ``` ingress: - from: - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo-prometheus ports: - protocol: TCP port: 8443 ```", "type": "array" }, "ingressExtend": { + "description": "if additional labels need to be allowed (e.g. 
prometheus scraper) ``` ingressExtend: - podSelector: matchLabels: app.kubernetes.io/instance: defectdojo-prometheus ```", "type": "array" } } }, "podLabels": { + "description": "Additional labels to add to the pods: ``` podLabels: key: value ```", "type": "object" }, "postgresServer": { + "description": "To use an external PostgreSQL instance (like CloudSQL), set `postgresql.enabled` to false, set items in `postgresql.auth` part for authentication, and set the address here:", "type": [ "string", "null" ] }, "postgresql": { + "description": "For more advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/postgresql", "type": "object", "properties": { "architecture": { @@ -779,6 +1139,7 @@ } }, "enabled": { + "description": "To use an external instance, switch enabled to `false` and set the address in `postgresServer` below", "type": "boolean" }, "primary": { @@ -791,9 +1152,11 @@ "type": "object", "properties": { "enabled": { + "description": "Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC", "type": "boolean" }, "runAsUser": { + "description": "runAsUser specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift \"restricted SCC\" to work successfully.", "type": "integer" } } @@ -816,9 +1179,11 @@ "type": "object", "properties": { "enabled": { + "description": "Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC", "type": "boolean" }, "fsGroup": { + "description": "fsGroup specification below is not applied if enabled=false. 
enabled=false is the required setting for OpenShift \"restricted SCC\" to work successfully.", "type": "integer" } } @@ -855,6 +1220,7 @@ "type": "object", "properties": { "containerSecurityContext": { + "description": "if using restricted SCC set runAsUser: \"auto\" and if running under anyuid SCC - runAsUser needs to match the line above", "type": "object", "properties": { "runAsUser": { @@ -870,6 +1236,7 @@ } }, "redis": { + "description": "For more advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/redis", "type": "object", "properties": { "architecture": { @@ -890,6 +1257,7 @@ } }, "enabled": { + "description": "To use an external instance, switch enabled to `false`` and set the address in `redisServer` below", "type": "boolean" }, "sentinel": { @@ -904,6 +1272,7 @@ "type": "object", "properties": { "enabled": { + "description": "If TLS is enabled, the Redis broker will use the redis:// and optionally mount the certificates from an existing secret.", "type": "boolean" } } @@ -911,47 +1280,49 @@ } }, "redisParams": { + "description": "Parameters attached to the redis connection string, defaults to \"ssl_cert_reqs=optional\" if `redis.tls.enabled`", "type": "string" }, "redisServer": { + "description": "To use an external Redis instance, set `redis.enabled` to false and set the address here:", "type": [ "string", "null" ] }, - "repositoryPrefix": { - "type": "string" - }, "revisionHistoryLimit": { + "description": "Allow overriding of revisionHistoryLimit across all deployments.", "type": "integer" }, "secrets": { "type": "object", "properties": { "annotations": { + "description": "Add annotations for secret resources", "type": "object" } } }, "securityContext": { + "description": "Security context settings", "type": "object", "properties": { - "djangoSecurityContext": { + "containerSecurityContext": { "type": "object", "properties": { - "runAsUser": { - "type": "integer" + "runAsNonRoot": { + "type": 
"boolean" } } }, "enabled": { "type": "boolean" }, - "nginxSecurityContext": { + "podSecurityContext": { "type": "object", "properties": { - "runAsUser": { - "type": "integer" + "runAsNonRoot": { + "type": "boolean" } } } @@ -961,17 +1332,25 @@ "type": "object", "properties": { "annotations": { + "description": "Optional additional annotations to add to the DefectDojo's Service Account.", "type": "object" }, "create": { + "description": "Specifies whether a service account should be created.", "type": "boolean" }, "labels": { + "description": "Optional additional labels to add to the DefectDojo's Service Account.", "type": "object" + }, + "name": { + "description": "The name of the service account to use. If not set and create is true, a name is generated using the fullname template", + "type": "string" } } }, - "tag": { + "siteUrl": { + "description": "The full URL to your defectdojo instance, depends on the domain where DD is deployed, it also affects links in Jira. Use syntax: `siteUrl: 'https://\u003cyourdomain\u003e'`", "type": "string" }, "tests": { @@ -983,6 +1362,24 @@ "automountServiceAccountToken": { "type": "boolean" }, + "image": { + "description": "If empty, uses values from images.django.image", + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, "resources": { "type": "object", "properties": { @@ -1015,6 +1412,7 @@ } }, "trackConfig": { + "description": "Track configuration (trackConfig): will automatically respin application pods in case of config changes detection can be: 1. disabled (default) 2. 
enabled, enables tracking configuration changes based on SHA256", "type": "string" } } diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index dd47f65eea4..419fe3fe743 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -1,35 +1,70 @@ --- -# Global settings -# create defectdojo specific secret +# -- Security context settings +securityContext: + enabled: true + containerSecurityContext: + runAsNonRoot: true + podSecurityContext: + runAsNonRoot: true + +# -- create defectdojo specific secret createSecret: false -# create redis secret in defectdojo chart, outside of redis chart +# -- create redis secret in defectdojo chart, outside of redis chart createRedisSecret: false -# create postgresql secret in defectdojo chart, outside of postgresql chart +# -- create postgresql secret in defectdojo chart, outside of postgresql chart createPostgresqlSecret: false -# Track configuration (trackConfig): will automatically respin application pods in case of config changes detection +# -- Track configuration (trackConfig): will automatically respin application pods in case of config changes detection # can be: -# - disabled, default -# - enabled, enables tracking configuration changes based on SHA256 +# 1. disabled (default) +# 2. enabled, enables tracking configuration changes based on SHA256 trackConfig: disabled -# Avoid using pre-install hooks, which might cause issues with ArgoCD +# -- Avoid using pre-install hooks, which might cause issues with ArgoCD disableHooks: false +# -- Annotations globally added to all resources +extraAnnotations: {} +# -- Labels globally added to all resources extraLabels: {} -# Add extra labels for k8s -# Enables application network policy +images: + django: + image: + registry: "" + repository: defectdojo/defectdojo-django + # -- If empty, use appVersion. + # Another possible values are: latest, X.X.X, X.X.X-debian, X.X.X-alpine (where X.X.X is version of DD). 
+ # For dev builds (only for testing purposes): nightly-dev, nightly-dev-debian, nightly-dev-alpine. + # To see all, check https://hub.docker.com/r/defectdojo/defectdojo-django/tags. + tag: "" + # -- Prefix "sha@" is expected in this place + digest: "" + nginx: + image: + registry: "" + repository: defectdojo/defectdojo-nginx + # -- If empty, use appVersion. + # Another possible values are: latest, X.X.X, X.X.X-alpine (where X.X.X is version of DD). + # For dev builds (only for testing purposes): nightly-dev, nightly-dev-alpine. + # To see all, check https://hub.docker.com/r/defectdojo/defectdojo-nginx/tags. + tag: "" + # -- Prefix "sha@" is expected in this place + digest: "" + +# -- Enables application network policy # For more info follow https://kubernetes.io/docs/concepts/services-networking/network-policies/ networkPolicy: enabled: false - # if additional labels need to be allowed (e.g. prometheus scraper) - ingressExtend: [] + # -- if additional labels need to be allowed (e.g. prometheus scraper) + # ``` # ingressExtend: # - podSelector: # matchLabels: # app.kubernetes.io/instance: defectdojo-prometheus - # For more detailed configuration with ports and peers. It will ignore ingressExtend - ingress: [] + # ``` + ingressExtend: [] + # -- For more detailed configuration with ports and peers. 
It will ignore ingressExtend + # ``` # ingress: # - from: # - podSelector: @@ -41,7 +76,10 @@ networkPolicy: # ports: # - protocol: TCP # port: 8443 - egress: [] + # ``` + ingress: [] + # -- + # ``` # egress: # - to: # - ipBlock: @@ -49,61 +87,68 @@ networkPolicy: # ports: # - protocol: TCP # port: 443 + # ``` + egress: [] annotations: {} -# Primary hostname of instance +# -- Primary hostname of instance host: defectdojo.default.minikube.local -# The full URL to your defectdojo instance, depends on the domain where DD is deployed, it also affects links in Jira -# siteUrl: 'https://' +# -- The full URL to your defectdojo instance, depends on the domain where DD is deployed, it also affects links in Jira. +# Use syntax: `siteUrl: 'https://'` +siteUrl: "" -# optional list of alternative hostnames to use that gets appended to +# -- optional list of alternative hostnames to use that gets appended to # DD_ALLOWED_HOSTS. This is necessary when your local hostname does not match # the global hostname. -# alternativeHosts: +alternativeHosts: [] # - defectdojo.example.com imagePullPolicy: Always -# Where to pull the defectDojo images from. 
Defaults to "defectdojo/*" repositories on hub.docker.com -repositoryPrefix: defectdojo -# When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project) -# Create secrets as: kubectl create secret docker-registry defectdojoregistrykey --docker-username=registry_username --docker-password=registry_password --docker-server='https://index.docker.io/v1/' # @schema type:[string, null] +# -- When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project) +# Create secrets as: kubectl create secret docker-registry defectdojoregistrykey --docker-username=registry_username --docker-password=registry_password --docker-server='https://index.docker.io/v1/' imagePullSecrets: ~ -tag: latest -# Additional labels to add to the pods: +# -- Additional labels to add to the pods: +# ``` # podLabels: # key: value +# ``` podLabels: {} -# Allow overriding of revisionHistoryLimit across all deployments. +# -- Allow overriding of revisionHistoryLimit across all deployments. revisionHistoryLimit: 10 -securityContext: - enabled: true - djangoSecurityContext: - # django dockerfile sets USER=1001 - runAsUser: 1001 - nginxSecurityContext: - # nginx dockerfile sets USER=1001 - runAsUser: 1001 - serviceAccount: - # Specifies whether a service account should be created. + # -- Specifies whether a service account should be created. create: true - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template - # name: "" + name: "" - # Optional additional annotations to add to the DefectDojo's Service Account. + # -- Optional additional annotations to add to the DefectDojo's Service Account. annotations: {} - # Optional additional labels to add to the DefectDojo's Service Account. + # -- Optional additional labels to add to the DefectDojo's Service Account. 
labels: {} dbMigrationChecker: + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" + # -- Enable/disable the DB migration checker. enabled: true + # -- Container security context for the DB migration checker. + containerSecurityContext: {} + # -- Additional environment variables for DB migration checker. + extraEnv: [] + # -- Array of additional volume mount points for DB migration checker. + extraVolumeMounts: [] + # -- Resource requests/limits for the DB migration checker. resources: requests: cpu: 100m @@ -114,6 +159,12 @@ dbMigrationChecker: tests: unitTests: + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" automountServiceAccountToken: false resources: requests: @@ -135,52 +186,74 @@ admin: monitoring: enabled: false - # Add the nginx prometheus exporter sidecar prometheus: + # -- Add the nginx prometheus exporter sidecar enabled: false - image: nginx/nginx-prometheus-exporter:1.4.2 + image: + registry: "" + repository: nginx/nginx-prometheus-exporter + tag: "1.4.2" + digest: "" imagePullPolicy: IfNotPresent - -annotations: {} + # -- Optional: container security context for nginx prometheus exporter + containerSecurityContext: {} + # -- Optional: additional environment variables injected to the nginx prometheus exporter container + extraEnv: [] + # -- Array of additional volume mount points for the nginx prometheus exporter + extraVolumeMounts: [] + # -- Optional: add resource requests/limits for the nginx prometheus exporter container + resources: {} secrets: - # Add annotations for secret resources + # -- Add annotations for secret resources annotations: {} # Components celery: broker: redis logLevel: INFO - # Common annotations to worker and beat deployments and pods. + # -- Common annotations to worker and beat deployments and pods. 
annotations: {} beat: + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" automountServiceAccountToken: false - # Annotations for the Celery beat deployment. + # -- Annotations for the Celery beat deployment. annotations: {} affinity: {} - # Additional environment variables injected to Celery beat containers. + # -- Container security context for the Celery beat containers. + containerSecurityContext: {} + # -- Additional environment variables injected to Celery beat containers. extraEnv: [] - # A list of additional initContainers to run before celery beat containers. + # -- A list of additional initContainers to run before celery beat containers. extraInitContainers: [] - # Array of additional volume mount points for the celery beat containers. + # -- Array of additional volume mount points for the celery beat containers. extraVolumeMounts: [] - # A list of extra volumes to mount + # -- A list of extra volumes to mount # @type: array extraVolumes: [] - # Enable liveness probe for Celery beat container. + # -- Enable liveness probe for Celery beat container. + # ``` + # exec: + # command: + # - bash + # - -c + # - celery -A dojo inspect ping -t 5 + # initialDelaySeconds: 30 + # periodSeconds: 60 + # timeoutSeconds: 10 + # ``` livenessProbe: {} - # exec: - # command: - # - bash - # - -c - # - celery -A dojo inspect ping -t 5 - # initialDelaySeconds: 30 - # periodSeconds: 60 - # timeoutSeconds: 10 nodeSelector: {} - # Annotations for the Celery beat pods. + # -- Annotations for the Celery beat pods. podAnnotations: {} - # Enable readiness probe for Celery beat container. + # -- Pod security context for the Celery beat pods. + podSecurityContext: {} + # -- Enable readiness probe for Celery beat container. readinessProbe: {} replicas: 1 resources: @@ -190,37 +263,49 @@ celery: limits: cpu: 2000m memory: 256Mi - # Enable startup probe for Celery beat container. 
+ # -- Enable startup probe for Celery beat container. startupProbe: {} tolerations: [] worker: + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" automountServiceAccountToken: false - # Annotations for the Celery worker deployment. + # -- Annotations for the Celery worker deployment. annotations: {} affinity: {} - # Additional environment variables injected to Celery worker containers. + # -- Container security context for the Celery worker containers. + containerSecurityContext: {} + # -- Additional environment variables injected to Celery worker containers. extraEnv: [] - # A list of additional initContainers to run before celery worker containers. + # -- A list of additional initContainers to run before celery worker containers. extraInitContainers: [] - # Array of additional volume mount points for the celery worker containers. + # -- Array of additional volume mount points for the celery worker containers. extraVolumeMounts: [] - # A list of extra volumes to mount. + # -- A list of extra volumes to mount. # @type: array extraVolumes: [] - # Enable liveness probe for Celery worker containers. + # -- Enable liveness probe for Celery worker containers. + # ``` + # exec: + # command: + # - bash + # - -c + # - celery -A dojo inspect ping -t 5 + # initialDelaySeconds: 30 + # periodSeconds: 60 + # timeoutSeconds: 10 + # ``` livenessProbe: {} - # exec: - # command: - # - bash - # - -c - # - celery -A dojo inspect ping -t 5 - # initialDelaySeconds: 30 - # periodSeconds: 60 - # timeoutSeconds: 10 nodeSelector: {} - # Annotations for the Celery beat pods. + # -- Annotations for the Celery beat pods. podAnnotations: {} - # Enable readiness probe for Celery worker container. + # -- Pod security context for the Celery worker pods. + podSecurityContext: {} + # -- Enable readiness probe for Celery worker container. 
readinessProbe: {} replicas: 1 resources: @@ -230,18 +315,17 @@ celery: limits: cpu: 2000m memory: 512Mi - # Enable startup probe for Celery worker container. + # -- Enable startup probe for Celery worker container. startupProbe: {} tolerations: [] appSettings: - poolType: solo - # Performance improved celery worker config when needing to deal with a lot of findings (e.g deduplication ops) - # Comment out the "solo" line, and uncomment the following lines. + # -- Performance improved celery worker config when needing to deal with a lot of findings (e.g deduplication ops) # poolType: prefork # autoscaleMin: 2 # autoscaleMax: 8 # concurrency: 8 # prefetchMultiplier: 128 + poolType: solo django: automountServiceAccountToken: false @@ -250,19 +334,32 @@ django: annotations: {} type: "" affinity: {} + # -- Pod security context for the Django pods. + podSecurityContext: + fsGroup: 1001 ingress: enabled: true ingressClassName: "" activateTLS: true secretName: defectdojo-tls + # -- Restricts the type of ingress controller that can interact with our chart (nginx, traefik, ...) + # `kubernetes.io/ingress.class: nginx` + # Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts + # `nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"` + # `nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"` annotations: {} - # Restricts the type of ingress controller that can interact with our chart (nginx, traefik, ...) - # kubernetes.io/ingress.class: nginx - # Depending on the size and complexity of your scans, you might want to increase the default ingress timeouts if you see repeated 504 Gateway Timeouts - # nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" - # nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" nginx: - # To extra environment variables to the nginx container, you can use extraEnv. 
For example: + # -- If empty, uses values from images.nginx.image + image: + registry: "" + repository: "" + tag: "" + digest: "" + # -- Container security context for the nginx containers. + containerSecurityContext: + # -- nginx dockerfile sets USER=1001 + runAsUser: 1001 + # -- To extra environment variables to the nginx container, you can use extraEnv. For example: # extraEnv: # - name: FOO # valueFrom: @@ -270,7 +367,7 @@ django: # name: foo # key: bar extraEnv: [] - # Array of additional volume mount points for nginx containers. + # -- Array of additional volume mount points for nginx containers. extraVolumeMounts: [] tls: enabled: false @@ -287,7 +384,16 @@ django: strategy: {} tolerations: [] uwsgi: - # To add (or override) extra variables which need to be pulled from another configMap, you can + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" + containerSecurityContext: + # -- django dockerfile sets USER=1001 + runAsUser: 1001 + # -- To add (or override) extra variables which need to be pulled from another configMap, you can # use extraEnv. For example: # extraEnv: # - name: DD_DATABASE_HOST @@ -296,10 +402,10 @@ django: # name: my-other-postgres-configmap # key: cluster_endpoint extraEnv: [] - # Array of additional volume mount points for uwsgi containers. + # -- Array of additional volume mount points for uwsgi containers. extraVolumeMounts: [] livenessProbe: - # Enable liveness checks on uwsgi container. + # -- Enable liveness checks on uwsgi container. enabled: true failureThreshold: 6 initialDelaySeconds: 0 @@ -307,7 +413,7 @@ django: successThreshold: 1 timeoutSeconds: 5 readinessProbe: - # Enable readiness checks on uwsgi container. + # -- Enable readiness checks on uwsgi container. enabled: true failureThreshold: 6 initialDelaySeconds: 0 @@ -315,7 +421,7 @@ django: successThreshold: 1 timeoutSeconds: 5 startupProbe: - # Enable startup checks on uwsgi container. 
 + # -- Enable startup checks on uwsgi container. enabled: true failureThreshold: 30 initialDelaySeconds: 0 @@ -332,10 +438,13 @@ django: appSettings: processes: 4 threads: 4 - maxFd: 0 # 102400 # Use this value to set the maximum number of file descriptors. If set to 0 will be detected by uwsgi - enableDebug: false # this also requires DD_DEBUG to be set to True + # -- Use this value to set the maximum number of file descriptors. If set to 0 will be detected by uwsgi + # e.g. 102400 + maxFd: 0 + # -- this also requires DD_DEBUG to be set to True + enableDebug: false certificates: - # includes additional CA certificate as volume, it refrences REQUESTS_CA_BUNDLE env varible + # -- includes additional CA certificate as volume, it references REQUESTS_CA_BUNDLE env variable + # to create configMap `kubectl create cm defectdojo-ca-certs --from-file=ca.crt` # NOTE: it reflects REQUESTS_CA_BUNDLE for celery workers, beats as well enabled: false @@ -343,27 +452,32 @@ django: certMountPath: /certs/ certFileName: ca.crt - # A list of additional initContainers to run before the uwsgi and nginx containers. + # -- Additional environment variables injected to all Django containers and initContainers. + extraEnv: [] + # -- A list of additional initContainers to run before the uwsgi and nginx containers. extraInitContainers: [] - # A list of extra volumes to mount. + # -- Array of additional volume mount points common to all containers and initContainers. + extraVolumeMounts: [] + # -- A list of extra volumes to mount. 
extraVolumes: [] - # This feature needs more preparation before can be enabled, please visit KUBERNETES.md#media-persistent-volume + # -- This feature needs more preparation before can be enabled, please visit KUBERNETES.md#media-persistent-volume mediaPersistentVolume: enabled: true fsGroup: 1001 - # any name + # -- any name name: media - # could be emptyDir (not for production) or pvc + # -- could be emptyDir (not for production) or pvc type: emptyDir - # in case if pvc specified, should point to the already existing pvc + # -- in case if pvc specified, should point to the already existing pvc persistentVolumeClaim: - # set to true to create a new pvc and if django.mediaPersistentVolume.type is set to pvc + # -- set to true to create a new pvc and if django.mediaPersistentVolume.type is set to pvc create: false name: "" size: 5Gi + # -- check KUBERNETES.md doc first for option to choose accessModes: - - ReadWriteMany # check KUBERNETES.md doc first for option to choose + - ReadWriteMany storageClassName: "" initializer: @@ -372,10 +486,17 @@ initializer: jobAnnotations: {} annotations: {} labels: {} - keepSeconds: 60 # A positive integer will keep this Job and Pod deployed for the specified number of seconds, after which they will be removed. For all other values, the Job and Pod will remain deployed. + # -- A positive integer will keep this Job and Pod deployed for the specified number of seconds, after which they will be removed. For all other values, the Job and Pod will remain deployed. + keepSeconds: 60 affinity: {} nodeSelector: {} tolerations: [] + # -- If empty, uses values from images.django.image + image: + registry: "" + repository: "" + tag: "" + digest: "" resources: requests: cpu: 100m @@ -383,21 +504,25 @@ initializer: limits: cpu: 2000m memory: 512Mi - # Additional environment variables injected to the initializer job pods. 
 + # -- Container security context for the initializer Job container + # -- Additional environment variables injected to the initializer job pods. extraEnv: [] - # Array of additional volume mount points for the initializer job (init)containers. + # -- Array of additional volume mount points for the initializer job (init)containers. extraVolumeMounts: [] - # A list of extra volumes to attach to the initializer job pods. + # -- A list of extra volumes to attach to the initializer job pods. extraVolumes: [] + # -- Pod security context for the initializer Job + podSecurityContext: {} - # staticName defines whether name of the job will be the same (e.g., "defectdojo-initializer") + # -- staticName defines whether name of the job will be the same (e.g., "defectdojo-initializer") # or different every time - generated based on current time (e.g., "defectdojo-initializer-2024-11-11-18-57") # This might be handy for ArgoCD deployments staticName: false -# For more advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/postgresql +# -- For more advanced options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/postgresql postgresql: - # To use an external instance, switch enabled to `false` and set the address in `postgresServer` below + # -- To use an external instance, switch enabled to `false` and set the address in `postgresServer` below enabled: true auth: username: defectdojo @@ -417,59 +542,67 @@ postgresql: ports: postgresql: 5432 podSecurityContext: - # Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC + # -- Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC enabled: true - # fsGroup specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. 
+ # -- fsGroup specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. fsGroup: 1001 containerSecurityContext: - # Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC + # -- Default is true for K8s. Enabled needs to false for OpenShift restricted SCC and true for anyuid SCC enabled: true - # runAsUser specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. + # -- runAsUser specification below is not applied if enabled=false. enabled=false is the required setting for OpenShift "restricted SCC" to work successfully. runAsUser: 1001 affinity: {} nodeSelector: {} volumePermissions: enabled: false - # if using restricted SCC set runAsUser: "auto" and if running under anyuid SCC - runAsUser needs to match the line above + # -- if using restricted SCC set runAsUser: "auto" and if running under anyuid SCC - runAsUser needs to match the line above containerSecurityContext: runAsUser: 1001 shmVolume: chmod: enabled: false -# Google CloudSQL support in GKE via gce-proxy +# -- Google CloudSQL support in GKE via gce-proxy cloudsql: - # To use CloudSQL in GKE set 'enable: true' + # -- To use CloudSQL in GKE set 'enable: true' enabled: false - # By default, the proxy has verbose logging. Set this to false to make it less verbose + # -- By default, the proxy has verbose logging. 
Set this to false to make it less verbose verbose: true + # -- set repo and image tag of gce-proxy image: - # set repo and image tag of gce-proxy repository: gcr.io/cloudsql-docker/gce-proxy tag: 1.37.9 pullPolicy: IfNotPresent - # set CloudSQL instance: 'project:zone:instancename' + # -- set CloudSQL instance: 'project:zone:instancename' instance: "" - # use IAM database authentication + # -- use IAM database authentication enable_iam_login: false - # whether to use a private IP to connect to the database + # -- whether to use a private IP to connect to the database use_private_ip: false + # -- Optional: security context for the CloudSQL proxy container. + containerSecurityContext: {} + # -- Additional environment variables for the CloudSQL proxy container. + extraEnv: [] + # -- Array of additional volume mount points for the CloudSQL proxy container + extraVolumeMounts: [] + # -- Optional: add resource requests/limits for the CloudSQL proxy container. + resources: {} -# Settings to make running the chart on GKE simpler +# -- Settings to make running the chart on GKE simpler gke: - # Set to true to configure the Ingress to use the GKE provided ingress controller + # -- Set to true to configure the Ingress to use the GKE provided ingress controller useGKEIngress: false - # Set to true to have GKE automatically provision a TLS certificate for the host specified + # -- Set to true to have GKE automatically provision a TLS certificate for the host specified # Requires useGKEIngress to be set to true # When using this option, be sure to set django.ingress.activateTLS to false useManagedCertificate: false - # Workload Identity allows the K8s service account to assume the IAM access of a GCP service account to interact with other GCP services + # -- Workload Identity allows the K8s service account to assume the IAM access of a GCP service account to interact with other GCP services # Only works with serviceAccount.create = true workloadIdentityEmail: "" -# For more 
advance options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/redis +# -- For more advanced options check the bitnami chart documentation: https://github.com/bitnami/charts/tree/main/bitnami/redis redis: - # To use an external instance, switch enabled to `false`` and set the address in `redisServer` below + # -- To use an external instance, switch enabled to `false` and set the address in `redisServer` below enabled: true auth: existingSecret: defectdojo-redis-specific @@ -484,41 +617,47 @@ redis: # Sentinel configuration parameters sentinel: enabled: false - # If TLS is enabled, the Redis broker will use the redis:// and optionally mount the certificates - # from an existing secret. tls: + # -- If TLS is enabled, the Redis broker will use the redis:// and optionally mount the certificates + # from an existing secret. enabled: false # existingSecret: redis-tls # certFilename: tls.crt # certKeyFilename: tls.key # certCAFilename: ca.crt -# To add extra variables not predefined by helm config it is possible to define in extraConfigs block, e.g. 
below: # NOTE Do not store any kind of sensitive information inside of it +# ``` +# DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED: 'true' +# DD_SOCIAL_AUTH_AUTH0_KEY: 'dev' +# DD_SOCIAL_AUTH_AUTH0_DOMAIN: 'xxxxx' +# ``` extraConfigs: {} -# DD_SOCIAL_AUTH_AUTH0_OAUTH2_ENABLED: 'true' -# DD_SOCIAL_AUTH_AUTH0_KEY: 'dev' -# DD_SOCIAL_AUTH_AUTH0_DOMAIN: 'xxxxx' -# Extra secrets can be created inside of extraSecrets block: +# -- Extra secrets can be created inside of extraSecrets block: # NOTE This is just an exmaple, do not store sensitive data in plain text form, better inject it during the deployment/upgrade by --set extraSecrets.secret=someSecret +# ``` +# DD_SOCIAL_AUTH_AUTH0_SECRET: 'xxx' +# ``` extraSecrets: {} -# DD_SOCIAL_AUTH_AUTH0_SECRET: 'xxx' -# To add (or override) extra variables which need to be pulled from another configMap, you can +# -- To add (or override) extra variables which need to be pulled from another configMap, you can # use extraEnv. For example: -extraEnv: [] +# ``` # - name: DD_DATABASE_HOST # valueFrom: # configMapKeyRef: # name: my-other-postgres-configmap # key: cluster_endpoint +# ``` +extraEnv: [] -# To add code snippet which would extend setting functionality, you might add it here +# -- To add code snippet which would extend setting functionality, you might add it here # It will be stored as ConfigMap and mounted `dojo/settings/local_settings.py`. # For more see: https://documentation.defectdojo.com/getting_started/configuration/ -localsettingspy: "" # For example: +# ``` # localsettingspy: | # INSTALLED_APPS += ( # 'debug_toolbar', @@ -526,16 +665,19 @@ localsettingspy: "" # MIDDLEWARE = [ # 'debug_toolbar.middleware.DebugToolbarMiddleware', # ] + MIDDLEWARE +# ``` +localsettingspy: "" + # # External database support. 
# -# To use an external Redis instance, set `redis.enabled` to false and set the address here: # @schema type:[string, null] +# -- To use an external Redis instance, set `redis.enabled` to false and set the address here: redisServer: ~ -# Parameters attached to the redis connection string, defaults to "ssl_cert_reqs=optional" if `redis.tls.enabled` +# -- Parameters attached to the redis connection string, defaults to "ssl_cert_reqs=optional" if `redis.tls.enabled` redisParams: "" # -# To use an external PostgreSQL instance (like CloudSQL), set `postgresql.enabled` to false, -# set items in `postgresql.auth` part for authentication, and set the address here: # @schema type:[string, null] +# -- To use an external PostgreSQL instance (like CloudSQL), set `postgresql.enabled` to false, +# set items in `postgresql.auth` part for authentication, and set the address here: postgresServer: ~ diff --git a/readme-docs/CONTRIBUTING.md b/readme-docs/CONTRIBUTING.md index e440f498c07..27f8093355e 100644 --- a/readme-docs/CONTRIBUTING.md +++ b/readme-docs/CONTRIBUTING.md @@ -56,7 +56,7 @@ Please use [these test scripts](../tests) to test your changes. These are the sc For changes that require additional settings, you can now use local_settings.py file. See the logging section below for more information. ## Python3 Version -For compatibility reasons, the code in dev branch should be python3.12 compliant. +For compatibility reasons, the code in dev branch should be python3.13 compliant. ## Database migrations When changes are made to the database model, a database migration is needed. This migration can be generated using something like @@ -82,7 +82,7 @@ DefectDojo. 0. Pull requests should be submitted to the `dev` or `bugfix` branch. -0. In dev branch, the code should be python 3.12 compliant. +0. In dev branch, the code should be python 3.13 compliant. 
[dojo_settings]: /dojo/settings/settings.dist.py "DefectDojo settings file" [pep8]: https://www.python.org/dev/peps/pep-0008/ "PEP8" diff --git a/requirements-lint.txt b/requirements-lint.txt index 6a0ba23ce92..59e562b9654 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -1 +1 @@ -ruff==0.13.2 \ No newline at end of file +ruff==0.14.1 diff --git a/requirements.txt b/requirements.txt index 5063cff6e11..f45a0ea031e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,11 +6,11 @@ celery==5.5.3 defusedxml==0.7.1 django_celery_results==2.6.0 django-auditlog==3.2.1 -django-pghistory==3.7.0 +django-pghistory==3.8.3 django-dbbackup==5.0.0 django-environ==0.12.0 django-filter==25.1 -django-imagekit==5.0.0 +django-imagekit==6.0.0 django-multiselectfield==1.0.1 django-polymorphic==4.1.0 django-crispy-forms==2.4 @@ -19,30 +19,31 @@ django-slack==5.19.0 django-watson==1.6.3 django-prometheus==2.4.1 Django==5.1.13 +django-single-session==0.2.0 djangorestframework==3.16.1 html2text==2025.4.15 -humanize==4.13.0 -jira==3.8.0 +humanize==4.14.0 +jira==3.10.5 PyGithub==2.8.1 lxml==6.0.2 Markdown==3.9 openpyxl==3.1.5 -Pillow==11.3.0 # required by django-imagekit +Pillow==12.0.0 # required by django-imagekit psycopg[c]==3.2.10 -cryptography==46.0.2 +cryptography==46.0.3 python-dateutil==2.9.0.post0 redis==6.4.0 requests==2.32.5 -sqlalchemy==2.0.43 # Required by Celery broker transport +sqlalchemy==2.0.44 # Required by Celery broker transport urllib3==2.5.0 -uWSGI==2.0.30 +uWSGI==2.0.31 vobject==0.9.9 whitenoise==5.2.0 titlecase==2.4.1 -social-auth-app-django==5.4.3 -social-auth-core==4.7.0 +social-auth-app-django==5.6.0 +social-auth-core==4.8.1 gitpython==3.1.45 -python-gitlab==6.4.0 +python-gitlab==6.5.0 cpe==1.3.1 packageurl-python==0.17.5 django-crum==0.7.9 @@ -61,9 +62,9 @@ django-ratelimit==4.1.0 argon2-cffi==25.1.0 blackduck==1.1.3 pycurl==7.45.7 # Required for Celery Broker AWS (SQS) support -boto3==1.40.44 # Required for Celery Broker AWS (SQS) support 
+boto3==1.40.55 # Required for Celery Broker AWS (SQS) support netaddr==1.3.0 -vulners==2.3.7 +vulners==3.1.1 fontawesomefree==6.6.0 PyYAML==6.0.3 pyopenssl==25.3.0 diff --git a/ruff.toml b/ruff.toml index 598517dd435..670a95b7b99 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,5 +1,5 @@ -# Always generate Python 3.12-compatible code. -target-version = "py312" +# Always generate Python 3.13-compatible code. +target-version = "py313" # Same as Black. line-length = 120 @@ -36,9 +36,9 @@ select = [ "FAST", "YTT", "ASYNC", - "S1", "S2", "S302", "S303", "S304", "S305", "S306", "S307", "S31", "S321", "S323", "S324", "S401", "S402", "S406", "S407", "S408", "S409", "S41", "S5", "S601", "S602", "S604", "S605", "S606", "S607", "S609", "S61", "S7", + "S1", "S2", "S302", "S303", "S304", "S305", "S306", "S307", "S31", "S32", "S401", "S402", "S406", "S407", "S408", "S409", "S41", "S5", "S601", "S602", "S604", "S605", "S606", "S607", "S609", "S61", "S7", "FBT", - "B00", "B010", "B011", "B012", "B013", "B014", "B015", "B016", "B017", "B018", "B019", "B020", "B021", "B022", "B023", "B025", "B028", "B029", "B03", "B901", "B903", "B905", "B911", + "B00", "B01", "B020", "B021", "B022", "B023", "B025", "B027", "B028", "B029", "B03", "B901", "B903", "B905", "B911", "A", "COM", "C4", @@ -58,7 +58,7 @@ select = [ "PIE", "T20", "PYI", - "PT001", "PT002", "PT003", "PT006", "PT007", "PT008", "PT01", "PT020", "PT021", "PT022", "PT023", "PT024", "PT025", "PT026", "PT028", "PT029", "PT03", + "PT", "Q", "RSE", "RET", @@ -74,7 +74,7 @@ select = [ "C90", "NPY", "PD", - "N803", "N804", "N811", "N812", "N813", "N814", "N817", "N818", "N999", + "N803", "N804", "N805", "N811", "N812", "N813", "N814", "N817", "N818", "N999", "PERF1", "PERF2", "PERF401", "PERF403", "E", "W", @@ -82,7 +82,7 @@ select = [ "D2", "D3", "D402", "D403", "D405", "D406", "D407", "D408", "D409", "D410", "D411", "D412", "D413", "D414", "D416", "F", "PGH", - "PLC0", "PLC1", "PLC24", "PLC28", "PLC3", + "PLC", "PLE", "PLR01", "PLR02", 
"PLR04", "PLR0915", "PLR1711", "PLR1704", "PLR1714", "PLR1716", "PLR172", "PLR173", "PLR2044", "PLR5", "PLR6104", "PLR6201", "PLW", @@ -101,6 +101,8 @@ ignore = [ "FIX002", # TODOs need some love but we will probably not get of them "D211", # `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. "D212", # `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. + "PT009", # We are using a different style of tests (official Django tests), so it does not make sense to try to fix it + "PT027", # Same ^ ] # Allow autofix for all enabled rules (when `--fix`) is provided. diff --git a/unittests/scans/mobsfscan/many_findings.json b/unittests/scans/mobsf/many_findings.json similarity index 100% rename from unittests/scans/mobsfscan/many_findings.json rename to unittests/scans/mobsf/many_findings.json diff --git a/unittests/scans/mobsfscan/many_findings_cwe_lower.json b/unittests/scans/mobsf/many_findings_cwe_lower.json similarity index 100% rename from unittests/scans/mobsfscan/many_findings_cwe_lower.json rename to unittests/scans/mobsf/many_findings_cwe_lower.json diff --git a/unittests/scans/mobsfscan/no_findings.json b/unittests/scans/mobsf/no_findings.json similarity index 100% rename from unittests/scans/mobsfscan/no_findings.json rename to unittests/scans/mobsf/no_findings.json diff --git a/unittests/test_deduplication_logic.py b/unittests/test_deduplication_logic.py index 1be76d911ce..c9e8e26e53d 100644 --- a/unittests/test_deduplication_logic.py +++ b/unittests/test_deduplication_logic.py @@ -13,7 +13,7 @@ System_Settings, Test, User, - _copy_model_util, + copy_model_util, ) from .dojo_test_case import DojoTestCase @@ -1384,7 +1384,7 @@ def log_summary(self, product=None, engagement=None, test=None): def copy_and_reset_finding(self, find_id): org = Finding.objects.get(id=find_id) - new = _copy_model_util(org) + new = copy_model_util(org) new.duplicate = False new.duplicate_finding 
= None new.active = True @@ -1421,13 +1421,13 @@ def copy_and_reset_finding_add_endpoints(self, find_id, *, static=False, dynamic def copy_and_reset_test(self, test_id): org = Test.objects.get(id=test_id) - new = _copy_model_util(org) + new = copy_model_util(org) # return unsaved new finding and reloaded existing finding return new, Test.objects.get(id=test_id) def copy_and_reset_engagement(self, eng_id): org = Engagement.objects.get(id=eng_id) - new = _copy_model_util(org) + new = copy_model_util(org) # return unsaved new finding and reloaded existing finding return new, Engagement.objects.get(id=eng_id) diff --git a/unittests/test_duplication_loops.py b/unittests/test_duplication_loops.py index cc0d250774e..d85e52e1046 100644 --- a/unittests/test_duplication_loops.py +++ b/unittests/test_duplication_loops.py @@ -4,7 +4,7 @@ from django.test.utils import override_settings from dojo.management.commands.fix_loop_duplicates import fix_loop_duplicates -from dojo.models import Engagement, Finding, Product, User, _copy_model_util +from dojo.models import Engagement, Finding, Product, User, copy_model_util from dojo.utils import set_duplicate from .dojo_test_case import DojoTestCase @@ -27,19 +27,19 @@ def run(self, result=None): super().run(result) def setUp(self): - self.finding_a = _copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) + self.finding_a = copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) self.finding_a.title = "A: " + self.finding_a.title self.finding_a.duplicate = False self.finding_a.hash_code = None self.finding_a.save() - self.finding_b = _copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) + self.finding_b = copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) self.finding_b.title = "B: " + self.finding_b.title self.finding_b.duplicate = False self.finding_b.hash_code = None self.finding_b.save() - self.finding_c = 
_copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) + self.finding_c = copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_c.pk = None self.finding_c.title = "C: " + self.finding_c.title self.finding_c.duplicate = False @@ -262,7 +262,7 @@ def test_loop_relations_for_three(self): # Another loop-test for 4 findings def test_loop_relations_for_four(self): - self.finding_d = _copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) + self.finding_d = copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_d.duplicate = False self.finding_d.save() diff --git a/unittests/test_false_positive_history_logic.py b/unittests/test_false_positive_history_logic.py index 4a380383f7d..5d8f31a15de 100644 --- a/unittests/test_false_positive_history_logic.py +++ b/unittests/test_false_positive_history_logic.py @@ -12,7 +12,7 @@ System_Settings, Test, User, - _copy_model_util, + copy_model_util, ) from .dojo_test_case import DojoTestCase @@ -1719,7 +1719,7 @@ def log_summary(self, product=None, engagement=None, test=None): def copy_and_reset_finding(self, find_id): org = Finding.objects.get(id=find_id) - new = _copy_model_util(org) + new = copy_model_util(org) new.duplicate = False new.duplicate_finding = None new.false_p = False @@ -1730,19 +1730,19 @@ def copy_and_reset_finding(self, find_id): def copy_and_reset_test(self, test_id): org = Test.objects.get(id=test_id) - new = _copy_model_util(org) + new = copy_model_util(org) # return unsaved new test and reloaded existing test return new, Test.objects.get(id=test_id) def copy_and_reset_engagement(self, eng_id): org = Engagement.objects.get(id=eng_id) - new = _copy_model_util(org) + new = copy_model_util(org) # return unsaved new engagement and reloaded existing engagement return new, Engagement.objects.get(id=eng_id) def copy_and_reset_product(self, prod_id): org = Product.objects.get(id=prod_id) - 
new = _copy_model_util(org) + new = copy_model_util(org) new.name = f"{org.name} (Copy {datetime.now()})" # return unsaved new product and reloaded existing product return new, Product.objects.get(id=prod_id) diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py index 4f5120792d4..f32350e2e86 100644 --- a/unittests/test_rest_framework.py +++ b/unittests/test_rest_framework.py @@ -34,7 +34,7 @@ from dojo.api_v2.mixins import DeletePreviewModelMixin from dojo.api_v2.prefetch import PrefetchListMixin, PrefetchRetrieveMixin -from dojo.api_v2.prefetch.utils import _get_prefetchable_fields +from dojo.api_v2.prefetch.utils import get_prefetchable_fields from dojo.api_v2.views import ( AnnouncementViewSet, AppAnalysisViewSet, @@ -416,7 +416,7 @@ def test_detail(self): @skipIfNotSubclass(PrefetchRetrieveMixin) def test_detail_prefetch(self): # print("=======================================================") - prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)] + prefetchable_fields = [x[0] for x in get_prefetchable_fields(self.viewset.serializer_class)] current_objects = self.client.get(self.url, format="json").data relative_url = self.url + "{}/".format(current_objects["results"][0]["id"]) @@ -508,7 +508,7 @@ def test_list(self): @skipIfNotSubclass(PrefetchListMixin) def test_list_prefetch(self): - prefetchable_fields = [x[0] for x in _get_prefetchable_fields(self.viewset.serializer_class)] + prefetchable_fields = [x[0] for x in get_prefetchable_fields(self.viewset.serializer_class)] response = self.client.get(self.url, data={ "prefetch": ",".join(prefetchable_fields), diff --git a/unittests/test_utils_deduplication_reopen.py b/unittests/test_utils_deduplication_reopen.py index 91ba2c49d12..a7e72ede118 100644 --- a/unittests/test_utils_deduplication_reopen.py +++ b/unittests/test_utils_deduplication_reopen.py @@ -2,7 +2,7 @@ import logging from dojo.management.commands.fix_loop_duplicates import 
fix_loop_duplicates -from dojo.models import Finding, _copy_model_util +from dojo.models import Finding, copy_model_util from dojo.utils import set_duplicate from .dojo_test_case import DojoTestCase @@ -14,7 +14,7 @@ class TestDuplicationReopen(DojoTestCase): fixtures = ["dojo_testdata.json"] def setUp(self): - self.finding_a = _copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) + self.finding_a = copy_model_util(Finding.objects.get(id=2), exclude_fields=["duplicate_finding"]) self.finding_a.duplicate = False self.finding_a.mitigated = datetime.datetime(1970, 1, 1, tzinfo=datetime.UTC) self.finding_a.is_mitigated = True @@ -22,19 +22,19 @@ def setUp(self): self.finding_a.active = False self.finding_a.save() - self.finding_b = _copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) + self.finding_b = copy_model_util(Finding.objects.get(id=3), exclude_fields=["duplicate_finding"]) self.finding_a.active = True self.finding_b.duplicate = False self.finding_b.save() - self.finding_c = _copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) + self.finding_c = copy_model_util(Finding.objects.get(id=4), exclude_fields=["duplicate_finding"]) self.finding_c.duplicate = False self.finding_c.out_of_scope = True self.finding_c.active = False logger.debug("creating finding_c") self.finding_c.save() - self.finding_d = _copy_model_util(Finding.objects.get(id=5), exclude_fields=["duplicate_finding"]) + self.finding_d = copy_model_util(Finding.objects.get(id=5), exclude_fields=["duplicate_finding"]) self.finding_d.duplicate = False logger.debug("creating finding_d") self.finding_d.save() diff --git a/unittests/tools/test_mobsf_parser.py b/unittests/tools/test_mobsf_parser.py index 53ddbb8a3e6..88719e57d88 100644 --- a/unittests/tools/test_mobsf_parser.py +++ b/unittests/tools/test_mobsf_parser.py @@ -136,3 +136,162 @@ def test_parse_damnvulnrablebank(self): findings = parser.get_findings(testfile, 
test) testfile.close() self.assertEqual(80, len(findings)) + + def test_parse_no_findings(self): + with (get_unit_tests_scans_path("mobsf") / "no_findings.json").open(encoding="utf-8") as testfile: + parser = MobSFParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(0, len(findings)) + + def test_parse_many_findings(self): + with (get_unit_tests_scans_path("mobsf") / "many_findings.json").open(encoding="utf-8") as testfile: + parser = MobSFParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(8, len(findings)) + + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("android_certificate_transparency", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(295, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=1): + finding = findings[1] + self.assertEqual("android_kotlin_hardcoded", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(798, finding.cwe) + self.assertIsNotNone(finding.references) + self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path) + self.assertEqual(10, finding.line) + + with self.subTest(i=2): + finding = findings[2] + self.assertEqual("android_kotlin_hardcoded", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(798, finding.cwe) + self.assertIsNotNone(finding.references) + self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures2.kt", finding.file_path) + self.assertEqual(20, finding.line) + + with self.subTest(i=3): + finding = findings[3] + self.assertEqual("android_prevent_screenshot", finding.title) + self.assertEqual("Low", 
finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(200, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=4): + finding = findings[4] + self.assertEqual("android_root_detection", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(919, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=5): + finding = findings[5] + self.assertEqual("android_safetynet", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(353, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=6): + finding = findings[6] + self.assertEqual("android_ssl_pinning", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(295, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=7): + finding = findings[7] + self.assertEqual("android_tapjacking", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(200, finding.cwe) + self.assertIsNotNone(finding.references) + + def test_parse_many_findings_cwe_lower(self): + with (get_unit_tests_scans_path("mobsf") / "many_findings_cwe_lower.json").open(encoding="utf-8") as testfile: + parser = MobSFParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(7, len(findings)) + + with self.subTest(i=0): + finding = findings[0] + self.assertEqual("android_certificate_transparency", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + 
self.assertIsNotNone(finding.description) + self.assertEqual(295, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=1): + finding = findings[1] + self.assertEqual("android_kotlin_hardcoded", finding.title) + self.assertEqual("Medium", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(798, finding.cwe) + self.assertIsNotNone(finding.references) + self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path) + self.assertEqual(10, finding.line) + + with self.subTest(i=2): + finding = findings[2] + self.assertEqual("android_prevent_screenshot", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(200, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=3): + finding = findings[3] + self.assertEqual("android_root_detection", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(919, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=4): + finding = findings[4] + self.assertEqual("android_safetynet", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(353, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=5): + finding = findings[5] + self.assertEqual("android_ssl_pinning", finding.title) + self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(295, finding.cwe) + self.assertIsNotNone(finding.references) + + with self.subTest(i=6): + finding = findings[6] + self.assertEqual("android_tapjacking", finding.title) 
+ self.assertEqual("Low", finding.severity) + self.assertEqual(1, finding.nb_occurences) + self.assertIsNotNone(finding.description) + self.assertEqual(200, finding.cwe) + self.assertIsNotNone(finding.references) diff --git a/unittests/tools/test_mobsfscan_parser.py b/unittests/tools/test_mobsfscan_parser.py deleted file mode 100644 index cbb6245c227..00000000000 --- a/unittests/tools/test_mobsfscan_parser.py +++ /dev/null @@ -1,165 +0,0 @@ -from dojo.models import Test -from dojo.tools.mobsfscan.parser import MobsfscanParser -from unittests.dojo_test_case import DojoTestCase, get_unit_tests_scans_path - - -class TestMobsfscanParser(DojoTestCase): - - def test_parse_no_findings(self): - with (get_unit_tests_scans_path("mobsfscan") / "no_findings.json").open(encoding="utf-8") as testfile: - parser = MobsfscanParser() - findings = parser.get_findings(testfile, Test()) - self.assertEqual(0, len(findings)) - - def test_parse_many_findings(self): - with (get_unit_tests_scans_path("mobsfscan") / "many_findings.json").open(encoding="utf-8") as testfile: - parser = MobsfscanParser() - findings = parser.get_findings(testfile, Test()) - self.assertEqual(8, len(findings)) - - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("android_certificate_transparency", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(295, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=1): - finding = findings[1] - self.assertEqual("android_kotlin_hardcoded", finding.title) - self.assertEqual("Medium", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(798, finding.cwe) - self.assertIsNotNone(finding.references) - self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path) - self.assertEqual(10, finding.line) - - with 
self.subTest(i=2): - finding = findings[2] - self.assertEqual("android_kotlin_hardcoded", finding.title) - self.assertEqual("Medium", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(798, finding.cwe) - self.assertIsNotNone(finding.references) - self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures2.kt", finding.file_path) - self.assertEqual(20, finding.line) - - with self.subTest(i=3): - finding = findings[3] - self.assertEqual("android_prevent_screenshot", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(200, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=4): - finding = findings[4] - self.assertEqual("android_root_detection", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(919, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=5): - finding = findings[5] - self.assertEqual("android_safetynet", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(353, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=6): - finding = findings[6] - self.assertEqual("android_ssl_pinning", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(295, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=7): - finding = findings[7] - self.assertEqual("android_tapjacking", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - 
self.assertIsNotNone(finding.description) - self.assertEqual(200, finding.cwe) - self.assertIsNotNone(finding.references) - - def test_parse_many_findings_cwe_lower(self): - with (get_unit_tests_scans_path("mobsfscan") / "many_findings_cwe_lower.json").open(encoding="utf-8") as testfile: - parser = MobsfscanParser() - findings = parser.get_findings(testfile, Test()) - self.assertEqual(7, len(findings)) - - with self.subTest(i=0): - finding = findings[0] - self.assertEqual("android_certificate_transparency", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(295, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=1): - finding = findings[1] - self.assertEqual("android_kotlin_hardcoded", finding.title) - self.assertEqual("Medium", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(798, finding.cwe) - self.assertIsNotNone(finding.references) - self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path) - self.assertEqual(10, finding.line) - - with self.subTest(i=2): - finding = findings[2] - self.assertEqual("android_prevent_screenshot", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(200, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=3): - finding = findings[3] - self.assertEqual("android_root_detection", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(919, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=4): - finding = findings[4] - self.assertEqual("android_safetynet", finding.title) - 
self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(353, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=5): - finding = findings[5] - self.assertEqual("android_ssl_pinning", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(295, finding.cwe) - self.assertIsNotNone(finding.references) - - with self.subTest(i=6): - finding = findings[6] - self.assertEqual("android_tapjacking", finding.title) - self.assertEqual("Low", finding.severity) - self.assertEqual(1, finding.nb_occurences) - self.assertIsNotNone(finding.description) - self.assertEqual(200, finding.cwe) - self.assertIsNotNone(finding.references)