diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index c1dac77d12c..90249c6096f 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -1,200 +1,200 @@ -name: Lint Helm chart -on: - pull_request: - branches: - - master - - dev - - bugfix - - release/** - - hotfix/** - -jobs: - lint: - name: Lint chart (version) - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 - - - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 - with: - python-version: 3.14 # Renovate helper is not needed here - - - name: Configure Helm repos - run: |- - helm dependency list ./helm/defectdojo - helm dependency update ./helm/defectdojo - - - name: Set up chart-testing - uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f # v2.8.0 - with: - yamale_version: 6.1.0 # renovate: datasource=pypi depName=yamale versioning=semver - yamllint_version: 1.37.1 # renovate: datasource=pypi depName=yamllint versioning=semver - - - name: Determine target branch - id: ct-branch-target - run: | - if [ ! 
-z ${GITHUB_BASE_REF} ]; then - echo "ct-branch=${GITHUB_BASE_REF}" >> $GITHUB_ENV - else - echo "ct-branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV - fi - - - name: Run chart-testing (list-changed) - id: list_changed - run: | - changed=$(ct list-changed --config ct.yaml --target-branch ${{ env.ct-branch}}) - if [[ -n "$changed" ]]; then - echo "changed=true" >> $GITHUB_OUTPUT - fi - - # run version check only if not dev as in dev we have a `x.y.z-dev` version - # x.y.z gets bumped automatically when doing a release - - name: Run chart-testing (lint) - run: ct lint --config ct.yaml --target-branch ${{ env.ct-branch }} --check-version-increment=true - if: ${{ steps.list_changed.outputs.changed == 'true' && env.ct-branch != 'dev' && env.ct-branch != 'bugfix' }} - - # run all checks but version increment always when something changed - - name: Run chart-testing (lint) - run: ct lint --config ct.yaml --target-branch ${{ env.ct-branch }} --check-version-increment=false - if: steps.list_changed.outputs.changed == 'true' - - - name: Check update of "artifacthub.io/changes" HELM annotation - if: ${{ steps.list_changed.outputs.changed == 'true' && !(startsWith(github.head_ref, 'master-into-dev/') || startsWith(github.head_ref, 'master-into-bugfix/')) }} - run: | - # fast fail if `git show` fails - set -e - set -o pipefail - - target_branch=${{ env.ct-branch }} - - echo "Checking Chart.yaml annotation changes" - - # Get current branch annotation - current_annotation=$(yq e '.annotations."artifacthub.io/changes"' "helm/defectdojo/Chart.yaml") - echo "Current annotation: " - echo $current_annotation - - # Get target branch version of Chart.yaml annotation - target_annotation=$(git show "origin/${{ env.ct-branch }}:helm/defectdojo/Chart.yaml" | yq e '.annotations."artifacthub.io/changes"' -) - echo "Target annotation: " - echo $target_annotation - - if [[ "$current_annotation" == "$target_annotation" ]]; then - echo "::error file=helm/defectdojo/Chart.yaml::The 
'artifacthub.io/changes' annotation has not been updated compared to ${{ env.ct-branch }}. For more, check the hint in 'helm/defectdojo/Chart.yaml'" - exit 1 - fi - - echo "'artifacthub.io/changes' annotation updated in helm/defectdojo" - - # - name: Create kind cluster - # uses: helm/kind-action@v1.1.0 - # if: steps.list_changed.outputs.changed == 'true' - - # - name: Run chart-testing (install) - # run: ct install --config ct.yaml --target-branch ${{ env.ct-branch }} --helm-extra-args '--set createSecret=true --set createRabbitMqSecret=true --set createPostgresqlSecret=true --set timeout=900' - # if: steps.list_changed.outputs.changed == 'true' - - docs_generation: - name: Update documentation - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} - - - name: Update values in HELM chart - if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') - run: | - title=${{ github.event.pull_request.title }} - chars='{}:[],&*#?|-<>=!%@' - for c in $(echo "$chars" | grep -o .); do - title="${title//"$c"/_}" - done - yq -i '.annotations."artifacthub.io/changes" += "- kind: changed\n description: $title\n"' helm/defectdojo/Chart.yaml - git add helm/defectdojo/Chart.yaml - git commit -m "ci: update Chart annotations from PR #${{ github.event.pull_request.number }}" || echo "No changes to commit" - - - name: Run helm-docs (update) - uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 - if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') - with: - chart-search-root: "helm/defectdojo" - git-push: true - - - name: Run helm-docs (check) - uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 - if: ${{ !(startsWith(github.head_ref, 'renovate/') || 
startsWith(github.head_ref, 'dependabot/')) }} - with: - fail-on-diff: true - chart-search-root: "helm/defectdojo" - - - name: Failed Information - if: failure() - run: |- - echo "Your HELM chart changed but you haven't adjusted documentation. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-docs-update for more information." - - generate_schema: - name: Update schema - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - - name: Generate values schema json - uses: losisin/helm-values-schema-json-action@660c441a4a507436a294fc55227e1df54aca5407 # v2.3.1 - with: - fail-on-diff: true - working-directory: "helm/defectdojo" - useHelmDocs: true - values: values.yaml - - - name: Failed Information - if: failure() - run: |- - echo "Your HELM chart changed but you haven't adjusted schema. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-schema-update for more information." 
- - lint_format: - name: Lint chart (format) - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Set up Helm - uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 - - - name: Configure Helm repos - run: |- - helm dependency list ./helm/defectdojo - helm dependency update ./helm/defectdojo - - - name: Lint - run: |- - helm lint ./helm/defectdojo --strict - - artifacthub_linter: - name: Artifacthub Lint - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - name: Run ah lint - working-directory: ./helm/defectdojo - run: |- - docker run --rm \ - -v ${{ github.workspace }}/helm/defectdojo:/workspace \ - -w /workspace \ - artifacthub/ah:v1.21.0@sha256:511818fa90ce87d7132c6214e51ea6dd62eea030f5d2271ce073f948b3060972 \ - ah lint +name: Lint Helm chart +on: + pull_request: + branches: + - master + - dev + - bugfix + - release/** + - hotfix/** + +jobs: + lint: + name: Lint chart (version) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 + + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: 3.14 # Renovate helper is not needed here + + - name: Configure Helm repos + run: |- + helm dependency list ./helm/defectdojo + helm dependency update ./helm/defectdojo + + - name: Set up chart-testing + uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f # v2.8.0 + with: + yamale_version: 6.1.0 # renovate: datasource=pypi depName=yamale versioning=semver + yamllint_version: 1.37.1 # renovate: datasource=pypi depName=yamllint versioning=semver + 
+ - name: Determine target branch + id: ct-branch-target + run: | + if [ ! -z ${GITHUB_BASE_REF} ]; then + echo "ct-branch=${GITHUB_BASE_REF}" >> $GITHUB_ENV + else + echo "ct-branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_ENV + fi + + - name: Run chart-testing (list-changed) + id: list_changed + run: | + changed=$(ct list-changed --config ct.yaml --target-branch ${{ env.ct-branch}}) + if [[ -n "$changed" ]]; then + echo "changed=true" >> $GITHUB_OUTPUT + fi + + # run version check only if not dev as in dev we have a `x.y.z-dev` version + # x.y.z gets bumped automatically when doing a release + - name: Run chart-testing (lint) + run: ct lint --config ct.yaml --target-branch ${{ env.ct-branch }} --check-version-increment=true + if: ${{ steps.list_changed.outputs.changed == 'true' && env.ct-branch != 'dev' && env.ct-branch != 'bugfix' }} + + # run all checks but version increment always when something changed + - name: Run chart-testing (lint) + run: ct lint --config ct.yaml --target-branch ${{ env.ct-branch }} --check-version-increment=false + if: steps.list_changed.outputs.changed == 'true' + + - name: Check update of "artifacthub.io/changes" HELM annotation + if: ${{ steps.list_changed.outputs.changed == 'true' && !(startsWith(github.head_ref, 'master-into-dev/') || startsWith(github.head_ref, 'master-into-bugfix/')) }} + run: | + # fast fail if `git show` fails + set -e + set -o pipefail + + target_branch=${{ env.ct-branch }} + + echo "Checking Chart.yaml annotation changes" + + # Get current branch annotation + current_annotation=$(yq e '.annotations."artifacthub.io/changes"' "helm/defectdojo/Chart.yaml") + echo "Current annotation: " + echo $current_annotation + + # Get target branch version of Chart.yaml annotation + target_annotation=$(git show "origin/${{ env.ct-branch }}:helm/defectdojo/Chart.yaml" | yq e '.annotations."artifacthub.io/changes"' -) + echo "Target annotation: " + echo $target_annotation + + if [[ "$current_annotation" == "$target_annotation" 
]]; then + echo "::error file=helm/defectdojo/Chart.yaml::The 'artifacthub.io/changes' annotation has not been updated compared to ${{ env.ct-branch }}. For more, check the hint in 'helm/defectdojo/Chart.yaml'" + exit 1 + fi + + echo "'artifacthub.io/changes' annotation updated in helm/defectdojo" + + # - name: Create kind cluster + # uses: helm/kind-action@v1.1.0 + # if: steps.list_changed.outputs.changed == 'true' + + # - name: Run chart-testing (install) + # run: ct install --config ct.yaml --target-branch ${{ env.ct-branch }} --helm-extra-args '--set createSecret=true --set createRabbitMqSecret=true --set createPostgresqlSecret=true --set timeout=900' + # if: steps.list_changed.outputs.changed == 'true' + + docs_generation: + name: Update documentation + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + + - name: Update values in HELM chart + if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') + run: | + title=${{ github.event.pull_request.title }} + chars='{}:[],&*#?|-<>=!%@' + for c in $(echo "$chars" | grep -o .); do + title="${title//"$c"/_}" + done + yq -i '.annotations."artifacthub.io/changes" += "- kind: changed\n description: $title\n"' helm/defectdojo/Chart.yaml + git add helm/defectdojo/Chart.yaml + git commit -m "ci: update Chart annotations from PR #${{ github.event.pull_request.number }}" || echo "No changes to commit" + + - name: Run helm-docs (update) + uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 + if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') + with: + chart-search-root: "helm/defectdojo" + git-push: true + + - name: Run helm-docs (check) + uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # 
v1.6.2 + if: ${{ !(startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/')) }} + with: + fail-on-diff: true + chart-search-root: "helm/defectdojo" + + - name: Failed Information + if: failure() + run: |- + echo "Your HELM chart changed but you haven't adjusted documentation. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-docs-update for more information." + + generate_schema: + name: Update schema + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + + - name: Generate values schema json + uses: losisin/helm-values-schema-json-action@660c441a4a507436a294fc55227e1df54aca5407 # v2.3.1 + with: + fail-on-diff: true + working-directory: "helm/defectdojo" + useHelmDocs: true + values: values.yaml + + - name: Failed Information + if: failure() + run: |- + echo "Your HELM chart changed but you haven't adjusted schema. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-schema-update for more information." 
+ + lint_format: + name: Lint chart (format) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 + + - name: Configure Helm repos + run: |- + helm dependency list ./helm/defectdojo + helm dependency update ./helm/defectdojo + + - name: Lint + run: |- + helm lint ./helm/defectdojo --strict + + artifacthub_linter: + name: Artifacthub Lint + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - name: Run ah lint + working-directory: ./helm/defectdojo + run: |- + docker run --rm \ + -v ${{ github.workspace }}/helm/defectdojo:/workspace \ + -w /workspace \ + artifacthub/ah:v1.21.0@sha256:511818fa90ce87d7132c6214e51ea6dd62eea030f5d2271ce073f948b3060972 \ + ah lint diff --git a/components/package.json b/components/package.json index d9500b421b6..fc3426e867d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,42 +1,42 @@ -{ - "name": "defectdojo", - "version": "2.54.0-dev", - "license" : "BSD-3-Clause", - "private": true, - "dependencies": { - "JUMFlot": "jumjum123/JUMFlot#*", - "bootstrap": "^3.4.1", - "bootstrap-select": "^1.13.18", - "bootstrap-social": "^4.0.0", - "bootstrap-wysiwyg": "^2.0.0", - "chosen-bootstrap": "https://github.com/dbtek/chosen-bootstrap", - "chosen-js": "^1.8.7", - "clipboard": "^2.0.11", - "datatables.net": "^2.3.5", - "datatables.net-buttons-bs": "^3.2.5", - "datatables.net-colreorder": "^2.1.2", - "drmonty-datatables-plugins": "^1.0.0", - "drmonty-datatables-responsive": "^1.0.0", - "easymde": "^2.20.0", - "flot": "flot/flot#~0.8.3", - "font-awesome": "^4.0.0", - "fullcalendar": "^3.10.2", - "google-code-prettify": "^1.0.0", - "jquery": "^3.7.1", - "jquery-highlight": "3.5.0", - "jquery-ui": "1.14.1", - "jquery.cookie": 
"1.4.1", - "jquery.flot.tooltip": "^0.9.0", - "jquery.hotkeys": "jeresig/jquery.hotkeys#master", - "jszip": "^3.10.1", - "justgage": "^1.7.0", - "metismenu": "~3.0.7", - "moment": "^2.30.1", - "morris.js": "morrisjs/morris.js", - "pdfmake": "^0.2.20", - "startbootstrap-sb-admin-2": "1.0.7" - }, - "engines": { - "yarn": ">= 1.0.0" - } -} +{ + "name": "defectdojo", + "version": "2.54.0-dev", + "license" : "BSD-3-Clause", + "private": true, + "dependencies": { + "JUMFlot": "jumjum123/JUMFlot#*", + "bootstrap": "^3.4.1", + "bootstrap-select": "^1.13.18", + "bootstrap-social": "^4.0.0", + "bootstrap-wysiwyg": "^2.0.0", + "chosen-bootstrap": "https://github.com/dbtek/chosen-bootstrap", + "chosen-js": "^1.8.7", + "clipboard": "^2.0.11", + "datatables.net": "^2.3.5", + "datatables.net-buttons-bs": "^3.2.5", + "datatables.net-colreorder": "^2.1.2", + "drmonty-datatables-plugins": "^1.0.0", + "drmonty-datatables-responsive": "^1.0.0", + "easymde": "^2.20.0", + "flot": "flot/flot#~0.8.3", + "font-awesome": "^4.0.0", + "fullcalendar": "^3.10.2", + "google-code-prettify": "^1.0.0", + "jquery": "^3.7.1", + "jquery-highlight": "3.5.0", + "jquery-ui": "1.14.1", + "jquery.cookie": "1.4.1", + "jquery.flot.tooltip": "^0.9.0", + "jquery.hotkeys": "jeresig/jquery.hotkeys#master", + "jszip": "^3.10.1", + "justgage": "^1.7.0", + "metismenu": "~3.0.7", + "moment": "^2.30.1", + "morris.js": "morrisjs/morris.js", + "pdfmake": "^0.2.20", + "startbootstrap-sb-admin-2": "1.0.7" + }, + "engines": { + "yarn": ">= 1.0.0" + } +} diff --git a/docs/content/en/about_defectdojo/about_docs.md b/docs/content/en/about_defectdojo/about_docs.md index ddc1baf94d2..cfc33de1121 100644 --- a/docs/content/en/about_defectdojo/about_docs.md +++ b/docs/content/en/about_defectdojo/about_docs.md @@ -1,120 +1,120 @@ ---- -title: "About Our Documentation" -date: 2021-02-02T20:46:29+01:00 -draft: false -type: docs -weight: 1 ---- - -![image](images/dashboard.png) - - -DefectDojo Inc. 
and open-source contributors maintain this documentation to support both the Community and Pro editions of DefectDojo. - -## What is DefectDojo? - -DefectDojo is a Developer Security Operations (DevSecOps) platform. DefectDojo streamlines DevSecOps by serving as an automatic aggregator for your suite of security tools, allowing you to easily organize your security work and report your organization’s security posture to other stakeholders. - -While security process automation and integrated development pipelines are the end goals of DefectDojo, at its core this software is a bug tracker for security vulnerabilities, which is meant to ingest, organize and standardize reports from many security tools. - -### What does DefectDojo do? - -DefectDojo has smart features to enhance and tune the results from your security tools, including the ability to: - -- Track and report on security Findings in context -- Enforce SLAs in context -- Handle False Positives, Risk Acceptances and other triage decisions -- Distill duplicates using DefectDojo's deduplication algorithm -- Integrate with external Project Tracking software. -- Provide metrics/reports across repositories and development branches using CI/CD integration. -- Coordinate traditional Pen test management. -- Set and enforce SLAs for vulnerability remediation procedures. -- Create and track Risk Acceptances for security vulnerabilities. - -Ultimately, DefectDojo's Product:Engagement model allows you to take inventory of your development environment and immediately place new security Findings in context. - ---- -Here are some examples of ways DefectDojo can be implemented, with DefectDojo co-founder and CTO Matt Tesauro: - - ---- - -## DefectDojo Open-Source - -DefectDojo's core functionality is available in DefectDojo Open-Source. 
- -This edition of DefectDojo includes: - -- Import/Reimport for all 200+ Supported Tools -- REST API -- Deduplication features -- Limited UI, metrics and reporting features -- Jira integration capability - -For teams managing a smaller volume of Findings, DefectDojo Open-Source is a great starting point. - -### Installation Guides - -There are a few supported ways to install DefectDojo’s Open-Source edition ([available on Github](https://github.com/DefectDojo/django-DefectDojo)): - -[Docker Compose](https://github.com/DefectDojo/django-DefectDojo/blob/master/readme-docs/DOCKER.md) is the easiest method to install the core program and services required to run DefectDojo. -Our [Architecture](https://docs.defectdojo.com/en/open_source/installation/architecture/) guide gives you an overview of each service and component used by DefectDojo. -[Running In Production](https://docs.defectdojo.com/en/open_source/installation/running-in-production/) lists system requirements, performance tweaks and maintenance processes for running DefectDojo on a production server (with Docker Compose). - -Kubernetes is not fully supported at the Open-Source level, but this guide can be referenced and used as a starting point to integrate DefectDojo into Kubernetes architecture. - -If you run into trouble with an Open-Source install, we highly recommend asking questions on the [OWASP Slack](https://owasp.org/slack/invite). Our community members are active on the #defectdojo channel and can help you with issues you’re facing. - -## 🟧 DefectDojo Pro Edition - - - -DefectDojo Inc. hosts a Pro edition of this software for commercial purposes. 
Along with a sleek, modern UI, DefectDojo Pro includes: - -* [Connectors](/en/connecting_your_tools/connectors/about_connectors/): out-of-the-box API integrations with enterprise-level scanners (such as Checkmarx One, BurpSuite, Semgrep and more) -* **Configurable Import Methods**: [Universal Parser](/supported_tools/parsers/universal_parser/), [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/) -* **[CLI Tools](/en/connecting_your_tools/external_tools/)** for rapid integration with your systems -* **[Additional Project Tracking Integrations](/en/share_your_findings/integrations/)**: ServiceNow, Azure DevOps, GitHub and GitLab -* **[Improved Metrics](/en/customize_dojo/dashboards/pro_dashboards/)** for executive reporting and high-level analysis -* **[Priority And Risk](/en/working_with_findings/finding_priority/)** to identify the Findings of highest urgency, system-wide -* **Premium Support** and implementation guidance for your organization - -The Pro edition is available as a cloud-hosted SaaS offering, and is also available for installation on-premises. - -For more information on DefectDojo Pro, check out our [Pricing page](https://defectdojo.com/pricing). - -## Online Demos - -Online demos for both Open-Source and Pro versions of DefectDojo are available. Both can be accessed using the following credentials: - -- Username: `admin` -- Password: `1Defectdojo@demo#appsec` - -These demos come loaded with sample data, and are reset on a daily basis. - -### Open-Source Demo - -A running example of DefectDojo (Open-Source Edition) is available at [https://demo.defectdojo.org/](https://demo.defectdojo.org/). - -### Pro Demo - -A running example of DefectDojo Pro is available at -[https://pro.demo.defectdojo.com/](https://pro.demo.defectdojo.com/). - -## Learning DefectDojo - -Whether you’re a Pro or an Open-Source user, we have many resources to help you get started with DefectDojo. 
- -* Our [New User Checklist](/en/about_defectdojo/new_user_checklist/) covers the fundamentals of setting up your DefectDojo environment and establishing your import, triage and reporting workflows. -* Review our supported [security tool integrations](/en/connecting_your_tools/parsers/) to help fit DefectDojo in your DevSecOps program. -* Our team maintains a [YouTube Channel](https://www.youtube.com/@defectdojo) which hosts tutorials, archived Office Hours events, and other content. - -## Connect With Us - -To get in touch with the DefectDojo Inc team, you can always reach out to [hello@defectdojo.com](mailto:hello@defectdojo.com). - -We regularly on [LinkedIn](https://www.linkedin.com/company/33245534) and also host online presentations for AppSec professionals that can be accessed live or on demand. You can learn about upcoming events on our [Events page](https://defectdojo.com/events) or watch past presentations on our [YouTube Channel](https://www.youtube.com/@defectdojo). - -### Stickers - +--- +title: "About Our Documentation" +date: 2021-02-02T20:46:29+01:00 +draft: false +type: docs +weight: 1 +--- + +![image](images/dashboard.png) + + +DefectDojo Inc. and open-source contributors maintain this documentation to support both the Community and Pro editions of DefectDojo. + +## What is DefectDojo? + +DefectDojo is a Developer Security Operations (DevSecOps) platform. DefectDojo streamlines DevSecOps by serving as an automatic aggregator for your suite of security tools, allowing you to easily organize your security work and report your organization’s security posture to other stakeholders. + +While security process automation and integrated development pipelines are the end goals of DefectDojo, at its core this software is a bug tracker for security vulnerabilities, which is meant to ingest, organize and standardize reports from many security tools. + +### What does DefectDojo do? 
+ +DefectDojo has smart features to enhance and tune the results from your security tools, including the ability to: + +- Track and report on security Findings in context +- Enforce SLAs in context +- Handle False Positives, Risk Acceptances and other triage decisions +- Distill duplicates using DefectDojo's deduplication algorithm +- Integrate with external Project Tracking software. +- Provide metrics/reports across repositories and development branches using CI/CD integration. +- Coordinate traditional Pen test management. +- Set and enforce SLAs for vulnerability remediation procedures. +- Create and track Risk Acceptances for security vulnerabilities. + +Ultimately, DefectDojo's Product:Engagement model allows you to take inventory of your development environment and immediately place new security Findings in context. + +--- +Here are some examples of ways DefectDojo can be implemented, with DefectDojo co-founder and CTO Matt Tesauro: + + +--- + +## DefectDojo Open-Source + +DefectDojo's core functionality is available in DefectDojo Open-Source. + +This edition of DefectDojo includes: + +- Import/Reimport for all 200+ Supported Tools +- REST API +- Deduplication features +- Limited UI, metrics and reporting features +- Jira integration capability + +For teams managing a smaller volume of Findings, DefectDojo Open-Source is a great starting point. + +### Installation Guides + +There are a few supported ways to install DefectDojo’s Open-Source edition ([available on Github](https://github.com/DefectDojo/django-DefectDojo)): + +[Docker Compose](https://github.com/DefectDojo/django-DefectDojo/blob/master/readme-docs/DOCKER.md) is the easiest method to install the core program and services required to run DefectDojo. +Our [Architecture](https://docs.defectdojo.com/en/open_source/installation/architecture/) guide gives you an overview of each service and component used by DefectDojo. 
+[Running In Production](https://docs.defectdojo.com/en/open_source/installation/running-in-production/) lists system requirements, performance tweaks and maintenance processes for running DefectDojo on a production server (with Docker Compose). + +Kubernetes is not fully supported at the Open-Source level, but this guide can be referenced and used as a starting point to integrate DefectDojo into Kubernetes architecture. + +If you run into trouble with an Open-Source install, we highly recommend asking questions on the [OWASP Slack](https://owasp.org/slack/invite). Our community members are active on the #defectdojo channel and can help you with issues you’re facing. + +## 🟧 DefectDojo Pro Edition + + + +DefectDojo Inc. hosts a Pro edition of this software for commercial purposes. Along with a sleek, modern UI, DefectDojo Pro includes: + +* [Connectors](/en/connecting_your_tools/connectors/about_connectors/): out-of-the-box API integrations with enterprise-level scanners (such as Checkmarx One, BurpSuite, Semgrep and more) +* **Configurable Import Methods**: [Universal Parser](/supported_tools/parsers/universal_parser/), [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/) +* **[CLI Tools](/en/connecting_your_tools/external_tools/)** for rapid integration with your systems +* **[Additional Project Tracking Integrations](/en/share_your_findings/integrations/)**: ServiceNow, Azure DevOps, GitHub and GitLab +* **[Improved Metrics](/en/customize_dojo/dashboards/pro_dashboards/)** for executive reporting and high-level analysis +* **[Priority And Risk](/en/working_with_findings/finding_priority/)** to identify the Findings of highest urgency, system-wide +* **Premium Support** and implementation guidance for your organization + +The Pro edition is available as a cloud-hosted SaaS offering, and is also available for installation on-premises. + +For more information on DefectDojo Pro, check out our [Pricing page](https://defectdojo.com/pricing). 
+ +## Online Demos + +Online demos for both Open-Source and Pro versions of DefectDojo are available. Both can be accessed using the following credentials: + +- Username: `admin` +- Password: `1Defectdojo@demo#appsec` + +These demos come loaded with sample data, and are reset on a daily basis. + +### Open-Source Demo + +A running example of DefectDojo (Open-Source Edition) is available at [https://demo.defectdojo.org/](https://demo.defectdojo.org/). + +### Pro Demo + +A running example of DefectDojo Pro is available at +[https://pro.demo.defectdojo.com/](https://pro.demo.defectdojo.com/). + +## Learning DefectDojo + +Whether you’re a Pro or an Open-Source user, we have many resources to help you get started with DefectDojo. + +* Our [New User Checklist](/en/about_defectdojo/new_user_checklist/) covers the fundamentals of setting up your DefectDojo environment and establishing your import, triage and reporting workflows. +* Review our supported [security tool integrations](/en/connecting_your_tools/parsers/) to help fit DefectDojo in your DevSecOps program. +* Our team maintains a [YouTube Channel](https://www.youtube.com/@defectdojo) which hosts tutorials, archived Office Hours events, and other content. + +## Connect With Us + +To get in touch with the DefectDojo Inc team, you can always reach out to [hello@defectdojo.com](mailto:hello@defectdojo.com). + +We regularly on [LinkedIn](https://www.linkedin.com/company/33245534) and also host online presentations for AppSec professionals that can be accessed live or on demand. You can learn about upcoming events on our [Events page](https://defectdojo.com/events) or watch past presentations on our [YouTube Channel](https://www.youtube.com/@defectdojo). + +### Stickers + Looking for cool DefectDojo laptop stickers? As a thank you for being a part of the DefectDojo community, you can sign up to get some free DefectDojo stickers. 
For more information, check out [this link](https://defectdojo.com/defectdojo-sticker-request). \ No newline at end of file diff --git a/docs/content/en/about_defectdojo/contact_defectdojo_support.md b/docs/content/en/about_defectdojo/contact_defectdojo_support.md index 521c2c47c4a..c99e50c550f 100644 --- a/docs/content/en/about_defectdojo/contact_defectdojo_support.md +++ b/docs/content/en/about_defectdojo/contact_defectdojo_support.md @@ -1,45 +1,45 @@ ---- -title: "Get Support" -description: "For Pro users: support@defectdojo.com + other options" -draft: "false" -pro-feature: true -weight: 7 ---- - -Need help with DefectDojo? Here are some ways to get assistance. - -## Open-Source Support - -Open-Source users can receive help and advice through our community channels. - -For Open-Source users, the quickest way to get help is through the [OWASP Slack Channel](https://owasp.org/slack/invite). Our community members are active on the **# defectdojo channel** and can help you with issues you're facing. - -To report a bug, issues can be raised on our [GitHub](https://github.com/DefectDojo/django-DefectDojo). - -See our [Community Site](https://defectdojo.com/open-source) for more information. - -## DefectDojo Pro Support - -DefectDojo Pro subscriptions come with full support from the DefectDojo Inc. team during the initial trial period and beyond. - -### Email - -Customers/Pro Users can always email our team directly at [support@defectdojo.com](mailto:support@defectdojo.com) for assistance. - -### Within DefectDojo - -You can contact the Support team through the DefectDojo App in two ways: - -1. by opening **Cloud Manager > Contact Support** from the left sidebar -2. through **{your-instance}.defectdojo.com/cloud_portal/support**. - -![image](images/contact_defectdojo_support.png) - -### Through the Cloud Portal - -You can also contact our support team through your Cloud Portal: - -1. by clicking on **Contact Us** (on the left sidebar) -2. or via ****. 
- -![image](images/contact_defectdojo_support_2.png) +--- +title: "Get Support" +description: "For Pro users: support@defectdojo.com + other options" +draft: "false" +pro-feature: true +weight: 7 +--- + +Need help with DefectDojo? Here are some ways to get assistance. + +## Open-Source Support + +Open-Source users can receive help and advice through our community channels. + +For Open-Source users, the quickest way to get help is through the [OWASP Slack Channel](https://owasp.org/slack/invite). Our community members are active on the **# defectdojo channel** and can help you with issues you're facing. + +To report a bug, issues can be raised on our [GitHub](https://github.com/DefectDojo/django-DefectDojo). + +See our [Community Site](https://defectdojo.com/open-source) for more information. + +## DefectDojo Pro Support + +DefectDojo Pro subscriptions come with full support from the DefectDojo Inc. team during the initial trial period and beyond. + +### Email + +Customers/Pro Users can always email our team directly at [support@defectdojo.com](mailto:support@defectdojo.com) for assistance. + +### Within DefectDojo + +You can contact the Support team through the DefectDojo App in two ways: + +1. by opening **Cloud Manager > Contact Support** from the left sidebar +2. through **{your-instance}.defectdojo.com/cloud_portal/support**. + +![image](images/contact_defectdojo_support.png) + +### Through the Cloud Portal + +You can also contact our support team through your Cloud Portal: + +1. by clicking on **Contact Us** (on the left sidebar) +2. or via ****. 
+ +![image](images/contact_defectdojo_support_2.png) diff --git a/docs/content/en/about_defectdojo/examples_of_use.md b/docs/content/en/about_defectdojo/examples_of_use.md index 450d28304b7..6452e0e0d4e 100644 --- a/docs/content/en/about_defectdojo/examples_of_use.md +++ b/docs/content/en/about_defectdojo/examples_of_use.md @@ -1,157 +1,157 @@ ---- -title: "💡 Common Use Cases" -description: "Use Cases and examples" -draft: "false" -weight: 2 -chapter: true ---- - -This article is based on DefectDojo Inc's February 2025 Office Hours: "Tackling Common Use Cases". - - -## Examples of Use Cases - -DefectDojo is designed to handle any security implementation, no matter your security team size, IT complexity level, or reporting volume. The following stories are intended as jumping-off points for your own needs, but they’re based on real examples from our community and the DefectDojo Pro team. - -### Large Enterprise: RBAC and Engagements - - -‘BigCorp’ is a large multinational enterprise, with a Chief Information Security Officer (CISO) and a centralized IT security group that includes AppSec. - -Security at BigCorp is highly centralized. Certain things are delegated out to Business Information Security Officers (BISO). - -The key concerns for BigCorp are: - -- Set and maintain a consistent testing method across all business units in the organization -- Meet compliance requirements and avoid regulatory issues - -#### Testing Model - -BigCorp handles security data from many sources: - -- CI/CD jobs that run SAST, SCA and Secret scanning tools automatically -- Third-party Pen testing for certain Products -- PCI compliance auditing for certain Products - -Each of these report categories can be handled by a separate Engagement, with a separate Test for each kind of scan in DefectDojo. 
- -![image](images/example_product_hierarchy_bigcorp.png) - -- If a Product has a CI/CD pipeline, all of the results from that pipeline can be continually imported into a single open-ended Engagement. Each tool used will create a separate Test within the CI/CD Engagement, which can be continuously updated with new data. -(See our guide to [Reimport](/en/connecting_your_tools/import_scan_files/using_reimport/)) -- Each Pen Test effort can have a separate Engagement created to contain all of the results: e.g. "Q1 Pen Test 2024," "Q2 Pen Test 2024," etc. -- BigCorp will likely want to run their own mock PCI audit so that they're prepared for the real thing. The results of those audits can also be stored as a separate Engagement. - -#### RBAC Model - -- Each BISO has Reader access assigned for each business unit (Product Type) that they're in charge of. -- Each Product Owner has Writer access for the Product that they're in charge of. Within their Product, Product Owners can interact with DefectDojo by keeping notes, setting up [CI/CD pipelines](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/), creating Risk Acceptances and using other features. -- Developers at BigCorp have no access to DefectDojo at all, and they don't need it. The Product Owner can push Jira tickets directly from DefectDojo which contain all of the relevant vulnerability information. The developers are already using Jira, so they don't have to track remediation any differently than a different development task. - -### Embedded Systems: Version-Controlled Reporting - -Cyber Robotics is a company that sells manufacturing hardware that comes with embedded software systems. They have a Chief Product Officer (CPO) that oversees both their product and cybersecurity as a whole. 
- -Though they have less diverse security information to manage than BigCorp, it's still essential for them to properly contextualize their security information so that they can proactively respond to any significant Findings. - -Key concerns for Cyber Robotics: - -- They have a limited product line but **many** versions of each product that they need to properly catalog. -- Maintenance for their products is complex and costs are high, so unnecessary work needs to be avoided. - -#### Testing Model - -Cyber Robotics has a standardized testing process for all of their embedded systems: - -- CI/CD, SAST, and SCA tests are run -- Security Control Reviews -- Network Scans -- Third Party Code Review - -However, because each version of their software is isolated, they’ll inevitably have a lot of data to organize, much of which is only useful in a single context (i.e., the particular version of the software they’re running). - -Cyber Robotics can solve this problem by using Product Types to represent a single product line, and individual Products for each separate version. This will allow them to drill down to determine which Products are associated with a single vulnerability. - -![image](images/example_product_hierarchy_robotics.png) - -Assigning software versions to Products, rather than Engagements, allows Cyber Robotics to limit access to a particular software version, if necessary. Field technicians and Support staff can be granted access to a single version of the software without having to give them access to the entire product line. - -#### RBAC Model - -The AppSec team here has Global Roles assigned that govern their level of interaction. - -- The CPO has Global Reader access to DefectDojo, as with the CISO in BigCorp. -- Individual Product Owners have Global Reader access to any Product in DefectDojo, as well as Writer access to the Product that they own. 
- -On the Support side: - -- Support personnel are temporarily granted Reader access to specific Products that they're assigned to maintain, but they do not have access to all DefectDojo data. - -### Dynamic IT environments and microservices: Cloud Services company - -Kate's Cloud Service operates a rapidly changing environment that uses Kubernetes, microservices, and automation. Kate's Cloud Service has a VP of Cloud that oversees Cloud Security issues. They also have a CISO who manages the software development on offer, but for this example we will focus specifically on their Cloud security concerns. - -Kate's Cloud Service has fully automated all of their reporting and ingests data into DefectDojo as soon as reports are produced. - -Key Concerns for Kate's Cloud Service: - -- Managing multi-tenant cloud security, preventing cross-customer interaction while enabling shared service delivery. -- Handling rapid changes in their cloud environment. - -#### Tagging Shared Services - -Because Kate's model contains many shared services that can impact other Products, the team [Tags](/en/working_with_findings/organizing_engagements_tests/tagging_objects/) their Products to indicate which cloud offerings rely on those services. This allows any issues with shared services to be filtered across Products and reported to the relevant teams. Each of these shared services are in a single Product Type that separates them from the main cloud offerings. - -![image](images/example_product_hierarchy_microservices.png) - -Because the company is rapidly growing and tech leads are changing frequently, Kate can use Tags to track which tech lead is currently responsible for each cloud product, avoiding the need for constant manual updates to their DefectDojo system. These tech lead associations are tracked by a service that’s external to DefectDojo and can govern the import pipelines or call the DefectDojo API. 
- -For more information on Tagging, see our guide to [Tags](/en/working_with_findings/organizing_engagements_tests/tagging_objects/). - -#### RBAC Model - -On the Security/Compliance side: - -- The Product Security Team that owns DefectDojo has admin access to the entire system. -- Analysts working for the VP of Cloud are granted read-only access across the system, allowing them to generate the necessary reports and metrics for the VP to assess the security of various cloud offerings. - -On the development side: - -- Tech Leads for each specific cloud product (e.g., compute, storage, shared services) have **Maintainer access** to their assigned Product in order to triage the security results related to their specific cloud product offering. They can review Findings and take action within their Product and can also reorganize their Finding data significantly. -- Developers working on specific Products are given **Writer Access** to the Product they're working on, enabling them to comment on Findings, request Peer Reviews, and create Risk Acceptances. - -### Onboarding New Acquisitions: SaaSy Software - -SaaSy software is a rapidly growing firm which frequently acquires other software companies. Every time a new company is acquired, the Director Of Quality engineering and the AppSec team is suddenly in charge of many new code repos, developers, and processes. Their DefectDojo model ensures that they can get up to speed as soon as possible. - -Key Concerns for SaaSy Software: - -- Avoiding public security issues while maintaining compliance programs (such as SOC2). -- Ability to confidently onboard tools and processes from new products. -- Ability to report and categorize vulnerabilities on both in-production and in-development branches. - -#### Testing Model - -Testing at SaaSy is focused on broad strokes rather than standardized tool use, since each acquisition comes with their own tools and processes for AppSec. 
SaaSy needs to perform both internal assessments (CI/CD, DAST, container scans, and threat modeling) and external assessments (Third party pen tests, compliance audits). - -To assist with onboarding new applications, SaaSy software has a standard approach to their data model: each time SaaSy onboards a new application, they create a new Product Type for that app, and create sub-products for the repositories that make it up (Front-End, Backend API, etc). - -![image](images/example_product_hierarchy_saas.png) - -Each of these Products is further subdivided into Engagements, one for the main branch and one for each branch of development. Tests within these Engagements are used to categorize the testing efforts. Development branches have separate Tests which store the results of CI/CD and SCA scans. The Main branch has those as well, but also adds Tests which store Manual Code Review and Threat Model reports. - -All of these Tests are open-ended and can be updated on a regular basis using Reimport. [Deduplication](/en/working_with_findings/finding_deduplication/about_deduplication/) is only handled at the Engagement level, which prevents Findings in one Code branch from closing Findings in another. - -By applying this model consistently, SaaSy has a model that they can apply to any new software acquisition, and the AppSec team can quickly begin monitoring the data to ensure compliance. - -#### RBAC Model - -On the Security/Compliance side: - -- The AppSec team at SaaSy software owns DefectDojo and has full admin access to the software. -- QE and Compliance teams have read-only access to the entire system, to pull reports and dive into data if required. - -On the development side: - -- Each Product Owner has Writer access to the Product they own in DefectDojo, which allows them to write Risk Acceptances and view metrics for the Product. -- Developers have read-only access to each Product they work on. 
They can Request Peer Reviews on Findings or issues they are trying to remediate. +--- +title: "💡 Common Use Cases" +description: "Use Cases and examples" +draft: "false" +weight: 2 +chapter: true +--- + +This article is based on DefectDojo Inc's February 2025 Office Hours: "Tackling Common Use Cases". + + +## Examples of Use Cases + +DefectDojo is designed to handle any security implementation, no matter your security team size, IT complexity level, or reporting volume. The following stories are intended as jumping-off points for your own needs, but they’re based on real examples from our community and the DefectDojo Pro team. + +### Large Enterprise: RBAC and Engagements + + +‘BigCorp’ is a large multinational enterprise, with a Chief Information Security Officer (CISO) and a centralized IT security group that includes AppSec. + +Security at BigCorp is highly centralized. Certain things are delegated out to Business Information Security Officers (BISO). + +The key concerns for BigCorp are: + +- Set and maintain a consistent testing method across all business units in the organization +- Meet compliance requirements and avoid regulatory issues + +#### Testing Model + +BigCorp handles security data from many sources: + +- CI/CD jobs that run SAST, SCA and Secret scanning tools automatically +- Third-party Pen testing for certain Products +- PCI compliance auditing for certain Products + +Each of these report categories can be handled by a separate Engagement, with a separate Test for each kind of scan in DefectDojo. + +![image](images/example_product_hierarchy_bigcorp.png) + +- If a Product has a CI/CD pipeline, all of the results from that pipeline can be continually imported into a single open-ended Engagement. Each tool used will create a separate Test within the CI/CD Engagement, which can be continuously updated with new data. 
+(See our guide to [Reimport](/en/connecting_your_tools/import_scan_files/using_reimport/)) +- Each Pen Test effort can have a separate Engagement created to contain all of the results: e.g. "Q1 Pen Test 2024," "Q2 Pen Test 2024," etc. +- BigCorp will likely want to run their own mock PCI audit so that they're prepared for the real thing. The results of those audits can also be stored as a separate Engagement. + +#### RBAC Model + +- Each BISO has Reader access assigned for each business unit (Product Type) that they're in charge of. +- Each Product Owner has Writer access for the Product that they're in charge of. Within their Product, Product Owners can interact with DefectDojo by keeping notes, setting up [CI/CD pipelines](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/), creating Risk Acceptances and using other features. +- Developers at BigCorp have no access to DefectDojo at all, and they don't need it. The Product Owner can push Jira tickets directly from DefectDojo which contain all of the relevant vulnerability information. The developers are already using Jira, so they don't have to track remediation any differently than a different development task. + +### Embedded Systems: Version-Controlled Reporting + +Cyber Robotics is a company that sells manufacturing hardware that comes with embedded software systems. They have a Chief Product Officer (CPO) that oversees both their product and cybersecurity as a whole. + +Though they have less diverse security information to manage than BigCorp, it's still essential for them to properly contextualize their security information so that they can proactively respond to any significant Findings. + +Key concerns for Cyber Robotics: + +- They have a limited product line but **many** versions of each product that they need to properly catalog. +- Maintenance for their products is complex and costs are high, so unnecessary work needs to be avoided. 
+ +#### Testing Model + +Cyber Robotics has a standardized testing process for all of their embedded systems: + +- CI/CD, SAST, and SCA tests are run +- Security Control Reviews +- Network Scans +- Third Party Code Review + +However, because each version of their software is isolated, they’ll inevitably have a lot of data to organize, much of which is only useful in a single context (i.e., the particular version of the software they’re running). + +Cyber Robotics can solve this problem by using Product Types to represent a single product line, and individual Products for each separate version. This will allow them to drill down to determine which Products are associated with a single vulnerability. + +![image](images/example_product_hierarchy_robotics.png) + +Assigning software versions to Products, rather than Engagements, allows Cyber Robotics to limit access to a particular software version, if necessary. Field technicians and Support staff can be granted access to a single version of the software without having to give them access to the entire product line. + +#### RBAC Model + +The AppSec team here has Global Roles assigned that govern their level of interaction. + +- The CPO has Global Reader access to DefectDojo, as with the CISO in BigCorp. +- Individual Product Owners have Global Reader access to any Product in DefectDojo, as well as Writer access to the Product that they own. + +On the Support side: + +- Support personnel are temporarily granted Reader access to specific Products that they're assigned to maintain, but they do not have access to all DefectDojo data. + +### Dynamic IT environments and microservices: Cloud Services company + +Kate's Cloud Service operates a rapidly changing environment that uses Kubernetes, microservices, and automation. Kate's Cloud Service has a VP of Cloud that oversees Cloud Security issues. 
They also have a CISO who manages the software development on offer, but for this example we will focus specifically on their Cloud security concerns. + +Kate's Cloud Service has fully automated all of their reporting and ingests data into DefectDojo as soon as reports are produced. + +Key Concerns for Kate's Cloud Service: + +- Managing multi-tenant cloud security, preventing cross-customer interaction while enabling shared service delivery. +- Handling rapid changes in their cloud environment. + +#### Tagging Shared Services + +Because Kate's model contains many shared services that can impact other Products, the team [Tags](/en/working_with_findings/organizing_engagements_tests/tagging_objects/) their Products to indicate which cloud offerings rely on those services. This allows any issues with shared services to be filtered across Products and reported to the relevant teams. Each of these shared services are in a single Product Type that separates them from the main cloud offerings. + +![image](images/example_product_hierarchy_microservices.png) + +Because the company is rapidly growing and tech leads are changing frequently, Kate can use Tags to track which tech lead is currently responsible for each cloud product, avoiding the need for constant manual updates to their DefectDojo system. These tech lead associations are tracked by a service that’s external to DefectDojo and can govern the import pipelines or call the DefectDojo API. + +For more information on Tagging, see our guide to [Tags](/en/working_with_findings/organizing_engagements_tests/tagging_objects/). + +#### RBAC Model + +On the Security/Compliance side: + +- The Product Security Team that owns DefectDojo has admin access to the entire system. +- Analysts working for the VP of Cloud are granted read-only access across the system, allowing them to generate the necessary reports and metrics for the VP to assess the security of various cloud offerings. 
+ +On the development side: + +- Tech Leads for each specific cloud product (e.g., compute, storage, shared services) have **Maintainer access** to their assigned Product in order to triage the security results related to their specific cloud product offering. They can review Findings and take action within their Product and can also reorganize their Finding data significantly. +- Developers working on specific Products are given **Writer Access** to the Product they're working on, enabling them to comment on Findings, request Peer Reviews, and create Risk Acceptances. + +### Onboarding New Acquisitions: SaaSy Software + +SaaSy software is a rapidly growing firm which frequently acquires other software companies. Every time a new company is acquired, the Director Of Quality engineering and the AppSec team is suddenly in charge of many new code repos, developers, and processes. Their DefectDojo model ensures that they can get up to speed as soon as possible. + +Key Concerns for SaaSy Software: + +- Avoiding public security issues while maintaining compliance programs (such as SOC2). +- Ability to confidently onboard tools and processes from new products. +- Ability to report and categorize vulnerabilities on both in-production and in-development branches. + +#### Testing Model + +Testing at SaaSy is focused on broad strokes rather than standardized tool use, since each acquisition comes with their own tools and processes for AppSec. SaaSy needs to perform both internal assessments (CI/CD, DAST, container scans, and threat modeling) and external assessments (Third party pen tests, compliance audits). + +To assist with onboarding new applications, SaaSy software has a standard approach to their data model: each time SaaSy onboards a new application, they create a new Product Type for that app, and create sub-products for the repositories that make it up (Front-End, Backend API, etc). 
+ +![image](images/example_product_hierarchy_saas.png) + +Each of these Products is further subdivided into Engagements, one for the main branch and one for each branch of development. Tests within these Engagements are used to categorize the testing efforts. Development branches have separate Tests which store the results of CI/CD and SCA scans. The Main branch has those as well, but also adds Tests which store Manual Code Review and Threat Model reports. + +All of these Tests are open-ended and can be updated on a regular basis using Reimport. [Deduplication](/en/working_with_findings/finding_deduplication/about_deduplication/) is only handled at the Engagement level, which prevents Findings in one Code branch from closing Findings in another. + +By applying this model consistently, SaaSy has a model that they can apply to any new software acquisition, and the AppSec team can quickly begin monitoring the data to ensure compliance. + +#### RBAC Model + +On the Security/Compliance side: + +- The AppSec team at SaaSy software owns DefectDojo and has full admin access to the software. +- QE and Compliance teams have read-only access to the entire system, to pull reports and dive into data if required. + +On the development side: + +- Each Product Owner has Writer access to the Product they own in DefectDojo, which allows them to write Risk Acceptances and view metrics for the Product. +- Developers have read-only access to each Product they work on. They can Request Peer Reviews on Findings or issues they are trying to remediate. 
diff --git a/docs/content/en/about_defectdojo/faq.md b/docs/content/en/about_defectdojo/faq.md index f94ebceab3d..53461963f06 100644 --- a/docs/content/en/about_defectdojo/faq.md +++ b/docs/content/en/about_defectdojo/faq.md @@ -1,135 +1,135 @@ ---- -title: "❓ Frequently Asked Questions" -description: "DefectDojo FAQ" -draft: "false" -weight: 2 -chapter: true ---- - -Here are some frequently asked questions about working with DefectDojo - both in DefectDojo Pro or DefectDojo OS. - -## General Questions - -### How should I organize my security testing in DefectDojo? - -While DefectDojo can support any security or testing environment, everyone’s security team and operations look different, so there’s no one-size-fits-all approach to using it. We have a very detailed article on [common use cases](/en/about_defectdojo/examples_of_use) that has examples of how different organizations apply RBAC and the DefectDojo data model to support their needs. - -### What are the recommended workflows for security testing in DefectDojo? - -DefectDojo is meant to be the central source of truth for your organization's security posture, and it can fill different needs depending on your organization's requirements, such as: - -- Allowing users to identify duplicate findings across scans and tools, minimizing alert fatigue. -- Enforcing SLAs on vulnerabilities, ensuring that your organization handles each Finding within an appropriate timeframe. -- Sending tickets to [Jira](/en/share_your_findings/jira_guide/), ServiceNow or other Project Tracking software, allowing your development team to integrate issue remediation into their standard release process without requiring them to learn another project management tool. -- Integrating into automated [CI/CD pipelines](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/) to automatically ingest report data from repositories, even down to the branch level. 
-- Creating [reports](/en/share_your_findings/pro_reports/using_the_report_builder/) on any set of vulnerabilities or software context, to quickly share scan results or status updates with stakeholders. -- Establishing acceptance and mitigation workflows, supporting formal risk-management tracking. - - -DefectDojo is designed to support and standardize your current security workflow. All of these methods can be used to enhance your team’s processes and adapt to how you currently operate. - -### What features are available in DefectDojo Pro? - -DefectDojo Pro expands on the above workflows further, adding: - -- An [improved UI](/en/about_defectdojo/ui_pro_vs_os/) designed for speed and efficiency when navigating through enterprise-level data volumes. It also includes a dark mode. -- The ability to [pre-triage your Findings](/en/working_with_findings/finding_priority/) by Priority and Risk, allowing your team to identify and fix your most critical issues first. -- A [Rules Engine](/en/customize_dojo/rules_engine/) to script automated bulk actions and build custom workflows to handle Findings and other objects, no programming experience required. -- [Enhanced report and metrics generation capabilities](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards) to easily share the security posture of your apps and repos. -- [Advanced deduplication settings](/en/working_with_findings/finding_deduplication/tune_deduplication/) to fine-tune how DefectDojo identifies and manages duplicate findings. -- Streamlined import capabilities, such as: - - An optimized upload method which processes Findings in the background. - - The ability to quickly build a [command-line pipeline](/en/connecting_your_tools/external_tools/) using our Universal Importer and DefectDojo CLI apps, allowing you to easily import, reimport, and export data to your DefectDojo Pro instance. 
- - A [Universal Parser](/en/connecting_your_tools/parsers/universal_parser/) to turn any .json or .csv report into an actional set of Findings and have DefectDojo Pro will parse the data however you like. - - [Connectors](/en/connecting_your_tools/connectors/about_connectors/), which provide an instant connection to supported tools to import new Finding data so you can get an automated Import pipeline established without the need to set up any API calls or cron jobs. - -Further information regarding DefectDojo Pro’s capabilities can be found [here](/en/about_defectdojo/pro_features/). - -### How does DefectDojo handle access control? - -DefectDojo can be used by large teams, and setting up [RBAC (Rule Based Access Control)](/en/customize_dojo/user_management/about_perms_and_roles/) is highly recommended, both to properly establish context for each team member, and to control access to certain parts of Infrastructure. - -Role and permission assignment generally happens at the Product Type / Product level. Each team member can be assigned to one or more Products or Product Types, and can be given a role which governs how they can interact with the vulnerability data within (read only, read-write, or full control). For more information, see our [RBAC guide](/en/customize_dojo/user_management/about_perms_and_roles/). - -### How does DefectDojo handle access control for a team of users? - -Whether you’re a one-person security team for a small organization or a CISO overseeing a swath of software projects,you can easily organize [Role-Based Access Control (RBAC)](/en/customize_dojo/user_management/about_perms_and_roles/) in order to properly establish context for each team member and control access to certain parts of Infrastructure. - -Generally, role and permission assignment happens at the [Product Type/Product level](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/). 
Each team member can be given a role pertaining to one or more Products or Product Types that governs how they can interact with the vulnerability data within (e.g., read only, read-write, or full control). - -## Import Workflows - -### What tools are supported by DefectDojo? - -DefectDojo supports reports from [over 200](/en/connecting_your_tools/parsers/) commercial and open-source security security tools. - -If you're looking to add a new tool to your suite, we have a list of recommended Open-Source tools which you can check out [here](https://defectdojo.com/blog/announcing-the-defectdojo-open-source-security-awards). - -### What is the different between Import and Reimport? - -There are two different methods to import a single report from a security tool: - -- **[Import](/en/connecting_your_tools/import_scan_files/import_scan_ui/)** handles the report as a single point-in-time record. Importing a report creates a Test containing the resulting Findings. -- **[Reimport](/en/connecting_your_tools/import_scan_files/using_reimport/)** is used to update an existing Test with a new set of results. If you have a more open-ended approach to your testing process, you can continuously Reimport the latest version of your report to an existing Test. DefectDojo will compare the results of the incoming report to your existing data, record any changes, and then adjust the Findings in the Test to match the latest report. - -To understand the difference, it’s helpful to think of Import as recording a single instance of a scan event, and Reimport as updating a continual record of scanning. 
- -Here is an analogy; if you were an accountant, you could use Import to track a single receipt, while you would use Reimport to track a continuous ledger of expenses - -Both methods also use Deduplication differently: while two discrete Imported Tests in the same Product will identify and label duplicate Findings separately, Reimport will not create any Findings it identifies as [duplicates](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/avoiding_duplicates_via_reimport/) within the Test. - -Generally speaking, if a point-in-time report is what you need, Import is the best method to use. If you are continuously running and ingesting reports from a tool, Reimport is the better method for keeping things organized. - -### How can I troubleshoot Import errors? - -DefectDojo supports a wide variety of tools. If you’re seeing inconsistent behavior when importing a report, we recommend checking if the file structure matches what the tool is expecting. See our [Parser List](/en/connecting_your_tools/parsers/) to confirm that your tool is supported, and check to make sure that the file format matches what the tool expects. You can also compare the structure to our Unit Tests. - -DefectDojo Pro has a Universal Parser import method which allows you to handle any JSON, CSV or XML file. DefectDojo OS users can write custom parsers for the same purpose. - -Finally, third-party report formats have been known to change without warning: Our OS community greatly appreciates [PRs and contributions](/en/open_source/contributing/how-to-write-a-parser/) to keep our parsers up to date. - -### How should I handle large scan files? - -Importing a large report into DefectDojo can be a lengthy process. Reports of 2MB contain substantial amounts of data, which can take a long time to translate into Findings depending on the security tool’s report format. 
- -Our recommended approach is to break down large reports before import to reflect different subsections of available data. If your security tool can filter results by software project, application, or other context, exporting smaller reports makes it easier for DefectDojo to handle and categorize the data. This also has the added benefit of proactively organizing your Findings based on how the data was broken down, which makes for more relevant and faster report generation. - -DefectDojo Pro can process reports in the background. However, files still need to be uploaded and validated by DefectDojo before the background Finding creation process can begin. - -### How do I connect a CI/CD pipeline to DefectDojo? - -Many of DefectDojo's core features can be completely automated. CI/CD (or any kind of automated import) can be handled by calling the [DefectDojo REST API](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/). - -**DefectDojo Pro** users also have access to the **Universal Importer / DefectDojo CLI** [command-line tools](/en/connecting_your_tools/external_tools/), which can be installed to run in many automated environments. - -## Finding Management - -### What does the status of a Finding mean? - -Findings can have many statuses. A status of Active or Inactive is always set on a Finding, while other statuses such as Verified, False Positive, or Out Of Scope can be applied at your discretion. - -These statuses are described in more detail in our [Finding Status Definitions](/en/working_with_findings/findings_workflows/finding_status_definitions/) guide, along with information about how they can be used. - -### How can I delete Findings from DefectDojo? - -Generally speaking, we recommend retaining Closed Findings as ‘Inactive’ rather than deleting them outright, as it’s important to maintain historical records in AppSec work. 
Deleting a Finding will remove all notes and metric-tracking from that Finding outright, which can lead to inaccurate reports or an incomplete archive. - -Findings from DefectDojo can be deleted in a few ways: -- By running a [Bulk Delete](/en/working_with_findings/findings_workflows/editing_findings/#bulk-delete-findings) action on the Findings that you want to delete -- By calling `DELETE /findings/{id}` through the API -- By deleting a parent object, such as a Test, Engagement, Product Type or Product. - - Note that subclasses are not preserved independently of their parent object: Deleting a parent object such as a Product Type will delete any Products, Engagements, Tests, Findings, and Endpoints within the Product Type. Conversely, deleting an Engagement will preserve the Products, and Product Types that precede it. - -## Reporting and Jira - -### How can I generate a report in DefectDojo? - -You can quickly create a customized report in DefectDojo using the [Report Builder](/en/share_your_findings/pro_reports/using_the_report_builder/). - -DefectDojo Pro users also have access to [executive-level Metrics dashboards](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards) that can report on Product Types, Products or other data in real-time. - -### How can I integrate a project management tool with DefectDojo? - -In both Pro and Open-Source editions of DefectDojo, Findings in DefectDojo can be pushed to Jira as Issues, which allows you to integrate issue remediation with your development team. We have a [complete guide to Jira](/en/share_your_findings/jira_guide/) written which describes the process in detail. - +--- +title: "❓ Frequently Asked Questions" +description: "DefectDojo FAQ" +draft: "false" +weight: 2 +chapter: true +--- + +Here are some frequently asked questions about working with DefectDojo - both in DefectDojo Pro or DefectDojo OS. + +## General Questions + +### How should I organize my security testing in DefectDojo? 
+ +While DefectDojo can support any security or testing environment, everyone’s security team and operations look different, so there’s no one-size-fits-all approach to using it. We have a very detailed article on [common use cases](/en/about_defectdojo/examples_of_use) that has examples of how different organizations apply RBAC and the DefectDojo data model to support their needs. + +### What are the recommended workflows for security testing in DefectDojo? + +DefectDojo is meant to be the central source of truth for your organization's security posture, and it can fill different needs depending on your organization's requirements, such as: + +- Allowing users to identify duplicate findings across scans and tools, minimizing alert fatigue. +- Enforcing SLAs on vulnerabilities, ensuring that your organization handles each Finding within an appropriate timeframe. +- Sending tickets to [Jira](/en/share_your_findings/jira_guide/), ServiceNow or other Project Tracking software, allowing your development team to integrate issue remediation into their standard release process without requiring them to learn another project management tool. +- Integrating into automated [CI/CD pipelines](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/) to automatically ingest report data from repositories, even down to the branch level. +- Creating [reports](/en/share_your_findings/pro_reports/using_the_report_builder/) on any set of vulnerabilities or software context, to quickly share scan results or status updates with stakeholders. +- Establishing acceptance and mitigation workflows, supporting formal risk-management tracking. + + +DefectDojo is designed to support and standardize your current security workflow. All of these methods can be used to enhance your team’s processes and adapt to how you currently operate. + +### What features are available in DefectDojo Pro? 
+
+DefectDojo Pro expands on the above workflows further, adding:
+
+- An [improved UI](/en/about_defectdojo/ui_pro_vs_os/) designed for speed and efficiency when navigating through enterprise-level data volumes. It also includes a dark mode.
+- The ability to [pre-triage your Findings](/en/working_with_findings/finding_priority/) by Priority and Risk, allowing your team to identify and fix your most critical issues first.
+- A [Rules Engine](/en/customize_dojo/rules_engine/) to script automated bulk actions and build custom workflows to handle Findings and other objects, no programming experience required.
+- [Enhanced report and metrics generation capabilities](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards) to easily share the security posture of your apps and repos.
+- [Advanced deduplication settings](/en/working_with_findings/finding_deduplication/tune_deduplication/) to fine-tune how DefectDojo identifies and manages duplicate findings.
+- Streamlined import capabilities, such as:
+  - An optimized upload method which processes Findings in the background.
+  - The ability to quickly build a [command-line pipeline](/en/connecting_your_tools/external_tools/) using our Universal Importer and DefectDojo CLI apps, allowing you to easily import, reimport, and export data to your DefectDojo Pro instance.
+  - A [Universal Parser](/en/connecting_your_tools/parsers/universal_parser/) to turn any .json or .csv report into an actionable set of Findings; DefectDojo Pro will parse the data however you like.
+  - [Connectors](/en/connecting_your_tools/connectors/about_connectors/), which provide an instant connection to supported tools to import new Finding data so you can get an automated Import pipeline established without the need to set up any API calls or cron jobs.
+
+Further information regarding DefectDojo Pro’s capabilities can be found [here](/en/about_defectdojo/pro_features/).
+
+### How does DefectDojo handle access control? 
+
+DefectDojo can be used by large teams, and setting up [RBAC (Role-Based Access Control)](/en/customize_dojo/user_management/about_perms_and_roles/) is highly recommended, both to properly establish context for each team member, and to control access to certain parts of Infrastructure.
+
+Role and permission assignment generally happens at the Product Type / Product level. Each team member can be assigned to one or more Products or Product Types, and can be given a role which governs how they can interact with the vulnerability data within (read only, read-write, or full control). For more information, see our [RBAC guide](/en/customize_dojo/user_management/about_perms_and_roles/).
+
+### How does DefectDojo handle access control for a team of users?
+
+Whether you’re a one-person security team for a small organization or a CISO overseeing a swath of software projects, you can easily organize [Role-Based Access Control (RBAC)](/en/customize_dojo/user_management/about_perms_and_roles/) in order to properly establish context for each team member and control access to certain parts of Infrastructure.
+
+Generally, role and permission assignment happens at the [Product Type/Product level](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/). Each team member can be given a role pertaining to one or more Products or Product Types that governs how they can interact with the vulnerability data within (e.g., read only, read-write, or full control).
+
+## Import Workflows
+
+### What tools are supported by DefectDojo?
+
+DefectDojo supports reports from [over 200](/en/connecting_your_tools/parsers/) commercial and open-source security tools.
+
+If you're looking to add a new tool to your suite, we have a list of recommended Open-Source tools which you can check out [here](https://defectdojo.com/blog/announcing-the-defectdojo-open-source-security-awards).
+
+### What is the difference between Import and Reimport? 
+
+There are two different methods to import a single report from a security tool:
+
+- **[Import](/en/connecting_your_tools/import_scan_files/import_scan_ui/)** handles the report as a single point-in-time record. Importing a report creates a Test containing the resulting Findings.
+- **[Reimport](/en/connecting_your_tools/import_scan_files/using_reimport/)** is used to update an existing Test with a new set of results. If you have a more open-ended approach to your testing process, you can continuously Reimport the latest version of your report to an existing Test. DefectDojo will compare the results of the incoming report to your existing data, record any changes, and then adjust the Findings in the Test to match the latest report.
+
+To understand the difference, it’s helpful to think of Import as recording a single instance of a scan event, and Reimport as updating a continual record of scanning.
+
+Here is an analogy: if you were an accountant, you could use Import to track a single receipt, while you would use Reimport to track a continuous ledger of expenses.
+
+Both methods also use Deduplication differently: while two discrete Imported Tests in the same Product will identify and label duplicate Findings separately, Reimport will not create any Findings it identifies as [duplicates](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/avoiding_duplicates_via_reimport/) within the Test.
+
+Generally speaking, if a point-in-time report is what you need, Import is the best method to use. If you are continuously running and ingesting reports from a tool, Reimport is the better method for keeping things organized.
+
+### How can I troubleshoot Import errors?
+
+DefectDojo supports a wide variety of tools. If you’re seeing inconsistent behavior when importing a report, we recommend checking if the file structure matches what the tool is expecting. 
See our [Parser List](/en/connecting_your_tools/parsers/) to confirm that your tool is supported, and check to make sure that the file format matches what the tool expects. You can also compare the structure to our Unit Tests. + +DefectDojo Pro has a Universal Parser import method which allows you to handle any JSON, CSV or XML file. DefectDojo OS users can write custom parsers for the same purpose. + +Finally, third-party report formats have been known to change without warning: Our OS community greatly appreciates [PRs and contributions](/en/open_source/contributing/how-to-write-a-parser/) to keep our parsers up to date. + +### How should I handle large scan files? + +Importing a large report into DefectDojo can be a lengthy process. Reports of 2MB contain substantial amounts of data, which can take a long time to translate into Findings depending on the security tool’s report format. + +Our recommended approach is to break down large reports before import to reflect different subsections of available data. If your security tool can filter results by software project, application, or other context, exporting smaller reports makes it easier for DefectDojo to handle and categorize the data. This also has the added benefit of proactively organizing your Findings based on how the data was broken down, which makes for more relevant and faster report generation. + +DefectDojo Pro can process reports in the background. However, files still need to be uploaded and validated by DefectDojo before the background Finding creation process can begin. + +### How do I connect a CI/CD pipeline to DefectDojo? + +Many of DefectDojo's core features can be completely automated. CI/CD (or any kind of automated import) can be handled by calling the [DefectDojo REST API](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling/). 
+ +**DefectDojo Pro** users also have access to the **Universal Importer / DefectDojo CLI** [command-line tools](/en/connecting_your_tools/external_tools/), which can be installed to run in many automated environments. + +## Finding Management + +### What does the status of a Finding mean? + +Findings can have many statuses. A status of Active or Inactive is always set on a Finding, while other statuses such as Verified, False Positive, or Out Of Scope can be applied at your discretion. + +These statuses are described in more detail in our [Finding Status Definitions](/en/working_with_findings/findings_workflows/finding_status_definitions/) guide, along with information about how they can be used. + +### How can I delete Findings from DefectDojo? + +Generally speaking, we recommend retaining Closed Findings as ‘Inactive’ rather than deleting them outright, as it’s important to maintain historical records in AppSec work. Deleting a Finding will remove all notes and metric-tracking from that Finding outright, which can lead to inaccurate reports or an incomplete archive. + +Findings from DefectDojo can be deleted in a few ways: +- By running a [Bulk Delete](/en/working_with_findings/findings_workflows/editing_findings/#bulk-delete-findings) action on the Findings that you want to delete +- By calling `DELETE /findings/{id}` through the API +- By deleting a parent object, such as a Test, Engagement, Product Type or Product. + - Note that subclasses are not preserved independently of their parent object: Deleting a parent object such as a Product Type will delete any Products, Engagements, Tests, Findings, and Endpoints within the Product Type. Conversely, deleting an Engagement will preserve the Products, and Product Types that precede it. + +## Reporting and Jira + +### How can I generate a report in DefectDojo? + +You can quickly create a customized report in DefectDojo using the [Report Builder](/en/share_your_findings/pro_reports/using_the_report_builder/). 
+ +DefectDojo Pro users also have access to [executive-level Metrics dashboards](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards) that can report on Product Types, Products or other data in real-time. + +### How can I integrate a project management tool with DefectDojo? + +In both Pro and Open-Source editions of DefectDojo, Findings in DefectDojo can be pushed to Jira as Issues, which allows you to integrate issue remediation with your development team. We have a [complete guide to Jira](/en/share_your_findings/jira_guide/) written which describes the process in detail. + DefectDojo Pro adds support for [Additional Project Tracking Integrations](/en/share_your_findings/integrations/)**: ServiceNow, Azure DevOps, GitHub and GitLab. \ No newline at end of file diff --git a/docs/content/en/about_defectdojo/new_user_checklist.md b/docs/content/en/about_defectdojo/new_user_checklist.md index d2e93499da9..2d846de14e1 100644 --- a/docs/content/en/about_defectdojo/new_user_checklist.md +++ b/docs/content/en/about_defectdojo/new_user_checklist.md @@ -1,43 +1,43 @@ ---- -title: "☑️ New User Checklist" -description: "Get Started With DefectDojo" -draft: "false" -weight: 3 -chapter: true ---- - -Here's a quick reference you can use to ensure successful implementation, from a blank canvas to a fully functional app. - -The essence of DefectDojo is to import security data, organize it, and present it to the folks who need to know. Here are ways to achieve those things in DefectDojo Pro and Open-Source: - -### DefectDojo Pro - -1. Start by [importing a file](/en/connecting_your_tools/import_scan_files/import_scan_ui) using the UI. This is generally the quickest way to see how your data fits into the DefectDojo model. - -2. Now that you have data in DefectDojo, learn more about how to organize it with the [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). 
The Product Hierarchy creates a working inventory of your apps, which helps you divide your data into logical categories, apply access control rules, sort Findings by [Priority and Risk](/en/working_with_findings/finding_priority/) or to segment your reports to the correct team. - -3. Check out your [Metrics pages](/en/customize_dojo/dashboards/pro_dashboards/) which can be used to quickly share Finding reports with key stakeholders. - -### DefectDojo Open-Source - -1. Open-Source users can start by creating their first [Product Type and Product](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). Once those are created, they can [import a file](/en/connecting_your_tools/import_scan_files/import_scan_ui) to one of those Products using the UI. - -2. Now that you have data in DefectDojo, consider expanding your Product layout [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). The Product Hierarchy creates a working inventory of your apps, which helps you divide your data up into logical categories. These categories can be used to apply access control rules, or to segment your reports to the correct team. - -3. Use the [Report Builder](/en/share_your_findings/pro_reports/using_the_report_builder/#opening-the-report-builder) to summarize the data you've imported. Reports can be used to quickly share Findings with stakeholders such as Product Owners. - -This is the essence of DefectDojo - import security data, organize it, and present it to the folks who need to know. - -All of these features can be automated, and because DefectDojo can handle over 200 tools (at time of writing) you should be all set to create a functional security inventory of your entire organizational output. 
- -## Other guides - -### Pro Features -- If your organization uses ServiceNow, AzureDevops, GitHub or GitLab for issue tracking, check out our [documentation](/en/share_your_findings/integrations/) on those integrations. -- Customize your [main Dashboard](/en/customize_dojo/dashboards/introduction_dashboard/) with filtered tiles to view your environment at a glance. -- Learn how to rapidly import data and mirror your team's existing security environment with [Connectors](/en/connecting_your_tools/connectors/about_connectors/). - -### Open-Source Features -- Does your organization use Jira? Learn how to use our [Jira integration](/en/share_your_findings/jira_guide/) to create Jira tickets from the data you ingest. -- Are you expecting to share DefectDojo with many users in your organization? Check out our guides to [user management](/en/customize_dojo/user_management/about_perms_and_roles/) and set up role-based access control (RBAC). +--- +title: "☑️ New User Checklist" +description: "Get Started With DefectDojo" +draft: "false" +weight: 3 +chapter: true +--- + +Here's a quick reference you can use to ensure successful implementation, from a blank canvas to a fully functional app. + +The essence of DefectDojo is to import security data, organize it, and present it to the folks who need to know. Here are ways to achieve those things in DefectDojo Pro and Open-Source: + +### DefectDojo Pro + +1. Start by [importing a file](/en/connecting_your_tools/import_scan_files/import_scan_ui) using the UI. This is generally the quickest way to see how your data fits into the DefectDojo model. + +2. Now that you have data in DefectDojo, learn more about how to organize it with the [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). 
The Product Hierarchy creates a working inventory of your apps, which helps you divide your data into logical categories, apply access control rules, sort Findings by [Priority and Risk](/en/working_with_findings/finding_priority/) or to segment your reports to the correct team.
+
+3. Check out your [Metrics pages](/en/customize_dojo/dashboards/pro_dashboards/) which can be used to quickly share Finding reports with key stakeholders.
+
+### DefectDojo Open-Source
+
+1. Open-Source users can start by creating their first [Product Type and Product](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). Once those are created, they can [import a file](/en/connecting_your_tools/import_scan_files/import_scan_ui) to one of those Products using the UI.
+
+2. Now that you have data in DefectDojo, consider expanding your Product layout using the [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). The Product Hierarchy creates a working inventory of your apps, which helps you divide your data up into logical categories. These categories can be used to apply access control rules, or to segment your reports to the correct team.
+
+3. Use the [Report Builder](/en/share_your_findings/pro_reports/using_the_report_builder/#opening-the-report-builder) to summarize the data you've imported. Reports can be used to quickly share Findings with stakeholders such as Product Owners.
+
+This is the essence of DefectDojo - import security data, organize it, and present it to the folks who need to know.
+
+All of these features can be automated, and because DefectDojo can handle over 200 tools (at time of writing) you should be all set to create a functional security inventory of your entire organizational output. 
+ +## Other guides + +### Pro Features +- If your organization uses ServiceNow, AzureDevops, GitHub or GitLab for issue tracking, check out our [documentation](/en/share_your_findings/integrations/) on those integrations. +- Customize your [main Dashboard](/en/customize_dojo/dashboards/introduction_dashboard/) with filtered tiles to view your environment at a glance. +- Learn how to rapidly import data and mirror your team's existing security environment with [Connectors](/en/connecting_your_tools/connectors/about_connectors/). + +### Open-Source Features +- Does your organization use Jira? Learn how to use our [Jira integration](/en/share_your_findings/jira_guide/) to create Jira tickets from the data you ingest. +- Are you expecting to share DefectDojo with many users in your organization? Check out our guides to [user management](/en/customize_dojo/user_management/about_perms_and_roles/) and set up role-based access control (RBAC). - Ready to dive into automation? Learn how to use the [DefectDojo API](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling) to automatically import new data, and build a robust CI/CD pipeline. \ No newline at end of file diff --git a/docs/content/en/about_defectdojo/pro_features.md b/docs/content/en/about_defectdojo/pro_features.md index 8fd83333f07..01c05b3a853 100644 --- a/docs/content/en/about_defectdojo/pro_features.md +++ b/docs/content/en/about_defectdojo/pro_features.md @@ -1,105 +1,105 @@ ---- -title: "📊 Pro Features List" -description: "List of Pro Features in DefectDojo" -draft: "false" -weight: 4 -chapter: true -exclude_search: true ---- - -Here is a list of DefectDojo Pro’s many additional features, along with links to documentation to see them in action: - -## Improved UX - -### Pro UI - -DefectDojo's UI has been reworked in DefectDojo Pro to be faster, more functional, fully customizable, and better at navigating through enterprise-level data volume. It also includes a dark mode. 
-See our [Pro UI Guide](../ui_pro_vs_os) for more information. - -![image](images/enabling_deduplication_within_an_engagement_2.png) - -### Finding Priority - -DefectDojo Pro can pre-triage your Findings by Priority and Risk, allowing your team to identify and fix your most critical issues first. -See our [Finding Priority Guide](/en/working_with_findings/finding_priority/) for more details. - -### Rules Engine - -DefectDojo Pro's Rules Engine allows you to script automated bulk actions and build custom workflows to handle Findings and other objects, no programming experience required. - -See our [Rules Engine Guide](/en/customize_dojo/rules_engine) for more info. - -![image](images/rules_engine_4.png) - -### Pro Dashboards and Reporting - -Generate [instant reports and metrics](../ui_pro_vs_os/#new-dashboards) to share the security posture of your apps and repos, evaluate your security tools and analyze your team's performance in addressing security issues. - -The graphics on the landing page can be exported as SVG files, and the data used to create the graphics can also be exported as a table. - -Additionally, DefectDojo Pro includes several new [insights dashboards](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards), offering enhanced metrics for various audiences of your security program. - -### Deduplication Tuning - -Advanced Deduplication settings allow you to fine-tune how DefectDojo identifies and manages duplicate findings. Adjust same-tool, **cross-tool**, and reimport Deduplication for precision matching between all your chosen security tools and vulnerability findings. - -See our [Deduplication Tuning Guide](/en/working_with_findings/finding_deduplication/tune_deduplication/) for more information. 
- -![image](images/deduplication_tuning.png) - -## Streamlined import - -### More Import Options - -DefectDojo Pro includes four additional import methods: [Universal Importer](/en/connecting_your_tools/external_tools/), [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), [Universal Parser](/supported_tools/parsers/universal_parser/), and [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/). - -![image](images/pro_import_methods.png) - - -### Background Imports - -For enterprise-level reports, DefectDojo Pro offers an optimized upload method which processes Findings in the background. - -### CLI Tools - -Quickly build a command-line pipeline to import, reimport, and export data to your DefectDojo Pro instance using our Universal Importer and DefectDojo-CLI apps; no API scripting necessary (available for Windows, Macintosh, or Linux). - -See our [External Tools Guide](/en/connecting_your_tools/external_tools/) for more information. - -### Connectors - -DefectDojo can instantly connect to enterprise-level scanning tools to import new Finding data, creating an automated Import pipeline that works out-of-the-box without the need to set up any API calls or cron jobs. - -See our [Connectors Guide](/en/connecting_your_tools/connectors/about_connectors/) for more information. - -![image](images/add_edit_connectors_2.png) - -Supported tools for Connectors include: - -* Anchore -* AWS Security Hub -* BurpSuite -* Checkmarx ONE -* Dependency-Track -* Probely -* Semgrep -* SonarQube -* Snyk -* Tenable -* Wiz - -### Universal Parser (Beta) - -If you’re using an unsupported/customized scanning tool, or just wish that DefectDojo handled a report slightly differently, use DefectDojo Pro's Universal Parser to turn any .json or .csv report into an actionable set of Findings. Your parser will parse and map the data however you like. - -See our [Universal Parser Guide](/en/connecting_your_tools/universal_parser/) for more information. 
- -![image](images/universal_parser_3.png) - -## Support - -DefectDojo Pro subscriptions include world-class support for both on-premise and Cloud installations. Our team is available to help your organization implement and maximize your use of DefectDojo Pro. Your subscription includes: - -- **Comprehensive Support**: Unlimited support tickets and seats are available to assist your entire team. -- **Dedicated Engineering Focus**: User-reported issues, bugs, and feature requests receive priority attention from our engineering team. -- **SaaS Management**: We provide monitoring, maintenance, and backups for all SaaS instances. +--- +title: "📊 Pro Features List" +description: "List of Pro Features in DefectDojo" +draft: "false" +weight: 4 +chapter: true +exclude_search: true +--- + +Here is a list of DefectDojo Pro’s many additional features, along with links to documentation to see them in action: + +## Improved UX + +### Pro UI + +DefectDojo's UI has been reworked in DefectDojo Pro to be faster, more functional, fully customizable, and better at navigating through enterprise-level data volume. It also includes a dark mode. +See our [Pro UI Guide](../ui_pro_vs_os) for more information. + +![image](images/enabling_deduplication_within_an_engagement_2.png) + +### Finding Priority + +DefectDojo Pro can pre-triage your Findings by Priority and Risk, allowing your team to identify and fix your most critical issues first. +See our [Finding Priority Guide](/en/working_with_findings/finding_priority/) for more details. + +### Rules Engine + +DefectDojo Pro's Rules Engine allows you to script automated bulk actions and build custom workflows to handle Findings and other objects, no programming experience required. + +See our [Rules Engine Guide](/en/customize_dojo/rules_engine) for more info. 
+ +![image](images/rules_engine_4.png) + +### Pro Dashboards and Reporting + +Generate [instant reports and metrics](../ui_pro_vs_os/#new-dashboards) to share the security posture of your apps and repos, evaluate your security tools and analyze your team's performance in addressing security issues. + +The graphics on the landing page can be exported as SVG files, and the data used to create the graphics can also be exported as a table. + +Additionally, DefectDojo Pro includes several new [insights dashboards](/en/about_defectdojo/ui_pro_vs_os/#new-dashboards), offering enhanced metrics for various audiences of your security program. + +### Deduplication Tuning + +Advanced Deduplication settings allow you to fine-tune how DefectDojo identifies and manages duplicate findings. Adjust same-tool, **cross-tool**, and reimport Deduplication for precision matching between all your chosen security tools and vulnerability findings. + +See our [Deduplication Tuning Guide](/en/working_with_findings/finding_deduplication/tune_deduplication/) for more information. + +![image](images/deduplication_tuning.png) + +## Streamlined import + +### More Import Options + +DefectDojo Pro includes four additional import methods: [Universal Importer](/en/connecting_your_tools/external_tools/), [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), [Universal Parser](/supported_tools/parsers/universal_parser/), and [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/). + +![image](images/pro_import_methods.png) + + +### Background Imports + +For enterprise-level reports, DefectDojo Pro offers an optimized upload method which processes Findings in the background. + +### CLI Tools + +Quickly build a command-line pipeline to import, reimport, and export data to your DefectDojo Pro instance using our Universal Importer and DefectDojo-CLI apps; no API scripting necessary (available for Windows, Macintosh, or Linux). 
+ +See our [External Tools Guide](/en/connecting_your_tools/external_tools/) for more information. + +### Connectors + +DefectDojo can instantly connect to enterprise-level scanning tools to import new Finding data, creating an automated Import pipeline that works out-of-the-box without the need to set up any API calls or cron jobs. + +See our [Connectors Guide](/en/connecting_your_tools/connectors/about_connectors/) for more information. + +![image](images/add_edit_connectors_2.png) + +Supported tools for Connectors include: + +* Anchore +* AWS Security Hub +* BurpSuite +* Checkmarx ONE +* Dependency-Track +* Probely +* Semgrep +* SonarQube +* Snyk +* Tenable +* Wiz + +### Universal Parser (Beta) + +If you’re using an unsupported/customized scanning tool, or just wish that DefectDojo handled a report slightly differently, use DefectDojo Pro's Universal Parser to turn any .json or .csv report into an actionable set of Findings. Your parser will parse and map the data however you like. + +See our [Universal Parser Guide](/en/connecting_your_tools/universal_parser/) for more information. + +![image](images/universal_parser_3.png) + +## Support + +DefectDojo Pro subscriptions include world-class support for both on-premise and Cloud installations. Our team is available to help your organization implement and maximize your use of DefectDojo Pro. Your subscription includes: + +- **Comprehensive Support**: Unlimited support tickets and seats are available to assist your entire team. +- **Dedicated Engineering Focus**: User-reported issues, bugs, and feature requests receive priority attention from our engineering team. +- **SaaS Management**: We provide monitoring, maintenance, and backups for all SaaS instances. 
diff --git a/docs/content/en/about_defectdojo/request_a_trial.md b/docs/content/en/about_defectdojo/request_a_trial.md index 18cc2094a93..e015fbe883e 100644 --- a/docs/content/en/about_defectdojo/request_a_trial.md +++ b/docs/content/en/about_defectdojo/request_a_trial.md @@ -1,67 +1,67 @@ ---- -title: "Request a DefectDojo Pro Trial" -description: "How to request and work with a trial of DefectDojo Cloud" -draft: "false" -weight: 6 -pro-feature: true ---- - -If your team requires an on-premise DefectDojo installation, please connect with our Sales team by emailing → [hello@defectdojo.com](mailto:hello@defectdojo.com) . This trial setup process only applies to DefectDojo Cloud users. - -All DefectDojo plans include a free 2-week trial, which you can use to evaluate our software. DefectDojo Trial instances are fully-featured and can be immediately converted into paid instances by our team; no need to set everything up again, or reupload any data when your trial period ends. - -At the end of this process, you'll be put in touch with our Sales team who will follow up to receive your billing information and authorize your company's trial instance. - -# **Requesting your Trial** - -In order to sign up for a trial, you'll need to create an account on our [Cloud Portal](https://defectdojo.com/pricing), and then click the New Subscription menu option from the sidebar. - -![image](images/request_a_trial_mg.png) - -## Step 1: Welcome -Click Continue to begin setting up your instance. - -![image](images/request_a_trial.png) - -## Step 2: Enter your Company Information \& create your Domain - -Enter your company's **Name** and the **Server Label** you want to use with DefectDojo. You will then have a custom domain created for your DefectDojo instance on our servers. 
- -![image](images/request_a_trial_2.png) - -Normally, DefectDojo will name your domain according to your company name, but if you select "Use Server Label in Domain" DefectDojo will instead label your domain according to the Server Label you chose. This approach may be preferred if you plan to use multiple DefectDojo instances (such as a Production instance and a Test instance, for example). Please contact our Sales team → [hello@defectdojo.com](mailto:hello@defectdojo.com) if you require multiple instances. - -## Step 3: Select a Server Location - -Select a Server Location from the drop\-down menu. We recommend selecting a server that is geographically closest to the main DefectDojo team to reduce server latency. - -![image](images/request_a_trial_3.png) - -## Step 4: Configure your Firewall Rules - -Enter the IP address ranges, subnet mask and labels that you want to allow to access DefectDojo. Additional IP addresses and rules can be added or changed by your team after your instance is up and running. - -![image](images/request_a_trial_4.png) - -If you want to use external services with DefectDojo (e.g., GitHub or JIRA), check the appropriate boxes listed under **Select External Services.** - -## Step 5: Confirm your Plan type and Billing Frequency - -Before you complete the process, please confirm the plan you want to use along with your billing frequency (monthly or annually). - -![image](images/request_a_trial_5.png) - -## Step 6: Review and Submit your Request - -We'll prompt you to look over your request one more time. Once submitted, only Firewall rules can be changed by your team without assistance from Support. To contact Support, please email [support@defectdojo.com](mailto:support@defectdojo.com) or follow the instructions in [this article](https://docs.defectdojo.com/en/about_defectdojo/contact_defectdojo_support/). 
- -![image](images/request_a_trial_6.png) - -After reviewing and accepting DefectDojo's License and Support Agreement, you can click **Checkout With Stripe** or **Contact Sales**. - -* Checkout With Stripe will take you to a Stripe page where you can enter your billing information. -* If you do not wish to enter your billing info at this time, you can click Contact Sales, and our Sales team will be in touch to help you finalize your trial subscription. - -# Once your trial has been approved - -Our Support team will send you a Welcome email with links and an initial password to access your DefectDojo instance. You can always reach out to [support@defectdojo.com](mailto:support@defectdojo.com) for DefectDojo Pro assistance once your trial begins. +--- +title: "Request a DefectDojo Pro Trial" +description: "How to request and work with a trial of DefectDojo Cloud" +draft: "false" +weight: 6 +pro-feature: true +--- + +If your team requires an on-premise DefectDojo installation, please connect with our Sales team by emailing → [hello@defectdojo.com](mailto:hello@defectdojo.com) . This trial setup process only applies to DefectDojo Cloud users. + +All DefectDojo plans include a free 2-week trial, which you can use to evaluate our software. DefectDojo Trial instances are fully-featured and can be immediately converted into paid instances by our team; no need to set everything up again, or reupload any data when your trial period ends. + +At the end of this process, you'll be put in touch with our Sales team who will follow up to receive your billing information and authorize your company's trial instance. + +# **Requesting your Trial** + +In order to sign up for a trial, you'll need to create an account on our [Cloud Portal](https://defectdojo.com/pricing), and then click the New Subscription menu option from the sidebar. + +![image](images/request_a_trial_mg.png) + +## Step 1: Welcome +Click Continue to begin setting up your instance. 
+ +![image](images/request_a_trial.png) + +## Step 2: Enter your Company Information \& create your Domain + +Enter your company's **Name** and the **Server Label** you want to use with DefectDojo. You will then have a custom domain created for your DefectDojo instance on our servers. + +![image](images/request_a_trial_2.png) + +Normally, DefectDojo will name your domain according to your company name, but if you select "Use Server Label in Domain" DefectDojo will instead label your domain according to the Server Label you chose. This approach may be preferred if you plan to use multiple DefectDojo instances (such as a Production instance and a Test instance, for example). Please contact our Sales team → [hello@defectdojo.com](mailto:hello@defectdojo.com) if you require multiple instances. + +## Step 3: Select a Server Location + +Select a Server Location from the drop\-down menu. We recommend selecting a server that is geographically closest to the main DefectDojo team to reduce server latency. + +![image](images/request_a_trial_3.png) + +## Step 4: Configure your Firewall Rules + +Enter the IP address ranges, subnet mask and labels that you want to allow to access DefectDojo. Additional IP addresses and rules can be added or changed by your team after your instance is up and running. + +![image](images/request_a_trial_4.png) + +If you want to use external services with DefectDojo (e.g., GitHub or JIRA), check the appropriate boxes listed under **Select External Services.** + +## Step 5: Confirm your Plan type and Billing Frequency + +Before you complete the process, please confirm the plan you want to use along with your billing frequency (monthly or annually). + +![image](images/request_a_trial_5.png) + +## Step 6: Review and Submit your Request + +We'll prompt you to look over your request one more time. Once submitted, only Firewall rules can be changed by your team without assistance from Support. 
To contact Support, please email [support@defectdojo.com](mailto:support@defectdojo.com) or follow the instructions in [this article](https://docs.defectdojo.com/en/about_defectdojo/contact_defectdojo_support/). + +![image](images/request_a_trial_6.png) + +After reviewing and accepting DefectDojo's License and Support Agreement, you can click **Checkout With Stripe** or **Contact Sales**. + +* Checkout With Stripe will take you to a Stripe page where you can enter your billing information. +* If you do not wish to enter your billing info at this time, you can click Contact Sales, and our Sales team will be in touch to help you finalize your trial subscription. + +# Once your trial has been approved + +Our Support team will send you a Welcome email with links and an initial password to access your DefectDojo instance. You can always reach out to [support@defectdojo.com](mailto:support@defectdojo.com) for DefectDojo Pro assistance once your trial begins. diff --git a/docs/content/en/about_defectdojo/ui_pro_vs_os.md b/docs/content/en/about_defectdojo/ui_pro_vs_os.md index 99378ee987b..14ef1077f62 100644 --- a/docs/content/en/about_defectdojo/ui_pro_vs_os.md +++ b/docs/content/en/about_defectdojo/ui_pro_vs_os.md @@ -1,60 +1,60 @@ ---- -title: "🎨 Pro UI Changes" -description: "Working with different UIs in DefectDojo" -draft: "false" -weight: 5 -pro-feature: true ---- - -In late 2023, DefectDojo Inc. released a new UI for DefectDojo Pro, which is now the default UI for this edition. - -The Pro UI brings the following enhancements to DefectDojo: - -- Modern and sleek design using Vue.js. -- Optimized data delivery and load times, especially for large datasets. -- Access to new Pro features, including [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), [Universal Importer](/en/connecting_your_tools/external_tools/), and [Pro Metrics](https://docs.defectdojo.com/en/customize_dojo/dashboards/pro_dashboards/) views. 
-- Improved UI workflows: better filtering, dashboards, and navigation. - -## Switching To The Pro UI - -To access the Pro UI, open your User Options menu from the top-right hand corner. You can also switch back to the Classic UI from the same menu. - -![image](images/beta-classic-uis.png) - -## Navigational Changes - -![image](images/pro_ui_overview.png) - -1. The **Sidebar** has been reorganized into four parent categories: Dashboards, Import, Manage, and Settings. - -2. The Homepage, [AI-powered native API connection capabilities](/en/ai/mcp_server_pro/), Pro Metrics, and the Calendar view are all accessible under Dashboards. - -4. Import methods can be found in the Import section: set up [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), use the [Import Scan](/en/connecting_your_tools/import_scan_files/import_scan_ui/) form to Add Findings, use [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/) to handle infrastructure scanning tools, or use our external tools—[Universal Importer and DefectDojo CLI](/en/connecting_your_tools/external_tools/)—to streamline both the import and reimport processes of Findings and associated objects. - -5. The **Manage** section allows you to view different objects in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/), with views for Product Types, Products, Engagements, Tests, Findings, Risk Acceptances, Endpoints, and Components. There are additional sections for generating reports (Report Builder), using surveys (Surveys), as well as a [Rules Engine](/en/customize_dojo/rules_engine/). - -5. The **Settings** section allows you to configure your DefectDojo instance, including your Integrations, License, Cloud Settings, Users, Feature Configuration and admin-level Enterprise Settings. - -6. 
The **Pro Settings** section contains the System Settings, Banner Settings, Notification Settings, Jira Instances, Deduplication Settings, and Authentication Settings, including SAML, OIDC, OAuth, Login, and MFA forms. - -7. The Pro UI also has a **new table format**, used in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/) to help with navigation. Each column can be clicked on to apply a relevant filter, and columns can be reordered to present data however you like. - -8. The table also has a **"Toggle Columns"** menu which can add or remove columns from the table. - -## Filtering the Table - -In this screenshot we are filtering for all Findings that are in “Sam’s Awesome Product.” Once we click Apply, the contents of this Finding list will update to reflect the chosen filter. - -![image](images/pro_ui_sams_filter.png) - -## New Dashboards - -New Metrics visualizations are included in the Pro UI. All of these reports can be filtered and exported as PDFs to share them with a wider audience. - -![image](images/program_insights.png) - -- The **Executive Insights** dashboard displays the current state of your Products and Product Types. -- **Priority Insights** show the most critical findings with the option to filter for various timelines, Product Types, Products, and Tags. -- The **Program Insights** dashboard displays the effectiveness of your security team and the cost savings associated with separating duplicates and false positives from actionable Findings. -- **Remediation Insights** displays your team's effectiveness at remediating Findings. -- **Tool Insights** displays the effectiveness of your tool suite (and Connectors pipelines) at detecting and reporting vulnerabilities. +--- +title: "🎨 Pro UI Changes" +description: "Working with different UIs in DefectDojo" +draft: "false" +weight: 5 +pro-feature: true +--- + +In late 2023, DefectDojo Inc. 
released a new UI for DefectDojo Pro, which is now the default UI for this edition. + +The Pro UI brings the following enhancements to DefectDojo: + +- Modern and sleek design using Vue.js. +- Optimized data delivery and load times, especially for large datasets. +- Access to new Pro features, including [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), [Universal Importer](/en/connecting_your_tools/external_tools/), and [Pro Metrics](https://docs.defectdojo.com/en/customize_dojo/dashboards/pro_dashboards/) views. +- Improved UI workflows: better filtering, dashboards, and navigation. + +## Switching To The Pro UI + +To access the Pro UI, open your User Options menu from the top-right hand corner. You can also switch back to the Classic UI from the same menu. + +![image](images/beta-classic-uis.png) + +## Navigational Changes + +![image](images/pro_ui_overview.png) + +1. The **Sidebar** has been reorganized into four parent categories: Dashboards, Import, Manage, and Settings. + +2. The Homepage, [AI-powered native API connection capabilities](/en/ai/mcp_server_pro/), Pro Metrics, and the Calendar view are all accessible under Dashboards. + +4. Import methods can be found in the Import section: set up [API Connectors](/en/connecting_your_tools/connectors/about_connectors/), use the [Import Scan](/en/connecting_your_tools/import_scan_files/import_scan_ui/) form to Add Findings, use [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/) to handle infrastructure scanning tools, or use our external tools—[Universal Importer and DefectDojo CLI](/en/connecting_your_tools/external_tools/)—to streamline both the import and reimport processes of Findings and associated objects. + +5. 
The **Manage** section allows you to view different objects in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/), with views for Product Types, Products, Engagements, Tests, Findings, Risk Acceptances, Endpoints, and Components. There are additional sections for generating reports (Report Builder), using surveys (Surveys), as well as a [Rules Engine](/en/customize_dojo/rules_engine/). + +5. The **Settings** section allows you to configure your DefectDojo instance, including your Integrations, License, Cloud Settings, Users, Feature Configuration and admin-level Enterprise Settings. + +6. The **Pro Settings** section contains the System Settings, Banner Settings, Notification Settings, Jira Instances, Deduplication Settings, and Authentication Settings, including SAML, OIDC, OAuth, Login, and MFA forms. + +7. The Pro UI also has a **new table format**, used in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/) to help with navigation. Each column can be clicked on to apply a relevant filter, and columns can be reordered to present data however you like. + +8. The table also has a **"Toggle Columns"** menu which can add or remove columns from the table. + +## Filtering the Table + +In this screenshot we are filtering for all Findings that are in “Sam’s Awesome Product.” Once we click Apply, the contents of this Finding list will update to reflect the chosen filter. + +![image](images/pro_ui_sams_filter.png) + +## New Dashboards + +New Metrics visualizations are included in the Pro UI. All of these reports can be filtered and exported as PDFs to share them with a wider audience. + +![image](images/program_insights.png) + +- The **Executive Insights** dashboard displays the current state of your Products and Product Types. +- **Priority Insights** show the most critical findings with the option to filter for various timelines, Product Types, Products, and Tags. 
+- The **Program Insights** dashboard displays the effectiveness of your security team and the cost savings associated with separating duplicates and false positives from actionable Findings. +- **Remediation Insights** displays your team's effectiveness at remediating Findings. +- **Tool Insights** displays the effectiveness of your tool suite (and Connectors pipelines) at detecting and reporting vulnerabilities. diff --git a/docs/content/en/api/api-v2-docs.md b/docs/content/en/api/api-v2-docs.md index 819138bf43a..3bdbb9095f3 100644 --- a/docs/content/en/api/api-v2-docs.md +++ b/docs/content/en/api/api-v2-docs.md @@ -1,283 +1,283 @@ ---- -title: "DefectDojo API v2" -description: "DefectDojo's API lets you automate tasks, e.g. uploading scan reports in CI/CD pipelines." -draft: false -weight: 2 ---- - -DefectDojo\'s API is created using [Django Rest -Framework](http://www.django-rest-framework.org/). The documentation of -each endpoint is available within each DefectDojo installation at -[`/api/v2/oa3/swagger-ui`](https://demo.defectdojo.org/api/v2/oa3/swagger-ui/) and can be accessed by choosing the API v2 -Docs link on the user drop down menu in the header. - -![image](images/api_v2_1.png) - -The documentation is generated using [drf-spectacular](https://drf-spectacular.readthedocs.io/) at [`/api/v2/oa3/swagger-ui/`](https://demo.defectdojo.org/api/v2/oa3/swagger-ui/), and is -interactive. On the top of API v2 docs is a link that generates an OpenAPI v3 spec. - -To interact with the documentation, a valid Authorization header value -is needed. Visit the `/api/key-v2` view to generate your -API Key (`Token `) and copy the header value provided. - -![image](images/api_v2_2.png) - -Each section allows you to make calls to the API and view the Request -URL, Response Body, Response Code and Response Headers. - -![image](images/api_v2_3.png) - -If you're logged in to the Defect Dojo web UI, you do not need to provide the authorization token. 
- -## Authentication - -The API uses header authentication with API key. The format of the -header should be: : - - Authorization: Token - -For example: : - - Authorization: Token c8572a5adf107a693aa6c72584da31f4d1f1dcff - -### Alternative authentication method - -If you use [an alternative authentication method](en/customize_dojo/user_management/configure_sso/ for users, you may want to disable DefectDojo API tokens because it could bypass your authentication concept. \ -Using of DefectDojo API tokens can be disabled by specifying the environment variable `DD_API_TOKENS_ENABLED` to `False`. -Or only `api/v2/api-token-auth/` endpoint can be disabled by setting `DD_API_TOKEN_AUTH_ENDPOINT_ENABLED` to `False`. - -## Sample Code - -Here are some simple python examples and their results produced against -the `/users` endpoint: : - -{{< highlight python >}} -import requests - -url = 'http://127.0.0.1:8000/api/v2/users' -headers = {'content-type': 'application/json', - 'Authorization': 'Token c8572a5adf107a693aa6c72584da31f4d1f1dcff'} -r = requests.get(url, headers=headers, verify=True) # set verify to False if ssl cert is self-signed - -for key, value in r.__dict__.items(): - print(f"'{key}': '{value}'") - print('------------------') -{{< /highlight >}} - -This code will return the list of all the users defined in DefectDojo. 
-The json object result looks like : : - -{{< highlight json >}} - [ - { - "first_name": "Tyagi", - "id": 22, - "last_login": "2019-06-18T08:05:51.925743", - "last_name": "Paz", - "username": "dev7958" - }, - { - "first_name": "saurabh", - "id": 31, - "last_login": "2019-06-06T11:44:32.533035", - "last_name": "", - "username": "saurabh.paz" - } - ] -{{< /highlight >}} - -Here is another example against the `/users` endpoint, this -time we will filter the results to include only the users whose user -name includes `jay`: - -{{< highlight python >}} -import requests - -url = 'http://127.0.0.1:8000/api/v2/users/?username__contains=jay' -headers = {'content-type': 'application/json', - 'Authorization': 'Token c8572a5adf107a693aa6c72584da31f4d1f1dcff'} -r = requests.get(url, headers=headers, verify=True) # set verify to False if ssl cert is self-signed - -for key, value in r.__dict__.items(): - print(f"'{key}': '{value}'") - print('------------------') -{{< /highlight >}} - -The json object result is: : - -{{< highlight json >}} -[ - { - "first_name": "Jay", - "id": 22, - "last_login": "2015-10-28T08:05:51.925743", - "last_name": "Paz", - "username": "jay7958" - }, - { - "first_name": "", - "id": 31, - "last_login": "2015-10-13T11:44:32.533035", - "last_name": "", - "username": "jay.paz" - } -] -{{< /highlight >}} - -See [Django Rest Framework\'s documentation on interacting with an -API](https://www.django-rest-framework.org/) for -additional examples and tips. - -## Manually calling the API - -Tools like Postman can be used for testing the API. - -Example for importing a scan result: - -- Verb: POST -- URI: -- Headers tab: - - add the authentication header - : - Key: Authorization - - Value: Token c8572a5adf107a693aa6c72584da31f4d1f1dcff - -- Body tab - - - select \"form-data\", click \"bulk edit\". 
Example for a ZAP scan: - - - - engagement:3 - verified:true - active:true - lead:1 - tags:test - scan_type:ZAP Scan - minimum_severity:Info - close_old_findings:false - -- Body tab - - - Click \"Key-value\" edit - - Add a \"file\" parameter of type \"file\". This will trigger - multi-part form data for sending the file content - - Browse for the file to upload - -- Click send - -## Clients / API Wrappers - -| Wrapper | Status | Notes | -| -----------------------------| ------------------------| ------------------------| -| [Specific python wrapper](https://github.com/DefectDojo/defectdojo_api) | working (2021-01-21) | API Wrapper including scripts for continous CI/CD uploading. Is lagging behind a bit on latest API features as we plan to revamp the API wrapper | -| [Openapi python wrapper](https://github.com/alles-klar/defectdojo-api-v2-client) | | proof of concept only where we found out the the OpenAPI spec is not perfect yet | -| [Java library](https://github.com/secureCodeBox/defectdojo-client-java) | working (2021-08-30) | Created by the kind people of [SecureCodeBox](https://github.com/secureCodeBox/secureCodeBox) | -| [Image using the Java library](https://github.com/SDA-SE/defectdojo-client) | working (2021-08-30) | | -| [.Net/C# library](https://www.nuget.org/packages/DefectDojo.Api/) | working (2021-06-08) | | -| [dd-import](https://github.com/MaibornWolff/dd-import) | working (2021-08-24) | dd-import is not directly an API wrapper. It offers some convenience functions to make it easier to import findings and language data from CI/CD pipelines. | - -Some of the api wrappers contain quite a bit of logic to ease scanning and importing in CI/CD environments. We are in the process of simplifying this by making the DefectDojo API smarter (so api wrappers / script can be dumber). 
- -## API Notes - -### Import / Reimport - -**Reimport** is actually the easiest way to get started as it will create any entities on the fly if needed and it will automatically detect if it is a first time upload or a re-upload. - -## Import -Importing via the API is performed via the [import-scan](https://demo.defectdojo.org/api/v2/doc/) endpoint. - -As described in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy), Test gets created inside an Engagement, inside a Product, inside a Product Type. - -An import can be performed by specifying the names of these entities in the API request: - - -```JSON -{ - "minimum_severity": 'Info', - "active": True, - "verified": True, - "scan_type": 'ZAP Scan', - "test_title": 'Manual ZAP Scan by John', - "product_type_name": 'Good Products', - "product_name": 'My little product', - "engagement_name": 'Important import', - "auto_create_context": True, -} -``` - -When `auto_create_context` is `True`, the product, engagement, and environment will be created if needed. Make sure your user has sufficient [permissions](/en/customize_dojo/user_management/about_perms_and_roles/) to do this. - -A classic way of importing a scan is by specifying the ID of the engagement instead: - -```JSON -{ - "minimum_severity": 'Info', - "active": True, - "verified": True, - "scan_type": 'ZAP Scan', - "test_title": 'Manual ZAP Scan by John', - "engagement": 123, -} -``` - -## Reimport -ReImporting via the API is performed via the [reimport-scan](https://demo.defectdojo.org/api/v2/doc/) endpoint. 
- -A reimport can be performed by specifying the names of these entities in the API request: - - -```JSON -{ - "minimum_severity": 'Info', - "active": True, - "verified": True, - "scan_type": 'ZAP Scan', - "test_title": 'Manual ZAP Scan by John', - "product_type_name": 'Good Products', - "product_name": 'My little product', - "engagement_name": 'Important import', - "auto_create_context": True, - "do_not_reactivate": False, -} -``` - -When `auto_create_context` is `True`, the Product Type, Product and Engagement will be created if they do not already exist. Make sure your user has sufficient [permissions](/en/customize_dojo/user_management/about_perms_and_roles/) to create a Product/Product Type. - -When `do_not_reactivate` is `True`, the importing/reimporting will ignore uploaded active findings and not reactivate previously closed findings, while still creating new findings if there are new ones. You will get a note on the finding to explain that it was not reactivated for that reason. - -A reimport will automatically select the latest test inside the provided engagement that satisifes the provided `scan_type` and (optionally) provided `test_title`. - -If no existing Test is found, the reimport endpoint will use the import function to import the provided report into a new Test. This means a (CI/CD) script using the API doesn't need to know if a Test already exists, or if it is a first time upload for this Product / Engagement. - -A classic way of reimporting a scan is by specifying the ID of the test instead: - -```JSON -{ - "minimum_severity": 'Info', - "active": True, - "verified": True, - "scan_type": 'ZAP Scan', - "test": 123, -} -``` - -## Using the Scan Completion Date (API: `scan_date`) field - -DefectDojo offers a plethora of supported scanner reports, but not all of them contain the -information most important to a user. 
The `scan_date` field is a flexible smart feature that -allows users to set the completion date of the a given scan report, and have it propagate -down to all the findings imported. This field is **not** mandatory, but the default value for -this field is the date of import (whenever the request is processed and a successful response is returned). - -Here are the following use cases for using this field: - -1. The report **does not** set the date, and `scan_date` is **not** set at import - - Finding date will be the default value of `scan_date` -2. The report **sets** the date, and the `scan_date` is **not** set at import - - Finding date will be whatever the report sets -3. The report **does not** set the date, and the `scan_date` is **set** at import - - Finding date will be whatever the user set for `scan_date` -4. The report **sets** the date, and the `scan_date` is **set** at import - - Finding date will be whatever the user set for `scan_date` +--- +title: "DefectDojo API v2" +description: "DefectDojo's API lets you automate tasks, e.g. uploading scan reports in CI/CD pipelines." +draft: false +weight: 2 +--- + +DefectDojo\'s API is created using [Django Rest +Framework](http://www.django-rest-framework.org/). The documentation of +each endpoint is available within each DefectDojo installation at +[`/api/v2/oa3/swagger-ui`](https://demo.defectdojo.org/api/v2/oa3/swagger-ui/) and can be accessed by choosing the API v2 +Docs link on the user drop down menu in the header. + +![image](images/api_v2_1.png) + +The documentation is generated using [drf-spectacular](https://drf-spectacular.readthedocs.io/) at [`/api/v2/oa3/swagger-ui/`](https://demo.defectdojo.org/api/v2/oa3/swagger-ui/), and is +interactive. On the top of API v2 docs is a link that generates an OpenAPI v3 spec. + +To interact with the documentation, a valid Authorization header value +is needed. Visit the `/api/key-v2` view to generate your +API Key (`Token `) and copy the header value provided. 
+ +![image](images/api_v2_2.png) + +Each section allows you to make calls to the API and view the Request +URL, Response Body, Response Code and Response Headers. + +![image](images/api_v2_3.png) + +If you're logged in to the Defect Dojo web UI, you do not need to provide the authorization token. + +## Authentication + +The API uses header authentication with API key. The format of the +header should be: : + + Authorization: Token + +For example: : + + Authorization: Token c8572a5adf107a693aa6c72584da31f4d1f1dcff + +### Alternative authentication method + +If you use [an alternative authentication method](/en/customize_dojo/user_management/configure_sso/) for users, you may want to disable DefectDojo API tokens because it could bypass your authentication concept. \ +Use of DefectDojo API tokens can be disabled by setting the environment variable `DD_API_TOKENS_ENABLED` to `False`. +Alternatively, only the `api/v2/api-token-auth/` endpoint can be disabled by setting `DD_API_TOKEN_AUTH_ENDPOINT_ENABLED` to `False`. + +## Sample Code + +Here are some simple python examples and their results produced against +the `/users` endpoint: : + +{{< highlight python >}} +import requests + +url = 'http://127.0.0.1:8000/api/v2/users' +headers = {'content-type': 'application/json', + 'Authorization': 'Token c8572a5adf107a693aa6c72584da31f4d1f1dcff'} +r = requests.get(url, headers=headers, verify=True) # set verify to False if ssl cert is self-signed + +for key, value in r.__dict__.items(): + print(f"'{key}': '{value}'") + print('------------------') +{{< /highlight >}} + +This code will return the list of all the users defined in DefectDojo. 
+The json object result looks like : : + +{{< highlight json >}} + [ + { + "first_name": "Tyagi", + "id": 22, + "last_login": "2019-06-18T08:05:51.925743", + "last_name": "Paz", + "username": "dev7958" + }, + { + "first_name": "saurabh", + "id": 31, + "last_login": "2019-06-06T11:44:32.533035", + "last_name": "", + "username": "saurabh.paz" + } + ] +{{< /highlight >}} + +Here is another example against the `/users` endpoint, this +time we will filter the results to include only the users whose user +name includes `jay`: + +{{< highlight python >}} +import requests + +url = 'http://127.0.0.1:8000/api/v2/users/?username__contains=jay' +headers = {'content-type': 'application/json', + 'Authorization': 'Token c8572a5adf107a693aa6c72584da31f4d1f1dcff'} +r = requests.get(url, headers=headers, verify=True) # set verify to False if ssl cert is self-signed + +for key, value in r.__dict__.items(): + print(f"'{key}': '{value}'") + print('------------------') +{{< /highlight >}} + +The json object result is: : + +{{< highlight json >}} +[ + { + "first_name": "Jay", + "id": 22, + "last_login": "2015-10-28T08:05:51.925743", + "last_name": "Paz", + "username": "jay7958" + }, + { + "first_name": "", + "id": 31, + "last_login": "2015-10-13T11:44:32.533035", + "last_name": "", + "username": "jay.paz" + } +] +{{< /highlight >}} + +See [Django Rest Framework\'s documentation on interacting with an +API](https://www.django-rest-framework.org/) for +additional examples and tips. + +## Manually calling the API + +Tools like Postman can be used for testing the API. + +Example for importing a scan result: + +- Verb: POST +- URI: +- Headers tab: + + add the authentication header + : - Key: Authorization + - Value: Token c8572a5adf107a693aa6c72584da31f4d1f1dcff + +- Body tab + + - select \"form-data\", click \"bulk edit\". 
Example for a ZAP scan:
+
+
+
+    engagement:3
+    verified:true
+    active:true
+    lead:1
+    tags:test
+    scan_type:ZAP Scan
+    minimum_severity:Info
+    close_old_findings:false
+
+- Body tab
+
+    - Click \"Key-value\" edit
+    - Add a \"file\" parameter of type \"file\". This will trigger
+      multi-part form data for sending the file content
+    - Browse for the file to upload
+
+- Click send
+
+## Clients / API Wrappers
+
+| Wrapper | Status | Notes |
+| -----------------------------| ------------------------| ------------------------|
+| [Specific python wrapper](https://github.com/DefectDojo/defectdojo_api) | working (2021-01-21) | API Wrapper including scripts for continuous CI/CD uploading. Is lagging behind a bit on latest API features as we plan to revamp the API wrapper |
+| [Openapi python wrapper](https://github.com/alles-klar/defectdojo-api-v2-client) | | proof of concept only where we found out that the OpenAPI spec is not perfect yet |
+| [Java library](https://github.com/secureCodeBox/defectdojo-client-java) | working (2021-08-30) | Created by the kind people of [SecureCodeBox](https://github.com/secureCodeBox/secureCodeBox) |
+| [Image using the Java library](https://github.com/SDA-SE/defectdojo-client) | working (2021-08-30) | |
+| [.Net/C# library](https://www.nuget.org/packages/DefectDojo.Api/) | working (2021-06-08) | |
+| [dd-import](https://github.com/MaibornWolff/dd-import) | working (2021-08-24) | dd-import is not directly an API wrapper. It offers some convenience functions to make it easier to import findings and language data from CI/CD pipelines. |
+
+Some of the api wrappers contain quite a bit of logic to ease scanning and importing in CI/CD environments. We are in the process of simplifying this by making the DefectDojo API smarter (so api wrappers / script can be dumber).
+ +## API Notes + +### Import / Reimport + +**Reimport** is actually the easiest way to get started as it will create any entities on the fly if needed and it will automatically detect if it is a first time upload or a re-upload. + +## Import +Importing via the API is performed via the [import-scan](https://demo.defectdojo.org/api/v2/doc/) endpoint. + +As described in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy), Test gets created inside an Engagement, inside a Product, inside a Product Type. + +An import can be performed by specifying the names of these entities in the API request: + + +```JSON +{ + "minimum_severity": 'Info', + "active": True, + "verified": True, + "scan_type": 'ZAP Scan', + "test_title": 'Manual ZAP Scan by John', + "product_type_name": 'Good Products', + "product_name": 'My little product', + "engagement_name": 'Important import', + "auto_create_context": True, +} +``` + +When `auto_create_context` is `True`, the product, engagement, and environment will be created if needed. Make sure your user has sufficient [permissions](/en/customize_dojo/user_management/about_perms_and_roles/) to do this. + +A classic way of importing a scan is by specifying the ID of the engagement instead: + +```JSON +{ + "minimum_severity": 'Info', + "active": True, + "verified": True, + "scan_type": 'ZAP Scan', + "test_title": 'Manual ZAP Scan by John', + "engagement": 123, +} +``` + +## Reimport +ReImporting via the API is performed via the [reimport-scan](https://demo.defectdojo.org/api/v2/doc/) endpoint. 
+
+A reimport can be performed by specifying the names of these entities in the API request:
+
+
+```JSON
+{
+  "minimum_severity": 'Info',
+  "active": True,
+  "verified": True,
+  "scan_type": 'ZAP Scan',
+  "test_title": 'Manual ZAP Scan by John',
+  "product_type_name": 'Good Products',
+  "product_name": 'My little product',
+  "engagement_name": 'Important import',
+  "auto_create_context": True,
+  "do_not_reactivate": False,
+}
+```
+
+When `auto_create_context` is `True`, the Product Type, Product and Engagement will be created if they do not already exist. Make sure your user has sufficient [permissions](/en/customize_dojo/user_management/about_perms_and_roles/) to create a Product/Product Type.
+
+When `do_not_reactivate` is `True`, the importing/reimporting will ignore uploaded active findings and not reactivate previously closed findings, while still creating new findings if there are new ones. You will get a note on the finding to explain that it was not reactivated for that reason.
+
+A reimport will automatically select the latest test inside the provided engagement that satisfies the provided `scan_type` and (optionally) provided `test_title`.
+
+If no existing Test is found, the reimport endpoint will use the import function to import the provided report into a new Test. This means a (CI/CD) script using the API doesn't need to know if a Test already exists, or if it is a first time upload for this Product / Engagement.
+
+A classic way of reimporting a scan is by specifying the ID of the test instead:
+
+```JSON
+{
+  "minimum_severity": 'Info',
+  "active": True,
+  "verified": True,
+  "scan_type": 'ZAP Scan',
+  "test": 123,
+}
+```
+
+## Using the Scan Completion Date (API: `scan_date`) field
+
+DefectDojo offers a plethora of supported scanner reports, but not all of them contain the
+information most important to a user. 
The `scan_date` field is a flexible smart feature that
+allows users to set the completion date of a given scan report, and have it propagate
+down to all the findings imported. This field is **not** mandatory, but the default value for
+this field is the date of import (whenever the request is processed and a successful response is returned).
+
+Here are the following use cases for using this field:
+
+1. The report **does not** set the date, and `scan_date` is **not** set at import
+   - Finding date will be the default value of `scan_date`
+2. The report **sets** the date, and the `scan_date` is **not** set at import
+   - Finding date will be whatever the report sets
+3. The report **does not** set the date, and the `scan_date` is **set** at import
+   - Finding date will be whatever the user set for `scan_date`
+4. The report **sets** the date, and the `scan_date` is **set** at import
+   - Finding date will be whatever the user set for `scan_date`
diff --git a/docs/content/en/connecting_your_tools/external_tools.md b/docs/content/en/connecting_your_tools/external_tools.md
index 18f39813091..6611c2f059a 100644
--- a/docs/content/en/connecting_your_tools/external_tools.md
+++ b/docs/content/en/connecting_your_tools/external_tools.md
@@ -1,924 +1,924 @@
----
-title: "External Tools: Universal Importer & DefectDojo-CLI (Pro)"
-description: "Import files to DefectDojo from the command line"
-draft: false
-weight: 2
----
-
-Note: The following external tools are DefectDojo Pro-only features. These binaries will not work unless they are connected to an instance with a DefectDojo Pro license.
-
-## About External Tools
-
-`defectdojo-cli` and `universal-importer` are command-line tools designed to streamline both the import and re-import processes of Findings and associated objects, making it ideal for users who want to quickly set up these interactions with the DefectDojo API.
- -DefectDojo-CLI has the same functionality as Universal Importer, but also includes the ability to export Findings from DefectDojo to JSON or CSV. - -## Installation - -1. Locate “External Tools” from your User Profile menu: - -2. Download the appropriate binary for your operating system from the platform. - -![image](images/external-tools.png) - -3. Extract the downloaded archive within a directory of your choice. Optionally, add the directory containing the extracted binary to your system's $PATH for repeat access. - -**Note that Macintosh users may be blocked from running DefectDojo-CLI or Universal Importer as they are apps from an unidentified developer. See [Apple Support](https://support.apple.com/en-ca/guide/mac-help/mh40616/mac) for instructions on how to override the block from Apple.** - -**Windows Users: If you receive the "Couldn't download - virus detected" error, disabling Smartscreen may work. Otherwise, use a different browser to download the tool from the Cloud portal.** - -## Configuration - -Universal Importer & DefectDojo-CLI can be configured using flags, environment variables, or a configuration file. The most important configuration is the API token, which must be set as an environment variable: - -1. Add your API key to your environment variables. -You can retrieve your API key from: `https://YOUR_INSTANCE.cloud.defectdojo.com/api/key-v2` - -or - -Via the DefectDojo user interface -in the user dropdown in the top-right corner: - -![image](images/api-token.png) - -2. Set your environment variable for the API token. - -**For DefectDojo-CLI:** - `export DD_CLI_API_TOKEN=YOUR_API_KEY` - -**For Universal Importer:** - `export DD_IMPORTER_DOJO_API_TOKEN=YOUR_API_KEY` - -Note: On Windows, use `set` instead of `export`. - -### Windows: Using PowerShell - -1. Open PowerShell (Windows Key, then search for "PowerShell"). -2. 
Set the environment variables: - - **Temporary:** - ```powershell - $env:DD_IMPORTER_DOJO_API_TOKEN = "[VALUE_FROM_DEFECTDOJO_API]" - $env:DD_IMPORTER_DEFECTDOJO_URL=”[e.g. http://localhost:8080/defectdojo]” - ``` - - **Permanent:** - ```powershell - [Environment]::SetEnvironmentVariable("DD_IMPORTER_DOJO_API_TOKEN", "[VALUE_FROM_DEFECTDOJO_API]", "Machine") - ``` -3. Restart your PowerShell session. -4. Verify the setting: - ```powershell - echo $env:DD_IMPORTER_DOJO_API_TOKEN - echo $env:DD_IMPORTER_DEFECTDOJO_URL - ``` - -### Windows: Using Command Prompt (Administrative Accounts) -1. Open Command Prompt (Windows Key, then search for "Command Prompt"). -2. Set the environment variables: - - **Temporary:** - ```cmd - set DD_IMPORTER_DOJO_API_TOKEN = "[VALUE_FROM_DEFECTDOJO_API]" - set DD_IMPORTER_DEFECTDOJO_URL=”[e.g. http://localhost:8080/defectdojo]” - ``` - - **Permanent:** - ```cmd - setx DD_IMPORTER_DOJO_API_TOKEN = "[VALUE_FROM_DEFECTDOJO_API]" - setx DD_IMPORTER_DEFECTDOJO_URL=”[e.g. http://localhost:8080/defectdojo]” - ``` - -### Using Windows Settings (Non-Administrative Accounts) -1. Press `Win + I` to open the system settings dialog. -2. In the search box, type "environment". -3. Choose "Edit Environment variables for your account". -4. Under "User variables for [username]", click the "New…" button. -5. Set the variable: - - **Variable name:** `DD_IMPORTER_DOJO_API_TOKEN` - - **Variable value:** `[VALUE_FROM_DEFECTDOJO_API]` -6. Click "OK". -7. Repeat steps 4 through 6 for the DD_IMPORTER_DEFECTDOJO_URL variable -8. Restart any open command windows. -9. Verify the settings: - ```cmd - echo %DD_IMPORTER_DOJO_API_TOKEN% - echo %DD_IMPORTER_DEFECTDOJO_URL% - ``` - -## DefectDojo-CLI - -`defectdojo-cli` seamlessly integrates scan results into DefectDojo, streamlining the import and reimport processes of Findings and associated objects. 
Designed for ease of use, the tool supports various endpoints, catering to both initial imports and subsequent reimports — ideal for users requiring robust and flexible interaction with the DefectDojo API. DefectDojo-CLI can perform the same functions as `universal-importer`, and adds export functionality for Findings. - -### Commands - -- [`import`](./#import) Imports findings into DefectDojo. -- [`reimport`](./#reimport) Reimports findings into DefectDojo. -- [`export`](./#export) Exports findings from DefectDojo. -- [`interactive`](./#interactive) Starts an interactive mode to configure the import and reimport process, step by - -### Global Options - -`--help, -h` -* show help - -`--version, -v` -* print the version - -#### CLI Formatting - -`--no-color` -* Disable color output. (default: false) `[$DD_CLI_NO_COLOR]` -`--no-emojis, --no-emoji` - -* Disable emojis in the output. (default: false) `[$DD_CLI_NO_EMOJIS]` - -* `--verbose` -Enable verbose output. (default: false) `[$DD_CLI_VERBOSE]` - -### Import - -Use the import command to import new findings into DefectDojo. - -#### Usage - -``` -defectdojo-cli [global options] import [optional flags] - or: defectdojo-cli [global options] import --config ./config-file-path - or: defectdojo-cli import [-h | --help] - or: defectdojo-cli import example [subcommand options] - or: defectdojo-cli import example [-h | --help] - ->> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. -``` - -`import` can import Findings in two ways: - -**By ID:** -* Create a Product (or use an existing product) -* Create an Engagement inside the product -* Provide the id of the Engagement in the engagement parameter - -In this scenario, a new Test will be created inside the Engagement. 
- -**By Name:** - -* Create a Product (or use an existing product) -* Create an Engagement inside the product -* Provide product-name -* Provide engagement-name -* Optionally provide product-type-name - -In this scenario, DefectDojo will look up the Engagement by the provided details. - -When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. -You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. - - -**Import Basic syntax:** -``` -defectdojo-cli import [options] -``` - -#### **Import Example:** -``` -defectdojo-cli import \ ---defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ ---scan-type "burp scan" \ ---report-path "./examples/burp_findings.xml" \ ---product-name "dev" \ ---engagement-name "dev" \ ---product-type-name "Research and Development" \ ---test-name "burp-test-dev" \ ---verified \ ---active \ ---minimum-severity "info" \ ---tag "dev" --tag "tools" --tag "burp" --tag "test-dev" \ ---test-version "0.0.1" \ ---auto-create-context -``` - -#### Commands -`example, x` -* Shows an example of required and optional flags for import operation - -#### Options - -`--active, -a` -* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. (default: unset) `[$DD_CLI_ACTIVE]` - -`--api-scan-configuration value, --asc value` -* The ID of the API Scan Configuration object to use when importing or reimporting. 
(default: 0) `[$DD_CLI_API_SCAN_CONFIGURATION]` - -`--apply-tags-endpoints, --te` -* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) -`[$DD_CLI_APPLY_TAGS_ENDPOINTS]` - -`--apply-tags-findings, --tf` -* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_CLI_APPLY_TAGS_FINDINGS]` - -`--auto-create-context, --acc` -* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_CLI_AUTO_CREATE_CONTEXT]` - -`--close-old-findings, --cof` -* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the Findings for this Service will be closed. [$DD_CLI_CLOSE_OLD_FINDINGS] - -`--close-old-findings-product-scope, --cofps` -* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_CLI_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] - -`--deduplication-on-engagement, --doe` -* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_CLI_DEDUPLICATION_ON_ENGAGEMENT]` - -`--engagement-id value, --ei value` -* The ID of the Engagement to import findings into. (default: 0) `[$DD_CLI_ENGAGEMENT_ID]` - -`--engagement-name value, -e value` -* The name of the Engagement to import findings into. `[$DD_CLI_ENGAGEMENT_NAME]` - -`--minimum-severity value, --ms value` -* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_CLI_MINIMUM_SEVERITY]` - -`--product-name value, -p value` -* The name of the Product to import findings into. 
`[$DD_CLI_PRODUCT_NAME]` - -`--product-type-name value, --pt value` -* The name of the Product Type to import findings into. `[$DD_CLI_PRODUCT_TYPE_NAME]` - -`--report-path value, -r value` -* The path to the report to import. (required). `[$DD_CLI_REPORT_PATH]` - -`--scan-type value, -s value` -* The scan type of the tool (required). `[$DD_CLI_SCAN_TYPE]` - -`--tag value, -t value [ --tag value, -t value ]` -* Any tags to be applied to the Test object `[$DD_CLI_TAGS]` - -`--test-name value, --tn value` -* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_CLI_TEST_NAME]` - -`--test-version value, -V value` -* The version of the test. `[$DD_CLI_TEST_VERSION]` - -`--verified, -v` -* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. `[$DD_CLI_VERIFIED]` - -**Settings:** - -`--config value, -c value` -* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` -`--defectdojo-url value, -u value` -* The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` -* --insecure-tls, --no-tls ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. 
(default: false) `[$DD_CLI_INSECURE_TLS]` - -### Reimport - -Use the `reimport` command to extend an existing Test with Findings from a new report in one of two ways: - -By ID: -- Create a Product (or use an existing product) -- Create an Engagement inside the product -- Import a scan report and find the id of the Test -- Provide this in the test-id parameter - -By Names: -- Create a Product (or use an existing product) -- Create an Engagement inside the product -- Import a report which will create a Test -- Provide product-name -- Provide engagement-name -- Optional: Provide test-name - -In this scenario, DefectDojo will look up the Test by the provided details. If no test-name is provided, the latest test inside the engagement will be chosen based on scan-type. - -When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. -You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. - -#### Usage - -``` -defectdojo-cli [global options] reimport [optional flags] - or: defectdojo-cli [global options] reimport --config ./config-file-path - or: defectdojo-cli reimport [-h | --help] - or: defectdojo-cli reimport example [subcommand options] - or: defectdojo-cli reimport example [-h | --help] - ->> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. 
-``` - -#### **Reimport Example:** - -``` -defectdojo-cli reimport \ ---defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ ---scan-type "Nancy Scan" \ ---report-path "./examples/nancy_findings.json" \ ---test-id 11 \ ---verified \ ---active \ ---minimum-severity "info" \ ---tag "dev" --tag "tools" --tag "nancy" --tag "test-dev" \ ---test-version "1.0" \ ---auto-create-context -``` - -#### Commands - -``` -example, x Shows an example of required and optional flags for reimport operation -``` - -#### Options - -`--active, -a` -* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_CLI_ACTIVE]` - -`--api-scan-configuration value, --asc value` - -* The ID of the API Scan Configuration object to use when importing or reimporting. (default: 0) `[$DD_CLI_API_SCAN_CONFIGURATION]` - -`--apply-tags-endpoints, --te` -* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) `[$DD_CLI_APPLY_TAGS_ENDPOINTS]` - -`--apply-tags-findings, --tf` -* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_CLI_APPLY_TAGS_FINDINGS]` - -`--auto-create-context, --acc` -* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_CLI_AUTO_CREATE_CONTEXT]` - -`--close-old-findings, --cof` -* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the findings for this Service will be closed.[$DD_CLI_CLOSE_OLD_FINDINGS] - -`--close-old-findings-product-scope, --cofps` -* Select if --close-old-findings applies to **all** Findings of the same type in the Product. 
By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_CLI_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] - -`--deduplication-on-engagement, --doe` -* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_CLI_DEDUPLICATION_ON_ENGAGEMENT]` - -`--engagement-name value, -e value` -* The name of the Engagement to import findings into. `[$DD_CLI_ENGAGEMENT_NAME]` - -`--minimum-severity value, --ms value` -* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_CLI_MINIMUM_SEVERITY]` - -`--product-name value, -p value` -* The name of the Product to import findings into. `[$DD_CLI_PRODUCT_NAME]` - -`--product-type-name value, --pt value` -* The name of the Product Type to import findings into. `[$DD_CLI_PRODUCT_TYPE_NAME]` - -`--report-path value, -r value` -* The path to the report to import. (required). `[$DD_CLI_REPORT_PATH]` - -`--scan-type value, -s value` -* The scan type of the tool (required). `[$DD_CLI_SCAN_TYPE]` - -`--tag value, -t value [ --tag value, -t value ]` -* Any tags to be applied to the Test object `[$DD_CLI_TAGS]` - -`--test-id value, --ti value` -* The ID of the Test to reimport findings into. (default: 0) `[$DD_CLI_TEST_ID]` - -`--test-name value, --tn value` -* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_CLI_TEST_NAME]` - -`--test-version value, -V value` -* The version of the test. `[$DD_CLI_TEST_VERSION]` - -`--verified, -v` -* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. 
`[$DD_CLI_VERIFIED]` - -**Settings:** - -`--config value, -c value` -* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` - -`--defectdojo-url value, -u value` -* The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` - -`--insecure-tls, --no-tls` -* ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_CLI_INSECURE_TLS]` - -### Export - -#### Usage - -``` -defectdojo-cli export [optional options] - or: defectdojo-cli [global options] export --defectdojo-url --json ./output_file_path.json [optional filters] - or: defectdojo-cli [global options] export --defectdojo-url --csv ./output_file_path.csv [optional filters] - or: defectdojo-cli [global options] export --defectdojo-url --json ./output_file_path.json --csv ./output_file_path.csv [optional filters] - or: defectdojo-cli [global options] export --config ./config-file-path - or: defectdojo-cli [global options] export --config ./config-file-path --json ./output_file_path.json - or: defectdojo-cli [global options] export --config ./config-file-path --csv ./output_file_path.csv - or: defectdojo-cli export [-h | --help] - or: defectdojo-cli export example [subcommand options] - or: defectdojo-cli export example [-h | --help] - ->> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. -``` - -To export Findings from DefectDojo-CLI, you will need to supply a configuration file which contains details explaining which Findings you wish to export. This is similar to the GET Findings method via the API. - -For assistance use `defectdojo-cli export --help`. - -#### **Export Example** - -This example specifies the URL, export format and a few filter parameters to create a list of Findings. 
- -``` -defectdojo-cli export \ ---defectdojo-url "https://your-dojo-instance.cloud.defectdojo.com/" ---json "./path/to/findings.json" \ ---active "true" \ ---created "Past 90 days" -``` - -#### Commands - -`example, x` -* Shows an example of required and optional flags for export operation - -`help, h` -* Shows a list of commands or help for one command - -#### Options - -**Findings Filters:** - -`--active true|false, -a true|false` -* Findings by active status. `[$DD_CLI_FINDINGS_FILTERS_ACTIVE]` - -`--created value` -* Findings by created date. Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_CREATED]` - -`--cvssv3-score value` -* Findings by CVSS v3 score. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_CVSSV3_SCORE]` - -`--cwe value` -* Findings by CWE ID. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_CWE]` - -`--date value` -* Findings by date. Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_DATE]` - -`--discovered-after value` -* Findings by discovered after the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_AFTER]` - -`--discovered-before value` -* Findings by discovered before the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_BEFORE]` - -`--discovered-on value` -* Findings by discovered date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_ON]` - -`--duplicate true|false` -* Findings by duplicated status. `[$DD_CLI_FINDINGS_FILTERS_DUPLICATE]` - -`--engagement-ids value [ --engagement-ids value ]` -* Findings by engagement IDs. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_ENGAGEMENT]` - -`--epss-percentile value` -* Findings by EPSS percentile. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_EPSS_PERCENTILE]` - -`--epss-score value` -* Findings by EPSS score. 
(default: ignored) `[$DD_CLI_FINDINGS_FILTERS_EPSS_SCORE]` - -`--false-positive true|false` -* Findings by false positive status. `[$DD_CLI_FINDINGS_FILTERS_FALSE_POSITIVE]` - -`--is-mitigated true|false` -* Findings by mitigation status. `[$DD_CLI_FINDINGS_FILTERS_IS_MITIGATED]` - -`--mitigated value` -* Findings by the date range in which they were marked mitigated Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_MITIGATED]` - -`--mitigated-after value` -* Findings by mitigation after the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_AFTER]` - -`--mitigated-before value` -* Findings by mitigation before the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BEFORE]` - -`--mitigated-by-ids value [ --mitigated-by-ids value ]` -* Findings by mitigated_by user IDs. This flag can be used multiple times or as a comma-separated list. Could be combined with --mitigated-by-names. `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BY_IDS]` - -`--mitigated-by-names value [ --mitigated-by-names value ]` -* Findings by mitigated_by user names. This flag can be used multiple times or as a comma-separated list. Could be combined with --mitigated-by-ids. `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BY_NAMES]` - -`--mitigated-on value` -* Findings by mitigation date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_ON]` - -`--not-tags value [ --not-tags value ]` -* Findings by tags that should not be present. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_NOT_TAGS]` - -`--out-of-scope true|false` -* Findings by out of scope or in scope status. `[$DD_CLI_FINDINGS_FILTERS_OUT_OF_SCOPE]` - -`--out-of-sla true|false` -* Findings by outside or inside SLA status. `[$DD_CLI_FINDINGS_FILTERS_OUT_OF_SLA]` - -`--product-name value` -* Findings by product name. 
`[$DD_CLI_FINDINGS_FILTERS_PRODUCT_NAME]` - -`--product-name-contains value` -* Findings by product name contains. `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_NAME_CONTAINS]` - -`--product-type-ids value [ --product-type-ids value ]` -* Findings by product type IDs. This flag can be used multiple times or as a comma-separated list. Could be combined with --product-type-names `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_TYPE_IDS]` - -`--product-type-names value [ --product-type-names value ]` -* Findings by product type names. This flag can be used multiple times or as a comma-separated list. Could be combined with --product-type-ids `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_TYPE_NAMES]` - -`--risk-accepted true|false` -* Findings by risk accepted status. `[$DD_CLI_FINDINGS_FILTERS_RISK_ACCEPTED]` - -`--severity value [ --severity value ]` -* Findings by severity. Valid values are: Critical, High, Medium, Low, Info. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_SEVERITY]` - -`--tags value [ --tags value ]` -* Findings by tags that should be present. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_TAGS]` - -`--test-id value` -* Findings by test ID. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_TEST_ID]` - -`--title-contains value` -* Findings by containing the given string in their title. `[$DD_CLI_FINDINGS_FILTERS_TITLE_CONTAINS]` - -`--under-review true|false` -* Findings by under review status. `[$DD_CLI_FINDINGS_FILTERS_UNDER_REVIEW]` - -`--verified true|false` -* Findings by verified status. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_VERIFIED]` - -`--vulnerability-id value [ --vulnerability-id value ]` -* Findings by vulnerability ID. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_VULNERABILITY_ID]` - -**Findings Output** - -`--csv value` -* Path of the file where the CSV file of the findings will be written. 
`[$DD_CLI_FINDINGS_OUTPUT_CSV_PATH_FILE]` - -`--json value` Path of the file where the JSON file of the findings will be written. `[$DD_CLI_FINDINGS_OUTPUT_JSON_PATH_FILE]` - -**Settings** - -`--config value, -c value` -The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` - -`--defectdojo-url value, -u value` -The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` - -`--insecure-tls, --no-tls` -ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_CLI_INSECURE_TLS]` - -#### Export Example: - -``` -defectdojo-cli export \ ---defectdojo-url "https://your-dojo-instance.cloud.defectdojo.com/" -``` - -### Interactive - -Interactive mode allows you to configure import and reimport process, step-by-step. - -#### Usage - -``` -defectdojo-cli interactive - or: defectdojo-cli interactive [--skip-intro] [--no-full-screen] [--log-path] - or: defectdojo-cli interactive [-h | --help] -``` - -#### Options - -`--skip-intro ` -* Skip the intro screen (default: false) - -`--no-full-screen` -* Disable full screen mode (default: false) - -`--log-path value` -* Path to the log file - -`--help, -h` -* show help - -## Universal Importer - -`universal-importer` seamlessly integrates scan results into DefectDojo, streamlining both the import and reimport processes of findings and associated objects. Designed for ease of use, the tool supports various endpoints, catering to both initial imports and subsequent reimports — ideal for users requiring robust and flexible interaction with the DefectDojo API. - -While similar to DefectDojo-CLI, Universal Importer does not have the Export functionality, and environment variables are encoded differently. 
- -### Commands - -- [`import`](./#import-1) Imports findings into DefectDojo. -- [`reimport`](./#reimport-1) Reimports findings into DefectDojo. -- [`interactive`](./#interactive-1) Starts an interactive mode to configure the import and reimport process, step by - -### Global Options - -`--help, -h` -* show help - -`--version, -v` -* print the version - -#### CLI Formatting - -`--no-color` -* Disable color output. (default: false) `[$DD_IMPORTER_NO_COLOR]` - -`--no-emojis, --no-emoji` -* Disable emojis in the output. (default: false) `[$DD_IMPORTER_NO_EMOJIS]` - -`--verbose` -* Enable verbose output. (default: false) `[$DD_IMPORTER_VERBOSE]` - -### Import - -Use the import command to import new findings into DefectDojo. - -#### Usage - -``` -universal-importer [global options] import [optional flags] - or: universal-importer [global options] import --config ./config-file-path - or: universal-importer import [-h | --help] - or: universal-importer import example [subcommand options] - or: universal-importer import example [-h | --help] - ->> The API token must be set in the environment variable `DD_IMPORTER_DOJO_API_TOKEN`. -``` - -`import` can import Findings in two ways: - -**By ID:** -* Create a Product (or use an existing product) -* Create an Engagement inside the product -* Provide the id of the Engagement in the engagement parameter - -In this scenario a new Test will be created inside the Engagement. - -**By Name:** -* Create a Product (or use an existing product) -* Create an Engagement inside the product -* Provide product-name -* Provide engagement-name -* Optionally provide product-type-name - -In this scenario DefectDojo will look up the Engagement by the provided details. - -When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. -You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
- - -**Import Basic syntax:** - -``` -universal-importer import [options] -``` - -#### **Import Example:** - -``` -universal-importer import \ ---defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ ---scan-type "burp scan" \ ---report-path "./examples/burp_findings.xml" \ ---product-name "dev" \ ---engagement-name "dev" \ ---product-type-name "Research and Development" \ ---test-name "burp-test-dev" \ ---verified \ ---active \ ---minimum-severity "info" \ ---tag "dev" --tag "tools" --tag "burp" --tag "test-dev" \ ---test-version "0.0.1" \ ---auto-create-context -``` - -#### Commands - -`example, x` -* Shows an example of required and optional flags for import operation - -#### Options - -`--active, -a` -* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_IMPORTER_ACTIVE]` - -`--api-scan-configuration value, --asc value` -* The ID of the API Scan Configuration object to use when importing or reimporting. (default: 0) `[$DD_IMPORTER_API_SCAN_CONFIGURATION]` - -`--apply-tags-endpoints, --te` -* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) -`[$DD_IMPORTER_APPLY_TAGS_ENDPOINTS]` - -`--apply-tags-findings, --tf` -* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_IMPORTER_APPLY_TAGS_FINDINGS]` - -`--auto-create-context, --acc` -* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_IMPORTER_AUTO_CREATE_CONTEXT]` - -`--close-old-findings, --cof` -* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the findings for this Service will be closed. 
[$DD_IMPORTER_CLOSE_OLD_FINDINGS] - -`--close-old-findings-product-scope, --cofps` -* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_IMPORTER_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] - -`--deduplication-on-engagement, --doe` -* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_IMPORTER_DEDUPLICATION_ON_ENGAGEMENT]` - -`--engagement-id value, --ei value` -* The ID of the Engagement to import findings into. (default: 0) `[$DD_IMPORTER_ENGAGEMENT_ID]` - -`--engagement-name value, -e value` -* The name of the Engagement to import findings into. `[$DD_IMPORTER_ENGAGEMENT_NAME]` - -`--minimum-severity value, --ms value` -* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_IMPORTER_MINIMUM_SEVERITY]` - -`--product-name value, -p value` -* The name of the Product to import findings into. `[$DD_IMPORTER_PRODUCT_NAME]` - -`--product-type-name value, --pt value` -* The name of the Product Type to import findings into. `[$DD_IMPORTER_PRODUCT_TYPE_NAME]` - -`--report-path value, -r value` -* The path to the report to import. (required). `[$DD_IMPORTER_REPORT_PATH]` - -`--scan-type value, -s value` -* The scan type of the tool (required). `[$DD_IMPORTER_SCAN_TYPE]` - -`--tag value, -t value [ --tag value, -t value ]` -* Any tags to be applied to the Test object `[$DD_IMPORTER_TAGS]` - -`--test-name value, --tn value` -* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_IMPORTER_TEST_NAME]` - -`--test-version value, -V value` -* The version of the test. `[$DD_IMPORTER_TEST_VERSION]` - -`--verified, -v` -* Dictates whether Findings should be set to Verified on import. 
A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. `[$DD_IMPORTER_VERIFIED]` - -**Settings:** - -`--config value, -c value` -* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_IMPORTER_CONFIG_FILE]` -`--defectdojo-url value, -u value` -* The URL of the DefectDojo instance to import findings into. (required). `[$DD_IMPORTER_DEFECTDOJO_URL]` -* --insecure-tls, --no-tls ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_IMPORTER_INSECURE_TLS]` - -### Reimport - -Use the `reimport` command to extend an existing Test with Findings from a new report in one of two ways: - -By ID: -- Create a Product (or use an existing product) -- Create an Engagement inside the product -- Import a scan report and find the id of the Test -- Provide this in the test-id parameter - -By Names: -- Create a Product (or use an existing product) -- Create an Engagement inside the product -- Import a report which will create a Test -- Provide product-name -- Provide engagement-name -- Optional: Provide test-name - -In this scenario DefectDojo will look up the Test by the provided details. If no test-name is provided, the latest test inside the engagement will be chosen based on scan-type. - -When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. -You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
- -#### Usage - -``` -universal-importer [global options] reimport [optional flags] - or: universal-importer [global options] reimport --config ./config-file-path - or: universal-importer reimport [-h | --help] - or: universal-importer reimport example [subcommand options] - or: universal-importer reimport example [-h | --help] - ->> The API token must be set in the environment variable `DD_IMPORTER_DOJO_API_TOKEN`. -``` - -#### **Reimport Example:** - -``` -universal-importer reimport \ ---defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ ---scan-type "Nancy Scan" \ ---report-path "./examples/nancy_findings.json" \ ---test-id 11 \ ---verified \ ---active \ ---minimum-severity "info" \ ---tag "dev" --tag "tools" --tag "nancy" --tag "test-dev" \ ---test-version "1.0" \ ---auto-create-context -``` - -#### Commands - -``` -example, x Shows an example of required and optional flags for reimport operation -``` - -#### Options - -`--active, -a` -* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_IMPORTER_ACTIVE]` - -`--api-scan-configuration value, --asc value` -* The ID of the API Scan Configuration object to use when importing or reimporting. 
(default: 0) `[$DD_IMPORTER_API_SCAN_CONFIGURATION]` - -`--apply-tags-endpoints, --te` -* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) `[$DD_IMPORTER_APPLY_TAGS_ENDPOINTS]` - -`--apply-tags-findings, --tf` -* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_IMPORTER_APPLY_TAGS_FINDINGS]` - -`--auto-create-context, --acc` -* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_IMPORTER_AUTO_CREATE_CONTEXT]` - -`--close-old-findings, --cof` -* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the Findings for this Service will be closed. [$DD_IMPORTER_CLOSE_OLD_FINDINGS] - -`--close-old-findings-product-scope, --cofps` -* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_IMPORTER_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] - -`--deduplication-on-engagement, --doe` -* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_IMPORTER_DEDUPLICATION_ON_ENGAGEMENT]` - -`--engagement-name value, -e value` -* The name of the Engagement to import findings into. `[$DD_IMPORTER_ENGAGEMENT_NAME]` - -`--minimum-severity value, --ms value` -* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_IMPORTER_MINIMUM_SEVERITY]` - -`--product-name value, -p value` -* The name of the Product to import findings into. `[$DD_IMPORTER_PRODUCT_NAME]` - -`--product-type-name value, --pt value` -* The name of the Product Type to import findings into. 
`[$DD_IMPORTER_PRODUCT_TYPE_NAME]` - -`--report-path value, -r value` -* The path to the report to import. (required). `[$DD_IMPORTER_REPORT_PATH]` - -`--scan-type value, -s value` -* The scan type of the tool (required). `[$DD_IMPORTER_SCAN_TYPE]` - -`--tag value, -t value [ --tag value, -t value ]` -* Any tags to be applied to the Test object `[$DD_IMPORTER_TAGS]` - -`--test-id value, --ti value` -* The ID of the Test to reimport findings into. (default: 0) `[$DD_IMPORTER_TEST_ID]` - -`--test-name value, --tn value` -* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_IMPORTER_TEST_NAME]` - -`--test-version value, -V value` -* The version of the test. `[$DD_IMPORTER_TEST_VERSION]` - -`--verified, -v` -* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. (default: unset) `[$DD_IMPORTER_VERIFIED]` - -**Settings:** - -`--config value, -c value` -* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_IMPORTER_CONFIG_FILE]` - -`--defectdojo-url value, -u value` -* The URL of the DefectDojo instance to import findings into. (required). `[$DD_IMPORTER_DEFECTDOJO_URL]` - -`--insecure-tls, --no-tls` -* ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_IMPORTER_INSECURE_TLS]` - -### Interactive -Interactive mode allows you to configure import and reimport process, step-by-step. 
- -#### Usage - -``` -universal-importer interactive - or: universal-importer interactive [--skip-intro] [--no-full-screen] [--log-path] - or: universal-importer interactive [-h | --help] -``` - -#### Options - -`--skip-intro ` -* Skip the intro screen (default: false) - -`--no-full-screen` -* Disable full screen mode (default: false) -`--log-path value` -* Path to the log file -`--help, -h` -* show help - - -## Troubleshooting - -If you encounter any issues with these tools, please check the following: -- Ensure you're using the correct binary for your operating system and CPU architecture. -- Verify that the API key is set correctly in your environment variables. -- Check that the DefectDojo URL is correct and accessible. -- When importing, confirm that the report file exists and is in the supported format for the specified scan type. You can review the supported scanners for DefectDojo on our [supported tools list](/supported_tools). +--- +title: "External Tools: Universal Importer & DefectDojo-CLI (Pro)" +description: "Import files to DefectDojo from the command line" +draft: false +weight: 2 +--- + +Note: The following external tools are DefectDojo Pro-only features. These binaries will not work unless they are connected to an instance with a DefectDojo Pro license. + +## About External Tools + +`defectdojo-cli` and `universal-importer` are command-line tools designed to streamline both the import and re-import processes of Findings and associated objects, making it ideal for users who want to quickly set up these interactions with the DefectDojo API. + +DefectDojo-CLI has the same functionality as Universal Importer, but also includes the ability to export Findings from DefectDojo to JSON or CSV. + +## Installation + +1. Locate “External Tools” from your User Profile menu: + +2. Download the appropriate binary for your operating system from the platform. + +![image](images/external-tools.png) + +3. 
Extract the downloaded archive within a directory of your choice. Optionally, add the directory containing the extracted binary to your system's $PATH for repeat access. + +**Note that Macintosh users may be blocked from running DefectDojo-CLI or Universal Importer as they are apps from an unidentified developer. See [Apple Support](https://support.apple.com/en-ca/guide/mac-help/mh40616/mac) for instructions on how to override the block from Apple.** + +**Windows Users: If you receive the "Couldn't download - virus detected" error, disabling Smartscreen may work. Otherwise, use a different browser to download the tool from the Cloud portal.** + +## Configuration + +Universal Importer & DefectDojo-CLI can be configured using flags, environment variables, or a configuration file. The most important configuration is the API token, which must be set as an environment variable: + +1. Add your API key to your environment variables. +You can retrieve your API key from: `https://YOUR_INSTANCE.cloud.defectdojo.com/api/key-v2` + +or + +Via the DefectDojo user interface +in the user dropdown in the top-right corner: + +![image](images/api-token.png) + +2. Set your environment variable for the API token. + +**For DefectDojo-CLI:** + `export DD_CLI_API_TOKEN=YOUR_API_KEY` + +**For Universal Importer:** + `export DD_IMPORTER_DOJO_API_TOKEN=YOUR_API_KEY` + +Note: On Windows, use `set` instead of `export`. + +### Windows: Using PowerShell + +1. Open PowerShell (Windows Key, then search for "PowerShell"). +2. Set the environment variables: + - **Temporary:** + ```powershell + $env:DD_IMPORTER_DOJO_API_TOKEN = "[VALUE_FROM_DEFECTDOJO_API]" + $env:DD_IMPORTER_DEFECTDOJO_URL = "[e.g. http://localhost:8080/defectdojo]" + ``` + - **Permanent:** + ```powershell + [Environment]::SetEnvironmentVariable("DD_IMPORTER_DOJO_API_TOKEN", "[VALUE_FROM_DEFECTDOJO_API]", "Machine") + ``` +3. Restart your PowerShell session. +4. 
Verify the setting: + ```powershell + echo $env:DD_IMPORTER_DOJO_API_TOKEN + echo $env:DD_IMPORTER_DEFECTDOJO_URL + ``` + +### Windows: Using Command Prompt (Administrative Accounts) +1. Open Command Prompt (Windows Key, then search for "Command Prompt"). +2. Set the environment variables: + - **Temporary:** + ```cmd + set DD_IMPORTER_DOJO_API_TOKEN="[VALUE_FROM_DEFECTDOJO_API]" + set DD_IMPORTER_DEFECTDOJO_URL="[e.g. http://localhost:8080/defectdojo]" + ``` + - **Permanent:** + ```cmd + setx DD_IMPORTER_DOJO_API_TOKEN "[VALUE_FROM_DEFECTDOJO_API]" + setx DD_IMPORTER_DEFECTDOJO_URL "[e.g. http://localhost:8080/defectdojo]" + ``` + +### Using Windows Settings (Non-Administrative Accounts) +1. Press `Win + I` to open the system settings dialog. +2. In the search box, type "environment". +3. Choose "Edit Environment variables for your account". +4. Under "User variables for [username]", click the "New…" button. +5. Set the variable: + - **Variable name:** `DD_IMPORTER_DOJO_API_TOKEN` + - **Variable value:** `[VALUE_FROM_DEFECTDOJO_API]` +6. Click "OK". +7. Repeat steps 4 through 6 for the DD_IMPORTER_DEFECTDOJO_URL variable. +8. Restart any open command windows. +9. Verify the settings: + ```cmd + echo %DD_IMPORTER_DOJO_API_TOKEN% + echo %DD_IMPORTER_DEFECTDOJO_URL% + ``` + +## DefectDojo-CLI + +`defectdojo-cli` seamlessly integrates scan results into DefectDojo, streamlining the import and reimport processes of Findings and associated objects. Designed for ease of use, the tool supports various endpoints, catering to both initial imports and subsequent reimports — ideal for users requiring robust and flexible interaction with the DefectDojo API. DefectDojo-CLI can perform the same functions as `universal-importer`, and adds export functionality for Findings. + +### Commands + +- [`import`](./#import) Imports findings into DefectDojo. +- [`reimport`](./#reimport) Reimports findings into DefectDojo. +- [`export`](./#export) Exports findings from DefectDojo. 
+- [`interactive`](./#interactive) Starts an interactive mode to configure the import and reimport process, step by step. + +### Global Options + +`--help, -h` +* show help + +`--version, -v` +* print the version + +#### CLI Formatting + +`--no-color` +* Disable color output. (default: false) `[$DD_CLI_NO_COLOR]` + +`--no-emojis, --no-emoji` +* Disable emojis in the output. (default: false) `[$DD_CLI_NO_EMOJIS]` + +`--verbose` +* Enable verbose output. (default: false) `[$DD_CLI_VERBOSE]` + +### Import + +Use the import command to import new findings into DefectDojo. + +#### Usage + +``` +defectdojo-cli [global options] import [optional flags] + or: defectdojo-cli [global options] import --config ./config-file-path + or: defectdojo-cli import [-h | --help] + or: defectdojo-cli import example [subcommand options] + or: defectdojo-cli import example [-h | --help] + +>> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. +``` + +`import` can import Findings in two ways: + +**By ID:** +* Create a Product (or use an existing product) +* Create an Engagement inside the product +* Provide the id of the Engagement in the engagement parameter + +In this scenario, a new Test will be created inside the Engagement. + +**By Name:** + +* Create a Product (or use an existing product) +* Create an Engagement inside the product +* Provide product-name +* Provide engagement-name +* Optionally provide product-type-name + +In this scenario, DefectDojo will look up the Engagement by the provided details. + +When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. +You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
+ + +**Import Basic syntax:** +``` +defectdojo-cli import [options] +``` + +#### **Import Example:** +``` +defectdojo-cli import \ +--defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ +--scan-type "burp scan" \ +--report-path "./examples/burp_findings.xml" \ +--product-name "dev" \ +--engagement-name "dev" \ +--product-type-name "Research and Development" \ +--test-name "burp-test-dev" \ +--verified \ +--active \ +--minimum-severity "info" \ +--tag "dev" --tag "tools" --tag "burp" --tag "test-dev" \ +--test-version "0.0.1" \ +--auto-create-context +``` + +#### Commands +`example, x` +* Shows an example of required and optional flags for import operation + +#### Options + +`--active, -a` +* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. (default: unset) `[$DD_CLI_ACTIVE]` + +`--api-scan-configuration value, --asc value` +* The ID of the API Scan Configuration object to use when importing or reimporting. (default: 0) `[$DD_CLI_API_SCAN_CONFIGURATION]` + +`--apply-tags-endpoints, --te` +* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) +`[$DD_CLI_APPLY_TAGS_ENDPOINTS]` + +`--apply-tags-findings, --tf` +* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_CLI_APPLY_TAGS_FINDINGS]` + +`--auto-create-context, --acc` +* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_CLI_AUTO_CREATE_CONTEXT]` + +`--close-old-findings, --cof` +* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the Findings for this Service will be closed. 
[$DD_CLI_CLOSE_OLD_FINDINGS] + +`--close-old-findings-product-scope, --cofps` +* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_CLI_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] + +`--deduplication-on-engagement, --doe` +* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_CLI_DEDUPLICATION_ON_ENGAGEMENT]` + +`--engagement-id value, --ei value` +* The ID of the Engagement to import findings into. (default: 0) `[$DD_CLI_ENGAGEMENT_ID]` + +`--engagement-name value, -e value` +* The name of the Engagement to import findings into. `[$DD_CLI_ENGAGEMENT_NAME]` + +`--minimum-severity value, --ms value` +* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_CLI_MINIMUM_SEVERITY]` + +`--product-name value, -p value` +* The name of the Product to import findings into. `[$DD_CLI_PRODUCT_NAME]` + +`--product-type-name value, --pt value` +* The name of the Product Type to import findings into. `[$DD_CLI_PRODUCT_TYPE_NAME]` + +`--report-path value, -r value` +* The path to the report to import. (required). `[$DD_CLI_REPORT_PATH]` + +`--scan-type value, -s value` +* The scan type of the tool (required). `[$DD_CLI_SCAN_TYPE]` + +`--tag value, -t value [ --tag value, -t value ]` +* Any tags to be applied to the Test object `[$DD_CLI_TAGS]` + +`--test-name value, --tn value` +* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_CLI_TEST_NAME]` + +`--test-version value, -V value` +* The version of the test. `[$DD_CLI_TEST_VERSION]` + +`--verified, -v` +* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. 
If no value is set, Verified status will instead rely on the incoming report file. `[$DD_CLI_VERIFIED]` + +**Settings:** + +`--config value, -c value` +* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` +`--defectdojo-url value, -u value` +* The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` +* --insecure-tls, --no-tls ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_CLI_INSECURE_TLS]` + +### Reimport + +Use the `reimport` command to extend an existing Test with Findings from a new report in one of two ways: + +By ID: +- Create a Product (or use an existing product) +- Create an Engagement inside the product +- Import a scan report and find the id of the Test +- Provide this in the test-id parameter + +By Names: +- Create a Product (or use an existing product) +- Create an Engagement inside the product +- Import a report which will create a Test +- Provide product-name +- Provide engagement-name +- Optional: Provide test-name + +In this scenario, DefectDojo will look up the Test by the provided details. If no test-name is provided, the latest test inside the engagement will be chosen based on scan-type. + +When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. +You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
+ +#### Usage + +``` +defectdojo-cli [global options] reimport [optional flags] + or: defectdojo-cli [global options] reimport --config ./config-file-path + or: defectdojo-cli reimport [-h | --help] + or: defectdojo-cli reimport example [subcommand options] + or: defectdojo-cli reimport example [-h | --help] + +>> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. +``` + +#### **Reimport Example:** + +``` +defectdojo-cli reimport \ +--defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ +--scan-type "Nancy Scan" \ +--report-path "./examples/nancy_findings.json" \ +--test-id 11 \ +--verified \ +--active \ +--minimum-severity "info" \ +--tag "dev" --tag "tools" --tag "nancy" --tag "test-dev" \ +--test-version "1.0" \ +--auto-create-context +``` + +#### Commands + +``` +example, x Shows an example of required and optional flags for reimport operation +``` + +#### Options + +`--active, -a` +* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_CLI_ACTIVE]` + +`--api-scan-configuration value, --asc value` + +* The ID of the API Scan Configuration object to use when importing or reimporting. 
(default: 0) `[$DD_CLI_API_SCAN_CONFIGURATION]` + +`--apply-tags-endpoints, --te` +* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) `[$DD_CLI_APPLY_TAGS_ENDPOINTS]` + +`--apply-tags-findings, --tf` +* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_CLI_APPLY_TAGS_FINDINGS]` + +`--auto-create-context, --acc` +* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_CLI_AUTO_CREATE_CONTEXT]` + +`--close-old-findings, --cof` +* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the findings for this Service will be closed.[$DD_CLI_CLOSE_OLD_FINDINGS] + +`--close-old-findings-product-scope, --cofps` +* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_CLI_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] + +`--deduplication-on-engagement, --doe` +* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_CLI_DEDUPLICATION_ON_ENGAGEMENT]` + +`--engagement-name value, -e value` +* The name of the Engagement to import findings into. `[$DD_CLI_ENGAGEMENT_NAME]` + +`--minimum-severity value, --ms value` +* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_CLI_MINIMUM_SEVERITY]` + +`--product-name value, -p value` +* The name of the Product to import findings into. `[$DD_CLI_PRODUCT_NAME]` + +`--product-type-name value, --pt value` +* The name of the Product Type to import findings into. `[$DD_CLI_PRODUCT_TYPE_NAME]` + +`--report-path value, -r value` +* The path to the report to import. 
(required). `[$DD_CLI_REPORT_PATH]` + +`--scan-type value, -s value` +* The scan type of the tool (required). `[$DD_CLI_SCAN_TYPE]` + +`--tag value, -t value [ --tag value, -t value ]` +* Any tags to be applied to the Test object `[$DD_CLI_TAGS]` + +`--test-id value, --ti value` +* The ID of the Test to reimport findings into. (default: 0) `[$DD_CLI_TEST_ID]` + +`--test-name value, --tn value` +* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_CLI_TEST_NAME]` + +`--test-version value, -V value` +* The version of the test. `[$DD_CLI_TEST_VERSION]` + +`--verified, -v` +* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. `[$DD_CLI_VERIFIED]` + +**Settings:** + +`--config value, -c value` +* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` + +`--defectdojo-url value, -u value` +* The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` + +`--insecure-tls, --no-tls` +* ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. 
(default: false) `[$DD_CLI_INSECURE_TLS]` + +### Export + +#### Usage + +``` +defectdojo-cli export [optional options] + or: defectdojo-cli [global options] export --defectdojo-url --json ./output_file_path.json [optional filters] + or: defectdojo-cli [global options] export --defectdojo-url --csv ./output_file_path.csv [optional filters] + or: defectdojo-cli [global options] export --defectdojo-url --json ./output_file_path.json --csv ./output_file_path.csv [optional filters] + or: defectdojo-cli [global options] export --config ./config-file-path + or: defectdojo-cli [global options] export --config ./config-file-path --json ./output_file_path.json + or: defectdojo-cli [global options] export --config ./config-file-path --csv ./output_file_path.csv + or: defectdojo-cli export [-h | --help] + or: defectdojo-cli export example [subcommand options] + or: defectdojo-cli export example [-h | --help] + +>> The API token must be set in the environment variable `DD_CLI_API_TOKEN`. +``` + +To export Findings from DefectDojo-CLI, you will need to supply a configuration file which contains details explaining which Findings you wish to export. This is similar to the GET Findings method via the API. + +For assistance use `defectdojo-cli export --help`. + +#### **Export Example** + +This example specifies the URL, export format and a few filter parameters to create a list of Findings. + +``` +defectdojo-cli export \ +--defectdojo-url "https://your-dojo-instance.cloud.defectdojo.com/" +--json "./path/to/findings.json" \ +--active "true" \ +--created "Past 90 days" +``` + +#### Commands + +`example, x` +* Shows an example of required and optional flags for export operation + +`help, h` +* Shows a list of commands or help for one command + +#### Options + +**Findings Filters:** + +`--active true|false, -a true|false` +* Findings by active status. `[$DD_CLI_FINDINGS_FILTERS_ACTIVE]` + +`--created value` +* Findings by created date. 
Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_CREATED]` + +`--cvssv3-score value` +* Findings by CVSS v3 score. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_CVSSV3_SCORE]` + +`--cwe value` +* Findings by CWE ID. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_CWE]` + +`--date value` +* Findings by date. Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_DATE]` + +`--discovered-after value` +* Findings by discovered after the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_AFTER]` + +`--discovered-before value` +* Findings by discovered before the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_BEFORE]` + +`--discovered-on value` +* Findings by discovered date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_DISCOVERED_ON]` + +`--duplicate true|false` +* Findings by duplicated status. `[$DD_CLI_FINDINGS_FILTERS_DUPLICATE]` + +`--engagement-ids value [ --engagement-ids value ]` +* Findings by engagement IDs. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_ENGAGEMENT]` + +`--epss-percentile value` +* Findings by EPSS percentile. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_EPSS_PERCENTILE]` + +`--epss-score value` +* Findings by EPSS score. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_EPSS_SCORE]` + +`--false-positive true|false` +* Findings by false positive status. `[$DD_CLI_FINDINGS_FILTERS_FALSE_POSITIVE]` + +`--is-mitigated true|false` +* Findings by mitigation status. 
`[$DD_CLI_FINDINGS_FILTERS_IS_MITIGATED]` + +`--mitigated value` +* Findings by the date range in which they were marked mitigated Supported values: None, Today, Past 7 days, Past 30 days, Past 90 days, Current month, Current year, Past year `[$DD_CLI_FINDINGS_FILTERS_MITIGATED]` + +`--mitigated-after value` +* Findings by mitigation after the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_AFTER]` + +`--mitigated-before value` +* Findings by mitigation before the specified date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BEFORE]` + +`--mitigated-by-ids value [ --mitigated-by-ids value ]` +* Findings by mitigated_by user IDs. This flag can be used multiple times or as a comma-separated list. Could be combined with --mitigated-by-names. `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BY_IDS]` + +`--mitigated-by-names value [ --mitigated-by-names value ]` +* Findings by mitigated_by user names. This flag can be used multiple times or as a comma-separated list. Could be combined with --mitigated-by-ids. `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_BY_NAMES]` + +`--mitigated-on value` +* Findings by mitigation date. Format: YYYY-MM-DD `[$DD_CLI_FINDINGS_FILTERS_MITIGATED_ON]` + +`--not-tags value [ --not-tags value ]` +* Findings by tags that should not be present. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_NOT_TAGS]` + +`--out-of-scope true|false` +* Findings by out of scope or in scope status. `[$DD_CLI_FINDINGS_FILTERS_OUT_OF_SCOPE]` + +`--out-of-sla true|false` +* Findings by outside or inside SLA status. `[$DD_CLI_FINDINGS_FILTERS_OUT_OF_SLA]` + +`--product-name value` +* Findings by product name. `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_NAME]` + +`--product-name-contains value` +* Findings by product name contains. `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_NAME_CONTAINS]` + +`--product-type-ids value [ --product-type-ids value ]` +* Findings by product type IDs. 
This flag can be used multiple times or as a comma-separated list. Could be combined with --product-type-names `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_TYPE_IDS]` + +`--product-type-names value [ --product-type-names value ]` +* Findings by product type names. This flag can be used multiple times or as a comma-separated list. Could be combined with --product-type-ids `[$DD_CLI_FINDINGS_FILTERS_PRODUCT_TYPE_NAMES]` + +`--risk-accepted true|false` +* Findings by risk accepted status. `[$DD_CLI_FINDINGS_FILTERS_RISK_ACCEPTED]` + +`--severity value [ --severity value ]` +* Findings by severity. Valid values are: Critical, High, Medium, Low, Info. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_SEVERITY]` + +`--tags value [ --tags value ]` +* Findings by tags that should be present. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_TAGS]` + +`--test-id value` +* Findings by test ID. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_TEST_ID]` + +`--title-contains value` +* Findings by containing the given string in their title. `[$DD_CLI_FINDINGS_FILTERS_TITLE_CONTAINS]` + +`--under-review true|false` +* Findings by under review status. `[$DD_CLI_FINDINGS_FILTERS_UNDER_REVIEW]` + +`--verified true|false` +* Findings by verified status. (default: ignored) `[$DD_CLI_FINDINGS_FILTERS_VERIFIED]` + +`--vulnerability-id value [ --vulnerability-id value ]` +* Findings by vulnerability ID. This flag can be used multiple times or as a comma-separated list. `[$DD_CLI_FINDINGS_FILTERS_VULNERABILITY_ID]` + +**Findings Output** + +`--csv value` +* Path of the file where the CSV file of the findings will be written. `[$DD_CLI_FINDINGS_OUTPUT_CSV_PATH_FILE]` + +`--json value` Path of the file where the JSON file of the findings will be written. 
`[$DD_CLI_FINDINGS_OUTPUT_JSON_PATH_FILE]` + +**Settings** + +`--config value, -c value` +The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_CLI_CONFIG_FILE]` + +`--defectdojo-url value, -u value` +The URL of the DefectDojo instance to import findings into. (required). `[$DD_CLI_DEFECTDOJO_URL]` + +`--insecure-tls, --no-tls` +ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_CLI_INSECURE_TLS]` + +#### Export Example: + +``` +defectdojo-cli export \ +--defectdojo-url "https://your-dojo-instance.cloud.defectdojo.com/" +``` + +### Interactive + +Interactive mode allows you to configure import and reimport process, step-by-step. + +#### Usage + +``` +defectdojo-cli interactive + or: defectdojo-cli interactive [--skip-intro] [--no-full-screen] [--log-path] + or: defectdojo-cli interactive [-h | --help] +``` + +#### Options + +`--skip-intro ` +* Skip the intro screen (default: false) + +`--no-full-screen` +* Disable full screen mode (default: false) + +`--log-path value` +* Path to the log file + +`--help, -h` +* show help + +## Universal Importer + +`universal-importer` seamlessly integrates scan results into DefectDojo, streamlining both the import and reimport processes of findings and associated objects. Designed for ease of use, the tool supports various endpoints, catering to both initial imports and subsequent reimports — ideal for users requiring robust and flexible interaction with the DefectDojo API. + +While similar to DefectDojo-CLI, Universal Importer does not have the Export functionality, and environment variables are encoded differently. + +### Commands + +- [`import`](./#import-1) Imports findings into DefectDojo. +- [`reimport`](./#reimport-1) Reimports findings into DefectDojo. 
+- [`interactive`](./#interactive-1) Starts an interactive mode to configure the import and reimport process, step by step. + +### Global Options + +`--help, -h` +* show help + +`--version, -v` +* print the version + +#### CLI Formatting + +`--no-color` +* Disable color output. (default: false) `[$DD_IMPORTER_NO_COLOR]` + +`--no-emojis, --no-emoji` +* Disable emojis in the output. (default: false) `[$DD_IMPORTER_NO_EMOJIS]` + +`--verbose` +* Enable verbose output. (default: false) `[$DD_IMPORTER_VERBOSE]` + +### Import + +Use the import command to import new findings into DefectDojo. + +#### Usage + +``` +universal-importer [global options] import [optional flags] + or: universal-importer [global options] import --config ./config-file-path + or: universal-importer import [-h | --help] + or: universal-importer import example [subcommand options] + or: universal-importer import example [-h | --help] + +>> The API token must be set in the environment variable `DD_IMPORTER_DOJO_API_TOKEN`. +``` + +`import` can import Findings in two ways: + +**By ID:** +* Create a Product (or use an existing product) +* Create an Engagement inside the product +* Provide the id of the Engagement in the engagement parameter + +In this scenario a new Test will be created inside the Engagement. + +**By Name:** +* Create a Product (or use an existing product) +* Create an Engagement inside the product +* Provide product-name +* Provide engagement-name +* Optionally provide product-type-name + +In this scenario DefectDojo will look up the Engagement by the provided details. + +When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. +You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
+ + +**Import Basic syntax:** + +``` +universal-importer import [options] +``` + +#### **Import Example:** + +``` +universal-importer import \ +--defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ +--scan-type "burp scan" \ +--report-path "./examples/burp_findings.xml" \ +--product-name "dev" \ +--engagement-name "dev" \ +--product-type-name "Research and Development" \ +--test-name "burp-test-dev" \ +--verified \ +--active \ +--minimum-severity "info" \ +--tag "dev" --tag "tools" --tag "burp" --tag "test-dev" \ +--test-version "0.0.1" \ +--auto-create-context +``` + +#### Commands + +`example, x` +* Shows an example of required and optional flags for import operation + +#### Options + +`--active, -a` +* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_IMPORTER_ACTIVE]` + +`--api-scan-configuration value, --asc value` +* The ID of the API Scan Configuration object to use when importing or reimporting. (default: 0) `[$DD_IMPORTER_API_SCAN_CONFIGURATION]` + +`--apply-tags-endpoints, --te` +* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) +`[$DD_IMPORTER_APPLY_TAGS_ENDPOINTS]` + +`--apply-tags-findings, --tf` +* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_IMPORTER_APPLY_TAGS_FINDINGS]` + +`--auto-create-context, --acc` +* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_IMPORTER_AUTO_CREATE_CONTEXT]` + +`--close-old-findings, --cof` +* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the findings for this Service will be closed. 
[$DD_IMPORTER_CLOSE_OLD_FINDINGS] + +`--close-old-findings-product-scope, --cofps` +* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_IMPORTER_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] + +`--deduplication-on-engagement, --doe` +* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_IMPORTER_DEDUPLICATION_ON_ENGAGEMENT]` + +`--engagement-id value, --ei value` +* The ID of the Engagement to import findings into. (default: 0) `[$DD_IMPORTER_ENGAGEMENT_ID]` + +`--engagement-name value, -e value` +* The name of the Engagement to import findings into. `[$DD_IMPORTER_ENGAGEMENT_NAME]` + +`--minimum-severity value, --ms value` +* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_IMPORTER_MINIMUM_SEVERITY]` + +`--product-name value, -p value` +* The name of the Product to import findings into. `[$DD_IMPORTER_PRODUCT_NAME]` + +`--product-type-name value, --pt value` +* The name of the Product Type to import findings into. `[$DD_IMPORTER_PRODUCT_TYPE_NAME]` + +`--report-path value, -r value` +* The path to the report to import. (required). `[$DD_IMPORTER_REPORT_PATH]` + +`--scan-type value, -s value` +* The scan type of the tool (required). `[$DD_IMPORTER_SCAN_TYPE]` + +`--tag value, -t value [ --tag value, -t value ]` +* Any tags to be applied to the Test object `[$DD_IMPORTER_TAGS]` + +`--test-name value, --tn value` +* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_IMPORTER_TEST_NAME]` + +`--test-version value, -V value` +* The version of the test. `[$DD_IMPORTER_TEST_VERSION]` + +`--verified, -v` +* Dictates whether Findings should be set to Verified on import. 
A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. `[$DD_IMPORTER_VERIFIED]` + +**Settings:** + +`--config value, -c value` +* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_IMPORTER_CONFIG_FILE]` +`--defectdojo-url value, -u value` +* The URL of the DefectDojo instance to import findings into. (required). `[$DD_IMPORTER_DEFECTDOJO_URL]` +* --insecure-tls, --no-tls ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_IMPORTER_INSECURE_TLS]` + +### Reimport + +Use the `reimport` command to extend an existing Test with Findings from a new report in one of two ways: + +By ID: +- Create a Product (or use an existing product) +- Create an Engagement inside the product +- Import a scan report and find the id of the Test +- Provide this in the test-id parameter + +By Names: +- Create a Product (or use an existing product) +- Create an Engagement inside the product +- Import a report which will create a Test +- Provide product-name +- Provide engagement-name +- Optional: Provide test-name + +In this scenario DefectDojo will look up the Test by the provided details. If no test-name is provided, the latest test inside the engagement will be chosen based on scan-type. + +When using names you can let the importer automatically create Engagements, Products and Product-types by using `auto-create-context=true`. +You can use `deduplication-on-engagement` to restrict deduplication for imported Findings to the newly created Engagement. 
+ +#### Usage + +``` +universal-importer [global options] reimport [optional flags] + or: universal-importer [global options] reimport --config ./config-file-path + or: universal-importer reimport [-h | --help] + or: universal-importer reimport example [subcommand options] + or: universal-importer reimport example [-h | --help] + +>> The API token must be set in the environment variable `DD_IMPORTER_DOJO_API_TOKEN`. +``` + +#### **Reimport Example:** + +``` +universal-importer reimport \ +--defectdojo-url "https://YOUR_INSTANCE.cloud.defectdojo.com/" \ +--scan-type "Nancy Scan" \ +--report-path "./examples/nancy_findings.json" \ +--test-id 11 \ +--verified \ +--active \ +--minimum-severity "info" \ +--tag "dev" --tag "tools" --tag "nancy" --tag "test-dev" \ +--test-version "1.0" \ +--auto-create-context +``` + +#### Commands + +``` +example, x Shows an example of required and optional flags for reimport operation +``` + +#### Options + +`--active, -a` +* Dictates whether Findings should be forced to Active or Inactive on import. A value of True forces Findings to Active, while a value of False forces all Findings to Inactive. If no value is set, Active status will instead rely on the incoming report file. `[$DD_IMPORTER_ACTIVE]` + +`--api-scan-configuration value, --asc value` +* The ID of the API Scan Configuration object to use when importing or reimporting. 
(default: 0) `[$DD_IMPORTER_API_SCAN_CONFIGURATION]` + +`--apply-tags-endpoints, --te` +* If set to true, the tags (from the option --tag) will be applied to the endpoints (default: false) `[$DD_IMPORTER_APPLY_TAGS_ENDPOINTS]` + +`--apply-tags-findings, --tf` +* If set to true, the tags (from the option --tag) will be applied to the findings (default: false) `[$DD_IMPORTER_APPLY_TAGS_FINDINGS]` + +`--auto-create-context, --acc` +* If set to true, the importer automatically creates Engagements, Products, and Product_Types (default: false) `[$DD_IMPORTER_AUTO_CREATE_CONTEXT]` + +`--close-old-findings, --cof` +* If True, old Findings no longer present in the report will be Closed as Mitigated when importing. If Service has been set, only the Findings for this Service will be closed. [$DD_IMPORTER_CLOSE_OLD_FINDINGS] + +`--close-old-findings-product-scope, --cofps` +* Select if --close-old-findings applies to **all** Findings of the same type in the Product. By default, this is set to false, meaning that only old Findings of the same type in the Engagement are in scope (and will be closed by Close Old Findings). [$DD_IMPORTER_CLOSE_OLD_FINDINGS_PRODUCT_SCOPE] + +`--deduplication-on-engagement, --doe` +* If set to true, the importer restricts deduplication for imported findings to the newly created Engagement. (default: false) `[$DD_IMPORTER_DEDUPLICATION_ON_ENGAGEMENT]` + +`--engagement-name value, -e value` +* The name of the Engagement to import findings into. `[$DD_IMPORTER_ENGAGEMENT_NAME]` + +`--minimum-severity value, --ms value` +* Dictates the lowest level severity that should be imported. Valid values are: Critical, High, Medium, Low, Info. (default: "Info") `[$DD_IMPORTER_MINIMUM_SEVERITY]` + +`--product-name value, -p value` +* The name of the Product to import findings into. `[$DD_IMPORTER_PRODUCT_NAME]` + +`--product-type-name value, --pt value` +* The name of the Product Type to import findings into. 
`[$DD_IMPORTER_PRODUCT_TYPE_NAME]` + +`--report-path value, -r value` +* The path to the report to import. (required). `[$DD_IMPORTER_REPORT_PATH]` + +`--scan-type value, -s value` +* The scan type of the tool (required). `[$DD_IMPORTER_SCAN_TYPE]` + +`--tag value, -t value [ --tag value, -t value ]` +* Any tags to be applied to the Test object `[$DD_IMPORTER_TAGS]` + +`--test-id value, --ti value` +* The ID of the Test to reimport findings into. (default: 0) `[$DD_IMPORTER_TEST_ID]` + +`--test-name value, --tn value` +* The name of the Test to import findings into - Defaults to the name of the scan type. `[$DD_IMPORTER_TEST_NAME]` + +`--test-version value, -V value` +* The version of the test. `[$DD_IMPORTER_TEST_VERSION]` + +`--verified, -v` +* Dictates whether Findings should be set to Verified on import. A value of True forces Findings to Verified. If no value is set, Verified status will instead rely on the incoming report file. (default: unset) `[$DD_IMPORTER_VERIFIED]` + +**Settings:** + +`--config value, -c value` +* The path to the TOML configuration file is used to set values for the options. If the option is set in the configuration file and the CLI, the option will take the value set from the CLI. `[$DD_IMPORTER_CONFIG_FILE]` + +`--defectdojo-url value, -u value` +* The URL of the DefectDojo instance to import findings into. (required). `[$DD_IMPORTER_DEFECTDOJO_URL]` + +`--insecure-tls, --no-tls` +* ignore TLS validation errors when connecting to the provided DefectDojo instance. Most users should not enable this flag. (default: false) `[$DD_IMPORTER_INSECURE_TLS]` + +### Interactive +Interactive mode allows you to configure import and reimport process, step-by-step. 
+ +#### Usage + +``` +universal-importer interactive + or: universal-importer interactive [--skip-intro] [--no-full-screen] [--log-path] + or: universal-importer interactive [-h | --help] +``` + +#### Options + +`--skip-intro ` +* Skip the intro screen (default: false) + +`--no-full-screen` +* Disable full screen mode (default: false) +`--log-path value` +* Path to the log file +`--help, -h` +* show help + + +## Troubleshooting + +If you encounter any issues with these tools, please check the following: +- Ensure you're using the correct binary for your operating system and CPU architecture. +- Verify that the API key is set correctly in your environment variables. +- Check that the DefectDojo URL is correct and accessible. +- When importing, confirm that the report file exists and is in the supported format for the specified scan type. You can review the supported scanners for DefectDojo on our [supported tools list](/supported_tools). diff --git a/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md b/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md index 5b5d56ca85d..2a9df3ce190 100644 --- a/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md +++ b/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md @@ -1,63 +1,63 @@ ---- -title: "Deduplication Algorithms" -description: "How DefectDojo identifies duplicates: Unique ID, Hash Code, Unique ID or Hash Code, Legacy" -weight: 3 ---- - -## Overview - -DefectDojo supports four deduplication algorithms that can be selected per parser (test type): - -- **Unique ID From Tool**: Uses the scanner-provided unique identifier. -- **Hash Code**: Uses a configured set of fields to compute a hash. -- **Unique ID From Tool or Hash Code**: Prefer the tool’s unique ID; fall back to hash when no matching unique ID is found. 
-- **Legacy**: Historical algorithm with multiple conditions; only available in the Open Source version. - -Algorithm selection per parser is controlled by `DEDUPLICATION_ALGORITHM_PER_PARSER` (see the [Open-Source tuning page](/en/working_with_findings/finding_deduplication/deduplication_tuning_os/) for configuration details). - -## How endpoints are assessed per algorithm - -Endpoints can influence deduplication in different ways depending on the algorithm and configuration. - -### Unique ID From Tool - -- Deduplication uses `unique_id_from_tool` (or `vuln_id_from_tool`). -- **Endpoints are ignored** for duplicate matching. -- A finding’s hash may still be calculated for other features, but it does not affect deduplication under this algorithm. - -### Hash Code - -- Deduplication uses a hash computed from fields specified by `HASHCODE_FIELDS_PER_SCANNER` for the given parser. -- The hash also includes fields from `HASH_CODE_FIELDS_ALWAYS` (see Service field section below). -- Endpoints can affect deduplication in two ways: - - If the scanner’s hash fields include `endpoints`, they are part of the hash and must match accordingly. -- If the scanner’s hash fields do not include `endpoints`, optional endpoint-based matching can be enabled via `DEDUPE_ALGO_ENDPOINT_FIELDS` (OS setting). When configured: - - Set it to an empty list `[]` to ignore endpoints entirely. - - Set it to a list of endpoint attributes (e.g. `["host", "port"]`). If at least one endpoint pair between the two findings matches on all listed attributes, deduplication can occur. - -### Unique ID From Tool or Hash Code -A finding is a duplicate with another if they have the same unique_id_from_tool OR the same hash_code. - -The endpoints also have to match for the findings to be considered duplicates, see the Hash Code algorithm above. - -### Legacy (OS only) - -- Deduplication considers multiple attributes including endpoints. 
-- Behavior differs for static vs dynamic findings: - - **Static findings**: The new finding must contain all endpoints of the original. Extra endpoints on the new finding are allowed. - - **Dynamic findings**: Endpoints must strictly match (commonly by host and port); differing endpoints prevent deduplication. -- If there are no endpoints and both `file_path` and `line` are empty, deduplication typically does not occur. - -## Background processing - -- Dedupe is triggered on import/reimport and during certain updates run via Celery in the background. - -## Service field and its impact - -- By default, `HASH_CODE_FIELDS_ALWAYS = ["service"]`, meaning the `service` associated with a finding is appended to the hash for all scanners. -- Practical implications: - - Two otherwise identical findings with different `service` values will produce different hashes and will not deduplicate under Hash-based paths. - - During import/reimport, the `Service` field entered in the UI can override the parser-provided service. Changing it can change the hash and therefore affect deduplication outcomes. - - If you want service to have no impact on deduplication, configure `HASH_CODE_FIELDS_ALWAYS` accordingly (see the OS tuning page). Removing `service` from the always-included list will stop it from affecting hashes. - -See also: the [Open Source tuning guide](/en/working_with_findings/finding_deduplication/deduplication_tuning_os/) for configuration details and examples. +--- +title: "Deduplication Algorithms" +description: "How DefectDojo identifies duplicates: Unique ID, Hash Code, Unique ID or Hash Code, Legacy" +weight: 3 +--- + +## Overview + +DefectDojo supports four deduplication algorithms that can be selected per parser (test type): + +- **Unique ID From Tool**: Uses the scanner-provided unique identifier. +- **Hash Code**: Uses a configured set of fields to compute a hash. 
+- **Unique ID From Tool or Hash Code**: Prefer the tool’s unique ID; fall back to hash when no matching unique ID is found. +- **Legacy**: Historical algorithm with multiple conditions; only available in the Open Source version. + +Algorithm selection per parser is controlled by `DEDUPLICATION_ALGORITHM_PER_PARSER` (see the [Open-Source tuning page](/en/working_with_findings/finding_deduplication/deduplication_tuning_os/) for configuration details). + +## How endpoints are assessed per algorithm + +Endpoints can influence deduplication in different ways depending on the algorithm and configuration. + +### Unique ID From Tool + +- Deduplication uses `unique_id_from_tool` (or `vuln_id_from_tool`). +- **Endpoints are ignored** for duplicate matching. +- A finding’s hash may still be calculated for other features, but it does not affect deduplication under this algorithm. + +### Hash Code + +- Deduplication uses a hash computed from fields specified by `HASHCODE_FIELDS_PER_SCANNER` for the given parser. +- The hash also includes fields from `HASH_CODE_FIELDS_ALWAYS` (see Service field section below). +- Endpoints can affect deduplication in two ways: + - If the scanner’s hash fields include `endpoints`, they are part of the hash and must match accordingly. +- If the scanner’s hash fields do not include `endpoints`, optional endpoint-based matching can be enabled via `DEDUPE_ALGO_ENDPOINT_FIELDS` (OS setting). When configured: + - Set it to an empty list `[]` to ignore endpoints entirely. + - Set it to a list of endpoint attributes (e.g. `["host", "port"]`). If at least one endpoint pair between the two findings matches on all listed attributes, deduplication can occur. + +### Unique ID From Tool or Hash Code +A finding is a duplicate with another if they have the same unique_id_from_tool OR the same hash_code. + +The endpoints also have to match for the findings to be considered duplicates, see the Hash Code algorithm above. 
+ +### Legacy (OS only) + +- Deduplication considers multiple attributes including endpoints. +- Behavior differs for static vs dynamic findings: + - **Static findings**: The new finding must contain all endpoints of the original. Extra endpoints on the new finding are allowed. + - **Dynamic findings**: Endpoints must strictly match (commonly by host and port); differing endpoints prevent deduplication. +- If there are no endpoints and both `file_path` and `line` are empty, deduplication typically does not occur. + +## Background processing + +- Dedupe is triggered on import/reimport and during certain updates run via Celery in the background. + +## Service field and its impact + +- By default, `HASH_CODE_FIELDS_ALWAYS = ["service"]`, meaning the `service` associated with a finding is appended to the hash for all scanners. +- Practical implications: + - Two otherwise identical findings with different `service` values will produce different hashes and will not deduplicate under Hash-based paths. + - During import/reimport, the `Service` field entered in the UI can override the parser-provided service. Changing it can change the hash and therefore affect deduplication outcomes. + - If you want service to have no impact on deduplication, configure `HASH_CODE_FIELDS_ALWAYS` accordingly (see the OS tuning page). Removing `service` from the always-included list will stop it from affecting hashes. + +See also: the [Open Source tuning guide](/en/working_with_findings/finding_deduplication/deduplication_tuning_os/) for configuration details and examples. 
diff --git a/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md b/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md index 2acf22e0e08..c3d7ea20873 100644 --- a/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md +++ b/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md @@ -1,147 +1,147 @@ ---- -title: "Deduplication Tuning (Open Source)" -description: "Configure deduplication in DefectDojo Open Source: algorithms, hash fields, endpoints, and service" -weight: 5 ---- - -This page explains how to tune deduplication in the Open Source (OS) edition of DefectDojo. For a visual, feature-rich tuning UI, see the Pro documentation. The OS edition uses settings files and environment variables. - -See also: [Configuration](/en/open_source/installation/configuration) for details on environment variables and `local_settings.py` overrides. - -## What you can configure - -- **Algorithm per parser**: Choose one of Unique ID From Tool, Hash Code, Unique ID From Tool or Hash Code, or Legacy (OS only). -- **Hash fields per scanner**: Decide which fields contribute to the hash for each parser. -- **Allow null CWE**: Control whether a missing/zero CWE is acceptable when hashing. -- **Endpoint consideration**: Optionally use endpoints for deduplication when they’re not part of the hash. -- **Always-included fields**: Add fields (e.g., `service`) to all hashes regardless of per-scanner settings. - -## Key settings (defaults shown) - -All defaults are defined in `dojo/settings/settings.dist.py`. Override via environment or `local_settings.py`. - -### Algorithm per parser - -- Setting: `DEDUPLICATION_ALGORITHM_PER_PARSER` -- Values per parser: one of `unique_id_from_tool`, `hash_code`, `unique_id_from_tool_or_hash_code`, `legacy`. 
-- Example (env variable JSON string): - -```bash -DD_DEDUPLICATION_ALGORITHM_PER_PARSER='{"Trivy Scan": "hash_code", "Veracode Scan": "unique_id_from_tool_or_hash_code"}' -``` - -### Hash fields per scanner - -- Setting: `HASHCODE_FIELDS_PER_SCANNER` -- Example default for Trivy in OS: - -```startLine:endLine:dojo/settings/settings.dist.py -1318:1321:dojo/settings/settings.dist.py - "Trivy Operator Scan": ["title", "severity", "vulnerability_ids", "description"], - "Trivy Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"], - "TFSec Scan": ["severity", "vuln_id_from_tool", "file_path", "line"], - "Snyk Scan": ["vuln_id_from_tool", "file_path", "component_name", "component_version"], -``` - -- Override example (env variable JSON string): - -```bash -DD_HASHCODE_FIELDS_PER_SCANNER='{"ZAP Scan":["title","cwe","severity"],"Trivy Scan":["title","severity","vulnerability_ids","description"]}' -``` - -### Allow null CWE per scanner - -- Setting: `HASHCODE_ALLOWS_NULL_CWE` -- Controls per parser whether a null/zero CWE is acceptable in hashing. If False and the finding has `cwe = 0`, the hash falls back to the legacy computation for that finding. - -### Always-included fields in hash - -- Setting: `HASH_CODE_FIELDS_ALWAYS` -- Default: `["service"]` -- Impact: Appended to the hash for every scanner. Removing `service` here stops it from affecting hashes across the board. - -```startLine:endLine:dojo/settings/settings.dist.py -1464:1466:dojo/settings/settings.dist.py -# Adding fields to the hash_code calculation regardless of the previous settings -HASH_CODE_FIELDS_ALWAYS = ["service"] -``` - -### Optional endpoint-based dedupe - -- Setting: `DEDUPE_ALGO_ENDPOINT_FIELDS` -- Default: `["host", "path"]` -- Purpose: If endpoints are not part of the hash fields, you can still require a minimal endpoint match to deduplicate. If the list is empty `[]`, endpoints are ignored on the dedupe path. 
- -```startLine:endLine:dojo/settings/settings.dist.py -1491:1499:dojo/settings/settings.dist.py -# Allows to deduplicate with endpoints if endpoints is not included in the hashcode. -# Possible values are: scheme, host, port, path, query, fragment, userinfo, and user. -# If a finding has more than one endpoint, only one endpoint pair must match to mark the finding as duplicate. -DEDUPE_ALGO_ENDPOINT_FIELDS = ["host", "path"] -``` - -## Endpoints: how to tune - -Endpoints can affect deduplication via two mechanisms: - -1) Include `endpoints` in `HASHCODE_FIELDS_PER_SCANNER` for a parser. Then endpoints are part of the hash and must match exactly according to the parser’s hashing rules. -2) If endpoints are not in the hash fields, use `DEDUPLE_ALGO_ENDPOINT_FIELDS` to specify attributes to compare. Examples: - - `[]`: endpoints are ignored for dedupe. - - `["host"]`: findings dedupe if any endpoint pair matches by host. - - `["host", "port"]`: findings dedupe if any endpoint pair matches by host AND port. - -Notes: - -- For Legacy algorithm, static vs dynamic findings have different endpoint matching rules (see the algorithms page). The `DEDUPLE_ALGO_ENDPOINT_FIELDS` setting applies to the hash-code path, not the Legacy algorithm’s intrinsic logic. -- For `unique_id_from_tool` (ID-based) matching, endpoints are ignored for the dedupe decision. - -## Service field: dedupe and reimport - -- With default `HASH_CODE_FIELDS_ALWAYS = ["service"]`, the `service` field is appended to the hash. Two otherwise equal findings with different `service` values will not dedupe on hash-based paths. -- During import via UI/API, the `Service` input can override the parser-provided service. Changing it changes the hash and can alter dedupe behavior and reimport matching. -- If you want dedupe independent of service, remove `service` from `HASH_CODE_FIELDS_ALWAYS` or leave the `Service` field empty during import. 
- -## After changing deduplication settings - -- Changes to dedupe configuration (e.g., `HASHCODE_FIELDS_PER_SCANNER`, `HASH_CODE_FIELDS_ALWAYS`, `DEDUPLICATION_ALGORITHM_PER_PARSER`) are not applied retroactively automatically. To re-evaluate existing findings you must run the management command below. - -Run inside the uwsgi container. Example (hash codes only, no dedupe): - -```bash -docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --hash_code_only" -``` - -Help/usage: - -options: - --parser PARSER List of parsers for which hash_code needs recomputing - (defaults to all parsers) - --hash_code_only Only compute hash codes - --dedupe_only Only run deduplication - --dedupe_sync Run dedupe in the foreground, default false -``` - -If you submit dedupe to Celery (without `--dedupe_sync`), allow time for tasks to complete before evaluating results. - -## Where to configure - -- Prefer environment variables in deployments. For local development or advanced overrides, use `local_settings.py`. -- See `configuration.md` for details on how to set environment variables and configure local overrides. - -### Troubleshooting - -To help troubleshooting deduplication use the following tools: - -- Observe log out in the `dojo.specific-loggers.deduplication` category. This is a class independant logger that outputs details about the deduplication process and settings when processing findings. -- Observe the `unique_id_from_tool` and `hash_code` values by hovering over the `ID` field or `Status` column: - -![Unique ID from Tool and Hash Code on the View Finding page](images/hash_code_id_field.png) - -![Unique ID from Tool and Hash Code on the Finding List Status Column](images/hash_code_status_column.png) - -## Related documentation - -- [Deduplication Algorithms](deduplication_algorithms): conceptual overview and endpoint behavior. -- [Avoiding duplicates via reimport](avoiding_duplicates_via_reimport). 
- - +--- +title: "Deduplication Tuning (Open Source)" +description: "Configure deduplication in DefectDojo Open Source: algorithms, hash fields, endpoints, and service" +weight: 5 +--- + +This page explains how to tune deduplication in the Open Source (OS) edition of DefectDojo. For a visual, feature-rich tuning UI, see the Pro documentation. The OS edition uses settings files and environment variables. + +See also: [Configuration](/en/open_source/installation/configuration) for details on environment variables and `local_settings.py` overrides. + +## What you can configure + +- **Algorithm per parser**: Choose one of Unique ID From Tool, Hash Code, Unique ID From Tool or Hash Code, or Legacy (OS only). +- **Hash fields per scanner**: Decide which fields contribute to the hash for each parser. +- **Allow null CWE**: Control whether a missing/zero CWE is acceptable when hashing. +- **Endpoint consideration**: Optionally use endpoints for deduplication when they’re not part of the hash. +- **Always-included fields**: Add fields (e.g., `service`) to all hashes regardless of per-scanner settings. + +## Key settings (defaults shown) + +All defaults are defined in `dojo/settings/settings.dist.py`. Override via environment or `local_settings.py`. + +### Algorithm per parser + +- Setting: `DEDUPLICATION_ALGORITHM_PER_PARSER` +- Values per parser: one of `unique_id_from_tool`, `hash_code`, `unique_id_from_tool_or_hash_code`, `legacy`. 
+- Example (env variable JSON string): + +```bash +DD_DEDUPLICATION_ALGORITHM_PER_PARSER='{"Trivy Scan": "hash_code", "Veracode Scan": "unique_id_from_tool_or_hash_code"}' +``` + +### Hash fields per scanner + +- Setting: `HASHCODE_FIELDS_PER_SCANNER` +- Example default for Trivy in OS: + +```startLine:endLine:dojo/settings/settings.dist.py +1318:1321:dojo/settings/settings.dist.py + "Trivy Operator Scan": ["title", "severity", "vulnerability_ids", "description"], + "Trivy Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"], + "TFSec Scan": ["severity", "vuln_id_from_tool", "file_path", "line"], + "Snyk Scan": ["vuln_id_from_tool", "file_path", "component_name", "component_version"], +``` + +- Override example (env variable JSON string): + +```bash +DD_HASHCODE_FIELDS_PER_SCANNER='{"ZAP Scan":["title","cwe","severity"],"Trivy Scan":["title","severity","vulnerability_ids","description"]}' +``` + +### Allow null CWE per scanner + +- Setting: `HASHCODE_ALLOWS_NULL_CWE` +- Controls per parser whether a null/zero CWE is acceptable in hashing. If False and the finding has `cwe = 0`, the hash falls back to the legacy computation for that finding. + +### Always-included fields in hash + +- Setting: `HASH_CODE_FIELDS_ALWAYS` +- Default: `["service"]` +- Impact: Appended to the hash for every scanner. Removing `service` here stops it from affecting hashes across the board. + +```startLine:endLine:dojo/settings/settings.dist.py +1464:1466:dojo/settings/settings.dist.py +# Adding fields to the hash_code calculation regardless of the previous settings +HASH_CODE_FIELDS_ALWAYS = ["service"] +``` + +### Optional endpoint-based dedupe + +- Setting: `DEDUPE_ALGO_ENDPOINT_FIELDS` +- Default: `["host", "path"]` +- Purpose: If endpoints are not part of the hash fields, you can still require a minimal endpoint match to deduplicate. If the list is empty `[]`, endpoints are ignored on the dedupe path. 
+
+```startLine:endLine:dojo/settings/settings.dist.py
+1491:1499:dojo/settings/settings.dist.py
+# Allows to deduplicate with endpoints if endpoints is not included in the hashcode.
+# Possible values are: scheme, host, port, path, query, fragment, userinfo, and user.
+# If a finding has more than one endpoint, only one endpoint pair must match to mark the finding as duplicate.
+DEDUPE_ALGO_ENDPOINT_FIELDS = ["host", "path"]
+```
+
+## Endpoints: how to tune
+
+Endpoints can affect deduplication via two mechanisms:
+
+1) Include `endpoints` in `HASHCODE_FIELDS_PER_SCANNER` for a parser. Then endpoints are part of the hash and must match exactly according to the parser’s hashing rules.
+2) If endpoints are not in the hash fields, use `DEDUPE_ALGO_ENDPOINT_FIELDS` to specify attributes to compare. Examples:
+   - `[]`: endpoints are ignored for dedupe.
+   - `["host"]`: findings dedupe if any endpoint pair matches by host.
+   - `["host", "port"]`: findings dedupe if any endpoint pair matches by host AND port.
+
+Notes:
+
+- For Legacy algorithm, static vs dynamic findings have different endpoint matching rules (see the algorithms page). The `DEDUPE_ALGO_ENDPOINT_FIELDS` setting applies to the hash-code path, not the Legacy algorithm’s intrinsic logic.
+- For `unique_id_from_tool` (ID-based) matching, endpoints are ignored for the dedupe decision.
+
+## Service field: dedupe and reimport
+
+- With default `HASH_CODE_FIELDS_ALWAYS = ["service"]`, the `service` field is appended to the hash. Two otherwise equal findings with different `service` values will not dedupe on hash-based paths.
+- During import via UI/API, the `Service` input can override the parser-provided service. Changing it changes the hash and can alter dedupe behavior and reimport matching.
+- If you want dedupe independent of service, remove `service` from `HASH_CODE_FIELDS_ALWAYS` or leave the `Service` field empty during import.
+
+## After changing deduplication settings
+
+- Changes to dedupe configuration (e.g., `HASHCODE_FIELDS_PER_SCANNER`, `HASH_CODE_FIELDS_ALWAYS`, `DEDUPLICATION_ALGORITHM_PER_PARSER`) are not applied retroactively automatically. To re-evaluate existing findings you must run the management command below.
+
+Run inside the uwsgi container. Example (hash codes only, no dedupe):
+
+```bash
+docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --hash_code_only"
+```
+
+Help/usage:
+
+```
+options:
+  --parser PARSER      List of parsers for which hash_code needs recomputing
+                       (defaults to all parsers)
+  --hash_code_only     Only compute hash codes
+  --dedupe_only        Only run deduplication
+  --dedupe_sync        Run dedupe in the foreground, default false
+```
+
+If you submit dedupe to Celery (without `--dedupe_sync`), allow time for tasks to complete before evaluating results.
+
+## Where to configure
+
+- Prefer environment variables in deployments. For local development or advanced overrides, use `local_settings.py`.
+- See `configuration.md` for details on how to set environment variables and configure local overrides.
+
+### Troubleshooting
+
+To help troubleshoot deduplication, use the following tools:
+
+- Observe log output in the `dojo.specific-loggers.deduplication` category. This is a class-independent logger that outputs details about the deduplication process and settings when processing findings.
+- Observe the `unique_id_from_tool` and `hash_code` values by hovering over the `ID` field or `Status` column:
+
+![Unique ID from Tool and Hash Code on the View Finding page](images/hash_code_id_field.png)
+
+![Unique ID from Tool and Hash Code on the Finding List Status Column](images/hash_code_status_column.png)
+
+## Related documentation
+
+- [Deduplication Algorithms](deduplication_algorithms): conceptual overview and endpoint behavior.
+- [Avoiding duplicates via reimport](avoiding_duplicates_via_reimport).
+ + diff --git a/docs/content/en/working_with_findings/organizing_engagements_tests/tagging_objects.md b/docs/content/en/working_with_findings/organizing_engagements_tests/tagging_objects.md index d551f07de0b..048454fb4c7 100644 --- a/docs/content/en/working_with_findings/organizing_engagements_tests/tagging_objects.md +++ b/docs/content/en/working_with_findings/organizing_engagements_tests/tagging_objects.md @@ -1,178 +1,178 @@ ---- -title: "Tags" -description: "Use Tags to create a new slice of your data model" -draft: false -weight: 2 -exclude_search: false ---- - -Tags are ideal for grouping objects in a manner that can be filtered out into smaller, more digestible chunks. They can be used to denote status, or to create custom sets of Product Type, Products, Engagements or Findings across the data model. - -In DefectDojo, tags are a first class citizen and are recognized as the facilitators -of organization within each level of the [data model](../product_hierarchy). - -Here is an example with a Product with two tags and four findings each with a single tag: - -![High level example of usage with tags](images/tags-high-level-example.png) - -### Tag Formats - -Tags can be formatted in any of the following ways: -- StringWithNoSpaces -- string-with-hyphens -- string_with_underscores -- colons:acceptable - -## Tag Management (Pro UI) - -### Adding and Removing - -Tags can be managed in the following ways: - -1. **Creating or Editing new objects** - - When a new object is created or edited through the UI or API, there is a field for specifying - the tags to be set on a given object. - - ![tag](images/tags_product.png) - -2. **When Importing/Reimporting Findings** - - Tags are available on the Import/Reimport form, both in the UI and via the API. When this form is submitted, the **Test** will be tagged with `[tag]` and `[daily-import]`. If "Apply Tags to Findings" or "Apply Tags to Endpoints" is selected, those objects will also be tagged. 
Tags provide an opportunity to append automation run details and tool information that may not be captured in the Test or Finding object directly. - - ![tag](images/tags_importscan.png) - -3. **Via Bulk Edit** - - When many Findings are selected from a table, you can use the Bulk Edit menu to change the associated Tags for many Findings simultaneously. Note that this will replace all Finding-level Tags with the Tags specified; existing Finding Tags will be overwritten. - - ![bulk editing findings](images/Bulk_Editing_Findings.png) - - For more information, see our guide to [Bulk Editing Findings](/en/working_with_findings/findings_workflows/editing_findings/#bulk-edit-findings). - - -## Tag Management (Classic UI / OpenSource) - -### Adding and Removing - -Tags can be managed in the following ways: - -1. Creating or Editing new objects - - When a new object is created or edited through the UI or API, there is a field for specifying - the tags to be set on a given object. This field is a multiselect field that also has - auto completion to make searching and adding existing tags a breeze. Here is what the field - looks like on the Product from the screenshot in the previous section: - - ![Tag management on an object](images/tags-management-on-object.png) - -2. Import and Reimport - - Tags can also be applied to a given test at the time of import or reimport. This is a very - handy use case when importing via the API with automation as it provides an opportunity to - append automation run details and tool information that may not be captured in the test - or finding object directly. - - The field looks and behaves exactly as it does on a given object - -3. Bulk Edit Menu (Findings only) - - When needing to update many Findings with the same set of tags, the bulk edit menu can be - used to ease the burden. 
- - In the following example, lets say I want to update the tags of the two findings with the tag "tag-group-alpha" to be a new tag list like this ["tag-group-charlie", "tag-group-delta"]. - First I would select the tags to be updated: - - ![Select findings for bulk edit tag update](images/tags-select-findings-for-bulk-edit.png) - - Once a finding is selected, a new button appears with the name "Bulk Edit". Clicking this button - produces a dropdown menu with many options, but the focus is just on tags for now. Update the - field to have the desired tag list as follows, and click submit - - ![Apply changes for bulk edit tag update](images/tags-bulk-edit-submit.png) - - The tags on the selected Findings will be updated to whatever was specified in the tags field - within the bulk edit menu - - ![Completed bulk edit tag update](images/tags-bulk-edit-complete.png) - -## Tag Inheritance - -**Pro UI note: though Tag inheritance can be configured using the Pro UI, inherited Tags currently can only be accessed and filtered for through the Classic UI or the API.** - -When Tag Inheritance is enabled, tags applied to a given Product will automatically be applied to all objects under Products in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/Product_hierarchy). - -### Configuration - -Tag Inheritance can be enabled at the following scope levels: -- Global Scope - - Every Product system wide will begin applying tags to all children objects (Engagements, Tests and Findings) - - This is set within the System Settings -- Product Scope - - Only the selected Product will begin applying tags to all children objects (Engagements, Tests and Findings) - - This is set at the Product creation/edit page - -### Behaviors - -When Tag Inheritance is enabled, standard Tags can be added to and removed from objects in the standard way. 
-However inherited tags cannot be removed from a child object without removing them from the parent object -See the following example of adding a tag "test_only_tag" to the Test object and a tag "engagement_only_tag" to the Engagement. - -![Example of inherited tags](images/tags-inherit-exmaple.png) - -When updates are made to the tag list on a Product, the same changes are made to all objects within the Product asynchronously. The duration of this task directly correlates to the number the objects contained within a finding. - -**Open-Source:** If Tag changes are not observed within a reasonable time period, consult the celery worker logs to identify where any problems might have arisen. - - -### Filtering for Tags (Classic UI) - -Tags can be filtered in many ways through both the UI and the API. For example, here is a snippet -of the Finding filters: - -![Snippet of the finding filters](images/tags-finding-filter-snippet.png) - -There are ten fields related to tags: - - - Tags: filter on any tags that are attached to a given Finding - - Examples: - - Finding will be returned - - Finding Tags: ["A", "B", "C"] - - Filter Query: "B" - - Finding Will *not* be returned - - Finding Tags: ["A", "B", "C"] - - Filter Query: "F" - - Not Tags: filter on any tags that are *not* attached to a given Finding - - Examples: - - Finding will be returned - - Finding Tags: ["A", "B", "C"] - - Filter Query: "F" - - Finding Will *not* be returned - - Finding Tags: ["A", "B", "C"] - - Filter Query: "B" - - Tag Name Contains: filter on any tags that contain part or all of the query in the given Finding - - Examples: - - Finding will be returned - - Finding Tags: ["Alpha", "Beta", "Charlie"] - - Filter Query: "et" (part of "Beta") - - Finding Will *not* be returned - - Finding Tags: ["Alpha", "Beta", "Charlie"] - - Filter Query: "meg" (part of "Omega") - - Not Tags: filter on any tags that do *not* contain part or all of the query in the given Finding - - Examples: - - Finding will be 
returned - - Finding Tags: ["Alpha", "Beta", "Charlie"] - - Filter Query: "meg" (part of "Omega") - - Finding Will *not* be returned - - Finding Tags: ["Alpha", "Beta", "Charlie"] - - Filter Query: "et" (part of "Beta") - -For the other six tag filters, they follow the same rules as "Tags" and "Not Tags" as above, -but at different levels in the data model: - - - Tags (Test): filter on any tags that are attached to the Test of a given Finding - - Not Tags (Test): filter on any tags that are *not* attached to the Test of a given Finding - - Tags (Engagement): filter on any tags that are attached to the Engagement of a given Finding - - Not Tags (Engagement): filter on any tags that are *not* attached to the Engagement of a given Finding - - Tags (Product): filter on any tags that are attached to the Product of a given Finding - - Not Tags (Product): filter on any tags that are *not* attached to the Product of a given Finding +--- +title: "Tags" +description: "Use Tags to create a new slice of your data model" +draft: false +weight: 2 +exclude_search: false +--- + +Tags are ideal for grouping objects in a manner that can be filtered out into smaller, more digestible chunks. They can be used to denote status, or to create custom sets of Product Type, Products, Engagements or Findings across the data model. + +In DefectDojo, tags are a first class citizen and are recognized as the facilitators +of organization within each level of the [data model](../product_hierarchy). + +Here is an example with a Product with two tags and four findings each with a single tag: + +![High level example of usage with tags](images/tags-high-level-example.png) + +### Tag Formats + +Tags can be formatted in any of the following ways: +- StringWithNoSpaces +- string-with-hyphens +- string_with_underscores +- colons:acceptable + +## Tag Management (Pro UI) + +### Adding and Removing + +Tags can be managed in the following ways: + +1. 
**Creating or Editing new objects** + + When a new object is created or edited through the UI or API, there is a field for specifying + the tags to be set on a given object. + + ![tag](images/tags_product.png) + +2. **When Importing/Reimporting Findings** + + Tags are available on the Import/Reimport form, both in the UI and via the API. When this form is submitted, the **Test** will be tagged with `[tag]` and `[daily-import]`. If "Apply Tags to Findings" or "Apply Tags to Endpoints" is selected, those objects will also be tagged. Tags provide an opportunity to append automation run details and tool information that may not be captured in the Test or Finding object directly. + + ![tag](images/tags_importscan.png) + +3. **Via Bulk Edit** + + When many Findings are selected from a table, you can use the Bulk Edit menu to change the associated Tags for many Findings simultaneously. Note that this will replace all Finding-level Tags with the Tags specified; existing Finding Tags will be overwritten. + + ![bulk editing findings](images/Bulk_Editing_Findings.png) + + For more information, see our guide to [Bulk Editing Findings](/en/working_with_findings/findings_workflows/editing_findings/#bulk-edit-findings). + + +## Tag Management (Classic UI / OpenSource) + +### Adding and Removing + +Tags can be managed in the following ways: + +1. Creating or Editing new objects + + When a new object is created or edited through the UI or API, there is a field for specifying + the tags to be set on a given object. This field is a multiselect field that also has + auto completion to make searching and adding existing tags a breeze. Here is what the field + looks like on the Product from the screenshot in the previous section: + + ![Tag management on an object](images/tags-management-on-object.png) + +2. Import and Reimport + + Tags can also be applied to a given test at the time of import or reimport. 
This is a very + handy use case when importing via the API with automation as it provides an opportunity to + append automation run details and tool information that may not be captured in the test + or finding object directly. + + The field looks and behaves exactly as it does on a given object + +3. Bulk Edit Menu (Findings only) + + When needing to update many Findings with the same set of tags, the bulk edit menu can be + used to ease the burden. + + In the following example, lets say I want to update the tags of the two findings with the tag "tag-group-alpha" to be a new tag list like this ["tag-group-charlie", "tag-group-delta"]. + First I would select the tags to be updated: + + ![Select findings for bulk edit tag update](images/tags-select-findings-for-bulk-edit.png) + + Once a finding is selected, a new button appears with the name "Bulk Edit". Clicking this button + produces a dropdown menu with many options, but the focus is just on tags for now. Update the + field to have the desired tag list as follows, and click submit + + ![Apply changes for bulk edit tag update](images/tags-bulk-edit-submit.png) + + The tags on the selected Findings will be updated to whatever was specified in the tags field + within the bulk edit menu + + ![Completed bulk edit tag update](images/tags-bulk-edit-complete.png) + +## Tag Inheritance + +**Pro UI note: though Tag inheritance can be configured using the Pro UI, inherited Tags currently can only be accessed and filtered for through the Classic UI or the API.** + +When Tag Inheritance is enabled, tags applied to a given Product will automatically be applied to all objects under Products in the [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/Product_hierarchy). 
+
+### Configuration
+
+Tag Inheritance can be enabled at the following scope levels:
+- Global Scope
+  - Every Product system wide will begin applying tags to all child objects (Engagements, Tests and Findings)
+  - This is set within the System Settings
+- Product Scope
+  - Only the selected Product will begin applying tags to all child objects (Engagements, Tests and Findings)
+  - This is set at the Product creation/edit page
+
+### Behaviors
+
+When Tag Inheritance is enabled, standard Tags can be added to and removed from objects in the standard way.
+However inherited tags cannot be removed from a child object without removing them from the parent object.
+See the following example of adding a tag "test_only_tag" to the Test object and a tag "engagement_only_tag" to the Engagement.
+
+![Example of inherited tags](images/tags-inherit-exmaple.png)
+
+When updates are made to the tag list on a Product, the same changes are made to all objects within the Product asynchronously. The duration of this task directly correlates to the number of objects contained within the Product.
+
+**Open-Source:** If Tag changes are not observed within a reasonable time period, consult the celery worker logs to identify where any problems might have arisen.
+
+
+### Filtering for Tags (Classic UI)
+
+Tags can be filtered in many ways through both the UI and the API. 
For example, here is a snippet +of the Finding filters: + +![Snippet of the finding filters](images/tags-finding-filter-snippet.png) + +There are ten fields related to tags: + + - Tags: filter on any tags that are attached to a given Finding + - Examples: + - Finding will be returned + - Finding Tags: ["A", "B", "C"] + - Filter Query: "B" + - Finding Will *not* be returned + - Finding Tags: ["A", "B", "C"] + - Filter Query: "F" + - Not Tags: filter on any tags that are *not* attached to a given Finding + - Examples: + - Finding will be returned + - Finding Tags: ["A", "B", "C"] + - Filter Query: "F" + - Finding Will *not* be returned + - Finding Tags: ["A", "B", "C"] + - Filter Query: "B" + - Tag Name Contains: filter on any tags that contain part or all of the query in the given Finding + - Examples: + - Finding will be returned + - Finding Tags: ["Alpha", "Beta", "Charlie"] + - Filter Query: "et" (part of "Beta") + - Finding Will *not* be returned + - Finding Tags: ["Alpha", "Beta", "Charlie"] + - Filter Query: "meg" (part of "Omega") + - Not Tags: filter on any tags that do *not* contain part or all of the query in the given Finding + - Examples: + - Finding will be returned + - Finding Tags: ["Alpha", "Beta", "Charlie"] + - Filter Query: "meg" (part of "Omega") + - Finding Will *not* be returned + - Finding Tags: ["Alpha", "Beta", "Charlie"] + - Filter Query: "et" (part of "Beta") + +For the other six tag filters, they follow the same rules as "Tags" and "Not Tags" as above, +but at different levels in the data model: + + - Tags (Test): filter on any tags that are attached to the Test of a given Finding + - Not Tags (Test): filter on any tags that are *not* attached to the Test of a given Finding + - Tags (Engagement): filter on any tags that are attached to the Engagement of a given Finding + - Not Tags (Engagement): filter on any tags that are *not* attached to the Engagement of a given Finding + - Tags (Product): filter on any tags that are attached to the 
Product of a given Finding + - Not Tags (Product): filter on any tags that are *not* attached to the Product of a given Finding diff --git a/dojo/__init__.py b/dojo/__init__.py index 1d5aa3febd8..3788f2085b8 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -1,9 +1,9 @@ - - -# This will make sure the app is always imported when -# Django starts so that shared_task will use this app. -from .celery import app as celery_app # noqa: F401 - -__version__ = "2.53.1" -__url__ = "https://github.com/DefectDojo/django-DefectDojo" -__docs__ = "https://documentation.defectdojo.com" + + +# This will make sure the app is always imported when +# Django starts so that shared_task will use this app. +from .celery import app as celery_app # noqa: F401 + +__version__ = "2.53.1" +__url__ = "https://github.com/DefectDojo/django-DefectDojo" +__docs__ = "https://documentation.defectdojo.com" diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py index 4110b965db1..d67d10c4c61 100644 --- a/dojo/authorization/authorization.py +++ b/dojo/authorization/authorization.py @@ -1,395 +1,395 @@ -from django.core.exceptions import PermissionDenied - -from dojo.authorization.roles_permissions import ( - Permissions, - Roles, - get_global_roles_with_permissions, - get_roles_with_permissions, -) -from dojo.models import ( - App_Analysis, - Cred_Mapping, - Dojo_Group, - Dojo_Group_Member, - Endpoint, - Engagement, - Finding, - Finding_Group, - Languages, - Product, - Product_API_Scan_Configuration, - Product_Group, - Product_Member, - Product_Type, - Product_Type_Group, - Product_Type_Member, - Risk_Acceptance, - Stub_Finding, - Test, -) -from dojo.request_cache import cache_for_request - - -def user_has_configuration_permission(user, permission): - if not user: - return False - - if user.is_anonymous: - return False - - return user.has_perm(permission) - - -def user_has_permission(user, obj, permission): - if user.is_anonymous: - return False - - if 
user.is_superuser: - return True - - if isinstance(obj, Product_Type | Product): - # Global roles are only relevant for product types, products and their - # dependent objects - if user_has_global_permission(user, permission): - return True - - if isinstance(obj, Product_Type): - # Check if the user has a role for the product type with the requested - # permissions - member = get_product_type_member(user, obj) - if member is not None and role_has_permission( - member.role.id, permission, - ): - return True - # Check if the user is in a group with a role for the product type with - # the requested permissions - for product_type_group in get_product_type_groups(user, obj): - if role_has_permission(product_type_group.role.id, permission): - return True - return False - if ( - isinstance(obj, Product) - and permission.value >= Permissions.Product_View.value - ): - # Products inherit permissions of their product type - if user_has_permission(user, obj.prod_type, permission): - return True - - # Check if the user has a role for the product with the requested - # permissions - member = get_product_member(user, obj) - if member is not None and role_has_permission( - member.role.id, permission, - ): - return True - # Check if the user is in a group with a role for the product with the - # requested permissions - for product_group in get_product_groups(user, obj): - if role_has_permission(product_group.role.id, permission): - return True - return False - if ( - isinstance(obj, Engagement) - and permission in Permissions.get_engagement_permissions() - ): - return user_has_permission(user, obj.product, permission) - if ( - isinstance(obj, Test) - and permission in Permissions.get_test_permissions() - ) or ( - isinstance(obj, Risk_Acceptance) - and permission == Permissions.Risk_Acceptance - ): - if obj.engagement is not None: - return user_has_permission(user, obj.engagement.product, permission) - return user_has_global_permission(user, permission) - if (( - isinstance(obj, 
Finding | Stub_Finding) - ) and permission in Permissions.get_finding_permissions()) or ( - isinstance(obj, Finding_Group) - and permission in Permissions.get_finding_group_permissions() - ): - return user_has_permission( - user, obj.test.engagement.product, permission, - ) - if ( - isinstance(obj, Endpoint) - and permission in Permissions.get_endpoint_permissions() - ) or ( - isinstance(obj, Languages) - and permission in Permissions.get_language_permissions() - ) or (( - isinstance(obj, App_Analysis) - and permission in Permissions.get_technology_permissions() - ) or ( - isinstance(obj, Product_API_Scan_Configuration) - and permission - in Permissions.get_product_api_scan_configuration_permissions() - )): - return user_has_permission(user, obj.product, permission) - if ( - isinstance(obj, Product_Type_Member) - and permission in Permissions.get_product_type_member_permissions() - ): - if permission == Permissions.Product_Type_Member_Delete: - # Every member is allowed to remove himself - return obj.user == user or user_has_permission( - user, obj.product_type, permission, - ) - return user_has_permission(user, obj.product_type, permission) - if ( - isinstance(obj, Product_Member) - and permission in Permissions.get_product_member_permissions() - ): - if permission == Permissions.Product_Member_Delete: - # Every member is allowed to remove himself - return obj.user == user or user_has_permission( - user, obj.product, permission, - ) - return user_has_permission(user, obj.product, permission) - if ( - isinstance(obj, Product_Type_Group) - and permission in Permissions.get_product_type_group_permissions() - ): - return user_has_permission(user, obj.product_type, permission) - if ( - isinstance(obj, Product_Group) - and permission in Permissions.get_product_group_permissions() - ): - return user_has_permission(user, obj.product, permission) - if ( - isinstance(obj, Dojo_Group) - and permission in Permissions.get_group_permissions() - ): - # Check if the user has a 
role for the group with the requested - # permissions - group_member = get_group_member(user, obj) - return group_member is not None and role_has_permission( - group_member.role.id, permission, - ) - if ( - isinstance(obj, Dojo_Group_Member) - and permission in Permissions.get_group_member_permissions() - ): - if permission == Permissions.Group_Member_Delete: - # Every user is allowed to remove himself - return obj.user == user or user_has_permission( - user, obj.group, permission, - ) - return user_has_permission(user, obj.group, permission) - if ( - isinstance(obj, Cred_Mapping) - and permission in Permissions.get_credential_permissions() - ): - if obj.product: - return user_has_permission(user, obj.product, permission) - if obj.engagement: - return user_has_permission( - user, obj.engagement.product, permission, - ) - if obj.test: - return user_has_permission( - user, obj.test.engagement.product, permission, - ) - if obj.finding: - return user_has_permission( - user, obj.finding.test.engagement.product, permission, - ) - return None - msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}" - raise NoAuthorizationImplementedError(msg) - - -def user_has_global_permission(user, permission): - if not user: - return False - - if user.is_anonymous: - return False - - if user.is_superuser: - return True - - if permission == Permissions.Product_Type_Add: - if user_has_configuration_permission(user, "dojo.add_product_type"): - return True - - if ( - hasattr(user, "global_role") - and user.global_role.role is not None - and role_has_global_permission(user.global_role.role.id, permission) - ): - return True - - for group in get_groups(user): - if ( - hasattr(group, "global_role") - and group.global_role.role is not None - and role_has_global_permission( - group.global_role.role.id, permission, - ) - ): - return True - - return False - - -def user_has_configuration_permission_or_403(user, permission): - if not 
user_has_configuration_permission(user, permission): - raise PermissionDenied - - -def user_has_permission_or_403(user, obj, permission): - if not user_has_permission(user, obj, permission): - raise PermissionDenied - - -def user_has_global_permission_or_403(user, permission): - if not user_has_global_permission(user, permission): - raise PermissionDenied - - -def get_roles_for_permission(permission): - if not Permissions.has_value(permission): - msg = f"Permission {permission} does not exist" - raise PermissionDoesNotExistError(msg) - roles_for_permissions = set() - roles = get_roles_with_permissions() - for role in roles: - permissions = roles.get(role) - if permission in permissions: - roles_for_permissions.add(role) - return roles_for_permissions - - -def role_has_permission(role, permission): - if role is None: - return False - if not Roles.has_value(role): - msg = f"Role {role} does not exist" - raise RoleDoesNotExistError(msg) - roles = get_roles_with_permissions() - permissions = roles.get(role) - if not permissions: - return False - return permission in permissions - - -def role_has_global_permission(role, permission): - if role is None: - return False - if not Roles.has_value(role): - msg = f"Role {role} does not exist" - raise RoleDoesNotExistError(msg) - roles = get_global_roles_with_permissions() - permissions = roles.get(role) - if permissions and permission in permissions: - return True - return role_has_permission(role, permission) - - -class NoAuthorizationImplementedError(Exception): - def __init__(self, message): - self.message = message - - -class PermissionDoesNotExistError(Exception): - def __init__(self, message): - self.message = message - - -class RoleDoesNotExistError(Exception): - def __init__(self, message): - self.message = message - - -def get_product_member(user, product): - return get_product_member_dict(user).get(product.id) - - -@cache_for_request -def get_product_member_dict(user): - pm_dict = {} - for product_member in ( - 
Product_Member.objects.select_related("product") - .select_related("role") - .filter(user=user) - ): - pm_dict[product_member.product.id] = product_member - return pm_dict - - -def get_product_type_member(user, product_type): - return get_product_type_member_dict(user).get(product_type.id) - - -@cache_for_request -def get_product_type_member_dict(user): - ptm_dict = {} - for product_type_member in ( - Product_Type_Member.objects.select_related("product_type") - .select_related("role") - .filter(user=user) - ): - ptm_dict[product_type_member.product_type.id] = product_type_member - return ptm_dict - - -def get_product_groups(user, product): - return get_product_groups_dict(user).get(product.id, []) - - -@cache_for_request -def get_product_groups_dict(user): - pg_dict = {} - for product_group in ( - Product_Group.objects.select_related("product") - .select_related("role") - .filter(group__users=user) - ): - pgu_list = [] if pg_dict.get(product_group.product.id) is None else pg_dict[product_group.product.id] - pgu_list.append(product_group) - pg_dict[product_group.product.id] = pgu_list - return pg_dict - - -def get_product_type_groups(user, product_type): - return get_product_type_groups_dict(user).get(product_type.id, []) - - -@cache_for_request -def get_product_type_groups_dict(user): - pgt_dict = {} - for product_type_group in ( - Product_Type_Group.objects.select_related("product_type") - .select_related("role") - .filter(group__users=user) - ): - if pgt_dict.get(product_type_group.product_type.id) is None: - pgtu_list = [] - else: - pgtu_list = pgt_dict[product_type_group.product_type.id] - pgtu_list.append(product_type_group) - pgt_dict[product_type_group.product_type.id] = pgtu_list - return pgt_dict - - -@cache_for_request -def get_groups(user): - return Dojo_Group.objects.select_related("global_role").filter(users=user) - - -def get_group_member(user, group): - return get_group_members_dict(user).get(group.id) - - -@cache_for_request -def 
get_group_members_dict(user): - gu_dict = {} - for group_member in ( - Dojo_Group_Member.objects.select_related("group") - .select_related("role") - .filter(user=user) - ): - gu_dict[group_member.group.id] = group_member - return gu_dict +from django.core.exceptions import PermissionDenied + +from dojo.authorization.roles_permissions import ( + Permissions, + Roles, + get_global_roles_with_permissions, + get_roles_with_permissions, +) +from dojo.models import ( + App_Analysis, + Cred_Mapping, + Dojo_Group, + Dojo_Group_Member, + Endpoint, + Engagement, + Finding, + Finding_Group, + Languages, + Product, + Product_API_Scan_Configuration, + Product_Group, + Product_Member, + Product_Type, + Product_Type_Group, + Product_Type_Member, + Risk_Acceptance, + Stub_Finding, + Test, +) +from dojo.request_cache import cache_for_request + + +def user_has_configuration_permission(user, permission): + if not user: + return False + + if user.is_anonymous: + return False + + return user.has_perm(permission) + + +def user_has_permission(user, obj, permission): + if user.is_anonymous: + return False + + if user.is_superuser: + return True + + if isinstance(obj, Product_Type | Product): + # Global roles are only relevant for product types, products and their + # dependent objects + if user_has_global_permission(user, permission): + return True + + if isinstance(obj, Product_Type): + # Check if the user has a role for the product type with the requested + # permissions + member = get_product_type_member(user, obj) + if member is not None and role_has_permission( + member.role.id, permission, + ): + return True + # Check if the user is in a group with a role for the product type with + # the requested permissions + for product_type_group in get_product_type_groups(user, obj): + if role_has_permission(product_type_group.role.id, permission): + return True + return False + if ( + isinstance(obj, Product) + and permission.value >= Permissions.Product_View.value + ): + # Products inherit 
permissions of their product type + if user_has_permission(user, obj.prod_type, permission): + return True + + # Check if the user has a role for the product with the requested + # permissions + member = get_product_member(user, obj) + if member is not None and role_has_permission( + member.role.id, permission, + ): + return True + # Check if the user is in a group with a role for the product with the + # requested permissions + for product_group in get_product_groups(user, obj): + if role_has_permission(product_group.role.id, permission): + return True + return False + if ( + isinstance(obj, Engagement) + and permission in Permissions.get_engagement_permissions() + ): + return user_has_permission(user, obj.product, permission) + if ( + isinstance(obj, Test) + and permission in Permissions.get_test_permissions() + ) or ( + isinstance(obj, Risk_Acceptance) + and permission == Permissions.Risk_Acceptance + ): + if obj.engagement is not None: + return user_has_permission(user, obj.engagement.product, permission) + return user_has_global_permission(user, permission) + if (( + isinstance(obj, Finding | Stub_Finding) + ) and permission in Permissions.get_finding_permissions()) or ( + isinstance(obj, Finding_Group) + and permission in Permissions.get_finding_group_permissions() + ): + return user_has_permission( + user, obj.test.engagement.product, permission, + ) + if ( + isinstance(obj, Endpoint) + and permission in Permissions.get_endpoint_permissions() + ) or ( + isinstance(obj, Languages) + and permission in Permissions.get_language_permissions() + ) or (( + isinstance(obj, App_Analysis) + and permission in Permissions.get_technology_permissions() + ) or ( + isinstance(obj, Product_API_Scan_Configuration) + and permission + in Permissions.get_product_api_scan_configuration_permissions() + )): + return user_has_permission(user, obj.product, permission) + if ( + isinstance(obj, Product_Type_Member) + and permission in Permissions.get_product_type_member_permissions() + 
): + if permission == Permissions.Product_Type_Member_Delete: + # Every member is allowed to remove himself + return obj.user == user or user_has_permission( + user, obj.product_type, permission, + ) + return user_has_permission(user, obj.product_type, permission) + if ( + isinstance(obj, Product_Member) + and permission in Permissions.get_product_member_permissions() + ): + if permission == Permissions.Product_Member_Delete: + # Every member is allowed to remove himself + return obj.user == user or user_has_permission( + user, obj.product, permission, + ) + return user_has_permission(user, obj.product, permission) + if ( + isinstance(obj, Product_Type_Group) + and permission in Permissions.get_product_type_group_permissions() + ): + return user_has_permission(user, obj.product_type, permission) + if ( + isinstance(obj, Product_Group) + and permission in Permissions.get_product_group_permissions() + ): + return user_has_permission(user, obj.product, permission) + if ( + isinstance(obj, Dojo_Group) + and permission in Permissions.get_group_permissions() + ): + # Check if the user has a role for the group with the requested + # permissions + group_member = get_group_member(user, obj) + return group_member is not None and role_has_permission( + group_member.role.id, permission, + ) + if ( + isinstance(obj, Dojo_Group_Member) + and permission in Permissions.get_group_member_permissions() + ): + if permission == Permissions.Group_Member_Delete: + # Every user is allowed to remove himself + return obj.user == user or user_has_permission( + user, obj.group, permission, + ) + return user_has_permission(user, obj.group, permission) + if ( + isinstance(obj, Cred_Mapping) + and permission in Permissions.get_credential_permissions() + ): + if obj.product: + return user_has_permission(user, obj.product, permission) + if obj.engagement: + return user_has_permission( + user, obj.engagement.product, permission, + ) + if obj.test: + return user_has_permission( + user, 
obj.test.engagement.product, permission, + ) + if obj.finding: + return user_has_permission( + user, obj.finding.test.engagement.product, permission, + ) + return None + msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}" + raise NoAuthorizationImplementedError(msg) + + +def user_has_global_permission(user, permission): + if not user: + return False + + if user.is_anonymous: + return False + + if user.is_superuser: + return True + + if permission == Permissions.Product_Type_Add: + if user_has_configuration_permission(user, "dojo.add_product_type"): + return True + + if ( + hasattr(user, "global_role") + and user.global_role.role is not None + and role_has_global_permission(user.global_role.role.id, permission) + ): + return True + + for group in get_groups(user): + if ( + hasattr(group, "global_role") + and group.global_role.role is not None + and role_has_global_permission( + group.global_role.role.id, permission, + ) + ): + return True + + return False + + +def user_has_configuration_permission_or_403(user, permission): + if not user_has_configuration_permission(user, permission): + raise PermissionDenied + + +def user_has_permission_or_403(user, obj, permission): + if not user_has_permission(user, obj, permission): + raise PermissionDenied + + +def user_has_global_permission_or_403(user, permission): + if not user_has_global_permission(user, permission): + raise PermissionDenied + + +def get_roles_for_permission(permission): + if not Permissions.has_value(permission): + msg = f"Permission {permission} does not exist" + raise PermissionDoesNotExistError(msg) + roles_for_permissions = set() + roles = get_roles_with_permissions() + for role in roles: + permissions = roles.get(role) + if permission in permissions: + roles_for_permissions.add(role) + return roles_for_permissions + + +def role_has_permission(role, permission): + if role is None: + return False + if not Roles.has_value(role): + msg = f"Role {role} does not 
exist" + raise RoleDoesNotExistError(msg) + roles = get_roles_with_permissions() + permissions = roles.get(role) + if not permissions: + return False + return permission in permissions + + +def role_has_global_permission(role, permission): + if role is None: + return False + if not Roles.has_value(role): + msg = f"Role {role} does not exist" + raise RoleDoesNotExistError(msg) + roles = get_global_roles_with_permissions() + permissions = roles.get(role) + if permissions and permission in permissions: + return True + return role_has_permission(role, permission) + + +class NoAuthorizationImplementedError(Exception): + def __init__(self, message): + self.message = message + + +class PermissionDoesNotExistError(Exception): + def __init__(self, message): + self.message = message + + +class RoleDoesNotExistError(Exception): + def __init__(self, message): + self.message = message + + +def get_product_member(user, product): + return get_product_member_dict(user).get(product.id) + + +@cache_for_request +def get_product_member_dict(user): + pm_dict = {} + for product_member in ( + Product_Member.objects.select_related("product") + .select_related("role") + .filter(user=user) + ): + pm_dict[product_member.product.id] = product_member + return pm_dict + + +def get_product_type_member(user, product_type): + return get_product_type_member_dict(user).get(product_type.id) + + +@cache_for_request +def get_product_type_member_dict(user): + ptm_dict = {} + for product_type_member in ( + Product_Type_Member.objects.select_related("product_type") + .select_related("role") + .filter(user=user) + ): + ptm_dict[product_type_member.product_type.id] = product_type_member + return ptm_dict + + +def get_product_groups(user, product): + return get_product_groups_dict(user).get(product.id, []) + + +@cache_for_request +def get_product_groups_dict(user): + pg_dict = {} + for product_group in ( + Product_Group.objects.select_related("product") + .select_related("role") + .filter(group__users=user) 
+ ): + pgu_list = [] if pg_dict.get(product_group.product.id) is None else pg_dict[product_group.product.id] + pgu_list.append(product_group) + pg_dict[product_group.product.id] = pgu_list + return pg_dict + + +def get_product_type_groups(user, product_type): + return get_product_type_groups_dict(user).get(product_type.id, []) + + +@cache_for_request +def get_product_type_groups_dict(user): + pgt_dict = {} + for product_type_group in ( + Product_Type_Group.objects.select_related("product_type") + .select_related("role") + .filter(group__users=user) + ): + if pgt_dict.get(product_type_group.product_type.id) is None: + pgtu_list = [] + else: + pgtu_list = pgt_dict[product_type_group.product_type.id] + pgtu_list.append(product_type_group) + pgt_dict[product_type_group.product_type.id] = pgtu_list + return pgt_dict + + +@cache_for_request +def get_groups(user): + return Dojo_Group.objects.select_related("global_role").filter(users=user) + + +def get_group_member(user, group): + return get_group_members_dict(user).get(group.id) + + +@cache_for_request +def get_group_members_dict(user): + gu_dict = {} + for group_member in ( + Dojo_Group_Member.objects.select_related("group") + .select_related("role") + .filter(user=user) + ): + gu_dict[group_member.group.id] = group_member + return gu_dict diff --git a/dojo/filters.py b/dojo/filters.py index 8e77ddac3aa..bbf7440473e 100644 --- a/dojo/filters.py +++ b/dojo/filters.py @@ -1,3816 +1,3816 @@ -import collections -import decimal -import logging -import warnings -from datetime import datetime, timedelta - -import six -import tagulous -from auditlog.models import LogEntry -from django import forms -from django.apps import apps -from django.conf import settings -from django.contrib.contenttypes.models import ContentType -from django.db.models import Count, JSONField, Q -from django.forms import HiddenInput -from django.utils.timezone import now, tzinfo -from django.utils.translation import gettext_lazy as _ -from django_filters 
import ( - BooleanFilter, - CharFilter, - DateFilter, - DateFromToRangeFilter, - DateTimeFilter, - FilterSet, - ModelChoiceFilter, - ModelMultipleChoiceFilter, - MultipleChoiceFilter, - NumberFilter, - OrderingFilter, - RangeFilter, -) -from django_filters import rest_framework as filters -from django_filters.filters import ChoiceFilter -from drf_spectacular.types import OpenApiTypes -from drf_spectacular.utils import extend_schema_field -from polymorphic.base import ManagerInheritanceWarning - -# from tagulous.forms import TagWidget -# import tagulous -from dojo.authorization.roles_permissions import Permissions -from dojo.endpoint.queries import get_authorized_endpoints -from dojo.engagement.queries import get_authorized_engagements -from dojo.finding.helper import ( - ACCEPTED_FINDINGS_QUERY, - CLOSED_FINDINGS_QUERY, - FALSE_POSITIVE_FINDINGS_QUERY, - INACTIVE_FINDINGS_QUERY, - NOT_ACCEPTED_FINDINGS_QUERY, - OPEN_FINDINGS_QUERY, - OUT_OF_SCOPE_FINDINGS_QUERY, - UNDER_REVIEW_QUERY, - VERIFIED_FINDINGS_QUERY, - WAS_ACCEPTED_FINDINGS_QUERY, -) -from dojo.finding.queries import get_authorized_findings -from dojo.finding_group.queries import get_authorized_finding_groups -from dojo.labels import get_labels -from dojo.models import ( - EFFORT_FOR_FIXING_CHOICES, - ENGAGEMENT_STATUS_CHOICES, - IMPORT_ACTIONS, - SEVERITY_CHOICES, - App_Analysis, - ChoiceQuestion, - Cred_Mapping, - Development_Environment, - Dojo_Group, - Dojo_User, - DojoMeta, - Endpoint, - Endpoint_Status, - Engagement, - Engagement_Survey, - Finding, - Finding_Group, - Finding_Template, - Note_Type, - Product, - Product_API_Scan_Configuration, - Product_Type, - Question, - Risk_Acceptance, - Test, - Test_Import, - Test_Import_Finding_Action, - Test_Type, - TextQuestion, - User, - Vulnerability_Id, -) -from dojo.product.queries import get_authorized_products -from dojo.product_type.queries import get_authorized_product_types -from dojo.risk_acceptance.queries import get_authorized_risk_acceptances 
-from dojo.test.queries import get_authorized_tests -from dojo.user.queries import get_authorized_users -from dojo.utils import get_system_setting, is_finding_groups_enabled, truncate_timezone_aware - -logger = logging.getLogger(__name__) - -labels = get_labels() - -BOOLEAN_CHOICES = (("false", "No"), ("true", "Yes")) -EARLIEST_FINDING = None - - -def custom_filter(queryset, name, value): - values = value.split(",") - cust_filter = (f"{name}__in") - return queryset.filter(Q(**{cust_filter: values})) - - -def custom_vulnerability_id_filter(queryset, name, value): - values = value.split(",") - ids = Vulnerability_Id.objects \ - .filter(vulnerability_id__in=values) \ - .values_list("finding_id", flat=True) - return queryset.filter(id__in=ids) - - -def vulnerability_id_filter(queryset, name, value): - ids = Vulnerability_Id.objects \ - .filter(vulnerability_id=value) \ - .values_list("finding_id", flat=True) - return queryset.filter(id__in=ids) - - -class NumberInFilter(filters.BaseInFilter, filters.NumberFilter): - pass - - -class CharFieldInFilter(filters.BaseInFilter, filters.CharFilter): - def __init__(self, *args, **kwargs): - super(CharFilter, self).__init__(*args, **kwargs) - - -class CharFieldFilterANDExpression(CharFieldInFilter): - def filter(self, queryset, value): - # Catch the case where a value if not supplied - if not value: - return queryset - # Do the filtering - objects = set(value.split(",")) - return ( - queryset.filter(**{f"{self.field_name}__in": objects}) - .annotate(object_count=Count(self.field_name)) - .filter(object_count=len(objects)) - ) - - -class FindingStatusFilter(ChoiceFilter): - def any(self, qs, name): - return qs - - def open(self, qs, name): - return qs.filter(OPEN_FINDINGS_QUERY) - - def verified(self, qs, name): - return qs.filter(VERIFIED_FINDINGS_QUERY) - - def out_of_scope(self, qs, name): - return qs.filter(OUT_OF_SCOPE_FINDINGS_QUERY) - - def false_positive(self, qs, name): - return qs.filter(FALSE_POSITIVE_FINDINGS_QUERY) - 
- def inactive(self, qs, name): - return qs.filter(INACTIVE_FINDINGS_QUERY) - - def risk_accepted(self, qs, name): - return qs.filter(ACCEPTED_FINDINGS_QUERY) - - def closed(self, qs, name): - return qs.filter(CLOSED_FINDINGS_QUERY) - - def under_review(self, qs, name): - return qs.filter(UNDER_REVIEW_QUERY) - - options = { - None: (_("Any"), any), - 0: (_("Open"), open), - 1: (_("Verified"), verified), - 2: (_("Out Of Scope"), out_of_scope), - 3: (_("False Positive"), false_positive), - 4: (_("Inactive"), inactive), - 5: (_("Risk Accepted"), risk_accepted), - 6: (_("Closed"), closed), - 7: (_("Under Review"), under_review), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - earliest_finding = get_earliest_finding(qs) - if earliest_finding is not None: - start_date = datetime.combine( - earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) - self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) - self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.field_name) - - -class FindingSLAFilter(ChoiceFilter): - def any(self, qs, name): - return qs - - def sla_satisfied(self, qs, name): - # return findings that have an sla expiration date after today or no sla expiration date - return qs.filter(Q(sla_expiration_date__isnull=True) | Q(sla_expiration_date__gt=now().date())) - - def sla_violated(self, qs, name): - # return active findings that have an sla expiration date before today - return qs.filter( - Q( - active=True, - false_p=False, - duplicate=False, - out_of_scope=False, - risk_accepted=False, - is_mitigated=False, - mitigated=None, - ) & Q(sla_expiration_date__lt=now().date()), - ) - - options = { - None: (_("Any"), any), - 0: (_("False"), 
sla_satisfied), - 1: (_("True"), sla_violated), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.field_name) - - -class FindingHasJIRAFilter(ChoiceFilter): - def no_jira(self, qs, name): - return qs.filter(Q(jira_issue=None) & Q(finding_group__jira_issue=None)) - - def any_jira(self, qs, name): - return qs.filter(~Q(jira_issue=None) | ~Q(finding_group__jira_issue=None)) - - def all_items(self, qs, name): - return qs - - options = { - 0: (_("Yes"), any_jira), - 1: (_("No"), no_jira), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - return self.all_items(qs, self.field_name) - - return self.options[value][1](self, qs, self.field_name) - - -class ProductSLAFilter(ChoiceFilter): - def any(self, qs, name): - return qs - - def sla_satisifed(self, qs, name): - for product in qs: - if product.violates_sla(): - qs = qs.exclude(id=product.id) - return qs - - def sla_violated(self, qs, name): - for product in qs: - if not product.violates_sla(): - qs = qs.exclude(id=product.id) - return qs - - options = { - None: (_("Any"), any), - 0: (_("False"), sla_satisifed), - 1: (_("True"), sla_violated), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.field_name) - - -def get_earliest_finding(queryset=None): - if queryset is 
None: # don't to 'if not queryset' which will trigger the query - queryset = Finding.objects.all() - - try: - EARLIEST_FINDING = queryset.earliest("date") - except (Finding.DoesNotExist, Endpoint_Status.DoesNotExist): - EARLIEST_FINDING = None - return EARLIEST_FINDING - - -def cwe_options(queryset): - cwe = {} - cwe = dict([cwe, cwe] - for cwe in queryset.order_by().values_list("cwe", flat=True).distinct() - if isinstance(cwe, int) and cwe is not None and cwe > 0) - cwe = collections.OrderedDict(sorted(cwe.items())) - return list(cwe.items()) - - -class DojoFilter(FilterSet): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - for field in ["tags", "test__tags", "test__engagement__tags", "test__engagement__product__tags", - "not_tags", "not_test__tags", "not_test__engagement__tags", "not_test__engagement__product__tags"]: - if field in self.form.fields: - tags_filter = self.filters["tags"] - model = tags_filter.model - - self.form.fields[field] = model._meta.get_field("tags").formfield() - # we defer applying the select2 autocomplete because there can be multiple forms on the same page - # and form.js would then apply select2 multiple times, resulting in duplicated fields - # the initialization now happens in filter_js_snippet.html - self.form.fields[field].widget.tag_options += tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True}) - tagged_model, exclude = get_tags_model_from_field_name(field) - if tagged_model: # only if not the normal tags field - self.form.fields[field].label = get_tags_label_from_model(tagged_model) - self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by("name") - - if exclude: - self.form.fields[field].label = "Not " + self.form.fields[field].label - - def filter_queryset(self, queryset): - qs = super().filter_queryset(queryset) - if hasattr(self, "form") and hasattr(self.form, "cleaned_data"): - for name, f in self.filters.items(): - 
field_name = getattr(f, "field_name", "") or "" - # Only apply distinct for tag lookups that can duplicate base rows - if "tags__name" in field_name: - value = self.form.cleaned_data.get(name, None) - if value not in (None, "", [], (), {}): - lookup_expr = getattr(f, "lookup_expr", None) - is_exclude = getattr(f, "exclude", False) - needs_distinct = ( - is_exclude - or lookup_expr in { - "in", - "contains", - "icontains", - "startswith", - "istartswith", - "endswith", - "iendswith", - } - ) - # exact/iexact typically won't duplicate rows - if needs_distinct: - return qs.distinct() - return qs - - -def get_tags_model_from_field_name(field): - exclude = False - if field.startswith("not_"): - field = field.replace("not_", "") - exclude = True - try: - parts = field.split("__") - model_name = parts[-2] - return apps.get_model(f"dojo.{model_name}", require_ready=True), exclude - except Exception: - return None, exclude - - -def get_tags_label_from_model(model): - if model: - if model is Product_Type: - return labels.ORG_FILTERS_TAGS_LABEL - if model is Product: - return labels.ASSET_FILTERS_TAGS_LABEL - return f"Tags ({model.__name__.title()})" - return "Tags (Unknown)" - - -def get_finding_filterset_fields(*, metrics=False, similar=False, filter_string_matching=False): - fields = [] - - if similar: - fields.extend([ - "id", - "hash_code", - ]) - - fields.extend(["title", "component_name", "component_version"]) - - if metrics: - fields.extend([ - "start_date", - "end_date", - ]) - - fields.extend([ - "date", - "cwe", - "severity", - "last_reviewed", - "last_status_update", - "mitigated", - "reporter", - "reviewers", - ]) - - if filter_string_matching: - fields.extend([ - "reporter", - "reviewers", - "test__engagement__product__prod_type__name", - "test__engagement__product__name", - "test__engagement__name", - "test__title", - ]) - else: - fields.extend([ - "reporter", - "reviewers", - "test__engagement__product__prod_type", - "test__engagement__product", - 
"test__engagement", - "test", - ]) - - fields.extend([ - "test__test_type", - "test__engagement__version", - "test__version", - "endpoints", - "status", - "active", - "verified", - "duplicate", - "is_mitigated", - "out_of_scope", - "false_p", - "has_component", - "has_notes", - "file_path", - "unique_id_from_tool", - "vuln_id_from_tool", - "service", - "epss_score", - "epss_score_range", - "epss_percentile", - "epss_percentile_range", - "known_exploited", - "ransomware_used", - "kev_date", - "kev_before", - "kev_after", - "fix_available", - ]) - - if similar: - fields.extend([ - "id", - ]) - - fields.extend([ - "param", - "payload", - "risk_acceptance", - ]) - - if get_system_setting("enable_jira"): - fields.extend([ - "has_jira_issue", - "jira_creation", - "jira_change", - "jira_issue__jira_key", - ]) - - if is_finding_groups_enabled(): - if filter_string_matching: - fields.extend([ - "has_finding_group", - "finding_group__name", - ]) - else: - fields.extend([ - "has_finding_group", - "finding_group", - ]) - - if get_system_setting("enable_jira"): - fields.extend([ - "has_jira_group_issue", - ]) - - return fields - - -class FindingTagFilter(DojoFilter): - tag = CharFilter( - field_name="tags__name", - lookup_expr="icontains", - label="Tag name contains", - help_text="Search for tags on a Finding that contain a given pattern") - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Finding.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected tags (OR logic)", - ) - - tags_and = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Finding.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected tags (AND logic)", - label="Tags (AND)", - conjoined=True, - ) - - test__tags = ModelMultipleChoiceFilter( - field_name="test__tags__name", - to_field_name="name", - queryset=Test.tags.tag_model.objects.all().order_by("name"), 
- help_text="Filter Findings by the selected Test tags (OR logic)", - label="Test Tags", - ) - - test__tags_and = ModelMultipleChoiceFilter( - field_name="test__tags__name", - to_field_name="name", - queryset=Test.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected Test tags (AND logic)", - label="Test Tags (AND)", - conjoined=True, - ) - - test__engagement__tags = ModelMultipleChoiceFilter( - field_name="test__engagement__tags__name", - to_field_name="name", - queryset=Engagement.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected Engagement tags (OR logic)", - label="Engagement Tags", - ) - - test__engagement__tags_and = ModelMultipleChoiceFilter( - field_name="test__engagement__tags__name", - to_field_name="name", - queryset=Engagement.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected Engagement tags (AND logic)", - label="Engagement Tags (AND)", - conjoined=True, - ) - - test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name="test__engagement__product__tags__name", - to_field_name="name", - queryset=Product.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected Product tags (OR logic)", - label="Product Tags", - ) - - test__engagement__product__tags_and = ModelMultipleChoiceFilter( - field_name="test__engagement__product__tags__name", - to_field_name="name", - queryset=Product.tags.tag_model.objects.all().order_by("name"), - help_text="Filter Findings by the selected Product tags (AND logic)", - label="Product Tags (AND)", - conjoined=True, - ) - - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Finding.tags.tag_model.objects.all().order_by("name"), - help_text="Search for tags on a Finding that contain a given pattern, and exclude them", - exclude=True) - not_test__tags = ModelMultipleChoiceFilter( - field_name="test__tags__name", - 
to_field_name="name", - label="Test without tags", - queryset=Test.tags.tag_model.objects.all().order_by("name"), - help_text="Search for tags on a Test that contain a given pattern, and exclude them", - exclude=True) - not_test__engagement__tags = ModelMultipleChoiceFilter( - field_name="test__engagement__tags__name", - to_field_name="name", - label="Engagement without tags", - queryset=Engagement.tags.tag_model.objects.all().order_by("name"), - help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", - exclude=True) - not_test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name="test__engagement__product__tags__name", - to_field_name="name", - label=labels.ASSET_FILTERS_ASSETS_WITHOUT_TAGS_LABEL, - queryset=Product.tags.tag_model.objects.all().order_by("name"), - help_text=labels.ASSET_FILTERS_ASSETS_WITHOUT_TAGS_HELP, - exclude=True) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - -class FindingTagStringFilter(FilterSet): - tags_contains = CharFilter( - label="Finding Tag Contains", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - tags = CharFilter( - label="Finding Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match") - test__tags_contains = CharFilter( - label="Test Tag Contains", - field_name="test__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - test__tags = CharFilter( - label="Test Tag", - field_name="test__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match") - test__engagement__tags_contains = CharFilter( - label="Engagement Tag Contains", - field_name="test__engagement__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - test__engagement__tags 
= CharFilter( - label="Engagement Tag", - field_name="test__engagement__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match") - test__engagement__product__tags_contains = CharFilter( - label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL, - field_name="test__engagement__product__tags__name", - lookup_expr="icontains", - help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP) - test__engagement__product__tags = CharFilter( - label=labels.ASSET_FILTERS_TAG_ASSET_LABEL, - field_name="test__engagement__product__tags__name", - lookup_expr="iexact", - help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP) - - not_tags_contains = CharFilter( - label="Finding Tag Does Not Contain", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern, and exclude them", - exclude=True) - not_tags = CharFilter( - label="Not Finding Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match, and exclude them", - exclude=True) - not_test__tags_contains = CharFilter( - label="Test Tag Does Not Contain", - field_name="test__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Test that contain a given pattern, and exclude them", - exclude=True) - not_test__tags = CharFilter( - label="Not Test Tag", - field_name="test__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Test that are an exact match, and exclude them", - exclude=True) - not_test__engagement__tags_contains = CharFilter( - label="Engagement Tag Does Not Contain", - field_name="test__engagement__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", - exclude=True) - not_test__engagement__tags = CharFilter( - label="Not Engagement Tag", - field_name="test__engagement__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a 
Engagement that are an exact match, and exclude them", - exclude=True) - not_test__engagement__product__tags_contains = CharFilter( - label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL, - field_name="test__engagement__product__tags__name", - lookup_expr="icontains", - help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP, - exclude=True) - not_test__engagement__product__tags = CharFilter( - label=labels.ASSET_FILTERS_TAG_NOT_LABEL, - field_name="test__engagement__product__tags__name", - lookup_expr="iexact", - help_text=labels.ASSET_FILTERS_TAG_NOT_HELP, - exclude=True) - - def delete_tags_from_form(self, tag_list: list): - for tag in tag_list: - self.form.fields.pop(tag, None) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - -class DateRangeFilter(ChoiceFilter): - options = { - None: (_("Any date"), lambda qs, _: qs.all()), - 1: (_("Today"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - f"{name}__month": now().month, - f"{name}__day": now().day, - })), - 2: (_("Past 7 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=7)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 3: (_("Past 30 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=30)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 4: (_("Past 90 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=90)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 5: (_("Current month"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - f"{name}__month": now().month, - })), - 6: (_("Current year"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - })), - 7: (_("Past year"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=365)), - f"{name}__lt": 
truncate_timezone_aware(now() + timedelta(days=1)), - })), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](qs, self.field_name) - - -class DateRangeOmniFilter(ChoiceFilter): - options = { - None: (_("Any date"), lambda qs, _: qs.all()), - 1: (_("Today"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - f"{name}__month": now().month, - f"{name}__day": now().day, - })), - 2: (_("Next 7 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=7)), - })), - 3: (_("Next 30 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=30)), - })), - 4: (_("Next 90 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=90)), - })), - 5: (_("Past 7 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=7)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 6: (_("Past 30 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=30)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 7: (_("Past 90 days"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=90)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 8: (_("Current month"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - f"{name}__month": now().month, - })), - 9: 
(_("Past year"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=365)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), - })), - 10: (_("Current year"), lambda qs, name: qs.filter(**{ - f"{name}__year": now().year, - })), - 11: (_("Next year"), lambda qs, name: qs.filter(**{ - f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), - f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=365)), - })), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](qs, self.field_name) - - -class ReportBooleanFilter(ChoiceFilter): - options = { - None: (_("Either"), lambda qs, _: qs.all()), - 1: (_("Yes"), lambda qs, name: qs.filter(**{ - f"{name}": True, - })), - 2: (_("No"), lambda qs, name: qs.filter(**{ - f"{name}": False, - })), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](qs, self.field_name) - - -class ReportRiskAcceptanceFilter(ChoiceFilter): - - def any(self, qs, name): - return qs.all() - - def accepted(self, qs, name): - # return qs.filter(risk_acceptance__isnull=False) - return qs.filter(ACCEPTED_FINDINGS_QUERY) - - def not_accepted(self, qs, name): - return qs.filter(NOT_ACCEPTED_FINDINGS_QUERY) - - def was_accepted(self, qs, name): - return qs.filter(WAS_ACCEPTED_FINDINGS_QUERY) - - options = { - None: (_("Either"), any), - 1: (_("Yes"), accepted), - 2: (_("No"), not_accepted), - 3: (_("Expired"), was_accepted), - } - - def __init__(self, *args, **kwargs): - 
kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.field_name) - - -class MetricsDateRangeFilter(ChoiceFilter): - def any(self, qs, name): - earliest_finding = get_earliest_finding(qs) - if earliest_finding is not None: - start_date = datetime.combine( - earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) - self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) - self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) - return qs.all() - return None - - def current_month(self, qs, name): - self.start_date = datetime(now().year, now().month, 1, 0, 0, 0).replace(tzinfo=tzinfo()) - self.end_date = now() - return qs.filter(**{ - f"{name}__year": self.start_date.year, - f"{name}__month": self.start_date.month, - }) - - def current_year(self, qs, name): - self.start_date = datetime(now().year, 1, 1, 0, 0, 0).replace(tzinfo=tzinfo()) - self.end_date = now() - return qs.filter(**{ - f"{name}__year": now().year, - }) - - def past_x_days(self, qs, name, days): - self.start_date = truncate_timezone_aware(now() - timedelta(days=days)) - self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) - return qs.filter(**{ - f"{name}__gte": self.start_date, - f"{name}__lt": self.end_date, - }) - - def past_seven_days(self, qs, name): - return self.past_x_days(qs, name, 7) - - def past_thirty_days(self, qs, name): - return self.past_x_days(qs, name, 30) - - def past_ninety_days(self, qs, name): - return self.past_x_days(qs, name, 90) - - def past_six_months(self, qs, name): - return self.past_x_days(qs, name, 183) - - def past_year(self, qs, name): - return self.past_x_days(qs, name, 365) - - options = { - None: (_("Past 30 days"), past_thirty_days), - 1: (_("Past 7 days"), past_seven_days), - 2: (_("Past 
90 days"), past_ninety_days), - 3: (_("Current month"), current_month), - 4: (_("Current year"), current_year), - 5: (_("Past 6 Months"), past_six_months), - 6: (_("Past year"), past_year), - 7: (_("Any date"), any), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - if value == 8: - return qs - earliest_finding = get_earliest_finding(qs) - if earliest_finding is not None: - start_date = datetime.combine( - earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) - self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) - self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.field_name) - - -class ProductComponentFilter(DojoFilter): - component_name = CharFilter(lookup_expr="icontains", label="Module Name") - component_version = CharFilter(lookup_expr="icontains", label="Module Version") - - o = OrderingFilter( - fields=( - ("component_name", "component_name"), - ("component_version", "component_version"), - ("active", "active"), - ("duplicate", "duplicate"), - ("total", "total"), - ), - field_labels={ - "component_name": "Component Name", - "component_version": "Component Version", - "active": "Active", - "duplicate": "Duplicate", - "total": "Total", - }, - ) - - -class ComponentFilterWithoutObjectLookups(ProductComponentFilter): - test__engagement__product__prod_type__name = CharFilter( - field_name="test__engagement__product__prod_type__name", - lookup_expr="iexact", - label=labels.ORG_FILTERS_NAME_LABEL, - help_text=labels.ORG_FILTERS_NAME_HELP) - test__engagement__product__prod_type__name_contains = CharFilter( - field_name="test__engagement__product__prod_type__name", - lookup_expr="icontains", - label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, - 
help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) - test__engagement__product__name = CharFilter( - field_name="test__engagement__product__name", - lookup_expr="iexact", - label=labels.ASSET_FILTERS_NAME_LABEL, - help_text=labels.ASSET_FILTERS_NAME_HELP) - test__engagement__product__name_contains = CharFilter( - field_name="test__engagement__product__name", - lookup_expr="icontains", - label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) - - -class ComponentFilter(ProductComponentFilter): - test__engagement__product__prod_type = ModelMultipleChoiceFilter( - queryset=Product_Type.objects.none(), - label=labels.ORG_FILTERS_LABEL) - test__engagement__product = ModelMultipleChoiceFilter( - queryset=Product.objects.none(), - label=labels.ASSET_FILTERS_LABEL) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields[ - "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) - self.form.fields[ - "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) - - -class EngagementDirectFilterHelper(FilterSet): - name = CharFilter(lookup_expr="icontains", label="Engagement name contains") - version = CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version") - test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") - product__name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) - status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status") - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - target_start = DateRangeFilter() 
- target_end = DateRangeFilter() - test__engagement__product__lifecycle = MultipleChoiceFilter( - choices=Product.LIFECYCLE_CHOICES, - label=labels.ASSET_LIFECYCLE_LABEL, - null_label="Empty") - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("target_start", "target_start"), - ("name", "name"), - ("product__name", "product__name"), - ("product__prod_type__name", "product__prod_type__name"), - ("lead__first_name", "lead__first_name"), - ), - field_labels={ - "target_start": "Start date", - "name": "Engagement", - "product__name": labels.ASSET_FILTERS_NAME_LABEL, - "product__prod_type__name": labels.ORG_FILTERS_LABEL, - "lead__first_name": "Lead", - }, - ) - - -class EngagementDirectFilter(EngagementDirectFilterHelper, DojoFilter): - lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") - product__prod_type = ModelMultipleChoiceFilter( - queryset=Product_Type.objects.none(), - label=labels.ORG_FILTERS_LABEL) - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) - self.form.fields["lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ - .filter(engagement__lead__isnull=False).distinct() - - class Meta: - model = Engagement - fields = ["product__name", "product__prod_type"] - - -class EngagementDirectFilterWithoutObjectLookups(EngagementDirectFilterHelper): - lead = CharFilter( - field_name="lead__username", - lookup_expr="iexact", - label="Lead Username", - help_text="Search for Lead username that are an exact match") - lead_contains = CharFilter( - 
field_name="lead__username", - lookup_expr="icontains", - label="Lead Username Contains", - help_text="Search for Lead username that contain a given pattern") - product__prod_type__name = CharFilter( - field_name="product__prod_type__name", - lookup_expr="iexact", - label=labels.ORG_FILTERS_NAME_LABEL, - help_text=labels.ORG_FILTERS_NAME_HELP) - product__prod_type__name_contains = CharFilter( - field_name="product__prod_type__name", - lookup_expr="icontains", - label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) - - class Meta: - model = Engagement - fields = ["product__name"] - - -class EngagementFilterHelper(FilterSet): - name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - engagement__name = CharFilter(lookup_expr="icontains", label="Engagement name contains") - engagement__version = CharFilter(field_name="engagement__version", lookup_expr="icontains", label="Engagement version") - engagement__test__version = CharFilter(field_name="engagement__test__version", lookup_expr="icontains", label="Test version") - engagement__product__lifecycle = MultipleChoiceFilter( - choices=Product.LIFECYCLE_CHOICES, - label=labels.ASSET_LIFECYCLE_LABEL, - null_label="Empty") - engagement__status = MultipleChoiceFilter( - choices=ENGAGEMENT_STATUS_CHOICES, - label="Status") - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ("prod_type__name", "prod_type__name"), - ), - field_labels={ - "name": labels.ASSET_FILTERS_NAME_LABEL, - "prod_type__name": labels.ORG_FILTERS_LABEL, - }, - ) - - -class EngagementFilter(EngagementFilterHelper, DojoFilter): - 
engagement__lead = ModelChoiceFilter( - queryset=Dojo_User.objects.none(), - label="Lead") - prod_type = ModelMultipleChoiceFilter( - queryset=Product_Type.objects.none(), - label=labels.ORG_FILTERS_LABEL) - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) - self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ - .filter(engagement__lead__isnull=False).distinct() - self.form.fields["tags"].help_text = labels.ASSET_FILTERS_TAGS_HELP - self.form.fields["not_tags"].help_text = labels.ASSET_FILTERS_NOT_TAGS_HELP - - class Meta: - model = Product - fields = ["name", "prod_type"] - - -class ProductEngagementsFilter(DojoFilter): - engagement__name = CharFilter(field_name="name", lookup_expr="icontains", label="Engagement name contains") - engagement__lead = ModelChoiceFilter(field_name="lead", queryset=Dojo_User.objects.none(), label="Lead") - engagement__version = CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version") - engagement__test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") - engagement__status = MultipleChoiceFilter(field_name="status", choices=ENGAGEMENT_STATUS_CHOICES, - label="Status") - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ - .filter(engagement__lead__isnull=False).distinct() - - class Meta: - model = Engagement - fields = [] - - -class 
ProductEngagementsFilterWithoutObjectLookups(ProductEngagementsFilter): - engagement__lead = CharFilter( - field_name="lead__username", - lookup_expr="iexact", - label="Lead Username", - help_text="Search for Lead username that are an exact match") - - -class EngagementFilterWithoutObjectLookups(EngagementFilterHelper): - engagement__lead = CharFilter( - field_name="engagement__lead__username", - lookup_expr="iexact", - label="Lead Username", - help_text="Search for Lead username that are an exact match") - engagement__lead_contains = CharFilter( - field_name="engagement__lead__username", - lookup_expr="icontains", - label="Lead Username Contains", - help_text="Search for Lead username that contain a given pattern") - prod_type__name = CharFilter( - field_name="prod_type__name", - lookup_expr="iexact", - label=labels.ORG_FILTERS_LABEL, - help_text=labels.ORG_FILTERS_LABEL_HELP) - prod_type__name_contains = CharFilter( - field_name="prod_type__name", - lookup_expr="icontains", - label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) - - class Meta: - model = Product - fields = ["name"] - - -class ProductEngagementFilterHelper(FilterSet): - version = CharFilter(lookup_expr="icontains", label="Engagement version") - test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") - name = CharFilter(lookup_expr="icontains") - status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status") - target_start = DateRangeFilter() - target_end = DateRangeFilter() - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ("version", "version"), - ("target_start", "target_start"), - ("target_end", "target_end"), - ("status", "status"), - 
("lead", "lead"), - ), - field_labels={ - "name": "Engagement Name", - }, - ) - - class Meta: - model = Product - fields = ["name"] - - -class ProductEngagementFilter(ProductEngagementFilterHelper, DojoFilter): - lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["lead"].queryset = get_authorized_users( - Permissions.Product_Type_View).filter(engagement__lead__isnull=False).distinct() - - -class ProductEngagementFilterWithoutObjectLookups(ProductEngagementFilterHelper, DojoFilter): - lead = CharFilter( - field_name="lead__username", - lookup_expr="iexact", - label="Lead Username", - help_text="Search for Lead username that are an exact match") - lead_contains = CharFilter( - field_name="lead__username", - lookup_expr="icontains", - label="Lead Username Contains", - help_text="Search for Lead username that contain a given pattern") - - -class ApiEngagementFilter(DojoFilter): - product__prod_type = NumberInFilter(field_name="product__prod_type", lookup_expr="in") - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - product__tags = CharFieldInFilter( - field_name="product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) - product__tags__and 
= CharFieldFilterANDExpression( - field_name="product__tags__name", - help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) - - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on model", exclude="True") - not_product__tags = CharFieldInFilter(field_name="product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, - exclude="True") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ("version", "version"), - ("target_start", "target_start"), - ("target_end", "target_end"), - ("status", "status"), - ("lead", "lead"), - ("created", "created"), - ("updated", "updated"), - ), - field_labels={ - "name": "Engagement Name", - }, - - ) - - class Meta: - model = Engagement - fields = ["id", "active", "target_start", - "target_end", "requester", "report_type", - "updated", "threat_model", "api_test", - "pen_test", "status", "product", "name", "version", "tags"] - - -class ProductFilterHelper(FilterSet): - name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_LABEL) - name_exact = CharFilter(field_name="name", lookup_expr="iexact", label=labels.ASSET_FILTERS_NAME_EXACT_LABEL) - business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES, null_label="Empty") - platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES, null_label="Empty") - lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, null_label="Empty") - origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES, null_label="Empty") - external_audience = BooleanFilter(field_name="external_audience") - internet_accessible = BooleanFilter(field_name="internet_accessible") - tag = 
CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag contains") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) - outside_of_sla = ProductSLAFilter(label="Outside of SLA") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ("name_exact", "name_exact"), - ("prod_type__name", "prod_type__name"), - ("business_criticality", "business_criticality"), - ("platform", "platform"), - ("lifecycle", "lifecycle"), - ("origin", "origin"), - ("external_audience", "external_audience"), - ("internet_accessible", "internet_accessible"), - ("findings_count", "findings_count"), - ), - field_labels={ - "name": labels.ASSET_FILTERS_NAME_LABEL, - "name_exact": labels.ASSET_FILTERS_NAME_EXACT_LABEL, - "prod_type__name": labels.ORG_FILTERS_LABEL, - "business_criticality": "Business Criticality", - "platform": "Platform ", - "lifecycle": "Lifecycle ", - "origin": "Origin ", - "external_audience": "External Audience ", - "internet_accessible": "Internet Accessible ", - "findings_count": "Findings Count ", - }, - ) - - -class ProductFilter(ProductFilterHelper, DojoFilter): - prod_type = ModelMultipleChoiceFilter( - queryset=Product_Type.objects.none(), - label=labels.ORG_FILTERS_LABEL) - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Product.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Product.tags.tag_model.objects.all().order_by("name")) - - def __init__(self, *args, **kwargs): - self.user = None - if "user" in kwargs: - self.user = kwargs.pop("user") - super().__init__(*args, **kwargs) - self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) - 
self.form.fields["tags"].help_text = labels.ASSET_FILTERS_TAGS_HELP - self.form.fields["not_tags"].help_text = labels.ASSET_FILTERS_NOT_TAGS_HELP - - class Meta: - model = Product - fields = [ - "name", "name_exact", "prod_type", "business_criticality", - "platform", "lifecycle", "origin", "external_audience", - "internet_accessible", "tags", - ] - - -class ProductFilterWithoutObjectLookups(ProductFilterHelper): - prod_type__name = CharFilter( - field_name="prod_type__name", - lookup_expr="iexact", - label=labels.ORG_FILTERS_NAME_LABEL, - help_text=labels.ORG_FILTERS_NAME_HELP) - prod_type__name_contains = CharFilter( - field_name="prod_type__name", - lookup_expr="icontains", - label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) - - def __init__(self, *args, **kwargs): - kwargs.pop("user", None) - super().__init__(*args, **kwargs) - - class Meta: - model = Product - fields = [ - "name", "name_exact", "business_criticality", "platform", - "lifecycle", "origin", "external_audience", "internet_accessible", - ] - - -class ApiDojoMetaFilter(DojoFilter): - name_case_insensitive = CharFilter(field_name="name", lookup_expr="iexact") - value_case_insensitive = CharFilter(field_name="value", lookup_expr="iexact") - - class Meta: - model = DojoMeta - fields = [ - "id", - "product", - "endpoint", - "finding", - "name", - "value", - ] - - -class ApiProductFilter(DojoFilter): - # BooleanFilter - external_audience = BooleanFilter(field_name="external_audience") - internet_accessible = BooleanFilter(field_name="internet_accessible") - # CharFilter - name = CharFilter(lookup_expr="icontains") - name_exact = CharFilter(field_name="name", lookup_expr="iexact") - description = CharFilter(lookup_expr="icontains") - business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES) - platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES) - lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES) - 
origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES) - # NumberInFilter - id = NumberInFilter(field_name="id", lookup_expr="in") - product_manager = NumberInFilter(field_name="product_manager", lookup_expr="in") - technical_contact = NumberInFilter(field_name="technical_contact", lookup_expr="in") - team_manager = NumberInFilter(field_name="team_manager", lookup_expr="in") - prod_type = NumberInFilter(field_name="prod_type", lookup_expr="in") - tid = NumberInFilter(field_name="tid", lookup_expr="in") - prod_numeric_grade = NumberInFilter(field_name="prod_numeric_grade", lookup_expr="in") - user_records = NumberInFilter(field_name="user_records", lookup_expr="in") - regulations = NumberInFilter(field_name="regulations", lookup_expr="in") - - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, exclude="True") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(ProductSLAFilter()) - - # DateRangeFilter - created = DateRangeFilter() - updated = DateRangeFilter() - # NumberFilter - revenue = NumberFilter() - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("id", "id"), - ("tid", "tid"), - ("name", "name"), - ("created", "created"), - ("prod_numeric_grade", "prod_numeric_grade"), - ("business_criticality", "business_criticality"), - ("platform", 
"platform"), - ("lifecycle", "lifecycle"), - ("origin", "origin"), - ("revenue", "revenue"), - ("external_audience", "external_audience"), - ("internet_accessible", "internet_accessible"), - ("product_manager", "product_manager"), - ("product_manager__first_name", "product_manager__first_name"), - ("product_manager__last_name", "product_manager__last_name"), - ("technical_contact", "technical_contact"), - ("technical_contact__first_name", "technical_contact__first_name"), - ("technical_contact__last_name", "technical_contact__last_name"), - ("team_manager", "team_manager"), - ("team_manager__first_name", "team_manager__first_name"), - ("team_manager__last_name", "team_manager__last_name"), - ("prod_type", "prod_type"), - ("prod_type__name", "prod_type__name"), - ("updated", "updated"), - ("user_records", "user_records"), - ), - ) - - -class PercentageRangeFilter(RangeFilter): - def filter(self, qs, value): - if value is not None: - start = value.start / decimal.Decimal("100.0") if value.start else None - stop = value.stop / decimal.Decimal("100.0") if value.stop else None - value = slice(start, stop) - return super().filter(qs, value) - - -class ApiFindingFilter(DojoFilter): - # BooleanFilter - active = BooleanFilter(field_name="active") - duplicate = BooleanFilter(field_name="duplicate") - dynamic_finding = BooleanFilter(field_name="dynamic_finding") - false_p = BooleanFilter(field_name="false_p") - is_mitigated = BooleanFilter(field_name="is_mitigated") - out_of_scope = BooleanFilter(field_name="out_of_scope") - static_finding = BooleanFilter(field_name="static_finding") - under_defect_review = BooleanFilter(field_name="under_defect_review") - under_review = BooleanFilter(field_name="under_review") - verified = BooleanFilter(field_name="verified") - has_jira = BooleanFilter(field_name="jira_issue", lookup_expr="isnull", exclude=True) - fix_available = BooleanFilter(field_name="fix_available") - # CharFilter - component_version = 
CharFilter(lookup_expr="icontains") - component_name = CharFilter(lookup_expr="icontains") - vulnerability_id = CharFilter(method=custom_vulnerability_id_filter) - description = CharFilter(lookup_expr="icontains") - file_path = CharFilter(lookup_expr="icontains") - hash_code = CharFilter(lookup_expr="icontains") - impact = CharFilter(lookup_expr="icontains") - mitigation = CharFilter(lookup_expr="icontains") - numerical_severity = CharFilter(method=custom_filter, field_name="numerical_severity") - param = CharFilter(lookup_expr="icontains") - payload = CharFilter(lookup_expr="icontains") - references = CharFilter(lookup_expr="icontains") - severity = CharFilter(method=custom_filter, field_name="severity") - severity_justification = CharFilter(lookup_expr="icontains") - steps_to_reproduce = CharFilter(lookup_expr="icontains") - unique_id_from_tool = CharFilter(lookup_expr="icontains") - title = CharFilter(lookup_expr="icontains") - product_name = CharFilter(lookup_expr="engagement__product__name__iexact", field_name="test", label=labels.ASSET_FILTERS_NAME_EXACT_LABEL) - product_name_contains = CharFilter(lookup_expr="engagement__product__name__icontains", field_name="test", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) - product_lifecycle = CharFilter(method=custom_filter, lookup_expr="engagement__product__lifecycle", - field_name="test__engagement__product__lifecycle", label=labels.ASSET_FILTERS_CSV_LIFECYCLES_LABEL) - # DateRangeFilter - created = DateRangeFilter() - date = DateRangeFilter() - discovered_on = DateFilter(field_name="date", lookup_expr="exact") - discovered_before = DateFilter(field_name="date", lookup_expr="lt") - discovered_after = DateFilter(field_name="date", lookup_expr="gt") - jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation") - jira_change = DateRangeFilter(field_name="jira_issue__jira_change") - last_reviewed = DateRangeFilter() - mitigated = DateRangeFilter() - mitigated_on = DateTimeFilter(field_name="mitigated", 
lookup_expr="exact", method="filter_mitigated_on") - mitigated_before = DateTimeFilter(field_name="mitigated", lookup_expr="lt") - mitigated_after = DateTimeFilter(field_name="mitigated", lookup_expr="gt", label="Mitigated After", method="filter_mitigated_after") - # NumberInFilter - cwe = NumberInFilter(field_name="cwe", lookup_expr="in") - defect_review_requested_by = NumberInFilter(field_name="defect_review_requested_by", lookup_expr="in") - endpoints = NumberInFilter(field_name="endpoints", lookup_expr="in") - epss_score = PercentageRangeFilter( - field_name="epss_score", - label="EPSS score range", - help_text=( - "The range of EPSS score percentages to filter on; the min input is a lower bound, " - "the max is an upper bound. Leaving one empty will skip that bound (e.g., leaving " - "the min bound input empty will filter only on the max bound -- filtering on " - '"less than or equal"). Leading 0 required.' - )) - epss_percentile = PercentageRangeFilter( - field_name="epss_percentile", - label="EPSS percentile range", - help_text=( - "The range of EPSS percentiles to filter on; the min input is a lower bound, the max " - "is an upper bound. Leaving one empty will skip that bound (e.g., leaving the min bound " - 'input empty will filter only on the max bound -- filtering on "less than or equal"). Leading 0 required.' 
- )) - found_by = NumberInFilter(field_name="found_by", lookup_expr="in") - id = NumberInFilter(field_name="id", lookup_expr="in") - last_reviewed_by = NumberInFilter(field_name="last_reviewed_by", lookup_expr="in") - mitigated_by = NumberInFilter(field_name="mitigated_by", lookup_expr="in") - nb_occurences = NumberInFilter(field_name="nb_occurences", lookup_expr="in") - reporter = NumberInFilter(field_name="reporter", lookup_expr="in") - scanner_confidence = NumberInFilter(field_name="scanner_confidence", lookup_expr="in") - review_requested_by = NumberInFilter(field_name="review_requested_by", lookup_expr="in") - reviewers = NumberInFilter(field_name="reviewers", lookup_expr="in") - sast_source_line = NumberInFilter(field_name="sast_source_line", lookup_expr="in") - sonarqube_issue = NumberInFilter(field_name="sonarqube_issue", lookup_expr="in") - test__test_type = NumberInFilter(field_name="test__test_type", lookup_expr="in", label="Test Type") - test__engagement = NumberInFilter(field_name="test__engagement", lookup_expr="in") - test__engagement__product = NumberInFilter(field_name="test__engagement__product", lookup_expr="in") - test__engagement__product__prod_type = NumberInFilter(field_name="test__engagement__product__prod_type", lookup_expr="in") - finding_group = NumberInFilter(field_name="finding_group", lookup_expr="in") - - # ReportRiskAcceptanceFilter - risk_acceptance = extend_schema_field(OpenApiTypes.NUMBER)(ReportRiskAcceptanceFilter()) - - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - test__tags = CharFieldInFilter( - field_name="test__tags__name", - lookup_expr="in", - help_text="Comma 
separated list of exact tags present on test (uses OR for multiple values)") - test__tags__and = CharFieldFilterANDExpression( - field_name="test__tags__name", - help_text="Comma separated list of exact tags to match with an AND expression present on test") - test__engagement__tags = CharFieldInFilter( - field_name="test__engagement__tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags present on engagement (uses OR for multiple values)") - test__engagement__tags__and = CharFieldFilterANDExpression( - field_name="test__engagement__tags__name", - help_text="Comma separated list of exact tags to match with an AND expression present on engagement") - test__engagement__product__tags = CharFieldInFilter( - field_name="test__engagement__product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) - test__engagement__product__tags__and = CharFieldFilterANDExpression( - field_name="test__engagement__product__tags__name", - help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on model", exclude="True") - not_test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", exclude="True", help_text="Comma separated list of exact tags present on test") - not_test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on engagement", - exclude="True") - not_test__engagement__product__tags = CharFieldInFilter( - field_name="test__engagement__product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, - exclude="True") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - 
outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(FindingSLAFilter()) - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("active", "active"), - ("component_name", "component_name"), - ("component_version", "component_version"), - ("created", "created"), - ("last_status_update", "last_status_update"), - ("last_reviewed", "last_reviewed"), - ("cwe", "cwe"), - ("date", "date"), - ("duplicate", "duplicate"), - ("dynamic_finding", "dynamic_finding"), - ("false_p", "false_p"), - ("found_by", "found_by"), - ("id", "id"), - ("is_mitigated", "is_mitigated"), - ("numerical_severity", "numerical_severity"), - ("out_of_scope", "out_of_scope"), - ("planned_remediation_date", "planned_remediation_date"), - ("severity", "severity"), - ("sla_expiration_date", "sla_expiration_date"), - ("reviewers", "reviewers"), - ("static_finding", "static_finding"), - ("test__engagement__product__name", "test__engagement__product__name"), - ("title", "title"), - ("under_defect_review", "under_defect_review"), - ("under_review", "under_review"), - ("verified", "verified"), - ), - ) - - class Meta: - model = Finding - exclude = ["url", "thread_id", "notes", "files", - "line", "cve"] - - def filter_mitigated_after(self, queryset, name, value): - if value.hour == 0 and value.minute == 0 and value.second == 0: - value = value.replace(hour=23, minute=59, second=59) - - return queryset.filter(mitigated__gt=value) - - def filter_mitigated_on(self, queryset, name, value): - if value.hour == 0 and value.minute == 0 and value.second == 0: - # we have a simple date without a time, lets get a range from this morning to tonight at 23:59:59:999 - nextday = value + timedelta(days=1) - return queryset.filter(mitigated__gte=value, mitigated__lt=nextday) - - return queryset.filter(mitigated=value) - - -class PercentageFilter(NumberFilter): - def __init__(self, *args, **kwargs): - kwargs["method"] = self.filter_percentage - super().__init__(*args, **kwargs) - - def filter_percentage(self, 
queryset, name, value): - value /= decimal.Decimal("100.0") - # Provide some wiggle room for filtering since the UI rounds to two places (and because floats): - # a user may enter 0.15, but we'll return everything in [0.0015, 0.0016). - # To do this, add to our value 1^(whatever the exponent for our least significant digit place is), but ensure - # that the exponent is at MOST the ten thousandths place so we don't show a range of e.g. [0.2, 0.3). - exponent = min(value.normalize().as_tuple().exponent, -4) - max_val = value + decimal.Decimal(f"1E{exponent}") - lookup_kwargs = { - f"{name}__gte": value, - f"{name}__lt": max_val} - return queryset.filter(**lookup_kwargs) - - -class FindingFilterHelper(FilterSet): - title = CharFilter(lookup_expr="icontains") - date = DateRangeFilter(field_name="date", label="Date Discovered") - on = DateFilter(field_name="date", lookup_expr="exact", label="Discovered On") - before = DateFilter(field_name="date", lookup_expr="lt", label="Discovered Before") - after = DateFilter(field_name="date", lookup_expr="gt", label="Discovered After") - last_reviewed = DateRangeFilter() - last_status_update = DateRangeFilter() - cwe = MultipleChoiceFilter(choices=[]) - vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") - severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) - duplicate = ReportBooleanFilter() - is_mitigated = ReportBooleanFilter() - fix_available = ReportBooleanFilter() - mitigated = DateRangeFilter(field_name="mitigated", label="Mitigated Date") - mitigated_on = DateTimeFilter(field_name="mitigated", lookup_expr="exact", label="Mitigated On", method="filter_mitigated_on") - mitigated_before = DateTimeFilter(field_name="mitigated", lookup_expr="lt", label="Mitigated Before") - mitigated_after = DateTimeFilter(field_name="mitigated", lookup_expr="gt", label="Mitigated After", method="filter_mitigated_after") - planned_remediation_date = DateRangeOmniFilter() - planned_remediation_version = 
    # NOTE(review): this span was diff-mangled; formatting reconstructed. The field
    # assignment that opened this span lost its name to the chunk boundary:
    #   <name?> = CharFilter(lookup_expr="icontains", label=_("Planned remediation version"))
    # (presumably `planned_remediation_version`) — restore the exact name from VCS.
    # Free-text / icontains filters over Finding columns and related objects.
    file_path = CharFilter(lookup_expr="icontains")
    param = CharFilter(lookup_expr="icontains")
    payload = CharFilter(lookup_expr="icontains")
    test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label="Test Type")
    endpoints__host = CharFilter(lookup_expr="icontains", label="Endpoint Host")
    service = CharFilter(lookup_expr="icontains")
    test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version")
    test__version = CharFilter(lookup_expr="icontains", label="Test Version")
    risk_acceptance = ReportRiskAcceptanceFilter(label="Risk Accepted")
    effort_for_fixing = MultipleChoiceFilter(choices=EFFORT_FOR_FIXING_CHOICES)
    # Hidden numeric filters used for deep links (value supplied via query string).
    test_import_finding_action__test_import = NumberFilter(widget=HiddenInput())
    endpoints = NumberFilter(widget=HiddenInput())
    status = FindingStatusFilter(label="Status")
    test__engagement__product__lifecycle = MultipleChoiceFilter(
        choices=Product.LIFECYCLE_CHOICES,
        label=labels.ASSET_LIFECYCLE_LABEL)

    # "Has X" toggles: exclude=True on an isnull lookup means "field is present".
    has_component = BooleanFilter(
        field_name="component_name",
        lookup_expr="isnull",
        exclude=True,
        label="Has Component")
    has_notes = BooleanFilter(
        field_name="notes",
        lookup_expr="isnull",
        exclude=True,
        label="Has notes")

    # Conditional class-body declarations: these filters only exist when the
    # corresponding system feature is enabled at import time.
    if is_finding_groups_enabled():
        has_finding_group = BooleanFilter(
            field_name="finding_group",
            lookup_expr="isnull",
            exclude=True,
            label="Is Grouped")

    if get_system_setting("enable_jira"):
        has_jira_issue = BooleanFilter(
            field_name="jira_issue",
            lookup_expr="isnull",
            exclude=True,
            label="Has JIRA")
        jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation", label="JIRA Creation")
        jira_change = DateRangeFilter(field_name="jira_issue__jira_change", label="JIRA Updated")
        jira_issue__jira_key = CharFilter(field_name="jira_issue__jira_key", lookup_expr="icontains", label="JIRA issue")

        if is_finding_groups_enabled():
            has_jira_group_issue = BooleanFilter(
                field_name="finding_group__jira_issue",
                lookup_expr="isnull",
                exclude=True,
                label="Has Group JIRA")
        has_any_jira = FindingHasJIRAFilter(label="Has Any JIRA")

    outside_of_sla = FindingSLAFilter(label="Outside of SLA")
    has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
    # EPSS filters: exact percentage plus open-ended range variants.
    epss_score = PercentageFilter(field_name="epss_score", label="EPSS score")
    epss_score_range = PercentageRangeFilter(
        field_name="epss_score",
        label="EPSS score range",
        help_text=(
            "The range of EPSS score percentages to filter on; the left input is a lower bound, "
            "the right is an upper bound. Leaving one empty will skip that bound (e.g., leaving "
            "the lower bound input empty will filter only on the upper bound -- filtering on "
            '"less than or equal").'
        ))
    epss_percentile = PercentageFilter(field_name="epss_percentile", label="EPSS percentile")
    epss_percentile_range = PercentageRangeFilter(
        field_name="epss_percentile",
        label="EPSS percentile range",
        help_text=(
            "The range of EPSS percentiles to filter on; the left input is a lower bound, the right "
            "is an upper bound. Leaving one empty will skip that bound (e.g., leaving the lower bound "
            'input empty will filter only on the upper bound -- filtering on "less than or equal").'
        ))
    # KEV (Known Exploited Vulnerabilities) date filters: on / before / after.
    kev_date = DateFilter(field_name="kev_date", lookup_expr="exact", label="Added to KEV On")
    kev_before = DateFilter(field_name="kev_date", lookup_expr="lt", label="Added to KEV Before")
    kev_after = DateFilter(field_name="kev_date", lookup_expr="gt", label="Added to KEV After")

    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ("numerical_severity", "numerical_severity"),
            ("date", "date"),
            ("mitigated", "mitigated"),
            ("fix_available", "fix_available"),
            ("risk_acceptance__created__date",
             "risk_acceptance__created__date"),
            ("last_reviewed", "last_reviewed"),
            ("planned_remediation_date", "planned_remediation_date"),
            ("title", "title"),
            ("test__engagement__product__name",
             "test__engagement__product__name"),
            ("service", "service"),
            ("sla_age_days", "sla_age_days"),
            ("epss_score", "epss_score"),
            ("epss_percentile", "epss_percentile"),
            ("known_exploited", "known_exploited"),
            ("ransomware_used", "ransomware_used"),
            ("kev_date", "kev_date"),
        ),
        field_labels={
            "numerical_severity": "Severity",
            "date": "Date",
            "risk_acceptance__created__date": "Acceptance Date",
            "mitigated": "Mitigated Date",
            "fix_available": "Fix Available",
            "title": "Finding Name",
            "test__engagement__product__name": labels.ASSET_FILTERS_NAME_LABEL,
            "epss_score": "EPSS Score",
            "epss_percentile": "EPSS Percentile",
            "known_exploited": "Known Exploited",
            "ransomware_used": "Ransomware Used",
            "kev_date": "Date added to KEV",
            "sla_age_days": "SLA age (days)",
            "planned_remediation_date": "Planned Remediation",
        },
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def set_date_fields(self, *args: list, **kwargs: dict):
        """Attach a shared datepicker widget to all date inputs and populate CWE choices.

        Called by subclasses after super().__init__, once self.form exists.
        """
        date_input_widget = forms.DateInput(attrs={"class": "datepicker", "placeholder": "YYYY-MM-DD"}, format="%Y-%m-%d")
        self.form.fields["on"].widget = date_input_widget
        self.form.fields["before"].widget = date_input_widget
        self.form.fields["after"].widget = date_input_widget
        self.form.fields["kev_date"].widget = date_input_widget
        self.form.fields["kev_before"].widget = date_input_widget
        self.form.fields["kev_after"].widget = date_input_widget
        self.form.fields["mitigated_on"].widget = date_input_widget
        self.form.fields["mitigated_before"].widget = date_input_widget
        self.form.fields["mitigated_after"].widget = date_input_widget
        # CWE choices are derived from the values present in the current queryset.
        self.form.fields["cwe"].choices = cwe_options(self.queryset)

    def filter_mitigated_after(self, queryset, name, value):
        """Filter mitigated strictly after `value`; a bare date is pushed to end-of-day
        so that findings mitigated on that same day are not matched."""
        if value.hour == 0 and value.minute == 0 and value.second == 0:
            value = value.replace(hour=23, minute=59, second=59)

        return queryset.filter(mitigated__gt=value)

    def filter_mitigated_on(self, queryset, name, value):
        """Filter mitigated on an exact timestamp, or anywhere within the day when
        `value` carries no time component."""
        if value.hour == 0 and value.minute == 0 and value.second == 0:
            # we have a simple date without a time, lets get a range from this morning to tonight at 23:59:59:999
            nextday = value + timedelta(days=1)
            return queryset.filter(mitigated__gte=value, mitigated__lt=nextday)

        return queryset.filter(mitigated=value)
class FindingFilterWithoutObjectLookups(FindingFilterHelper, FindingTagStringFilter):
    """Finding filter variant that matches related objects by name strings instead of
    object-lookup dropdowns (avoids loading large related querysets into the form)."""

    # Hidden numeric filters so deep links can still scope by product / product type.
    test__engagement__product__prod_type = NumberFilter(widget=HiddenInput())
    test__engagement__product = NumberFilter(widget=HiddenInput())
    reporter = CharFilter(
        field_name="reporter__username",
        lookup_expr="iexact",
        label="Reporter Username",
        help_text="Search for Reporter names that are an exact match")
    reporter_contains = CharFilter(
        field_name="reporter__username",
        lookup_expr="icontains",
        label="Reporter Username Contains",
        help_text="Search for Reporter names that contain a given pattern")
    reviewers = CharFilter(
        field_name="reviewers__username",
        lookup_expr="iexact",
        label="Reviewer Username",
        help_text="Search for Reviewer names that are an exact match")
    reviewers_contains = CharFilter(
        field_name="reviewers__username",
        lookup_expr="icontains",
        label="Reviewer Username Contains",
        help_text="Search for Reviewer usernames that contain a given pattern")
    test__engagement__product__prod_type__name = CharFilter(
        field_name="test__engagement__product__prod_type__name",
        lookup_expr="iexact",
        label=labels.ORG_FILTERS_NAME_LABEL,
        help_text=labels.ORG_FILTERS_NAME_HELP)
    test__engagement__product__prod_type__name_contains = CharFilter(
        field_name="test__engagement__product__prod_type__name",
        lookup_expr="icontains",
        label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL,
        help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP)
    test__engagement__product__name = CharFilter(
        field_name="test__engagement__product__name",
        lookup_expr="iexact",
        label=labels.ASSET_FILTERS_NAME_LABEL,
        help_text=labels.ASSET_FILTERS_NAME_HELP)
    test__engagement__product__name_contains = CharFilter(
        field_name="test__engagement__product__name",
        lookup_expr="icontains",
        label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL,
        help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP)
    test__engagement__name = CharFilter(
        field_name="test__engagement__name",
        lookup_expr="iexact",
        label="Engagement Name",
        help_text="Search for Engagement names that are an exact match")
    test__engagement__name_contains = CharFilter(
        field_name="test__engagement__name",
        lookup_expr="icontains",
        label="Engagement name Contains",
        help_text="Search for Engagement names that contain a given pattern")
    test__name = CharFilter(
        field_name="test__name",
        lookup_expr="iexact",
        label="Test Name",
        help_text="Search for Test names that are an exact match")
    test__name_contains = CharFilter(
        field_name="test__name",
        lookup_expr="icontains",
        label="Test name Contains",
        help_text="Search for Test names that contain a given pattern")

    # Declared only when the finding-groups feature is enabled at import time.
    if is_finding_groups_enabled():
        finding_group__name = CharFilter(
            field_name="finding_group__name",
            lookup_expr="iexact",
            label="Finding Group Name",
            help_text="Search for Finding Group names that are an exact match")
        finding_group__name_contains = CharFilter(
            field_name="finding_group__name",
            lookup_expr="icontains",
            label="Finding Group Name Contains",
            help_text="Search for Finding Group names that contain a given pattern")

    class Meta:
        model = Finding
        fields = get_finding_filterset_fields(filter_string_matching=True)

        exclude = ["url", "description", "mitigation", "impact",
                   "endpoints", "references",
                   "thread_id", "notes", "scanner_confidence",
                   "numerical_severity", "line", "duplicate_finding",
                   "hash_code", "reviewers", "created", "files",
                   "sla_start_date", "sla_expiration_date", "cvssv3",
                   "severity_justification", "steps_to_reproduce"]

    def __init__(self, *args, **kwargs):
        # `user` and `pid` are consumed here; they must not reach FilterSet.__init__.
        self.user = None
        self.pid = None
        if "user" in kwargs:
            self.user = kwargs.pop("user")

        if "pid" in kwargs:
            self.pid = kwargs.pop("pid")
        super().__init__(*args, **kwargs)
        # Set some date fields
        self.set_date_fields(*args, **kwargs)
        # Don't show the product filter on the product finding view
        if self.pid:
            del self.form.fields["test__engagement__product__name"]
            del self.form.fields["test__engagement__product__name_contains"]
            del self.form.fields["test__engagement__product__prod_type__name"]
            del self.form.fields["test__engagement__product__prod_type__name_contains"]
        else:
            del self.form.fields["test__name"]
            del self.form.fields["test__name_contains"]
class FindingFilter(FindingFilterHelper, FindingTagFilter):
    """Primary Finding filter with object-lookup dropdowns; querysets start empty and
    are narrowed to the requesting user's authorized objects in __init__."""

    reporter = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none())
    reviewers = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none())
    test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.none(),
        label=labels.ORG_FILTERS_LABEL)
    test__engagement__product = ModelMultipleChoiceFilter(
        queryset=Product.objects.none(),
        label=labels.ASSET_FILTERS_LABEL)
    test__engagement = ModelMultipleChoiceFilter(
        queryset=Engagement.objects.none(),
        label="Engagement")
    test = ModelMultipleChoiceFilter(
        queryset=Test.objects.none(),
        label="Test")

    if is_finding_groups_enabled():
        finding_group = ModelMultipleChoiceFilter(
            queryset=Finding_Group.objects.none(),
            label="Finding Group")

    class Meta:
        model = Finding
        fields = get_finding_filterset_fields()

        exclude = ["url", "description", "mitigation", "impact",
                   "endpoints", "references",
                   "thread_id", "notes", "scanner_confidence",
                   "numerical_severity", "line", "duplicate_finding",
                   "hash_code", "reviewers", "created", "files",
                   "sla_start_date", "sla_expiration_date", "cvssv3",
                   "severity_justification", "steps_to_reproduce"]

    def __init__(self, *args, **kwargs):
        # Consume `user` / `pid` before the FilterSet constructor sees kwargs.
        self.user = None
        self.pid = None
        if "user" in kwargs:
            self.user = kwargs.pop("user")

        if "pid" in kwargs:
            self.pid = kwargs.pop("pid")
        super().__init__(*args, **kwargs)
        # Set some date fields
        self.set_date_fields(*args, **kwargs)
        # Don't show the product filter on the product finding view
        self.set_related_object_fields(*args, **kwargs)

    def set_related_object_fields(self, *args: list, **kwargs: dict):
        """Scope related-object dropdowns: to the current product when pid is set,
        otherwise to the user's authorized objects."""
        finding_group_query = Finding_Group.objects.all()
        if self.pid is not None:
            # Product context: the product/product-type dropdowns are redundant.
            del self.form.fields["test__engagement__product"]
            del self.form.fields["test__engagement__product__prod_type"]
            # TODO: add authorized check to be sure
            self.form.fields["test__engagement"].queryset = Engagement.objects.filter(
                product_id=self.pid,
            ).all()
            self.form.fields["test"].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related("test_type")
            finding_group_query = Finding_Group.objects.filter(test__engagement__product_id=self.pid)
        else:
            self.form.fields[
                "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)
            self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View)
            del self.form.fields["test"]

        # These fields may have been deleted above or never declared (feature flags),
        # hence the .get() guards.
        if self.form.fields.get("test__engagement__product"):
            self.form.fields["test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View)
        if self.form.fields.get("finding_group", None):
            self.form.fields["finding_group"].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View, queryset=finding_group_query)
        self.form.fields["reporter"].queryset = get_authorized_users(Permissions.Finding_View)
        self.form.fields["reviewers"].queryset = self.form.fields["reporter"].queryset


class FindingGroupsFilter(FilterSet):
    """Lightweight filter for finding-group listings (name, severity, engagement, product)."""

    name = CharFilter(lookup_expr="icontains", label="Name")
    severity = ChoiceFilter(
        choices=[
            ("Low", "Low"),
            ("Medium", "Medium"),
            ("High", "High"),
            ("Critical", "Critical"),
        ],
        label="Min Severity",
    )
    engagement = ModelMultipleChoiceFilter(queryset=Engagement.objects.none(), label="Engagement")
    product = ModelMultipleChoiceFilter(queryset=Product.objects.none(), label=labels.ASSET_LABEL)

    class Meta:
        model = Finding
        fields = ["name", "severity", "engagement", "product"]

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop("user", None)
        self.pid = kwargs.pop("pid", None)
        super().__init__(*args, **kwargs)
        self.set_related_object_fields()

    def set_related_object_fields(self):
        # Product context: scope engagements to the product and drop the product field.
        if self.pid is not None:
            self.form.fields["engagement"].queryset = Engagement.objects.filter(product_id=self.pid)
            if "product" in self.form.fields:
                del self.form.fields["product"]
        else:
            self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View)
            self.form.fields["engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View)
class AcceptedFindingFilter(FindingFilter):
    """FindingFilter specialization for risk-accepted findings, adding acceptance
    date/owner/object filters scoped to the user's authorized risk acceptances."""

    risk_acceptance__created__date = DateRangeFilter(label="Acceptance Date")
    risk_acceptance__owner = ModelMultipleChoiceFilter(
        queryset=Dojo_User.objects.none(),
        label="Risk Acceptance Owner")
    risk_acceptance = ModelMultipleChoiceFilter(
        queryset=Risk_Acceptance.objects.none(),
        label="Accepted By")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Narrow the empty declared querysets to authorized objects only.
        self.form.fields["risk_acceptance__owner"].queryset = get_authorized_users(Permissions.Finding_View)
        self.form.fields["risk_acceptance"].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance)


class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups):
    """String-matching variant of AcceptedFindingFilter (no object-lookup dropdowns)."""

    risk_acceptance__created__date = DateRangeFilter(label="Acceptance Date")
    risk_acceptance__owner = CharFilter(
        field_name="risk_acceptance__owner__username",
        lookup_expr="iexact",
        label="Risk Acceptance Owner Username",
        help_text="Search for Risk Acceptance Owners username that are an exact match")
    risk_acceptance__owner_contains = CharFilter(
        field_name="risk_acceptance__owner__username",
        lookup_expr="icontains",
        label="Risk Acceptance Owner Username Contains",
        help_text="Search for Risk Acceptance Owners username that contain a given pattern")
    risk_acceptance__name = CharFilter(
        field_name="risk_acceptance__name",
        lookup_expr="iexact",
        label="Risk Acceptance Name",
        help_text="Search for Risk Acceptance name that are an exact match")
    risk_acceptance__name_contains = CharFilter(
        field_name="risk_acceptance__name",
        lookup_expr="icontains",
        # Fix: label previously duplicated "Risk Acceptance Name" (the exact-match
        # filter's label); every other *_contains filter in this module is labelled
        # "... Contains". Help text grammar fixed to match the sibling filters.
        label="Risk Acceptance Name Contains",
        help_text="Search for Risk Acceptance name that contain a given pattern")
class SimilarFindingHelper(FilterSet):
    """Mixin for 'similar findings' filters: seeds the filter form from a reference
    finding and restricts results to authorized findings, excluding the finding itself."""

    hash_code = MultipleChoiceFilter()
    vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label="Vulnerability Ids")

    def update_data(self, data: dict, *args: list, **kwargs: dict):
        # if filterset is bound, use initial values as defaults
        # because of this, we can't rely on the self.form.has_changed
        self.has_changed = True
        if not data and self.finding:
            # get a mutable copy of the QueryDict
            data = data.copy()

            # Seed defaults from the reference finding's identifying attributes.
            data["vulnerability_ids"] = ",".join(self.finding.vulnerability_ids)
            data["cwe"] = self.finding.cwe
            data["file_path"] = self.finding.file_path
            data["line"] = self.finding.line
            data["unique_id_from_tool"] = self.finding.unique_id_from_tool
            data["test__test_type"] = self.finding.test.test_type
            data["test__engagement__product"] = self.finding.test.engagement.product
            data["test__engagement__product__prod_type"] = self.finding.test.engagement.product.prod_type

            self.has_changed = False

    def set_hash_codes(self, *args: list, **kwargs: dict):
        # Offer only the reference finding's own hash code (truncated for display).
        if self.finding and self.finding.hash_code:
            self.form.fields["hash_code"] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + "...")], required=False, initial=[])

    def filter_queryset(self, *args: list, **kwargs: dict):
        """Apply the filters, then authorization, then drop the reference finding."""
        queryset = super().filter_queryset(*args, **kwargs)
        queryset = get_authorized_findings(Permissions.Finding_View, queryset, self.user)
        return queryset.exclude(pk=self.finding.pk)


class SimilarFindingFilter(FindingFilter, SimilarFindingHelper):
    class Meta(FindingFilter.Meta):
        model = Finding
        # slightly different fields from FindingFilter, but keep the same ordering for UI consistency
        fields = get_finding_filterset_fields(similar=True)

    def __init__(self, data=None, *args, **kwargs):
        # Consume `user` / `finding` kwargs before seeding data and delegating.
        self.user = None
        if "user" in kwargs:
            self.user = kwargs.pop("user")
        self.finding = None
        if "finding" in kwargs:
            self.finding = kwargs.pop("finding")
        # update_data must run before super().__init__ so defaults bind the form.
        self.update_data(data, *args, **kwargs)
        super().__init__(data, *args, **kwargs)
        self.set_hash_codes(*args, **kwargs)
class SimilarFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups, SimilarFindingHelper):
    """String-matching variant of SimilarFindingFilter (no object-lookup dropdowns)."""

    class Meta(FindingFilterWithoutObjectLookups.Meta):
        model = Finding
        # slightly different fields from FindingFilter, but keep the same ordering for UI consistency
        fields = get_finding_filterset_fields(similar=True, filter_string_matching=True)

    def __init__(self, data=None, *args, **kwargs):
        # Consume `user` / `finding` kwargs before seeding data and delegating.
        self.user = None
        if "user" in kwargs:
            self.user = kwargs.pop("user")
        self.finding = None
        if "finding" in kwargs:
            self.finding = kwargs.pop("finding")
        # update_data must run before super().__init__ so defaults bind the form.
        self.update_data(data, *args, **kwargs)
        super().__init__(data, *args, **kwargs)
        self.set_hash_codes(*args, **kwargs)
class TemplateFindingFilter(DojoFilter):
    """Filter for Finding_Template listings: title/cwe/severity plus tag filters."""

    title = CharFilter(lookup_expr="icontains")
    cwe = MultipleChoiceFilter(choices=[])
    severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES)

    tags = ModelMultipleChoiceFilter(
        field_name="tags__name",
        to_field_name="name",
        queryset=Finding.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")

    not_tags = ModelMultipleChoiceFilter(
        field_name="tags__name",
        to_field_name="name",
        exclude=True,
        queryset=Finding.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)

    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ("cwe", "cwe"),
            ("title", "title"),
            ("numerical_severity", "numerical_severity"),
        ),
        field_labels={
            "numerical_severity": "Severity",
        },
    )

    class Meta:
        model = Finding_Template
        exclude = ["description", "mitigation", "impact",
                   "references", "numerical_severity"]

    # NOTE(review): the following filters traverse test__... relations although the
    # Meta model is Finding_Template — looks copy-pasted from a Finding filter;
    # confirm Finding_Template actually has these relations.
    not_test__tags = ModelMultipleChoiceFilter(
        field_name="test__tags__name",
        to_field_name="name",
        exclude=True,
        label="Test without tags",
        queryset=Test.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    not_test__engagement__tags = ModelMultipleChoiceFilter(
        field_name="test__engagement__tags__name",
        to_field_name="name",
        exclude=True,
        label="Engagement without tags",
        queryset=Engagement.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    not_test__engagement__product__tags = ModelMultipleChoiceFilter(
        field_name="test__engagement__product__tags__name",
        to_field_name="name",
        exclude=True,
        label=labels.ASSET_FILTERS_WITHOUT_TAGS_LABEL,
        queryset=Product.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # CWE choices are derived from the values present in the current queryset.
        self.form.fields["cwe"].choices = cwe_options(self.queryset)


class ApiTemplateFindingFilter(DojoFilter):
    """REST API filter for Finding_Template: tag expressions plus title/cwe ordering."""

    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains")
    tags = CharFieldInFilter(
        field_name="tags__name",
        lookup_expr="in",
        help_text="Comma separated list of exact tags (uses OR for multiple values)")
    tags__and = CharFieldFilterANDExpression(
        field_name="tags__name",
        help_text="Comma separated list of exact tags to match with an AND expression")
    # Fix: `exclude` previously received the *string* "True", which only worked
    # because any non-empty string is truthy; use the boolean, consistent with
    # every other filter in this module.
    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude=True)
    not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in",
                                 help_text="Comma separated list of exact tags not present on model", exclude=True)

    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ("title", "title"),
            ("cwe", "cwe"),
        ),
    )

    class Meta:
        model = Finding_Template
        fields = ["id", "title", "cwe", "severity", "description",
                  "mitigation"]
class MetricsFindingFilter(FindingFilter):
    """FindingFilter specialization for metrics views: adds start/end date bounds
    and a computed metrics date-range filter."""

    start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt"))
    end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt"))
    date = MetricsDateRangeFilter()
    vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id")

    not_tags = ModelMultipleChoiceFilter(
        field_name="tags__name",
        to_field_name="name",
        exclude=True,
        # NOTE(review): uses Endpoint.tags.tag_model although this filters Finding
        # tags — possibly a copy-paste slip; confirm the intended tag model.
        queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)

    def __init__(self, *args, **kwargs):
        # When explicit start/end dates are supplied, force the `date` choice to 8
        # ("custom range"); requires a QueryDict, hence the _mutable toggling.
        # NOTE(review): assumes a positional `data` argument is always passed.
        if args[0]:
            if args[0].get("start_date", "") or args[0].get("end_date", ""):
                args[0]._mutable = True
                args[0]["date"] = 8
                args[0]._mutable = False

        super().__init__(*args, **kwargs)

    class Meta(FindingFilter.Meta):
        model = Finding
        fields = get_finding_filterset_fields(metrics=True)


class MetricsFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups):
    """String-matching variant of MetricsFindingFilter (no object-lookup dropdowns)."""

    start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt"))
    end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt"))
    date = MetricsDateRangeFilter()
    vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id")

    not_tags = ModelMultipleChoiceFilter(
        field_name="tags__name",
        to_field_name="name",
        exclude=True,
        # NOTE(review): same Endpoint.tags.tag_model question as above.
        queryset=Endpoint.tags.tag_model.objects.all().order_by("name"),
        # label='tags', # doesn't work with tagulous, need to set in __init__ below
    )

    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)

    def __init__(self, *args, **kwargs):
        # Same custom-range forcing as MetricsFindingFilter.
        if args[0]:
            if args[0].get("start_date", "") or args[0].get("end_date", ""):
                args[0]._mutable = True
                args[0]["date"] = 8
                args[0]._mutable = False

        super().__init__(*args, **kwargs)

    class Meta(FindingFilterWithoutObjectLookups.Meta):
        model = Finding
        fields = get_finding_filterset_fields(metrics=True, filter_string_matching=True)
class MetricsEndpointFilterHelper(FilterSet):
    """Shared scalar filters for endpoint-status metrics views."""

    start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt"))
    end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt"))
    date = MetricsDateRangeFilter()
    finding__test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version")
    finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity")
    endpoint__host = CharFilter(lookup_expr="icontains", label="Endpoint Host")
    finding_title = CharFilter(lookup_expr="icontains", label="Finding Title")
    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)


class MetricsEndpointFilter(MetricsEndpointFilterHelper):
    """Endpoint_Status metrics filter with object-lookup dropdowns, scoped to the
    user's authorized objects (or to one product when pid is supplied)."""

    finding__test__engagement__product__prod_type = ModelMultipleChoiceFilter(
        queryset=Product_Type.objects.none(),
        label=labels.ORG_FILTERS_LABEL)
    finding__test__engagement = ModelMultipleChoiceFilter(
        queryset=Engagement.objects.none(),
        label="Engagement")
    # Tag include filters at each level of the endpoint/finding relation chain.
    endpoint__tags = ModelMultipleChoiceFilter(
        field_name="endpoint__tags__name",
        to_field_name="name",
        label="Endpoint tags",
        queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
    finding__tags = ModelMultipleChoiceFilter(
        field_name="finding__tags__name",
        to_field_name="name",
        label="Finding tags",
        queryset=Finding.tags.tag_model.objects.all().order_by("name"))
    finding__test__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__tags__name",
        to_field_name="name",
        label="Test tags",
        queryset=Test.tags.tag_model.objects.all().order_by("name"))
    finding__test__engagement__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__engagement__tags__name",
        to_field_name="name",
        label="Engagement tags",
        queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
    finding__test__engagement__product__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__engagement__product__tags__name",
        to_field_name="name",
        label=labels.ASSET_FILTERS_TAGS_ASSET_LABEL,
        queryset=Product.tags.tag_model.objects.all().order_by("name"))
    # Tag exclude filters (exclude=True) mirroring the include filters above.
    not_endpoint__tags = ModelMultipleChoiceFilter(
        field_name="endpoint__tags__name",
        to_field_name="name",
        exclude=True,
        label="Endpoint without tags",
        queryset=Endpoint.tags.tag_model.objects.all().order_by("name"))
    not_finding__tags = ModelMultipleChoiceFilter(
        field_name="finding__tags__name",
        to_field_name="name",
        exclude=True,
        label="Finding without tags",
        queryset=Finding.tags.tag_model.objects.all().order_by("name"))
    not_finding__test__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__tags__name",
        to_field_name="name",
        exclude=True,
        label="Test without tags",
        queryset=Test.tags.tag_model.objects.all().order_by("name"))
    not_finding__test__engagement__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__engagement__tags__name",
        to_field_name="name",
        exclude=True,
        label="Engagement without tags",
        queryset=Engagement.tags.tag_model.objects.all().order_by("name"))
    not_finding__test__engagement__product__tags = ModelMultipleChoiceFilter(
        field_name="finding__test__engagement__product__tags__name",
        to_field_name="name",
        exclude=True,
        label=labels.ASSET_FILTERS_WITHOUT_TAGS_LABEL,
        queryset=Product.tags.tag_model.objects.all().order_by("name"))

    def __init__(self, *args, **kwargs):
        # When explicit start/end dates are supplied, force the `date` choice to 8
        # ("custom range"); requires a QueryDict, hence the _mutable toggling.
        # NOTE(review): assumes a positional `data` argument is always passed.
        if args[0]:
            if args[0].get("start_date", "") or args[0].get("end_date", ""):
                args[0]._mutable = True
                args[0]["date"] = 8
                args[0]._mutable = False

        self.pid = None
        if "pid" in kwargs:
            self.pid = kwargs.pop("pid")

        super().__init__(*args, **kwargs)
        if self.pid:
            # Product context: drop the product-type dropdown, scope engagements.
            del self.form.fields["finding__test__engagement__product__prod_type"]
            self.form.fields["finding__test__engagement"].queryset = Engagement.objects.filter(
                product_id=self.pid,
            ).all()
        else:
            self.form.fields["finding__test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View).order_by("name")

        # Field may have been deleted above, hence the membership guard.
        if "finding__test__engagement__product__prod_type" in self.form.fields:
            self.form.fields[
                "finding__test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View)

    class Meta:
        model = Endpoint_Status
        exclude = ["last_modified", "endpoint", "finding"]
class MetricsEndpointFilterWithoutObjectLookups(MetricsEndpointFilterHelper, FindingTagStringFilter):
    """String-matching variant of MetricsEndpointFilter: related objects and tags are
    matched by name strings instead of object-lookup dropdowns."""

    finding__test__engagement__product__prod_type = CharFilter(
        field_name="finding__test__engagement__product__prod_type",
        lookup_expr="iexact",
        label=labels.ORG_FILTERS_NAME_LABEL,
        help_text=labels.ORG_FILTERS_NAME_HELP)
    finding__test__engagement__product__prod_type_contains = CharFilter(
        field_name="finding__test__engagement__product__prod_type",
        lookup_expr="icontains",
        label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL,
        help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP)
    finding__test__engagement = CharFilter(
        field_name="finding__test__engagement",
        lookup_expr="iexact",
        label="Engagement Name",
        help_text="Search for Engagement names that are an exact match")
    finding__test__engagement_contains = CharFilter(
        field_name="finding__test__engagement",
        lookup_expr="icontains",
        label="Engagement Name Contains",
        help_text="Search for Engagement names that contain a given pattern")
    # Tag include filters (iexact / icontains pairs) per relation level.
    endpoint__tags_contains = CharFilter(
        label="Endpoint Tag Contains",
        field_name="endpoint__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Endpoint that contain a given pattern")
    endpoint__tags = CharFilter(
        label="Endpoint Tag",
        field_name="endpoint__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Endpoint that are an exact match")
    finding__tags_contains = CharFilter(
        label="Finding Tag Contains",
        field_name="finding__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Finding that contain a given pattern")
    finding__tags = CharFilter(
        label="Finding Tag",
        field_name="finding__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Finding that are an exact match")
    finding__test__tags_contains = CharFilter(
        label="Test Tag Contains",
        field_name="finding__test__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Finding that contain a given pattern")
    finding__test__tags = CharFilter(
        label="Test Tag",
        field_name="finding__test__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Finding that are an exact match")
    finding__test__engagement__tags_contains = CharFilter(
        label="Engagement Tag Contains",
        field_name="finding__test__engagement__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Finding that contain a given pattern")
    finding__test__engagement__tags = CharFilter(
        label="Engagement Tag",
        field_name="finding__test__engagement__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Finding that are an exact match")
    finding__test__engagement__product__tags_contains = CharFilter(
        label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL,
        field_name="finding__test__engagement__product__tags__name",
        lookup_expr="icontains",
        help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP)
    finding__test__engagement__product__tags = CharFilter(
        label=labels.ASSET_FILTERS_TAG_ASSET_LABEL,
        field_name="finding__test__engagement__product__tags__name",
        lookup_expr="iexact",
        help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP)

    # Tag exclude filters (exclude=True) mirroring the include filters above.
    not_endpoint__tags_contains = CharFilter(
        label="Endpoint Tag Does Not Contain",
        field_name="endpoint__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Endpoint that contain a given pattern, and exclude them",
        exclude=True)
    not_endpoint__tags = CharFilter(
        label="Not Endpoint Tag",
        field_name="endpoint__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Endpoint that are an exact match, and exclude them",
        exclude=True)
    not_finding__tags_contains = CharFilter(
        label="Finding Tag Does Not Contain",
        field_name="finding__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Finding that contain a given pattern, and exclude them",
        exclude=True)
    not_finding__tags = CharFilter(
        label="Not Finding Tag",
        field_name="finding__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Finding that are an exact match, and exclude them",
        exclude=True)
    not_finding__test__tags_contains = CharFilter(
        label="Test Tag Does Not Contain",
        field_name="finding__test__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Test that contain a given pattern, and exclude them",
        exclude=True)
    not_finding__test__tags = CharFilter(
        label="Not Test Tag",
        field_name="finding__test__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Test that are an exact match, and exclude them",
        exclude=True)
    not_finding__test__engagement__tags_contains = CharFilter(
        label="Engagement Tag Does Not Contain",
        field_name="finding__test__engagement__tags__name",
        lookup_expr="icontains",
        help_text="Search for tags on a Engagement that contain a given pattern, and exclude them",
        exclude=True)
    not_finding__test__engagement__tags = CharFilter(
        label="Not Engagement Tag",
        field_name="finding__test__engagement__tags__name",
        lookup_expr="iexact",
        help_text="Search for tags on a Engagement that are an exact match, and exclude them",
        exclude=True)
    not_finding__test__engagement__product__tags_contains = CharFilter(
        label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL,
        field_name="finding__test__engagement__product__tags__name",
        lookup_expr="icontains",
        help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP,
        exclude=True)
    not_finding__test__engagement__product__tags = CharFilter(
        label=labels.ASSET_FILTERS_TAG_NOT_LABEL,
        field_name="finding__test__engagement__product__tags__name",
        lookup_expr="iexact",
        help_text=labels.ASSET_FILTERS_TAG_NOT_HELP,
        exclude=True)

    def __init__(self, *args, **kwargs):
        # When explicit start/end dates are supplied, force the `date` choice to 8
        # ("custom range"); requires a QueryDict, hence the _mutable toggling.
        # NOTE(review): assumes a positional `data` argument is always passed.
        if args[0]:
            if args[0].get("start_date", "") or args[0].get("end_date", ""):
                args[0]._mutable = True
                args[0]["date"] = 8
                args[0]._mutable = False
        self.pid = None
        if "pid" in kwargs:
            self.pid = kwargs.pop("pid")
        super().__init__(*args, **kwargs)
        # Product context: the product-type name filters are redundant.
        if self.pid:
            del self.form.fields["finding__test__engagement__product__prod_type"]

    class Meta:
        model = Endpoint_Status
        exclude = ["last_modified", "endpoint", "finding"]


class EndpointFilterHelper(FilterSet):
    """Shared URL-component filters and ordering for endpoint listings."""

    protocol = CharFilter(lookup_expr="icontains")
    userinfo = CharFilter(lookup_expr="icontains")
    host = CharFilter(lookup_expr="icontains")
    port = NumberFilter()
    path = CharFilter(lookup_expr="icontains")
    query = CharFilter(lookup_expr="icontains")
    fragment = CharFilter(lookup_expr="icontains")
    tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains")
    not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True)
    has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags")
    o = OrderingFilter(
        # tuple-mapping retains order
        fields=(
            ("product", "product"),
            ("host", "host"),
            ("id", "id"),
        ),
    )
findings__test__tags = ModelMultipleChoiceFilter( - field_name="findings__test__tags__name", - to_field_name="name", - label="Test Tags", - queryset=Test.tags.tag_model.objects.all().order_by("name")) - findings__test__engagement__tags = ModelMultipleChoiceFilter( - field_name="findings__test__engagement__tags__name", - to_field_name="name", - label="Engagement Tags", - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - findings__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name="findings__test__engagement__product__tags__name", - to_field_name="name", - label=labels.ASSET_FILTERS_TAGS_ASSET_LABEL, - queryset=Product.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - label="Not Endpoint Tags", - exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) - not_findings__tags = ModelMultipleChoiceFilter( - field_name="findings__tags__name", - to_field_name="name", - label="Not Finding Tags", - exclude=True, - queryset=Finding.tags.tag_model.objects.all().order_by("name")) - not_findings__test__tags = ModelMultipleChoiceFilter( - field_name="findings__test__tags__name", - to_field_name="name", - label="Not Test Tags", - exclude=True, - queryset=Test.tags.tag_model.objects.all().order_by("name")) - not_findings__test__engagement__tags = ModelMultipleChoiceFilter( - field_name="findings__test__engagement__tags__name", - to_field_name="name", - label="Not Engagement Tags", - exclude=True, - queryset=Engagement.tags.tag_model.objects.all().order_by("name")) - not_findings__test__engagement__product__tags = ModelMultipleChoiceFilter( - field_name="findings__test__engagement__product__tags__name", - to_field_name="name", - label=labels.ASSET_FILTERS_NOT_TAGS_ASSET_LABEL, - exclude=True, - queryset=Product.tags.tag_model.objects.all().order_by("name")) - - def __init__(self, *args, **kwargs): - self.user = None - if "user" 
in kwargs: - self.user = kwargs.pop("user") - super().__init__(*args, **kwargs) - self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View) - - @property - def qs(self): - parent = super().qs - return get_authorized_endpoints(Permissions.Endpoint_View, parent) - - class Meta: - model = Endpoint - exclude = ["findings", "inherited_tags"] - - -class EndpointFilterWithoutObjectLookups(EndpointFilterHelper): - product = NumberFilter(widget=HiddenInput()) - product__name = CharFilter( - field_name="product__name", - lookup_expr="iexact", - label=labels.ASSET_FILTERS_NAME_LABEL, - help_text=labels.ASSET_FILTERS_NAME_HELP) - product__name_contains = CharFilter( - field_name="product__name", - lookup_expr="icontains", - label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) - - tags_contains = CharFilter( - label="Endpoint Tag Contains", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Endpoint that contain a given pattern") - tags = CharFilter( - label="Endpoint Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Endpoint that are an exact match") - findings__tags_contains = CharFilter( - label="Finding Tag Contains", - field_name="findings__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - findings__tags = CharFilter( - label="Finding Tag", - field_name="findings__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match") - findings__test__tags_contains = CharFilter( - label="Test Tag Contains", - field_name="findings__test__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - findings__test__tags = CharFilter( - label="Test Tag", - field_name="findings__test__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an 
exact match") - findings__test__engagement__tags_contains = CharFilter( - label="Engagement Tag Contains", - field_name="findings__test__engagement__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern") - findings__test__engagement__tags = CharFilter( - label="Engagement Tag", - field_name="findings__test__engagement__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match") - findings__test__engagement__product__tags_contains = CharFilter( - label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL, - field_name="findings__test__engagement__product__tags__name", - lookup_expr="icontains", - help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP) - findings__test__engagement__product__tags = CharFilter( - label=labels.ASSET_FILTERS_TAG_ASSET_LABEL, - field_name="findings__test__engagement__product__tags__name", - lookup_expr="iexact", - help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP) - - not_tags_contains = CharFilter( - label="Endpoint Tag Does Not Contain", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Endpoint that contain a given pattern, and exclude them", - exclude=True) - not_tags = CharFilter( - label="Not Endpoint Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Endpoint that are an exact match, and exclude them", - exclude=True) - not_findings__tags_contains = CharFilter( - label="Finding Tag Does Not Contain", - field_name="findings__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Finding that contain a given pattern, and exclude them", - exclude=True) - not_findings__tags = CharFilter( - label="Not Finding Tag", - field_name="findings__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Finding that are an exact match, and exclude them", - exclude=True) - not_findings__test__tags_contains = CharFilter( - label="Test Tag Does Not 
Contain", - field_name="findings__test__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Test that contain a given pattern, and exclude them", - exclude=True) - not_findings__test__tags = CharFilter( - label="Not Test Tag", - field_name="findings__test__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Test that are an exact match, and exclude them", - exclude=True) - not_findings__test__engagement__tags_contains = CharFilter( - label="Engagement Tag Does Not Contain", - field_name="findings__test__engagement__tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", - exclude=True) - not_findings__test__engagement__tags = CharFilter( - label="Not Engagement Tag", - field_name="findings__test__engagement__tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Engagement that are an exact match, and exclude them", - exclude=True) - not_findings__test__engagement__product__tags_contains = CharFilter( - label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL, - field_name="findings__test__engagement__product__tags__name", - lookup_expr="icontains", - help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP, - exclude=True) - not_findings__test__engagement__product__tags = CharFilter( - label=labels.ASSET_FILTERS_TAG_NOT_LABEL, - field_name="findings__test__engagement__product__tags__name", - lookup_expr="iexact", - help_text=labels.ASSET_FILTERS_TAG_NOT_HELP, - exclude=True) - - def __init__(self, *args, **kwargs): - self.user = None - if "user" in kwargs: - self.user = kwargs.pop("user") - super().__init__(*args, **kwargs) - - @property - def qs(self): - parent = super().qs - return get_authorized_endpoints(Permissions.Endpoint_View, parent) - - class Meta: - model = Endpoint - exclude = ["findings", "inherited_tags", "product"] - - -class ApiEndpointFilter(DojoFilter): - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", 
help_text="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on model", exclude="True") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("host", "host"), - ("product", "product"), - ("id", "id"), - ), - ) - - class Meta: - model = Endpoint - fields = ["id", "protocol", "userinfo", "host", "port", "path", "query", "fragment", "product"] - - -class ApiRiskAcceptanceFilter(DojoFilter): - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ), - ) - - class Meta: - model = Risk_Acceptance - fields = [ - "name", "accepted_findings", "recommendation", "recommendation_details", - "decision", "decision_details", "accepted_by", "owner", "expiration_date", - "expiration_date_warned", "expiration_date_handled", "reactivate_expired", - "restart_sla_expired", "notes", - ] - - -class EngagementTestFilterHelper(FilterSet): - version = CharFilter(lookup_expr="icontains", label="Version") - if settings.TRACK_IMPORT_HISTORY: - test_import__version = CharFilter(field_name="test_import__version", lookup_expr="icontains", label="Reimported Version") - target_start = DateRangeFilter() - target_end = DateRangeFilter() - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name 
contains", exclude=True) - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("title", "title"), - ("version", "version"), - ("target_start", "target_start"), - ("target_end", "target_end"), - ("lead", "lead"), - ("api_scan_configuration", "api_scan_configuration"), - ), - field_labels={ - "name": "Test Name", - }, - ) - - -class EngagementTestFilter(EngagementTestFilterHelper, DojoFilter): - lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") - api_scan_configuration = ModelChoiceFilter( - queryset=Product_API_Scan_Configuration.objects.none(), - label="API Scan Configuration") - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Test.tags.tag_model.objects.all().order_by("name")) - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Test.tags.tag_model.objects.all().order_by("name")) - - class Meta: - model = Test - fields = [ - "title", "test_type", "target_start", - "target_end", "percent_complete", - "version", "api_scan_configuration", - ] - - def __init__(self, *args, **kwargs): - self.engagement = kwargs.pop("engagement") - super(DojoFilter, self).__init__(*args, **kwargs) - self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") - self.form.fields["api_scan_configuration"].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct() - self.form.fields["lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ - .filter(test__lead__isnull=False).distinct() - - -class EngagementTestFilterWithoutObjectLookups(EngagementTestFilterHelper): - lead = CharFilter( - field_name="lead__username", - lookup_expr="iexact", - label="Lead Username", - help_text="Search for Lead username that are an exact 
match") - lead_contains = CharFilter( - field_name="lead__username", - lookup_expr="icontains", - label="Lead Username Contains", - help_text="Search for Lead username that contain a given pattern") - api_scan_configuration__tool_configuration__name = CharFilter( - field_name="api_scan_configuration__tool_configuration__name", - lookup_expr="iexact", - label="API Scan Configuration Name", - help_text="Search for Lead username that are an exact match") - api_scan_configuration__tool_configuration__name_contains = CharFilter( - field_name="api_scan_configuration__tool_configuration__name", - lookup_expr="icontains", - label="API Scan Configuration Name Contains", - help_text="Search for Lead username that contain a given pattern") - tags_contains = CharFilter( - label="Test Tag Contains", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Test that contain a given pattern") - tags = CharFilter( - label="Test Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Test that are an exact match") - not_tags_contains = CharFilter( - label="Test Tag Does Not Contain", - field_name="tags__name", - lookup_expr="icontains", - help_text="Search for tags on a Test that contain a given pattern, and exclude them", - exclude=True) - not_tags = CharFilter( - label="Not Test Tag", - field_name="tags__name", - lookup_expr="iexact", - help_text="Search for tags on a Test that are an exact match, and exclude them", - exclude=True) - - class Meta: - model = Test - fields = [ - "title", "test_type", "target_start", - "target_end", "percent_complete", "version", - ] - - def __init__(self, *args, **kwargs): - self.engagement = kwargs.pop("engagement") - super().__init__(*args, **kwargs) - self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") - - -class ApiTestFilter(DojoFilter): - tag = CharFilter(field_name="tags__name", 
lookup_expr="icontains", help_text="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - engagement__tags = CharFieldInFilter( - field_name="engagement__tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags present on engagement (uses OR for multiple values)") - engagement__tags__and = CharFieldFilterANDExpression( - field_name="engagement__tags__name", - help_text="Comma separated list of exact tags to match with an AND expression present on engagement") - engagement__product__tags = CharFieldInFilter( - field_name="engagement__product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) - engagement__product__tags__and = CharFieldFilterANDExpression( - field_name="engagement__product__tags__name", - help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) - - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on model", exclude="True") - not_engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on engagement", - exclude="True") - not_engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name", - lookup_expr="in", - help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, - exclude="True") - has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("title", "title"), - ("version", "version"), - ("target_start", 
"target_start"), - ("target_end", "target_end"), - ("test_type", "test_type"), - ("lead", "lead"), - ("version", "version"), - ("branch_tag", "branch_tag"), - ("build_id", "build_id"), - ("commit_hash", "commit_hash"), - ("api_scan_configuration", "api_scan_configuration"), - ("engagement", "engagement"), - ("created", "created"), - ("updated", "updated"), - ), - field_labels={ - "name": "Test Name", - }, - ) - - class Meta: - model = Test - fields = ["id", "title", "test_type", "target_start", - "target_end", "notes", "percent_complete", - "engagement", "version", - "branch_tag", "build_id", "commit_hash", - "api_scan_configuration", "scan_type"] - - -class ApiAppAnalysisFilter(DojoFilter): - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") - tags = CharFieldInFilter( - field_name="tags__name", - lookup_expr="in", - help_text="Comma separated list of exact tags (uses OR for multiple values)") - tags__and = CharFieldFilterANDExpression( - field_name="tags__name", - help_text="Comma separated list of exact tags to match with an AND expression") - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") - not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", - help_text="Comma separated list of exact tags not present on model", exclude="True") - - class Meta: - model = App_Analysis - fields = ["product", "name", "user", "version"] - - -class ApiCredentialsFilter(DojoFilter): - class Meta: - model = Cred_Mapping - fields = "__all__" - - -class EndpointReportFilter(DojoFilter): - protocol = CharFilter(lookup_expr="icontains") - userinfo = CharFilter(lookup_expr="icontains") - host = CharFilter(lookup_expr="icontains") - port = NumberFilter() - path = CharFilter(lookup_expr="icontains") - query = CharFilter(lookup_expr="icontains") - fragment = CharFilter(lookup_expr="icontains") - finding__severity = 
MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity") - finding__mitigated = ReportBooleanFilter(label="Finding Mitigated") - - tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), - # label='tags', # doesn't work with tagulous, need to set in __init__ below - ) - - tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") - - not_tags = ModelMultipleChoiceFilter( - field_name="tags__name", - to_field_name="name", - exclude=True, - queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), - # label='tags', # doesn't work with tagulous, need to set in __init__ below - ) - - not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) - - class Meta: - model = Endpoint - exclude = ["product"] - - -class ReportFindingFilterHelper(FilterSet): - title = CharFilter(lookup_expr="icontains", label="Name") - date = DateFromToRangeFilter(field_name="date", label="Date Discovered") - date_recent = DateRangeFilter(field_name="date", label="Relative Date") - severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) - active = ReportBooleanFilter() - is_mitigated = ReportBooleanFilter() - mitigated = DateRangeFilter(label="Mitigated Date") - verified = ReportBooleanFilter() - false_p = ReportBooleanFilter(label="False Positive") - risk_acceptance = ReportRiskAcceptanceFilter(label="Risk Accepted") - duplicate = ReportBooleanFilter() - out_of_scope = ReportBooleanFilter() - outside_of_sla = FindingSLAFilter(label="Outside of SLA") - file_path = CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - fields=( - ("title", "title"), - ("date", "date"), - ("fix_available", "fix_available"), - ("numerical_severity", "numerical_severity"), - ("epss_score", "epss_score"), - ("epss_percentile", "epss_percentile"), - ("test__engagement__product__name", 
"test__engagement__product__name"), - ), - ) - - class Meta: - model = Finding - # exclude sonarqube issue as by default it will show all without checking permissions - exclude = ["date", "cwe", "url", "description", "mitigation", "impact", - "references", "sonarqube_issue", "duplicate_finding", - "thread_id", "notes", "inherited_tags", "endpoints", - "numerical_severity", "reporter", "last_reviewed", - "jira_creation", "jira_change", "files"] - - def manage_kwargs(self, kwargs): - self.prod_type = None - self.product = None - self.engagement = None - self.test = None - if "prod_type" in kwargs: - self.prod_type = kwargs.pop("prod_type") - if "product" in kwargs: - self.product = kwargs.pop("product") - if "engagement" in kwargs: - self.engagement = kwargs.pop("engagement") - if "test" in kwargs: - self.test = kwargs.pop("test") - - @property - def qs(self): - parent = super().qs - return get_authorized_findings(Permissions.Finding_View, parent) - - -class ReportFindingFilter(ReportFindingFilterHelper, FindingTagFilter): - test__engagement__product = ModelMultipleChoiceFilter( - queryset=Product.objects.none(), label=labels.ASSET_FILTERS_LABEL) - test__engagement__product__prod_type = ModelMultipleChoiceFilter( - queryset=Product_Type.objects.none(), - label=labels.ORG_FILTERS_LABEL) - test__engagement__product__lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, label=labels.ASSET_LIFECYCLE_LABEL) - test__engagement = ModelMultipleChoiceFilter(queryset=Engagement.objects.none(), label="Engagement") - duplicate_finding = ModelChoiceFilter(queryset=Finding.objects.filter(original_finding__isnull=False).distinct()) - - def __init__(self, *args, **kwargs): - self.manage_kwargs(kwargs) - super().__init__(*args, **kwargs) - - # duplicate_finding queryset needs to restricted in line with permissions - # and inline with report scope to avoid a dropdown with 100K entries - duplicate_finding_query_set = self.form.fields["duplicate_finding"].queryset - 
duplicate_finding_query_set = get_authorized_findings(Permissions.Finding_View, duplicate_finding_query_set) - - if self.test: - duplicate_finding_query_set = duplicate_finding_query_set.filter(test=self.test) - del self.form.fields["test__tags"] - del self.form.fields["test__engagement__tags"] - del self.form.fields["test__engagement__product__tags"] - if self.engagement: - duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement=self.engagement) - del self.form.fields["test__engagement__tags"] - del self.form.fields["test__engagement__product__tags"] - elif self.product: - duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product=self.product) - del self.form.fields["test__engagement__product"] - del self.form.fields["test__engagement__product__tags"] - elif self.prod_type: - duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product__prod_type=self.prod_type) - del self.form.fields["test__engagement__product__prod_type"] - - self.form.fields["duplicate_finding"].queryset = duplicate_finding_query_set - - if "test__engagement__product__prod_type" in self.form.fields: - self.form.fields[ - "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) - if "test__engagement__product" in self.form.fields: - self.form.fields[ - "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) - if "test__engagement" in self.form.fields: - self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) - - -class ReportFindingFilterWithoutObjectLookups(ReportFindingFilterHelper, FindingTagStringFilter): - test__engagement__product__prod_type = NumberFilter(widget=HiddenInput()) - test__engagement__product = NumberFilter(widget=HiddenInput()) - test__engagement = NumberFilter(widget=HiddenInput()) - test = NumberFilter(widget=HiddenInput()) - endpoint = 
NumberFilter(widget=HiddenInput()) - reporter = CharFilter( - field_name="reporter__username", - lookup_expr="iexact", - label="Reporter Username", - help_text="Search for Reporter names that are an exact match") - reporter_contains = CharFilter( - field_name="reporter__username", - lookup_expr="icontains", - label="Reporter Username Contains", - help_text="Search for Reporter names that contain a given pattern") - reviewers = CharFilter( - field_name="reviewers__username", - lookup_expr="iexact", - label="Reviewer Username", - help_text="Search for Reviewer names that are an exact match") - reviewers_contains = CharFilter( - field_name="reviewers__username", - lookup_expr="icontains", - label="Reviewer Username Contains", - help_text="Search for Reviewer usernames that contain a given pattern") - last_reviewed_by = CharFilter( - field_name="last_reviewed_by__username", - lookup_expr="iexact", - label="Last Reviewed By Username", - help_text="Search for Last Reviewed By names that are an exact match") - last_reviewed_by_contains = CharFilter( - field_name="last_reviewed_by__username", - lookup_expr="icontains", - label="Last Reviewed By Username Contains", - help_text="Search for Last Reviewed By usernames that contain a given pattern") - review_requested_by = CharFilter( - field_name="review_requested_by__username", - lookup_expr="iexact", - label="Review Requested By Username", - help_text="Search for Review Requested By names that are an exact match") - review_requested_by_contains = CharFilter( - field_name="review_requested_by__username", - lookup_expr="icontains", - label="Review Requested By Username Contains", - help_text="Search for Review Requested By usernames that contain a given pattern") - mitigated_by = CharFilter( - field_name="mitigated_by__username", - lookup_expr="iexact", - label="Mitigator Username", - help_text="Search for Mitigator names that are an exact match") - mitigated_by_contains = CharFilter( - field_name="mitigated_by__username", - 
lookup_expr="icontains", - label="Mitigator Username Contains", - help_text="Search for Mitigator usernames that contain a given pattern") - defect_review_requested_by = CharFilter( - field_name="defect_review_requested_by__username", - lookup_expr="iexact", - label="Requester of Defect Review Username", - help_text="Search for Requester of Defect Review names that are an exact match") - defect_review_requested_by_contains = CharFilter( - field_name="defect_review_requested_by__username", - lookup_expr="icontains", - label="Requester of Defect Review Username Contains", - help_text="Search for Requester of Defect Review usernames that contain a given pattern") - test__engagement__product__prod_type__name = CharFilter( - field_name="test__engagement__product__prod_type__name", - lookup_expr="iexact", - label=labels.ORG_FILTERS_NAME_LABEL, - help_text=labels.ORG_FILTERS_NAME_HELP) - test__engagement__product__prod_type__name_contains = CharFilter( - field_name="test__engagement__product__prod_type__name", - lookup_expr="icontains", - label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) - test__engagement__product__name = CharFilter( - field_name="test__engagement__product__name", - lookup_expr="iexact", - label=labels.ASSET_FILTERS_NAME_LABEL, - help_text=labels.ASSET_FILTERS_NAME_HELP) - test__engagement__product__name_contains = CharFilter( - field_name="test__engagement__product__name", - lookup_expr="icontains", - label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, - help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) - test__engagement__name = CharFilter( - field_name="test__engagement__name", - lookup_expr="iexact", - label="Engagement Name", - help_text="Search for Engagement names that are an exact match") - test__engagement__name_contains = CharFilter( - field_name="test__engagement__name", - lookup_expr="icontains", - label="Engagement name Contains", - help_text="Search for Engagement names that contain a given 
pattern") - test__name = CharFilter( - field_name="test__name", - lookup_expr="iexact", - label="Test Name", - help_text="Search for Test names that are an exact match") - test__name_contains = CharFilter( - field_name="test__name", - lookup_expr="icontains", - label="Test name Contains", - help_text="Search for Test names that contain a given pattern") - - def __init__(self, *args, **kwargs): - self.manage_kwargs(kwargs) - super().__init__(*args, **kwargs) - - product_type_refs = [ - "test__engagement__product__prod_type__name", - "test__engagement__product__prod_type__name_contains", - ] - product_refs = [ - "test__engagement__product__name", - "test__engagement__product__name_contains", - "test__engagement__product__tags", - "test__engagement__product__tags_contains", - "not_test__engagement__product__tags", - "not_test__engagement__product__tags_contains", - ] - engagement_refs = [ - "test__engagement__name", - "test__engagement__name_contains", - "test__engagement__tags", - "test__engagement__tags_contains", - "not_test__engagement__tags", - "not_test__engagement__tags_contains", - ] - test_refs = [ - "test__name", - "test__name_contains", - "test__tags", - "test__tags_contains", - "not_test__tags", - "not_test__tags_contains", - ] - - if self.test: - self.delete_tags_from_form(product_type_refs) - self.delete_tags_from_form(product_refs) - self.delete_tags_from_form(engagement_refs) - self.delete_tags_from_form(test_refs) - elif self.engagement: - self.delete_tags_from_form(product_type_refs) - self.delete_tags_from_form(product_refs) - self.delete_tags_from_form(engagement_refs) - elif self.product: - self.delete_tags_from_form(product_type_refs) - self.delete_tags_from_form(product_refs) - elif self.prod_type: - self.delete_tags_from_form(product_type_refs) - - -class UserFilter(DojoFilter): - first_name = CharFilter(lookup_expr="icontains") - last_name = CharFilter(lookup_expr="icontains") - username = CharFilter(lookup_expr="icontains") - email = 
CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("username", "username"), - ("last_name", "last_name"), - ("first_name", "first_name"), - ("email", "email"), - ("is_active", "is_active"), - ("is_superuser", "is_superuser"), - ("date_joined", "date_joined"), - ("last_login", "last_login"), - ), - field_labels={ - "username": "User Name", - "is_active": "Active", - "is_superuser": "Superuser", - }, - ) - - class Meta: - model = Dojo_User - fields = ["is_superuser", "is_active", "first_name", "last_name", "username", "email"] - - -class GroupFilter(DojoFilter): - name = CharFilter(lookup_expr="icontains") - description = CharFilter(lookup_expr="icontains") - - class Meta: - model = Dojo_Group - fields = ["name", "description"] - exclude = ["users"] - - -# This class is used exclusively by Findings -class TestImportFilter(DojoFilter): - version = CharFilter(field_name="version", lookup_expr="icontains") - version_exact = CharFilter(field_name="version", lookup_expr="iexact", label="Version Exact") - branch_tag = CharFilter(lookup_expr="icontains", label="Branch/Tag") - build_id = CharFilter(lookup_expr="icontains", label="Build ID") - commit_hash = CharFilter(lookup_expr="icontains", label="Commit hash") - - findings_affected = BooleanFilter(field_name="findings_affected", lookup_expr="isnull", exclude=True, label="Findings affected") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("date", "date"), - ("version", "version"), - ("branch_tag", "branch_tag"), - ("build_id", "build_id"), - ("commit_hash", "commit_hash"), - - ), - ) - - class Meta: - model = Test_Import - fields = [] - - -# This class is used exclusively by Findings -class TestImportFindingActionFilter(DojoFilter): - action = MultipleChoiceFilter(choices=IMPORT_ACTIONS) - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("action", "action"), - ), - ) - - class Meta: - model = Test_Import_Finding_Action - fields = 
[] - - -# Used within the TestImport API -class TestImportAPIFilter(DojoFilter): - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("id", "id"), - ("created", "created"), - ("modified", "modified"), - ("version", "version"), - ("branch_tag", "branch_tag"), - ("build_id", "build_id"), - ("commit_hash", "commit_hash"), - - ), - ) - - class Meta: - model = Test_Import - fields = ["test", - "findings_affected", - "version", - "branch_tag", - "build_id", - "commit_hash", - "test_import_finding_action__action", - "test_import_finding_action__finding", - "test_import_finding_action__created"] - - -class LogEntryFilter(DojoFilter): - - action = MultipleChoiceFilter(choices=LogEntry.Action.choices) - actor = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none()) - timestamp = DateRangeFilter() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["actor"].queryset = get_authorized_users(Permissions.Product_View) - - class Meta: - model = LogEntry - exclude = ["content_type", "object_pk", "object_id", "object_repr", - "changes", "additional_data", "remote_addr"] - filter_overrides = { - JSONField: { - "filter_class": CharFilter, - "extra": lambda _: { - "lookup_expr": "icontains", - }, - }, - } - - -class PgHistoryFilter(DojoFilter): - - """ - Filter for django-pghistory audit entries. 
- - This filter works with pghistory event tables that have: - - pgh_created_at: timestamp of the event - - pgh_label: event type (insert/update/delete) - - user: user ID from context - - url: URL from context - - remote_addr: IP address from context - """ - - # Filter by event creation time (equivalent to auditlog timestamp) - pgh_created_at = DateRangeFilter(field_name="pgh_created_at", label="Timestamp") - - # Filter by event type/label - pgh_label = ChoiceFilter( - field_name="pgh_label", - label="Event Type", - choices=[ - ("", "All"), - ("insert", "Insert"), - ("update", "Update"), - ("delete", "Delete"), - ("initial_import", "Initial Import"), - ], - ) - - # Filter by user (from context) - user = ModelChoiceFilter( - field_name="user", - queryset=Dojo_User.objects.none(), - label="User", - empty_label="All Users", - ) - - # Filter by IP address (from context) - remote_addr = CharFilter( - field_name="remote_addr", - lookup_expr="icontains", - label="IP Address Contains", - ) - - # Filter by changes/diff field (JSON field containing what changed) - pgh_diff = CharFilter( - method="filter_pgh_diff_contains", - label="Changes Contains", - help_text="Search for field names or values in the changes (optimized for JSONB, but can be slow)", - ) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.form.fields["user"].queryset = get_authorized_users(Permissions.Product_View) - - def filter_pgh_diff_contains(self, queryset, name, value): - """ - Custom filter for pgh_diff that uses efficient JSONB operations. - Searches both keys and values in the JSONB field. 
- """ - if not value: - return queryset - - # Search in both keys and values using JSONB operators - return queryset.filter( - Q(pgh_diff__has_key=value) | # Search in keys: {"severity": [...]} - Q(pgh_diff__has_any_keys=[value]) | # Alternative key search - Q(pgh_diff__contains=f'"{value}"'), # Search in values: ["severity", "other"] - ) - - class Meta: - fields = ["pgh_created_at", "pgh_label", "user", "url", "remote_addr", "pgh_diff"] - exclude = [] - - -class ProductTypeFilter(DojoFilter): - name = CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ), - ) - - class Meta: - model = Product_Type - exclude = [] - include = ("name",) - - -class TestTypeFilter(DojoFilter): - name = CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ), - ) - - class Meta: - model = Test_Type - exclude = [] - include = ("name",) - - -class DevelopmentEnvironmentFilter(DojoFilter): - name = CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ), - ) - - class Meta: - model = Development_Environment - exclude = [] - include = ("name",) - - -class NoteTypesFilter(DojoFilter): - name = CharFilter(lookup_expr="icontains") - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("name", "name"), - ("description", "description"), - ("is_single", "is_single"), - ("is_mandatory", "is_mandatory"), - ), - ) - - class Meta: - model = Note_Type - exclude = [] - include = ("name", "is_single", "description") - -# ============================== -# Defect Dojo Engaegment Surveys -# ============================== - - -class QuestionnaireFilter(FilterSet): - name = CharFilter(lookup_expr="icontains") - description = CharFilter(lookup_expr="icontains") - active = BooleanFilter() - - class Meta: - model = Engagement_Survey - exclude = ["questions"] - - survey_set = FilterSet 
- - -class QuestionTypeFilter(ChoiceFilter): - def any(self, qs, name): - return qs.all() - - def text_question(self, qs, name): - return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(TextQuestion)) - - def choice_question(self, qs, name): - return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion)) - - options = { - None: (_("Any"), any), - 1: (_("Text Question"), text_question), - 2: (_("Choice Question"), choice_question), - } - - def __init__(self, *args, **kwargs): - kwargs["choices"] = [ - (key, value[0]) for key, value in six.iteritems(self.options)] - super().__init__(*args, **kwargs) - - def filter(self, qs, value): - try: - value = int(value) - except (ValueError, TypeError): - value = None - return self.options[value][1](self, qs, self.options[value][0]) - - -class ApiUserFilter(filters.FilterSet): - last_login = filters.DateFromToRangeFilter() - date_joined = filters.DateFromToRangeFilter() - is_active = filters.BooleanFilter() - is_superuser = filters.BooleanFilter() - username = filters.CharFilter(lookup_expr="icontains") - first_name = filters.CharFilter(lookup_expr="icontains") - last_name = filters.CharFilter(lookup_expr="icontains") - email = filters.CharFilter(lookup_expr="icontains") - class Meta: - model = User - fields = [ - "id", - "username", - "first_name", - "last_name", - "email", - "is_active", - "is_superuser", - "last_login", - "date_joined", - ] - - o = OrderingFilter( - # tuple-mapping retains order - fields=( - ("username", "username"), - ("last_name", "last_name"), - ("first_name", "first_name"), - ("email", "email"), - ("is_active", "is_active"), - ("is_superuser", "is_superuser"), - ("date_joined", "date_joined"), - ("last_login", "last_login"), - ), - ) - - -with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning): - class QuestionFilter(FilterSet): - text = CharFilter(lookup_expr="icontains") - type = QuestionTypeFilter() - - class Meta: - model = Question - 
exclude = ["polymorphic_ctype", "created", "modified", "order"] - - question_set = FilterSet +import collections +import decimal +import logging +import warnings +from datetime import datetime, timedelta + +import six +import tagulous +from auditlog.models import LogEntry +from django import forms +from django.apps import apps +from django.conf import settings +from django.contrib.contenttypes.models import ContentType +from django.db.models import Count, JSONField, Q +from django.forms import HiddenInput +from django.utils.timezone import now, tzinfo +from django.utils.translation import gettext_lazy as _ +from django_filters import ( + BooleanFilter, + CharFilter, + DateFilter, + DateFromToRangeFilter, + DateTimeFilter, + FilterSet, + ModelChoiceFilter, + ModelMultipleChoiceFilter, + MultipleChoiceFilter, + NumberFilter, + OrderingFilter, + RangeFilter, +) +from django_filters import rest_framework as filters +from django_filters.filters import ChoiceFilter +from drf_spectacular.types import OpenApiTypes +from drf_spectacular.utils import extend_schema_field +from polymorphic.base import ManagerInheritanceWarning + +# from tagulous.forms import TagWidget +# import tagulous +from dojo.authorization.roles_permissions import Permissions +from dojo.endpoint.queries import get_authorized_endpoints +from dojo.engagement.queries import get_authorized_engagements +from dojo.finding.helper import ( + ACCEPTED_FINDINGS_QUERY, + CLOSED_FINDINGS_QUERY, + FALSE_POSITIVE_FINDINGS_QUERY, + INACTIVE_FINDINGS_QUERY, + NOT_ACCEPTED_FINDINGS_QUERY, + OPEN_FINDINGS_QUERY, + OUT_OF_SCOPE_FINDINGS_QUERY, + UNDER_REVIEW_QUERY, + VERIFIED_FINDINGS_QUERY, + WAS_ACCEPTED_FINDINGS_QUERY, +) +from dojo.finding.queries import get_authorized_findings +from dojo.finding_group.queries import get_authorized_finding_groups +from dojo.labels import get_labels +from dojo.models import ( + EFFORT_FOR_FIXING_CHOICES, + ENGAGEMENT_STATUS_CHOICES, + IMPORT_ACTIONS, + SEVERITY_CHOICES, + App_Analysis, + 
ChoiceQuestion, + Cred_Mapping, + Development_Environment, + Dojo_Group, + Dojo_User, + DojoMeta, + Endpoint, + Endpoint_Status, + Engagement, + Engagement_Survey, + Finding, + Finding_Group, + Finding_Template, + Note_Type, + Product, + Product_API_Scan_Configuration, + Product_Type, + Question, + Risk_Acceptance, + Test, + Test_Import, + Test_Import_Finding_Action, + Test_Type, + TextQuestion, + User, + Vulnerability_Id, +) +from dojo.product.queries import get_authorized_products +from dojo.product_type.queries import get_authorized_product_types +from dojo.risk_acceptance.queries import get_authorized_risk_acceptances +from dojo.test.queries import get_authorized_tests +from dojo.user.queries import get_authorized_users +from dojo.utils import get_system_setting, is_finding_groups_enabled, truncate_timezone_aware + +logger = logging.getLogger(__name__) + +labels = get_labels() + +BOOLEAN_CHOICES = (("false", "No"), ("true", "Yes")) +EARLIEST_FINDING = None + + +def custom_filter(queryset, name, value): + values = value.split(",") + cust_filter = (f"{name}__in") + return queryset.filter(Q(**{cust_filter: values})) + + +def custom_vulnerability_id_filter(queryset, name, value): + values = value.split(",") + ids = Vulnerability_Id.objects \ + .filter(vulnerability_id__in=values) \ + .values_list("finding_id", flat=True) + return queryset.filter(id__in=ids) + + +def vulnerability_id_filter(queryset, name, value): + ids = Vulnerability_Id.objects \ + .filter(vulnerability_id=value) \ + .values_list("finding_id", flat=True) + return queryset.filter(id__in=ids) + + +class NumberInFilter(filters.BaseInFilter, filters.NumberFilter): + pass + + +class CharFieldInFilter(filters.BaseInFilter, filters.CharFilter): + def __init__(self, *args, **kwargs): + super(CharFilter, self).__init__(*args, **kwargs) + + +class CharFieldFilterANDExpression(CharFieldInFilter): + def filter(self, queryset, value): + # Catch the case where a value if not supplied + if not value: + return 
queryset + # Do the filtering + objects = set(value.split(",")) + return ( + queryset.filter(**{f"{self.field_name}__in": objects}) + .annotate(object_count=Count(self.field_name)) + .filter(object_count=len(objects)) + ) + + +class FindingStatusFilter(ChoiceFilter): + def any(self, qs, name): + return qs + + def open(self, qs, name): + return qs.filter(OPEN_FINDINGS_QUERY) + + def verified(self, qs, name): + return qs.filter(VERIFIED_FINDINGS_QUERY) + + def out_of_scope(self, qs, name): + return qs.filter(OUT_OF_SCOPE_FINDINGS_QUERY) + + def false_positive(self, qs, name): + return qs.filter(FALSE_POSITIVE_FINDINGS_QUERY) + + def inactive(self, qs, name): + return qs.filter(INACTIVE_FINDINGS_QUERY) + + def risk_accepted(self, qs, name): + return qs.filter(ACCEPTED_FINDINGS_QUERY) + + def closed(self, qs, name): + return qs.filter(CLOSED_FINDINGS_QUERY) + + def under_review(self, qs, name): + return qs.filter(UNDER_REVIEW_QUERY) + + options = { + None: (_("Any"), any), + 0: (_("Open"), open), + 1: (_("Verified"), verified), + 2: (_("Out Of Scope"), out_of_scope), + 3: (_("False Positive"), false_positive), + 4: (_("Inactive"), inactive), + 5: (_("Risk Accepted"), risk_accepted), + 6: (_("Closed"), closed), + 7: (_("Under Review"), under_review), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + earliest_finding = get_earliest_finding(qs) + if earliest_finding is not None: + start_date = datetime.combine( + earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) + self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) + self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.field_name) + + +class FindingSLAFilter(ChoiceFilter): + def any(self, qs, 
name): + return qs + + def sla_satisfied(self, qs, name): + # return findings that have an sla expiration date after today or no sla expiration date + return qs.filter(Q(sla_expiration_date__isnull=True) | Q(sla_expiration_date__gt=now().date())) + + def sla_violated(self, qs, name): + # return active findings that have an sla expiration date before today + return qs.filter( + Q( + active=True, + false_p=False, + duplicate=False, + out_of_scope=False, + risk_accepted=False, + is_mitigated=False, + mitigated=None, + ) & Q(sla_expiration_date__lt=now().date()), + ) + + options = { + None: (_("Any"), any), + 0: (_("False"), sla_satisfied), + 1: (_("True"), sla_violated), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.field_name) + + +class FindingHasJIRAFilter(ChoiceFilter): + def no_jira(self, qs, name): + return qs.filter(Q(jira_issue=None) & Q(finding_group__jira_issue=None)) + + def any_jira(self, qs, name): + return qs.filter(~Q(jira_issue=None) | ~Q(finding_group__jira_issue=None)) + + def all_items(self, qs, name): + return qs + + options = { + 0: (_("Yes"), any_jira), + 1: (_("No"), no_jira), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + return self.all_items(qs, self.field_name) + + return self.options[value][1](self, qs, self.field_name) + + +class ProductSLAFilter(ChoiceFilter): + def any(self, qs, name): + return qs + + def sla_satisifed(self, qs, name): + for product in qs: + if product.violates_sla(): + qs = qs.exclude(id=product.id) + return qs + + def 
sla_violated(self, qs, name): + for product in qs: + if not product.violates_sla(): + qs = qs.exclude(id=product.id) + return qs + + options = { + None: (_("Any"), any), + 0: (_("False"), sla_satisifed), + 1: (_("True"), sla_violated), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.field_name) + + +def get_earliest_finding(queryset=None): + if queryset is None: # don't to 'if not queryset' which will trigger the query + queryset = Finding.objects.all() + + try: + EARLIEST_FINDING = queryset.earliest("date") + except (Finding.DoesNotExist, Endpoint_Status.DoesNotExist): + EARLIEST_FINDING = None + return EARLIEST_FINDING + + +def cwe_options(queryset): + cwe = {} + cwe = dict([cwe, cwe] + for cwe in queryset.order_by().values_list("cwe", flat=True).distinct() + if isinstance(cwe, int) and cwe is not None and cwe > 0) + cwe = collections.OrderedDict(sorted(cwe.items())) + return list(cwe.items()) + + +class DojoFilter(FilterSet): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + for field in ["tags", "test__tags", "test__engagement__tags", "test__engagement__product__tags", + "not_tags", "not_test__tags", "not_test__engagement__tags", "not_test__engagement__product__tags"]: + if field in self.form.fields: + tags_filter = self.filters["tags"] + model = tags_filter.model + + self.form.fields[field] = model._meta.get_field("tags").formfield() + # we defer applying the select2 autocomplete because there can be multiple forms on the same page + # and form.js would then apply select2 multiple times, resulting in duplicated fields + # the initialization now happens in filter_js_snippet.html + self.form.fields[field].widget.tag_options += 
tagulous.models.options.TagOptions(autocomplete_settings={"width": "200px", "defer": True}) + tagged_model, exclude = get_tags_model_from_field_name(field) + if tagged_model: # only if not the normal tags field + self.form.fields[field].label = get_tags_label_from_model(tagged_model) + self.form.fields[field].autocomplete_tags = tagged_model.tags.tag_model.objects.all().order_by("name") + + if exclude: + self.form.fields[field].label = "Not " + self.form.fields[field].label + + def filter_queryset(self, queryset): + qs = super().filter_queryset(queryset) + if hasattr(self, "form") and hasattr(self.form, "cleaned_data"): + for name, f in self.filters.items(): + field_name = getattr(f, "field_name", "") or "" + # Only apply distinct for tag lookups that can duplicate base rows + if "tags__name" in field_name: + value = self.form.cleaned_data.get(name, None) + if value not in (None, "", [], (), {}): + lookup_expr = getattr(f, "lookup_expr", None) + is_exclude = getattr(f, "exclude", False) + needs_distinct = ( + is_exclude + or lookup_expr in { + "in", + "contains", + "icontains", + "startswith", + "istartswith", + "endswith", + "iendswith", + } + ) + # exact/iexact typically won't duplicate rows + if needs_distinct: + return qs.distinct() + return qs + + +def get_tags_model_from_field_name(field): + exclude = False + if field.startswith("not_"): + field = field.replace("not_", "") + exclude = True + try: + parts = field.split("__") + model_name = parts[-2] + return apps.get_model(f"dojo.{model_name}", require_ready=True), exclude + except Exception: + return None, exclude + + +def get_tags_label_from_model(model): + if model: + if model is Product_Type: + return labels.ORG_FILTERS_TAGS_LABEL + if model is Product: + return labels.ASSET_FILTERS_TAGS_LABEL + return f"Tags ({model.__name__.title()})" + return "Tags (Unknown)" + + +def get_finding_filterset_fields(*, metrics=False, similar=False, filter_string_matching=False): + fields = [] + + if similar: + 
fields.extend([ + "id", + "hash_code", + ]) + + fields.extend(["title", "component_name", "component_version"]) + + if metrics: + fields.extend([ + "start_date", + "end_date", + ]) + + fields.extend([ + "date", + "cwe", + "severity", + "last_reviewed", + "last_status_update", + "mitigated", + "reporter", + "reviewers", + ]) + + if filter_string_matching: + fields.extend([ + "reporter", + "reviewers", + "test__engagement__product__prod_type__name", + "test__engagement__product__name", + "test__engagement__name", + "test__title", + ]) + else: + fields.extend([ + "reporter", + "reviewers", + "test__engagement__product__prod_type", + "test__engagement__product", + "test__engagement", + "test", + ]) + + fields.extend([ + "test__test_type", + "test__engagement__version", + "test__version", + "endpoints", + "status", + "active", + "verified", + "duplicate", + "is_mitigated", + "out_of_scope", + "false_p", + "has_component", + "has_notes", + "file_path", + "unique_id_from_tool", + "vuln_id_from_tool", + "service", + "epss_score", + "epss_score_range", + "epss_percentile", + "epss_percentile_range", + "known_exploited", + "ransomware_used", + "kev_date", + "kev_before", + "kev_after", + "fix_available", + ]) + + if similar: + fields.extend([ + "id", + ]) + + fields.extend([ + "param", + "payload", + "risk_acceptance", + ]) + + if get_system_setting("enable_jira"): + fields.extend([ + "has_jira_issue", + "jira_creation", + "jira_change", + "jira_issue__jira_key", + ]) + + if is_finding_groups_enabled(): + if filter_string_matching: + fields.extend([ + "has_finding_group", + "finding_group__name", + ]) + else: + fields.extend([ + "has_finding_group", + "finding_group", + ]) + + if get_system_setting("enable_jira"): + fields.extend([ + "has_jira_group_issue", + ]) + + return fields + + +class FindingTagFilter(DojoFilter): + tag = CharFilter( + field_name="tags__name", + lookup_expr="icontains", + label="Tag name contains", + help_text="Search for tags on a Finding that contain 
a given pattern") + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Finding.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected tags (OR logic)", + ) + + tags_and = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Finding.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected tags (AND logic)", + label="Tags (AND)", + conjoined=True, + ) + + test__tags = ModelMultipleChoiceFilter( + field_name="test__tags__name", + to_field_name="name", + queryset=Test.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Test tags (OR logic)", + label="Test Tags", + ) + + test__tags_and = ModelMultipleChoiceFilter( + field_name="test__tags__name", + to_field_name="name", + queryset=Test.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Test tags (AND logic)", + label="Test Tags (AND)", + conjoined=True, + ) + + test__engagement__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__tags__name", + to_field_name="name", + queryset=Engagement.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Engagement tags (OR logic)", + label="Engagement Tags", + ) + + test__engagement__tags_and = ModelMultipleChoiceFilter( + field_name="test__engagement__tags__name", + to_field_name="name", + queryset=Engagement.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Engagement tags (AND logic)", + label="Engagement Tags (AND)", + conjoined=True, + ) + + test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__product__tags__name", + to_field_name="name", + queryset=Product.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Product tags (OR logic)", + label="Product Tags", + ) + + 
test__engagement__product__tags_and = ModelMultipleChoiceFilter( + field_name="test__engagement__product__tags__name", + to_field_name="name", + queryset=Product.tags.tag_model.objects.all().order_by("name"), + help_text="Filter Findings by the selected Product tags (AND logic)", + label="Product Tags (AND)", + conjoined=True, + ) + + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Finding.tags.tag_model.objects.all().order_by("name"), + help_text="Search for tags on a Finding that contain a given pattern, and exclude them", + exclude=True) + not_test__tags = ModelMultipleChoiceFilter( + field_name="test__tags__name", + to_field_name="name", + label="Test without tags", + queryset=Test.tags.tag_model.objects.all().order_by("name"), + help_text="Search for tags on a Test that contain a given pattern, and exclude them", + exclude=True) + not_test__engagement__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__tags__name", + to_field_name="name", + label="Engagement without tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name"), + help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", + exclude=True) + not_test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__product__tags__name", + to_field_name="name", + label=labels.ASSET_FILTERS_ASSETS_WITHOUT_TAGS_LABEL, + queryset=Product.tags.tag_model.objects.all().order_by("name"), + help_text=labels.ASSET_FILTERS_ASSETS_WITHOUT_TAGS_HELP, + exclude=True) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class FindingTagStringFilter(FilterSet): + tags_contains = CharFilter( + label="Finding Tag Contains", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + tags = CharFilter( + label="Finding Tag", + field_name="tags__name", + lookup_expr="iexact", + 
help_text="Search for tags on a Finding that are an exact match") + test__tags_contains = CharFilter( + label="Test Tag Contains", + field_name="test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + test__tags = CharFilter( + label="Test Tag", + field_name="test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + test__engagement__tags_contains = CharFilter( + label="Engagement Tag Contains", + field_name="test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + test__engagement__tags = CharFilter( + label="Engagement Tag", + field_name="test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL, + field_name="test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP) + test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_LABEL, + field_name="test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP) + + not_tags_contains = CharFilter( + label="Finding Tag Does Not Contain", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern, and exclude them", + exclude=True) + not_tags = CharFilter( + label="Not Finding Tag", + field_name="tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match, and exclude them", + exclude=True) + not_test__tags_contains = CharFilter( + label="Test Tag Does Not Contain", + field_name="test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Test that contain a given pattern, and exclude 
them", + exclude=True) + not_test__tags = CharFilter( + label="Not Test Tag", + field_name="test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Test that are an exact match, and exclude them", + exclude=True) + not_test__engagement__tags_contains = CharFilter( + label="Engagement Tag Does Not Contain", + field_name="test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", + exclude=True) + not_test__engagement__tags = CharFilter( + label="Not Engagement Tag", + field_name="test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Engagement that are an exact match, and exclude them", + exclude=True) + not_test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL, + field_name="test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP, + exclude=True) + not_test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_LABEL, + field_name="test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_NOT_HELP, + exclude=True) + + def delete_tags_from_form(self, tag_list: list): + for tag in tag_list: + self.form.fields.pop(tag, None) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class DateRangeFilter(ChoiceFilter): + options = { + None: (_("Any date"), lambda qs, _: qs.all()), + 1: (_("Today"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + f"{name}__month": now().month, + f"{name}__day": now().day, + })), + 2: (_("Past 7 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=7)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 3: (_("Past 30 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": 
truncate_timezone_aware(now() - timedelta(days=30)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 4: (_("Past 90 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=90)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 5: (_("Current month"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + f"{name}__month": now().month, + })), + 6: (_("Current year"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + })), + 7: (_("Past year"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=365)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](qs, self.field_name) + + +class DateRangeOmniFilter(ChoiceFilter): + options = { + None: (_("Any date"), lambda qs, _: qs.all()), + 1: (_("Today"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + f"{name}__month": now().month, + f"{name}__day": now().day, + })), + 2: (_("Next 7 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=7)), + })), + 3: (_("Next 30 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=30)), + })), + 4: (_("Next 90 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=90)), + })), + 5: (_("Past 7 days"), lambda qs, name: 
qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=7)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 6: (_("Past 30 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=30)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 7: (_("Past 90 days"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=90)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 8: (_("Current month"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + f"{name}__month": now().month, + })), + 9: (_("Past year"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() - timedelta(days=365)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=1)), + })), + 10: (_("Current year"), lambda qs, name: qs.filter(**{ + f"{name}__year": now().year, + })), + 11: (_("Next year"), lambda qs, name: qs.filter(**{ + f"{name}__gte": truncate_timezone_aware(now() + timedelta(days=1)), + f"{name}__lt": truncate_timezone_aware(now() + timedelta(days=365)), + })), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](qs, self.field_name) + + +class ReportBooleanFilter(ChoiceFilter): + options = { + None: (_("Either"), lambda qs, _: qs.all()), + 1: (_("Yes"), lambda qs, name: qs.filter(**{ + f"{name}": True, + })), + 2: (_("No"), lambda qs, name: qs.filter(**{ + f"{name}": False, + })), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = 
int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](qs, self.field_name) + + +class ReportRiskAcceptanceFilter(ChoiceFilter): + + def any(self, qs, name): + return qs.all() + + def accepted(self, qs, name): + # return qs.filter(risk_acceptance__isnull=False) + return qs.filter(ACCEPTED_FINDINGS_QUERY) + + def not_accepted(self, qs, name): + return qs.filter(NOT_ACCEPTED_FINDINGS_QUERY) + + def was_accepted(self, qs, name): + return qs.filter(WAS_ACCEPTED_FINDINGS_QUERY) + + options = { + None: (_("Either"), any), + 1: (_("Yes"), accepted), + 2: (_("No"), not_accepted), + 3: (_("Expired"), was_accepted), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.field_name) + + +class MetricsDateRangeFilter(ChoiceFilter): + def any(self, qs, name): + earliest_finding = get_earliest_finding(qs) + if earliest_finding is not None: + start_date = datetime.combine( + earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) + self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) + self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) + return qs.all() + return None + + def current_month(self, qs, name): + self.start_date = datetime(now().year, now().month, 1, 0, 0, 0).replace(tzinfo=tzinfo()) + self.end_date = now() + return qs.filter(**{ + f"{name}__year": self.start_date.year, + f"{name}__month": self.start_date.month, + }) + + def current_year(self, qs, name): + self.start_date = datetime(now().year, 1, 1, 0, 0, 0).replace(tzinfo=tzinfo()) + self.end_date = now() + return qs.filter(**{ + f"{name}__year": now().year, + }) + + def past_x_days(self, qs, name, days): + self.start_date = truncate_timezone_aware(now() - 
timedelta(days=days)) + self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) + return qs.filter(**{ + f"{name}__gte": self.start_date, + f"{name}__lt": self.end_date, + }) + + def past_seven_days(self, qs, name): + return self.past_x_days(qs, name, 7) + + def past_thirty_days(self, qs, name): + return self.past_x_days(qs, name, 30) + + def past_ninety_days(self, qs, name): + return self.past_x_days(qs, name, 90) + + def past_six_months(self, qs, name): + return self.past_x_days(qs, name, 183) + + def past_year(self, qs, name): + return self.past_x_days(qs, name, 365) + + options = { + None: (_("Past 30 days"), past_thirty_days), + 1: (_("Past 7 days"), past_seven_days), + 2: (_("Past 90 days"), past_ninety_days), + 3: (_("Current month"), current_month), + 4: (_("Current year"), current_year), + 5: (_("Past 6 Months"), past_six_months), + 6: (_("Past year"), past_year), + 7: (_("Any date"), any), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + if value == 8: + return qs + earliest_finding = get_earliest_finding(qs) + if earliest_finding is not None: + start_date = datetime.combine( + earliest_finding.date, datetime.min.time()).replace(tzinfo=tzinfo()) + self.start_date = truncate_timezone_aware(start_date - timedelta(days=1)) + self.end_date = truncate_timezone_aware(now() + timedelta(days=1)) + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.field_name) + + +class ProductComponentFilter(DojoFilter): + component_name = CharFilter(lookup_expr="icontains", label="Module Name") + component_version = CharFilter(lookup_expr="icontains", label="Module Version") + + o = OrderingFilter( + fields=( + ("component_name", "component_name"), + ("component_version", "component_version"), + ("active", "active"), + ("duplicate", 
"duplicate"), + ("total", "total"), + ), + field_labels={ + "component_name": "Component Name", + "component_version": "Component Version", + "active": "Active", + "duplicate": "Duplicate", + "total": "Total", + }, + ) + + +class ComponentFilterWithoutObjectLookups(ProductComponentFilter): + test__engagement__product__prod_type__name = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + test__engagement__product__prod_type__name_contains = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + test__engagement__product__name = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="iexact", + label=labels.ASSET_FILTERS_NAME_LABEL, + help_text=labels.ASSET_FILTERS_NAME_HELP) + test__engagement__product__name_contains = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="icontains", + label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) + + +class ComponentFilter(ProductComponentFilter): + test__engagement__product__prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + test__engagement__product = ModelMultipleChoiceFilter( + queryset=Product.objects.none(), + label=labels.ASSET_FILTERS_LABEL) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields[ + "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields[ + "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) + + +class EngagementDirectFilterHelper(FilterSet): + name = CharFilter(lookup_expr="icontains", label="Engagement name contains") + version = 
CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version") + test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") + product__name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) + status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + target_start = DateRangeFilter() + target_end = DateRangeFilter() + test__engagement__product__lifecycle = MultipleChoiceFilter( + choices=Product.LIFECYCLE_CHOICES, + label=labels.ASSET_LIFECYCLE_LABEL, + null_label="Empty") + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("target_start", "target_start"), + ("name", "name"), + ("product__name", "product__name"), + ("product__prod_type__name", "product__prod_type__name"), + ("lead__first_name", "lead__first_name"), + ), + field_labels={ + "target_start": "Start date", + "name": "Engagement", + "product__name": labels.ASSET_FILTERS_NAME_LABEL, + "product__prod_type__name": labels.ORG_FILTERS_LABEL, + "lead__first_name": "Lead", + }, + ) + + +class EngagementDirectFilter(EngagementDirectFilterHelper, DojoFilter): + lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") + product__prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + 
queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields["lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ + .filter(engagement__lead__isnull=False).distinct() + + class Meta: + model = Engagement + fields = ["product__name", "product__prod_type"] + + +class EngagementDirectFilterWithoutObjectLookups(EngagementDirectFilterHelper): + lead = CharFilter( + field_name="lead__username", + lookup_expr="iexact", + label="Lead Username", + help_text="Search for Lead username that are an exact match") + lead_contains = CharFilter( + field_name="lead__username", + lookup_expr="icontains", + label="Lead Username Contains", + help_text="Search for Lead username that contain a given pattern") + product__prod_type__name = CharFilter( + field_name="product__prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + product__prod_type__name_contains = CharFilter( + field_name="product__prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + + class Meta: + model = Engagement + fields = ["product__name"] + + +class EngagementFilterHelper(FilterSet): + name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + engagement__name = CharFilter(lookup_expr="icontains", label="Engagement name contains") + engagement__version = 
CharFilter(field_name="engagement__version", lookup_expr="icontains", label="Engagement version") + engagement__test__version = CharFilter(field_name="engagement__test__version", lookup_expr="icontains", label="Test version") + engagement__product__lifecycle = MultipleChoiceFilter( + choices=Product.LIFECYCLE_CHOICES, + label=labels.ASSET_LIFECYCLE_LABEL, + null_label="Empty") + engagement__status = MultipleChoiceFilter( + choices=ENGAGEMENT_STATUS_CHOICES, + label="Status") + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ("prod_type__name", "prod_type__name"), + ), + field_labels={ + "name": labels.ASSET_FILTERS_NAME_LABEL, + "prod_type__name": labels.ORG_FILTERS_LABEL, + }, + ) + + +class EngagementFilter(EngagementFilterHelper, DojoFilter): + engagement__lead = ModelChoiceFilter( + queryset=Dojo_User.objects.none(), + label="Lead") + prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ + .filter(engagement__lead__isnull=False).distinct() + self.form.fields["tags"].help_text = labels.ASSET_FILTERS_TAGS_HELP + self.form.fields["not_tags"].help_text = labels.ASSET_FILTERS_NOT_TAGS_HELP + + class Meta: + model = Product + fields = ["name", "prod_type"] + + +class ProductEngagementsFilter(DojoFilter): + engagement__name = CharFilter(field_name="name", 
lookup_expr="icontains", label="Engagement name contains") + engagement__lead = ModelChoiceFilter(field_name="lead", queryset=Dojo_User.objects.none(), label="Lead") + engagement__version = CharFilter(field_name="version", lookup_expr="icontains", label="Engagement version") + engagement__test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") + engagement__status = MultipleChoiceFilter(field_name="status", choices=ENGAGEMENT_STATUS_CHOICES, + label="Status") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["engagement__lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ + .filter(engagement__lead__isnull=False).distinct() + + class Meta: + model = Engagement + fields = [] + + +class ProductEngagementsFilterWithoutObjectLookups(ProductEngagementsFilter): + engagement__lead = CharFilter( + field_name="lead__username", + lookup_expr="iexact", + label="Lead Username", + help_text="Search for Lead username that are an exact match") + + +class EngagementFilterWithoutObjectLookups(EngagementFilterHelper): + engagement__lead = CharFilter( + field_name="engagement__lead__username", + lookup_expr="iexact", + label="Lead Username", + help_text="Search for Lead username that are an exact match") + engagement__lead_contains = CharFilter( + field_name="engagement__lead__username", + lookup_expr="icontains", + label="Lead Username Contains", + help_text="Search for Lead username that contain a given pattern") + prod_type__name = CharFilter( + field_name="prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_LABEL, + help_text=labels.ORG_FILTERS_LABEL_HELP) + prod_type__name_contains = CharFilter( + field_name="prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + + class Meta: + model = Product + fields = ["name"] + + +class 
ProductEngagementFilterHelper(FilterSet): + version = CharFilter(lookup_expr="icontains", label="Engagement version") + test__version = CharFilter(field_name="test__version", lookup_expr="icontains", label="Test version") + name = CharFilter(lookup_expr="icontains") + status = MultipleChoiceFilter(choices=ENGAGEMENT_STATUS_CHOICES, label="Status") + target_start = DateRangeFilter() + target_end = DateRangeFilter() + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("status", "status"), + ("lead", "lead"), + ), + field_labels={ + "name": "Engagement Name", + }, + ) + + class Meta: + model = Product + fields = ["name"] + + +class ProductEngagementFilter(ProductEngagementFilterHelper, DojoFilter): + lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["lead"].queryset = get_authorized_users( + Permissions.Product_Type_View).filter(engagement__lead__isnull=False).distinct() + + +class ProductEngagementFilterWithoutObjectLookups(ProductEngagementFilterHelper, DojoFilter): + lead = CharFilter( + field_name="lead__username", + lookup_expr="iexact", + label="Lead Username", + help_text="Search for Lead username that are an exact match") + lead_contains = CharFilter( + 
field_name="lead__username", + lookup_expr="icontains", + label="Lead Username Contains", + help_text="Search for Lead username that contain a given pattern") + + +class ApiEngagementFilter(DojoFilter): + product__prod_type = NumberInFilter(field_name="product__prod_type", lookup_expr="in") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + product__tags = CharFieldInFilter( + field_name="product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) + product__tags__and = CharFieldFilterANDExpression( + field_name="product__tags__name", + help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + not_product__tags = CharFieldInFilter(field_name="product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, + exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("status", "status"), + ("lead", "lead"), + ("created", "created"), + ("updated", "updated"), + ), + field_labels={ + "name": "Engagement Name", + }, + + ) + + class Meta: + model = Engagement + fields = ["id", "active", "target_start", + "target_end", "requester", "report_type", 
+ "updated", "threat_model", "api_test", + "pen_test", "status", "product", "name", "version", "tags"] + + +class ProductFilterHelper(FilterSet): + name = CharFilter(lookup_expr="icontains", label=labels.ASSET_FILTERS_NAME_LABEL) + name_exact = CharFilter(field_name="name", lookup_expr="iexact", label=labels.ASSET_FILTERS_NAME_EXACT_LABEL) + business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES, null_label="Empty") + platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES, null_label="Empty") + lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, null_label="Empty") + origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES, null_label="Empty") + external_audience = BooleanFilter(field_name="external_audience") + internet_accessible = BooleanFilter(field_name="internet_accessible") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + outside_of_sla = ProductSLAFilter(label="Outside of SLA") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ("name_exact", "name_exact"), + ("prod_type__name", "prod_type__name"), + ("business_criticality", "business_criticality"), + ("platform", "platform"), + ("lifecycle", "lifecycle"), + ("origin", "origin"), + ("external_audience", "external_audience"), + ("internet_accessible", "internet_accessible"), + ("findings_count", "findings_count"), + ), + field_labels={ + "name": labels.ASSET_FILTERS_NAME_LABEL, + "name_exact": labels.ASSET_FILTERS_NAME_EXACT_LABEL, + "prod_type__name": labels.ORG_FILTERS_LABEL, + "business_criticality": "Business Criticality", + "platform": "Platform ", + "lifecycle": "Lifecycle ", + "origin": "Origin ", + "external_audience": "External Audience ", + 
"internet_accessible": "Internet Accessible ", + "findings_count": "Findings Count ", + }, + ) + + +class ProductFilter(ProductFilterHelper, DojoFilter): + prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Product.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Product.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + self.user = None + if "user" in kwargs: + self.user = kwargs.pop("user") + super().__init__(*args, **kwargs) + self.form.fields["prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields["tags"].help_text = labels.ASSET_FILTERS_TAGS_HELP + self.form.fields["not_tags"].help_text = labels.ASSET_FILTERS_NOT_TAGS_HELP + + class Meta: + model = Product + fields = [ + "name", "name_exact", "prod_type", "business_criticality", + "platform", "lifecycle", "origin", "external_audience", + "internet_accessible", "tags", + ] + + +class ProductFilterWithoutObjectLookups(ProductFilterHelper): + prod_type__name = CharFilter( + field_name="prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + prod_type__name_contains = CharFilter( + field_name="prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + + def __init__(self, *args, **kwargs): + kwargs.pop("user", None) + super().__init__(*args, **kwargs) + + class Meta: + model = Product + fields = [ + "name", "name_exact", "business_criticality", "platform", + "lifecycle", "origin", "external_audience", "internet_accessible", + ] + + +class ApiDojoMetaFilter(DojoFilter): + name_case_insensitive = 
CharFilter(field_name="name", lookup_expr="iexact") + value_case_insensitive = CharFilter(field_name="value", lookup_expr="iexact") + + class Meta: + model = DojoMeta + fields = [ + "id", + "product", + "endpoint", + "finding", + "name", + "value", + ] + + +class ApiProductFilter(DojoFilter): + # BooleanFilter + external_audience = BooleanFilter(field_name="external_audience") + internet_accessible = BooleanFilter(field_name="internet_accessible") + # CharFilter + name = CharFilter(lookup_expr="icontains") + name_exact = CharFilter(field_name="name", lookup_expr="iexact") + description = CharFilter(lookup_expr="icontains") + business_criticality = MultipleChoiceFilter(choices=Product.BUSINESS_CRITICALITY_CHOICES) + platform = MultipleChoiceFilter(choices=Product.PLATFORM_CHOICES) + lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES) + origin = MultipleChoiceFilter(choices=Product.ORIGIN_CHOICES) + # NumberInFilter + id = NumberInFilter(field_name="id", lookup_expr="in") + product_manager = NumberInFilter(field_name="product_manager", lookup_expr="in") + technical_contact = NumberInFilter(field_name="technical_contact", lookup_expr="in") + team_manager = NumberInFilter(field_name="team_manager", lookup_expr="in") + prod_type = NumberInFilter(field_name="prod_type", lookup_expr="in") + tid = NumberInFilter(field_name="tid", lookup_expr="in") + prod_numeric_grade = NumberInFilter(field_name="prod_numeric_grade", lookup_expr="in") + user_records = NumberInFilter(field_name="user_records", lookup_expr="in") + regulations = NumberInFilter(field_name="regulations", lookup_expr="in") + + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match 
with an AND expression") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(ProductSLAFilter()) + + # DateRangeFilter + created = DateRangeFilter() + updated = DateRangeFilter() + # NumberFilter + revenue = NumberFilter() + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("id", "id"), + ("tid", "tid"), + ("name", "name"), + ("created", "created"), + ("prod_numeric_grade", "prod_numeric_grade"), + ("business_criticality", "business_criticality"), + ("platform", "platform"), + ("lifecycle", "lifecycle"), + ("origin", "origin"), + ("revenue", "revenue"), + ("external_audience", "external_audience"), + ("internet_accessible", "internet_accessible"), + ("product_manager", "product_manager"), + ("product_manager__first_name", "product_manager__first_name"), + ("product_manager__last_name", "product_manager__last_name"), + ("technical_contact", "technical_contact"), + ("technical_contact__first_name", "technical_contact__first_name"), + ("technical_contact__last_name", "technical_contact__last_name"), + ("team_manager", "team_manager"), + ("team_manager__first_name", "team_manager__first_name"), + ("team_manager__last_name", "team_manager__last_name"), + ("prod_type", "prod_type"), + ("prod_type__name", "prod_type__name"), + ("updated", "updated"), + ("user_records", "user_records"), + ), + ) + + +class PercentageRangeFilter(RangeFilter): + def filter(self, qs, value): + if value is not None: + start = value.start / decimal.Decimal("100.0") if value.start else None + stop = value.stop / decimal.Decimal("100.0") if value.stop else None + value = slice(start, stop) + return 
super().filter(qs, value) + + +class ApiFindingFilter(DojoFilter): + # BooleanFilter + active = BooleanFilter(field_name="active") + duplicate = BooleanFilter(field_name="duplicate") + dynamic_finding = BooleanFilter(field_name="dynamic_finding") + false_p = BooleanFilter(field_name="false_p") + is_mitigated = BooleanFilter(field_name="is_mitigated") + out_of_scope = BooleanFilter(field_name="out_of_scope") + static_finding = BooleanFilter(field_name="static_finding") + under_defect_review = BooleanFilter(field_name="under_defect_review") + under_review = BooleanFilter(field_name="under_review") + verified = BooleanFilter(field_name="verified") + has_jira = BooleanFilter(field_name="jira_issue", lookup_expr="isnull", exclude=True) + fix_available = BooleanFilter(field_name="fix_available") + # CharFilter + component_version = CharFilter(lookup_expr="icontains") + component_name = CharFilter(lookup_expr="icontains") + vulnerability_id = CharFilter(method=custom_vulnerability_id_filter) + description = CharFilter(lookup_expr="icontains") + file_path = CharFilter(lookup_expr="icontains") + hash_code = CharFilter(lookup_expr="icontains") + impact = CharFilter(lookup_expr="icontains") + mitigation = CharFilter(lookup_expr="icontains") + numerical_severity = CharFilter(method=custom_filter, field_name="numerical_severity") + param = CharFilter(lookup_expr="icontains") + payload = CharFilter(lookup_expr="icontains") + references = CharFilter(lookup_expr="icontains") + severity = CharFilter(method=custom_filter, field_name="severity") + severity_justification = CharFilter(lookup_expr="icontains") + steps_to_reproduce = CharFilter(lookup_expr="icontains") + unique_id_from_tool = CharFilter(lookup_expr="icontains") + title = CharFilter(lookup_expr="icontains") + product_name = CharFilter(lookup_expr="engagement__product__name__iexact", field_name="test", label=labels.ASSET_FILTERS_NAME_EXACT_LABEL) + product_name_contains = 
CharFilter(lookup_expr="engagement__product__name__icontains", field_name="test", label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL) + product_lifecycle = CharFilter(method=custom_filter, lookup_expr="engagement__product__lifecycle", + field_name="test__engagement__product__lifecycle", label=labels.ASSET_FILTERS_CSV_LIFECYCLES_LABEL) + # DateRangeFilter + created = DateRangeFilter() + date = DateRangeFilter() + discovered_on = DateFilter(field_name="date", lookup_expr="exact") + discovered_before = DateFilter(field_name="date", lookup_expr="lt") + discovered_after = DateFilter(field_name="date", lookup_expr="gt") + jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation") + jira_change = DateRangeFilter(field_name="jira_issue__jira_change") + last_reviewed = DateRangeFilter() + mitigated = DateRangeFilter() + mitigated_on = DateTimeFilter(field_name="mitigated", lookup_expr="exact", method="filter_mitigated_on") + mitigated_before = DateTimeFilter(field_name="mitigated", lookup_expr="lt") + mitigated_after = DateTimeFilter(field_name="mitigated", lookup_expr="gt", label="Mitigated After", method="filter_mitigated_after") + # NumberInFilter + cwe = NumberInFilter(field_name="cwe", lookup_expr="in") + defect_review_requested_by = NumberInFilter(field_name="defect_review_requested_by", lookup_expr="in") + endpoints = NumberInFilter(field_name="endpoints", lookup_expr="in") + epss_score = PercentageRangeFilter( + field_name="epss_score", + label="EPSS score range", + help_text=( + "The range of EPSS score percentages to filter on; the min input is a lower bound, " + "the max is an upper bound. Leaving one empty will skip that bound (e.g., leaving " + "the min bound input empty will filter only on the max bound -- filtering on " + '"less than or equal"). Leading 0 required.' 
+ )) + epss_percentile = PercentageRangeFilter( + field_name="epss_percentile", + label="EPSS percentile range", + help_text=( + "The range of EPSS percentiles to filter on; the min input is a lower bound, the max " + "is an upper bound. Leaving one empty will skip that bound (e.g., leaving the min bound " + 'input empty will filter only on the max bound -- filtering on "less than or equal"). Leading 0 required.' + )) + found_by = NumberInFilter(field_name="found_by", lookup_expr="in") + id = NumberInFilter(field_name="id", lookup_expr="in") + last_reviewed_by = NumberInFilter(field_name="last_reviewed_by", lookup_expr="in") + mitigated_by = NumberInFilter(field_name="mitigated_by", lookup_expr="in") + nb_occurences = NumberInFilter(field_name="nb_occurences", lookup_expr="in") + reporter = NumberInFilter(field_name="reporter", lookup_expr="in") + scanner_confidence = NumberInFilter(field_name="scanner_confidence", lookup_expr="in") + review_requested_by = NumberInFilter(field_name="review_requested_by", lookup_expr="in") + reviewers = NumberInFilter(field_name="reviewers", lookup_expr="in") + sast_source_line = NumberInFilter(field_name="sast_source_line", lookup_expr="in") + sonarqube_issue = NumberInFilter(field_name="sonarqube_issue", lookup_expr="in") + test__test_type = NumberInFilter(field_name="test__test_type", lookup_expr="in", label="Test Type") + test__engagement = NumberInFilter(field_name="test__engagement", lookup_expr="in") + test__engagement__product = NumberInFilter(field_name="test__engagement__product", lookup_expr="in") + test__engagement__product__prod_type = NumberInFilter(field_name="test__engagement__product__prod_type", lookup_expr="in") + finding_group = NumberInFilter(field_name="finding_group", lookup_expr="in") + + # ReportRiskAcceptanceFilter + risk_acceptance = extend_schema_field(OpenApiTypes.NUMBER)(ReportRiskAcceptanceFilter()) + + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + 
tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + test__tags = CharFieldInFilter( + field_name="test__tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags present on test (uses OR for multiple values)") + test__tags__and = CharFieldFilterANDExpression( + field_name="test__tags__name", + help_text="Comma separated list of exact tags to match with an AND expression present on test") + test__engagement__tags = CharFieldInFilter( + field_name="test__engagement__tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags present on engagement (uses OR for multiple values)") + test__engagement__tags__and = CharFieldFilterANDExpression( + field_name="test__engagement__tags__name", + help_text="Comma separated list of exact tags to match with an AND expression present on engagement") + test__engagement__product__tags = CharFieldInFilter( + field_name="test__engagement__product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) + test__engagement__product__tags__and = CharFieldFilterANDExpression( + field_name="test__engagement__product__tags__name", + help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + not_test__tags = CharFieldInFilter(field_name="test__tags__name", lookup_expr="in", exclude="True", help_text="Comma separated list of exact tags present on test") + not_test__engagement__tags = CharFieldInFilter(field_name="test__engagement__tags__name", 
lookup_expr="in", + help_text="Comma separated list of exact tags not present on engagement", + exclude="True") + not_test__engagement__product__tags = CharFieldInFilter( + field_name="test__engagement__product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, + exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + outside_of_sla = extend_schema_field(OpenApiTypes.NUMBER)(FindingSLAFilter()) + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("active", "active"), + ("component_name", "component_name"), + ("component_version", "component_version"), + ("created", "created"), + ("last_status_update", "last_status_update"), + ("last_reviewed", "last_reviewed"), + ("cwe", "cwe"), + ("date", "date"), + ("duplicate", "duplicate"), + ("dynamic_finding", "dynamic_finding"), + ("false_p", "false_p"), + ("found_by", "found_by"), + ("id", "id"), + ("is_mitigated", "is_mitigated"), + ("numerical_severity", "numerical_severity"), + ("out_of_scope", "out_of_scope"), + ("planned_remediation_date", "planned_remediation_date"), + ("severity", "severity"), + ("sla_expiration_date", "sla_expiration_date"), + ("reviewers", "reviewers"), + ("static_finding", "static_finding"), + ("test__engagement__product__name", "test__engagement__product__name"), + ("title", "title"), + ("under_defect_review", "under_defect_review"), + ("under_review", "under_review"), + ("verified", "verified"), + ), + ) + + class Meta: + model = Finding + exclude = ["url", "thread_id", "notes", "files", + "line", "cve"] + + def filter_mitigated_after(self, queryset, name, value): + if value.hour == 0 and value.minute == 0 and value.second == 0: + value = value.replace(hour=23, minute=59, second=59) + + return queryset.filter(mitigated__gt=value) + + def filter_mitigated_on(self, queryset, name, value): + if value.hour == 0 and value.minute == 0 and value.second == 0: + # we have a simple date 
without a time, lets get a range from this morning to tonight at 23:59:59:999 + nextday = value + timedelta(days=1) + return queryset.filter(mitigated__gte=value, mitigated__lt=nextday) + + return queryset.filter(mitigated=value) + + +class PercentageFilter(NumberFilter): + def __init__(self, *args, **kwargs): + kwargs["method"] = self.filter_percentage + super().__init__(*args, **kwargs) + + def filter_percentage(self, queryset, name, value): + value /= decimal.Decimal("100.0") + # Provide some wiggle room for filtering since the UI rounds to two places (and because floats): + # a user may enter 0.15, but we'll return everything in [0.0015, 0.0016). + # To do this, add to our value 1^(whatever the exponent for our least significant digit place is), but ensure + # that the exponent is at MOST the ten thousandths place so we don't show a range of e.g. [0.2, 0.3). + exponent = min(value.normalize().as_tuple().exponent, -4) + max_val = value + decimal.Decimal(f"1E{exponent}") + lookup_kwargs = { + f"{name}__gte": value, + f"{name}__lt": max_val} + return queryset.filter(**lookup_kwargs) + + +class FindingFilterHelper(FilterSet): + title = CharFilter(lookup_expr="icontains") + date = DateRangeFilter(field_name="date", label="Date Discovered") + on = DateFilter(field_name="date", lookup_expr="exact", label="Discovered On") + before = DateFilter(field_name="date", lookup_expr="lt", label="Discovered Before") + after = DateFilter(field_name="date", lookup_expr="gt", label="Discovered After") + last_reviewed = DateRangeFilter() + last_status_update = DateRangeFilter() + cwe = MultipleChoiceFilter(choices=[]) + vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") + severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) + duplicate = ReportBooleanFilter() + is_mitigated = ReportBooleanFilter() + fix_available = ReportBooleanFilter() + mitigated = DateRangeFilter(field_name="mitigated", label="Mitigated Date") + mitigated_on = 
DateTimeFilter(field_name="mitigated", lookup_expr="exact", label="Mitigated On", method="filter_mitigated_on") + mitigated_before = DateTimeFilter(field_name="mitigated", lookup_expr="lt", label="Mitigated Before") + mitigated_after = DateTimeFilter(field_name="mitigated", lookup_expr="gt", label="Mitigated After", method="filter_mitigated_after") + planned_remediation_date = DateRangeOmniFilter() + planned_remediation_version = CharFilter(lookup_expr="icontains", label=_("Planned remediation version")) + file_path = CharFilter(lookup_expr="icontains") + param = CharFilter(lookup_expr="icontains") + payload = CharFilter(lookup_expr="icontains") + test__test_type = ModelMultipleChoiceFilter(queryset=Test_Type.objects.all(), label="Test Type") + endpoints__host = CharFilter(lookup_expr="icontains", label="Endpoint Host") + service = CharFilter(lookup_expr="icontains") + test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version") + test__version = CharFilter(lookup_expr="icontains", label="Test Version") + risk_acceptance = ReportRiskAcceptanceFilter(label="Risk Accepted") + effort_for_fixing = MultipleChoiceFilter(choices=EFFORT_FOR_FIXING_CHOICES) + test_import_finding_action__test_import = NumberFilter(widget=HiddenInput()) + endpoints = NumberFilter(widget=HiddenInput()) + status = FindingStatusFilter(label="Status") + test__engagement__product__lifecycle = MultipleChoiceFilter( + choices=Product.LIFECYCLE_CHOICES, + label=labels.ASSET_LIFECYCLE_LABEL) + + has_component = BooleanFilter( + field_name="component_name", + lookup_expr="isnull", + exclude=True, + label="Has Component") + has_notes = BooleanFilter( + field_name="notes", + lookup_expr="isnull", + exclude=True, + label="Has notes") + + if is_finding_groups_enabled(): + has_finding_group = BooleanFilter( + field_name="finding_group", + lookup_expr="isnull", + exclude=True, + label="Is Grouped") + + if get_system_setting("enable_jira"): + has_jira_issue = BooleanFilter( + 
field_name="jira_issue", + lookup_expr="isnull", + exclude=True, + label="Has JIRA") + jira_creation = DateRangeFilter(field_name="jira_issue__jira_creation", label="JIRA Creation") + jira_change = DateRangeFilter(field_name="jira_issue__jira_change", label="JIRA Updated") + jira_issue__jira_key = CharFilter(field_name="jira_issue__jira_key", lookup_expr="icontains", label="JIRA issue") + + if is_finding_groups_enabled(): + has_jira_group_issue = BooleanFilter( + field_name="finding_group__jira_issue", + lookup_expr="isnull", + exclude=True, + label="Has Group JIRA") + has_any_jira = FindingHasJIRAFilter(label="Has Any JIRA") + + outside_of_sla = FindingSLAFilter(label="Outside of SLA") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + epss_score = PercentageFilter(field_name="epss_score", label="EPSS score") + epss_score_range = PercentageRangeFilter( + field_name="epss_score", + label="EPSS score range", + help_text=( + "The range of EPSS score percentages to filter on; the left input is a lower bound, " + "the right is an upper bound. Leaving one empty will skip that bound (e.g., leaving " + "the lower bound input empty will filter only on the upper bound -- filtering on " + '"less than or equal").' + )) + epss_percentile = PercentageFilter(field_name="epss_percentile", label="EPSS percentile") + epss_percentile_range = PercentageRangeFilter( + field_name="epss_percentile", + label="EPSS percentile range", + help_text=( + "The range of EPSS percentiles to filter on; the left input is a lower bound, the right " + "is an upper bound. Leaving one empty will skip that bound (e.g., leaving the lower bound " + 'input empty will filter only on the upper bound -- filtering on "less than or equal").' 
+ )) + kev_date = DateFilter(field_name="kev_date", lookup_expr="exact", label="Added to KEV On") + kev_before = DateFilter(field_name="kev_date", lookup_expr="lt", label="Added to KEV Before") + kev_after = DateFilter(field_name="kev_date", lookup_expr="gt", label="Added to KEV After") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("numerical_severity", "numerical_severity"), + ("date", "date"), + ("mitigated", "mitigated"), + ("fix_available", "fix_available"), + ("risk_acceptance__created__date", + "risk_acceptance__created__date"), + ("last_reviewed", "last_reviewed"), + ("planned_remediation_date", "planned_remediation_date"), + ("title", "title"), + ("test__engagement__product__name", + "test__engagement__product__name"), + ("service", "service"), + ("sla_age_days", "sla_age_days"), + ("epss_score", "epss_score"), + ("epss_percentile", "epss_percentile"), + ("known_exploited", "known_exploited"), + ("ransomware_used", "ransomware_used"), + ("kev_date", "kev_date"), + ), + field_labels={ + "numerical_severity": "Severity", + "date": "Date", + "risk_acceptance__created__date": "Acceptance Date", + "mitigated": "Mitigated Date", + "fix_available": "Fix Available", + "title": "Finding Name", + "test__engagement__product__name": labels.ASSET_FILTERS_NAME_LABEL, + "epss_score": "EPSS Score", + "epss_percentile": "EPSS Percentile", + "known_exploited": "Known Exploited", + "ransomware_used": "Ransomware Used", + "kev_date": "Date added to KEV", + "sla_age_days": "SLA age (days)", + "planned_remediation_date": "Planned Remediation", + }, + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def set_date_fields(self, *args: list, **kwargs: dict): + date_input_widget = forms.DateInput(attrs={"class": "datepicker", "placeholder": "YYYY-MM-DD"}, format="%Y-%m-%d") + self.form.fields["on"].widget = date_input_widget + self.form.fields["before"].widget = date_input_widget + self.form.fields["after"].widget = 
date_input_widget + self.form.fields["kev_date"].widget = date_input_widget + self.form.fields["kev_before"].widget = date_input_widget + self.form.fields["kev_after"].widget = date_input_widget + self.form.fields["mitigated_on"].widget = date_input_widget + self.form.fields["mitigated_before"].widget = date_input_widget + self.form.fields["mitigated_after"].widget = date_input_widget + self.form.fields["cwe"].choices = cwe_options(self.queryset) + + def filter_mitigated_after(self, queryset, name, value): + if value.hour == 0 and value.minute == 0 and value.second == 0: + value = value.replace(hour=23, minute=59, second=59) + + return queryset.filter(mitigated__gt=value) + + def filter_mitigated_on(self, queryset, name, value): + if value.hour == 0 and value.minute == 0 and value.second == 0: + # we have a simple date without a time, lets get a range from this morning to tonight at 23:59:59:999 + nextday = value + timedelta(days=1) + return queryset.filter(mitigated__gte=value, mitigated__lt=nextday) + + return queryset.filter(mitigated=value) + + +class FindingFilterWithoutObjectLookups(FindingFilterHelper, FindingTagStringFilter): + test__engagement__product__prod_type = NumberFilter(widget=HiddenInput()) + test__engagement__product = NumberFilter(widget=HiddenInput()) + reporter = CharFilter( + field_name="reporter__username", + lookup_expr="iexact", + label="Reporter Username", + help_text="Search for Reporter names that are an exact match") + reporter_contains = CharFilter( + field_name="reporter__username", + lookup_expr="icontains", + label="Reporter Username Contains", + help_text="Search for Reporter names that contain a given pattern") + reviewers = CharFilter( + field_name="reviewers__username", + lookup_expr="iexact", + label="Reviewer Username", + help_text="Search for Reviewer names that are an exact match") + reviewers_contains = CharFilter( + field_name="reviewers__username", + lookup_expr="icontains", + label="Reviewer Username Contains", + 
help_text="Search for Reviewer usernames that contain a given pattern") + test__engagement__product__prod_type__name = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + test__engagement__product__prod_type__name_contains = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + test__engagement__product__name = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="iexact", + label=labels.ASSET_FILTERS_NAME_LABEL, + help_text=labels.ASSET_FILTERS_NAME_HELP) + test__engagement__product__name_contains = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="icontains", + label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) + test__engagement__name = CharFilter( + field_name="test__engagement__name", + lookup_expr="iexact", + label="Engagement Name", + help_text="Search for Engagement names that are an exact match") + test__engagement__name_contains = CharFilter( + field_name="test__engagement__name", + lookup_expr="icontains", + label="Engagement name Contains", + help_text="Search for Engagement names that contain a given pattern") + test__name = CharFilter( + field_name="test__name", + lookup_expr="iexact", + label="Test Name", + help_text="Search for Test names that are an exact match") + test__name_contains = CharFilter( + field_name="test__name", + lookup_expr="icontains", + label="Test name Contains", + help_text="Search for Test names that contain a given pattern") + + if is_finding_groups_enabled(): + finding_group__name = CharFilter( + field_name="finding_group__name", + lookup_expr="iexact", + label="Finding Group Name", + help_text="Search for Finding Group names that are an exact match") + 
finding_group__name_contains = CharFilter( + field_name="finding_group__name", + lookup_expr="icontains", + label="Finding Group Name Contains", + help_text="Search for Finding Group names that contain a given pattern") + + class Meta: + model = Finding + fields = get_finding_filterset_fields(filter_string_matching=True) + + exclude = ["url", "description", "mitigation", "impact", + "endpoints", "references", + "thread_id", "notes", "scanner_confidence", + "numerical_severity", "line", "duplicate_finding", + "hash_code", "reviewers", "created", "files", + "sla_start_date", "sla_expiration_date", "cvssv3", + "severity_justification", "steps_to_reproduce"] + + def __init__(self, *args, **kwargs): + self.user = None + self.pid = None + if "user" in kwargs: + self.user = kwargs.pop("user") + + if "pid" in kwargs: + self.pid = kwargs.pop("pid") + super().__init__(*args, **kwargs) + # Set some date fields + self.set_date_fields(*args, **kwargs) + # Don't show the product filter on the product finding view + if self.pid: + del self.form.fields["test__engagement__product__name"] + del self.form.fields["test__engagement__product__name_contains"] + del self.form.fields["test__engagement__product__prod_type__name"] + del self.form.fields["test__engagement__product__prod_type__name_contains"] + else: + del self.form.fields["test__name"] + del self.form.fields["test__name_contains"] + + +class FindingFilter(FindingFilterHelper, FindingTagFilter): + reporter = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none()) + reviewers = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none()) + test__engagement__product__prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + test__engagement__product = ModelMultipleChoiceFilter( + queryset=Product.objects.none(), + label=labels.ASSET_FILTERS_LABEL) + test__engagement = ModelMultipleChoiceFilter( + queryset=Engagement.objects.none(), + label="Engagement") + test = 
ModelMultipleChoiceFilter( + queryset=Test.objects.none(), + label="Test") + + if is_finding_groups_enabled(): + finding_group = ModelMultipleChoiceFilter( + queryset=Finding_Group.objects.none(), + label="Finding Group") + + class Meta: + model = Finding + fields = get_finding_filterset_fields() + + exclude = ["url", "description", "mitigation", "impact", + "endpoints", "references", + "thread_id", "notes", "scanner_confidence", + "numerical_severity", "line", "duplicate_finding", + "hash_code", "reviewers", "created", "files", + "sla_start_date", "sla_expiration_date", "cvssv3", + "severity_justification", "steps_to_reproduce"] + + def __init__(self, *args, **kwargs): + self.user = None + self.pid = None + if "user" in kwargs: + self.user = kwargs.pop("user") + + if "pid" in kwargs: + self.pid = kwargs.pop("pid") + super().__init__(*args, **kwargs) + # Set some date fields + self.set_date_fields(*args, **kwargs) + # Don't show the product filter on the product finding view + self.set_related_object_fields(*args, **kwargs) + + def set_related_object_fields(self, *args: list, **kwargs: dict): + finding_group_query = Finding_Group.objects.all() + if self.pid is not None: + del self.form.fields["test__engagement__product"] + del self.form.fields["test__engagement__product__prod_type"] + # TODO: add authorized check to be sure + self.form.fields["test__engagement"].queryset = Engagement.objects.filter( + product_id=self.pid, + ).all() + self.form.fields["test"].queryset = get_authorized_tests(Permissions.Test_View, product=self.pid).prefetch_related("test_type") + finding_group_query = Finding_Group.objects.filter(test__engagement__product_id=self.pid) + else: + self.form.fields[ + "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) + del self.form.fields["test"] + + if 
self.form.fields.get("test__engagement__product"): + self.form.fields["test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) + if self.form.fields.get("finding_group", None): + self.form.fields["finding_group"].queryset = get_authorized_finding_groups(Permissions.Finding_Group_View, queryset=finding_group_query) + self.form.fields["reporter"].queryset = get_authorized_users(Permissions.Finding_View) + self.form.fields["reviewers"].queryset = self.form.fields["reporter"].queryset + + +class FindingGroupsFilter(FilterSet): + name = CharFilter(lookup_expr="icontains", label="Name") + severity = ChoiceFilter( + choices=[ + ("Low", "Low"), + ("Medium", "Medium"), + ("High", "High"), + ("Critical", "Critical"), + ], + label="Min Severity", + ) + engagement = ModelMultipleChoiceFilter(queryset=Engagement.objects.none(), label="Engagement") + product = ModelMultipleChoiceFilter(queryset=Product.objects.none(), label=labels.ASSET_LABEL) + + class Meta: + model = Finding + fields = ["name", "severity", "engagement", "product"] + + def __init__(self, *args, **kwargs): + self.user = kwargs.pop("user", None) + self.pid = kwargs.pop("pid", None) + super().__init__(*args, **kwargs) + self.set_related_object_fields() + + def set_related_object_fields(self): + if self.pid is not None: + self.form.fields["engagement"].queryset = Engagement.objects.filter(product_id=self.pid) + if "product" in self.form.fields: + del self.form.fields["product"] + else: + self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View) + self.form.fields["engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) + + +class AcceptedFindingFilter(FindingFilter): + risk_acceptance__created__date = DateRangeFilter(label="Acceptance Date") + risk_acceptance__owner = ModelMultipleChoiceFilter( + queryset=Dojo_User.objects.none(), + label="Risk Acceptance Owner") + risk_acceptance = ModelMultipleChoiceFilter( + 
queryset=Risk_Acceptance.objects.none(), + label="Accepted By") + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["risk_acceptance__owner"].queryset = get_authorized_users(Permissions.Finding_View) + self.form.fields["risk_acceptance"].queryset = get_authorized_risk_acceptances(Permissions.Risk_Acceptance) + + +class AcceptedFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups): + risk_acceptance__created__date = DateRangeFilter(label="Acceptance Date") + risk_acceptance__owner = CharFilter( + field_name="risk_acceptance__owner__username", + lookup_expr="iexact", + label="Risk Acceptance Owner Username", + help_text="Search for Risk Acceptance Owners username that are an exact match") + risk_acceptance__owner_contains = CharFilter( + field_name="risk_acceptance__owner__username", + lookup_expr="icontains", + label="Risk Acceptance Owner Username Contains", + help_text="Search for Risk Acceptance Owners username that contain a given pattern") + risk_acceptance__name = CharFilter( + field_name="risk_acceptance__name", + lookup_expr="iexact", + label="Risk Acceptance Name", + help_text="Search for Risk Acceptance name that are an exact match") + risk_acceptance__name_contains = CharFilter( + field_name="risk_acceptance__name", + lookup_expr="icontains", + label="Risk Acceptance Name", + help_text="Search for Risk Acceptance name contain a given pattern") + + +class SimilarFindingHelper(FilterSet): + hash_code = MultipleChoiceFilter() + vulnerability_ids = CharFilter(method=custom_vulnerability_id_filter, label="Vulnerability Ids") + + def update_data(self, data: dict, *args: list, **kwargs: dict): + # if filterset is bound, use initial values as defaults + # because of this, we can't rely on the self.form.has_changed + self.has_changed = True + if not data and self.finding: + # get a mutable copy of the QueryDict + data = data.copy() + + data["vulnerability_ids"] = ",".join(self.finding.vulnerability_ids) + 
data["cwe"] = self.finding.cwe + data["file_path"] = self.finding.file_path + data["line"] = self.finding.line + data["unique_id_from_tool"] = self.finding.unique_id_from_tool + data["test__test_type"] = self.finding.test.test_type + data["test__engagement__product"] = self.finding.test.engagement.product + data["test__engagement__product__prod_type"] = self.finding.test.engagement.product.prod_type + + self.has_changed = False + + def set_hash_codes(self, *args: list, **kwargs: dict): + if self.finding and self.finding.hash_code: + self.form.fields["hash_code"] = forms.MultipleChoiceField(choices=[(self.finding.hash_code, self.finding.hash_code[:24] + "...")], required=False, initial=[]) + + def filter_queryset(self, *args: list, **kwargs: dict): + queryset = super().filter_queryset(*args, **kwargs) + queryset = get_authorized_findings(Permissions.Finding_View, queryset, self.user) + return queryset.exclude(pk=self.finding.pk) + + +class SimilarFindingFilter(FindingFilter, SimilarFindingHelper): + class Meta(FindingFilter.Meta): + model = Finding + # slightly different fields from FindingFilter, but keep the same ordering for UI consistency + fields = get_finding_filterset_fields(similar=True) + + def __init__(self, data=None, *args, **kwargs): + self.user = None + if "user" in kwargs: + self.user = kwargs.pop("user") + self.finding = None + if "finding" in kwargs: + self.finding = kwargs.pop("finding") + self.update_data(data, *args, **kwargs) + super().__init__(data, *args, **kwargs) + self.set_hash_codes(*args, **kwargs) + + +class SimilarFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups, SimilarFindingHelper): + class Meta(FindingFilterWithoutObjectLookups.Meta): + model = Finding + # slightly different fields from FindingFilter, but keep the same ordering for UI consistency + fields = get_finding_filterset_fields(similar=True, filter_string_matching=True) + + def __init__(self, data=None, *args, **kwargs): + self.user = None + if "user" in 
kwargs: + self.user = kwargs.pop("user") + self.finding = None + if "finding" in kwargs: + self.finding = kwargs.pop("finding") + self.update_data(data, *args, **kwargs) + super().__init__(data, *args, **kwargs) + self.set_hash_codes(*args, **kwargs) + + +class TemplateFindingFilter(DojoFilter): + title = CharFilter(lookup_expr="icontains") + cwe = MultipleChoiceFilter(choices=[]) + severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) + + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Finding.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Finding.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("cwe", "cwe"), + ("title", "title"), + ("numerical_severity", "numerical_severity"), + ), + field_labels={ + "numerical_severity": "Severity", + }, + ) + + class Meta: + model = Finding_Template + exclude = ["description", "mitigation", "impact", + "references", "numerical_severity"] + + not_test__tags = ModelMultipleChoiceFilter( + field_name="test__tags__name", + to_field_name="name", + exclude=True, + label="Test without tags", + queryset=Test.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_test__engagement__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__tags__name", + to_field_name="name", + exclude=True, + label="Engagement without tags", + 
queryset=Engagement.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="test__engagement__product__tags__name", + to_field_name="name", + exclude=True, + label=labels.ASSET_FILTERS_WITHOUT_TAGS_LABEL, + queryset=Product.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["cwe"].choices = cwe_options(self.queryset) + + +class ApiTemplateFindingFilter(DojoFilter): + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("title", "title"), + ("cwe", "cwe"), + ), + ) + + class Meta: + model = Finding_Template + fields = ["id", "title", "cwe", "severity", "description", + "mitigation"] + + +class MetricsFindingFilter(FindingFilter): + start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt")) + end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt")) + date = MetricsDateRangeFilter() + vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") + + not_tags = 
ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + + def __init__(self, *args, **kwargs): + if args[0]: + if args[0].get("start_date", "") or args[0].get("end_date", ""): + args[0]._mutable = True + args[0]["date"] = 8 + args[0]._mutable = False + + super().__init__(*args, **kwargs) + + class Meta(FindingFilter.Meta): + model = Finding + fields = get_finding_filterset_fields(metrics=True) + + +class MetricsFindingFilterWithoutObjectLookups(FindingFilterWithoutObjectLookups): + start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt")) + end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt")) + date = MetricsDateRangeFilter() + vulnerability_id = CharFilter(method=vulnerability_id_filter, label="Vulnerability Id") + + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + + def __init__(self, *args, **kwargs): + if args[0]: + if args[0].get("start_date", "") or args[0].get("end_date", ""): + args[0]._mutable = True + args[0]["date"] = 8 + args[0]._mutable = False + + super().__init__(*args, **kwargs) + + class Meta(FindingFilterWithoutObjectLookups.Meta): + model = Finding + fields = get_finding_filterset_fields(metrics=True, filter_string_matching=True) + + +class MetricsEndpointFilterHelper(FilterSet): + start_date = DateFilter(field_name="date", label="Start Date", lookup_expr=("gt")) + 
end_date = DateFilter(field_name="date", label="End Date", lookup_expr=("lt")) + date = MetricsDateRangeFilter() + finding__test__engagement__version = CharFilter(lookup_expr="icontains", label="Engagement Version") + finding__severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity") + endpoint__host = CharFilter(lookup_expr="icontains", label="Endpoint Host") + finding_title = CharFilter(lookup_expr="icontains", label="Finding Title") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + + +class MetricsEndpointFilter(MetricsEndpointFilterHelper): + finding__test__engagement__product__prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + finding__test__engagement = ModelMultipleChoiceFilter( + queryset=Engagement.objects.none(), + label="Engagement") + endpoint__tags = ModelMultipleChoiceFilter( + field_name="endpoint__tags__name", + to_field_name="name", + label="Endpoint tags", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) + finding__tags = ModelMultipleChoiceFilter( + field_name="finding__tags__name", + to_field_name="name", + label="Finding tags", + queryset=Finding.tags.tag_model.objects.all().order_by("name")) + finding__test__tags = ModelMultipleChoiceFilter( + field_name="finding__test__tags__name", + to_field_name="name", + label="Test tags", + queryset=Test.tags.tag_model.objects.all().order_by("name")) + finding__test__engagement__tags = ModelMultipleChoiceFilter( + field_name="finding__test__engagement__tags__name", + to_field_name="name", + label="Engagement tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + finding__test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="finding__test__engagement__product__tags__name", + to_field_name="name", + 
label=labels.ASSET_FILTERS_TAGS_ASSET_LABEL, + queryset=Product.tags.tag_model.objects.all().order_by("name")) + not_endpoint__tags = ModelMultipleChoiceFilter( + field_name="endpoint__tags__name", + to_field_name="name", + exclude=True, + label="Endpoint without tags", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) + not_finding__tags = ModelMultipleChoiceFilter( + field_name="finding__tags__name", + to_field_name="name", + exclude=True, + label="Finding without tags", + queryset=Finding.tags.tag_model.objects.all().order_by("name")) + not_finding__test__tags = ModelMultipleChoiceFilter( + field_name="finding__test__tags__name", + to_field_name="name", + exclude=True, + label="Test without tags", + queryset=Test.tags.tag_model.objects.all().order_by("name")) + not_finding__test__engagement__tags = ModelMultipleChoiceFilter( + field_name="finding__test__engagement__tags__name", + to_field_name="name", + exclude=True, + label="Engagement without tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + not_finding__test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="finding__test__engagement__product__tags__name", + to_field_name="name", + exclude=True, + label=labels.ASSET_FILTERS_WITHOUT_TAGS_LABEL, + queryset=Product.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + if args[0]: + if args[0].get("start_date", "") or args[0].get("end_date", ""): + args[0]._mutable = True + args[0]["date"] = 8 + args[0]._mutable = False + + self.pid = None + if "pid" in kwargs: + self.pid = kwargs.pop("pid") + + super().__init__(*args, **kwargs) + if self.pid: + del self.form.fields["finding__test__engagement__product__prod_type"] + self.form.fields["finding__test__engagement"].queryset = Engagement.objects.filter( + product_id=self.pid, + ).all() + else: + self.form.fields["finding__test__engagement"].queryset = 
get_authorized_engagements(Permissions.Engagement_View).order_by("name") + + if "finding__test__engagement__product__prod_type" in self.form.fields: + self.form.fields[ + "finding__test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + + class Meta: + model = Endpoint_Status + exclude = ["last_modified", "endpoint", "finding"] + + +class MetricsEndpointFilterWithoutObjectLookups(MetricsEndpointFilterHelper, FindingTagStringFilter): + finding__test__engagement__product__prod_type = CharFilter( + field_name="finding__test__engagement__product__prod_type", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + finding__test__engagement__product__prod_type_contains = CharFilter( + field_name="finding__test__engagement__product__prod_type", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + finding__test__engagement = CharFilter( + field_name="finding__test__engagement", + lookup_expr="iexact", + label="Engagement Name", + help_text="Search for Engagement names that are an exact match") + finding__test__engagement_contains = CharFilter( + field_name="finding__test__engagement", + lookup_expr="icontains", + label="Engagement Name Contains", + help_text="Search for Engagement names that contain a given pattern") + endpoint__tags_contains = CharFilter( + label="Endpoint Tag Contains", + field_name="endpoint__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Endpoint that contain a given pattern") + endpoint__tags = CharFilter( + label="Endpoint Tag", + field_name="endpoint__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Endpoint that are an exact match") + finding__tags_contains = CharFilter( + label="Finding Tag Contains", + field_name="finding__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a 
given pattern") + finding__tags = CharFilter( + label="Finding Tag", + field_name="finding__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + finding__test__tags_contains = CharFilter( + label="Test Tag Contains", + field_name="finding__test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + finding__test__tags = CharFilter( + label="Test Tag", + field_name="finding__test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + finding__test__engagement__tags_contains = CharFilter( + label="Engagement Tag Contains", + field_name="finding__test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + finding__test__engagement__tags = CharFilter( + label="Engagement Tag", + field_name="finding__test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + finding__test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL, + field_name="finding__test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP) + finding__test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_LABEL, + field_name="finding__test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP) + + not_endpoint__tags_contains = CharFilter( + label="Endpoint Tag Does Not Contain", + field_name="endpoint__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Endpoint that contain a given pattern, and exclude them", + exclude=True) + not_endpoint__tags = CharFilter( + label="Not Endpoint Tag", + field_name="endpoint__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a 
Endpoint that are an exact match, and exclude them", + exclude=True) + not_finding__tags_contains = CharFilter( + label="Finding Tag Does Not Contain", + field_name="finding__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern, and exclude them", + exclude=True) + not_finding__tags = CharFilter( + label="Not Finding Tag", + field_name="finding__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match, and exclude them", + exclude=True) + not_finding__test__tags_contains = CharFilter( + label="Test Tag Does Not Contain", + field_name="finding__test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Test that contain a given pattern, and exclude them", + exclude=True) + not_finding__test__tags = CharFilter( + label="Not Test Tag", + field_name="finding__test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Test that are an exact match, and exclude them", + exclude=True) + not_finding__test__engagement__tags_contains = CharFilter( + label="Engagement Tag Does Not Contain", + field_name="finding__test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", + exclude=True) + not_finding__test__engagement__tags = CharFilter( + label="Not Engagement Tag", + field_name="finding__test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Engagement that are an exact match, and exclude them", + exclude=True) + not_finding__test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL, + field_name="finding__test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP, + exclude=True) + not_finding__test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_LABEL, + 
field_name="finding__test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_NOT_HELP, + exclude=True) + + def __init__(self, *args, **kwargs): + if args[0]: + if args[0].get("start_date", "") or args[0].get("end_date", ""): + args[0]._mutable = True + args[0]["date"] = 8 + args[0]._mutable = False + self.pid = None + if "pid" in kwargs: + self.pid = kwargs.pop("pid") + super().__init__(*args, **kwargs) + if self.pid: + del self.form.fields["finding__test__engagement__product__prod_type"] + + class Meta: + model = Endpoint_Status + exclude = ["last_modified", "endpoint", "finding"] + + +class EndpointFilterHelper(FilterSet): + protocol = CharFilter(lookup_expr="icontains") + userinfo = CharFilter(lookup_expr="icontains") + host = CharFilter(lookup_expr="icontains") + port = NumberFilter() + path = CharFilter(lookup_expr="icontains") + query = CharFilter(lookup_expr="icontains") + fragment = CharFilter(lookup_expr="icontains") + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("product", "product"), + ("host", "host"), + ("id", "id"), + ), + ) + + +class EndpointFilter(EndpointFilterHelper, DojoFilter): + product = ModelMultipleChoiceFilter( + queryset=Product.objects.none(), + label=labels.ASSET_FILTERS_LABEL) + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + label="Endpoint Tags", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) + findings__tags = ModelMultipleChoiceFilter( + field_name="findings__tags__name", + to_field_name="name", + label="Finding Tags", + queryset=Finding.tags.tag_model.objects.all().order_by("name")) + 
findings__test__tags = ModelMultipleChoiceFilter( + field_name="findings__test__tags__name", + to_field_name="name", + label="Test Tags", + queryset=Test.tags.tag_model.objects.all().order_by("name")) + findings__test__engagement__tags = ModelMultipleChoiceFilter( + field_name="findings__test__engagement__tags__name", + to_field_name="name", + label="Engagement Tags", + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + findings__test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="findings__test__engagement__product__tags__name", + to_field_name="name", + label=labels.ASSET_FILTERS_TAGS_ASSET_LABEL, + queryset=Product.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + label="Not Endpoint Tags", + exclude=True, + queryset=Endpoint.tags.tag_model.objects.all().order_by("name")) + not_findings__tags = ModelMultipleChoiceFilter( + field_name="findings__tags__name", + to_field_name="name", + label="Not Finding Tags", + exclude=True, + queryset=Finding.tags.tag_model.objects.all().order_by("name")) + not_findings__test__tags = ModelMultipleChoiceFilter( + field_name="findings__test__tags__name", + to_field_name="name", + label="Not Test Tags", + exclude=True, + queryset=Test.tags.tag_model.objects.all().order_by("name")) + not_findings__test__engagement__tags = ModelMultipleChoiceFilter( + field_name="findings__test__engagement__tags__name", + to_field_name="name", + label="Not Engagement Tags", + exclude=True, + queryset=Engagement.tags.tag_model.objects.all().order_by("name")) + not_findings__test__engagement__product__tags = ModelMultipleChoiceFilter( + field_name="findings__test__engagement__product__tags__name", + to_field_name="name", + label=labels.ASSET_FILTERS_NOT_TAGS_ASSET_LABEL, + exclude=True, + queryset=Product.tags.tag_model.objects.all().order_by("name")) + + def __init__(self, *args, **kwargs): + self.user = None + if "user" 
in kwargs: + self.user = kwargs.pop("user") + super().__init__(*args, **kwargs) + self.form.fields["product"].queryset = get_authorized_products(Permissions.Product_View) + + @property + def qs(self): + parent = super().qs + return get_authorized_endpoints(Permissions.Endpoint_View, parent) + + class Meta: + model = Endpoint + exclude = ["findings", "inherited_tags"] + + +class EndpointFilterWithoutObjectLookups(EndpointFilterHelper): + product = NumberFilter(widget=HiddenInput()) + product__name = CharFilter( + field_name="product__name", + lookup_expr="iexact", + label=labels.ASSET_FILTERS_NAME_LABEL, + help_text=labels.ASSET_FILTERS_NAME_HELP) + product__name_contains = CharFilter( + field_name="product__name", + lookup_expr="icontains", + label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) + + tags_contains = CharFilter( + label="Endpoint Tag Contains", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Endpoint that contain a given pattern") + tags = CharFilter( + label="Endpoint Tag", + field_name="tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Endpoint that are an exact match") + findings__tags_contains = CharFilter( + label="Finding Tag Contains", + field_name="findings__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + findings__tags = CharFilter( + label="Finding Tag", + field_name="findings__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + findings__test__tags_contains = CharFilter( + label="Test Tag Contains", + field_name="findings__test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + findings__test__tags = CharFilter( + label="Test Tag", + field_name="findings__test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an 
exact match") + findings__test__engagement__tags_contains = CharFilter( + label="Engagement Tag Contains", + field_name="findings__test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern") + findings__test__engagement__tags = CharFilter( + label="Engagement Tag", + field_name="findings__test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match") + findings__test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_LABEL, + field_name="findings__test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_ASSET_CONTAINS_HELP) + findings__test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_ASSET_LABEL, + field_name="findings__test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_ASSET_HELP) + + not_tags_contains = CharFilter( + label="Endpoint Tag Does Not Contain", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Endpoint that contain a given pattern, and exclude them", + exclude=True) + not_tags = CharFilter( + label="Not Endpoint Tag", + field_name="tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Endpoint that are an exact match, and exclude them", + exclude=True) + not_findings__tags_contains = CharFilter( + label="Finding Tag Does Not Contain", + field_name="findings__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Finding that contain a given pattern, and exclude them", + exclude=True) + not_findings__tags = CharFilter( + label="Not Finding Tag", + field_name="findings__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Finding that are an exact match, and exclude them", + exclude=True) + not_findings__test__tags_contains = CharFilter( + label="Test Tag Does Not 
Contain", + field_name="findings__test__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Test that contain a given pattern, and exclude them", + exclude=True) + not_findings__test__tags = CharFilter( + label="Not Test Tag", + field_name="findings__test__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Test that are an exact match, and exclude them", + exclude=True) + not_findings__test__engagement__tags_contains = CharFilter( + label="Engagement Tag Does Not Contain", + field_name="findings__test__engagement__tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Engagement that contain a given pattern, and exclude them", + exclude=True) + not_findings__test__engagement__tags = CharFilter( + label="Not Engagement Tag", + field_name="findings__test__engagement__tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Engagement that are an exact match, and exclude them", + exclude=True) + not_findings__test__engagement__product__tags_contains = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_LABEL, + field_name="findings__test__engagement__product__tags__name", + lookup_expr="icontains", + help_text=labels.ASSET_FILTERS_TAG_NOT_CONTAIN_HELP, + exclude=True) + not_findings__test__engagement__product__tags = CharFilter( + label=labels.ASSET_FILTERS_TAG_NOT_LABEL, + field_name="findings__test__engagement__product__tags__name", + lookup_expr="iexact", + help_text=labels.ASSET_FILTERS_TAG_NOT_HELP, + exclude=True) + + def __init__(self, *args, **kwargs): + self.user = None + if "user" in kwargs: + self.user = kwargs.pop("user") + super().__init__(*args, **kwargs) + + @property + def qs(self): + parent = super().qs + return get_authorized_endpoints(Permissions.Endpoint_View, parent) + + class Meta: + model = Endpoint + exclude = ["findings", "inherited_tags", "product"] + + +class ApiEndpointFilter(DojoFilter): + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", 
help_text="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("host", "host"), + ("product", "product"), + ("id", "id"), + ), + ) + + class Meta: + model = Endpoint + fields = ["id", "protocol", "userinfo", "host", "port", "path", "query", "fragment", "product"] + + +class ApiRiskAcceptanceFilter(DojoFilter): + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ), + ) + + class Meta: + model = Risk_Acceptance + fields = [ + "name", "accepted_findings", "recommendation", "recommendation_details", + "decision", "decision_details", "accepted_by", "owner", "expiration_date", + "expiration_date_warned", "expiration_date_handled", "reactivate_expired", + "restart_sla_expired", "notes", + ] + + +class EngagementTestFilterHelper(FilterSet): + version = CharFilter(lookup_expr="icontains", label="Version") + if settings.TRACK_IMPORT_HISTORY: + test_import__version = CharFilter(field_name="test_import__version", lookup_expr="icontains", label="Reimported Version") + target_start = DateRangeFilter() + target_end = DateRangeFilter() + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name 
contains", exclude=True) + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("title", "title"), + ("version", "version"), + ("target_start", "target_start"), + ("target_end", "target_end"), + ("lead", "lead"), + ("api_scan_configuration", "api_scan_configuration"), + ), + field_labels={ + "name": "Test Name", + }, + ) + + +class EngagementTestFilter(EngagementTestFilterHelper, DojoFilter): + lead = ModelChoiceFilter(queryset=Dojo_User.objects.none(), label="Lead") + api_scan_configuration = ModelChoiceFilter( + queryset=Product_API_Scan_Configuration.objects.none(), + label="API Scan Configuration") + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Test.tags.tag_model.objects.all().order_by("name")) + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Test.tags.tag_model.objects.all().order_by("name")) + + class Meta: + model = Test + fields = [ + "title", "test_type", "target_start", + "target_end", "percent_complete", + "version", "api_scan_configuration", + ] + + def __init__(self, *args, **kwargs): + self.engagement = kwargs.pop("engagement") + super(DojoFilter, self).__init__(*args, **kwargs) + self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") + self.form.fields["api_scan_configuration"].queryset = Product_API_Scan_Configuration.objects.filter(product=self.engagement.product).distinct() + self.form.fields["lead"].queryset = get_authorized_users(Permissions.Product_Type_View) \ + .filter(test__lead__isnull=False).distinct() + + +class EngagementTestFilterWithoutObjectLookups(EngagementTestFilterHelper): + lead = CharFilter( + field_name="lead__username", + lookup_expr="iexact", + label="Lead Username", + help_text="Search for Lead username that are an exact 
match") + lead_contains = CharFilter( + field_name="lead__username", + lookup_expr="icontains", + label="Lead Username Contains", + help_text="Search for Lead username that contain a given pattern") + api_scan_configuration__tool_configuration__name = CharFilter( + field_name="api_scan_configuration__tool_configuration__name", + lookup_expr="iexact", + label="API Scan Configuration Name", + help_text="Search for Lead username that are an exact match") + api_scan_configuration__tool_configuration__name_contains = CharFilter( + field_name="api_scan_configuration__tool_configuration__name", + lookup_expr="icontains", + label="API Scan Configuration Name Contains", + help_text="Search for Lead username that contain a given pattern") + tags_contains = CharFilter( + label="Test Tag Contains", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Test that contain a given pattern") + tags = CharFilter( + label="Test Tag", + field_name="tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Test that are an exact match") + not_tags_contains = CharFilter( + label="Test Tag Does Not Contain", + field_name="tags__name", + lookup_expr="icontains", + help_text="Search for tags on a Test that contain a given pattern, and exclude them", + exclude=True) + not_tags = CharFilter( + label="Not Test Tag", + field_name="tags__name", + lookup_expr="iexact", + help_text="Search for tags on a Test that are an exact match, and exclude them", + exclude=True) + + class Meta: + model = Test + fields = [ + "title", "test_type", "target_start", + "target_end", "percent_complete", "version", + ] + + def __init__(self, *args, **kwargs): + self.engagement = kwargs.pop("engagement") + super().__init__(*args, **kwargs) + self.form.fields["test_type"].queryset = Test_Type.objects.filter(test__engagement=self.engagement).distinct().order_by("name") + + +class ApiTestFilter(DojoFilter): + tag = CharFilter(field_name="tags__name", 
lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + engagement__tags = CharFieldInFilter( + field_name="engagement__tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags present on engagement (uses OR for multiple values)") + engagement__tags__and = CharFieldFilterANDExpression( + field_name="engagement__tags__name", + help_text="Comma separated list of exact tags to match with an AND expression present on engagement") + engagement__product__tags = CharFieldInFilter( + field_name="engagement__product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_OR_HELP) + engagement__product__tags__and = CharFieldFilterANDExpression( + field_name="engagement__product__tags__name", + help_text=labels.ASSET_FILTERS_CSV_TAGS_AND_HELP) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + not_engagement__tags = CharFieldInFilter(field_name="engagement__tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on engagement", + exclude="True") + not_engagement__product__tags = CharFieldInFilter(field_name="engagement__product__tags__name", + lookup_expr="in", + help_text=labels.ASSET_FILTERS_CSV_TAGS_NOT_HELP, + exclude="True") + has_tags = BooleanFilter(field_name="tags", lookup_expr="isnull", exclude=True, label="Has tags") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("title", "title"), + ("version", "version"), + ("target_start", 
"target_start"), + ("target_end", "target_end"), + ("test_type", "test_type"), + ("lead", "lead"), + ("version", "version"), + ("branch_tag", "branch_tag"), + ("build_id", "build_id"), + ("commit_hash", "commit_hash"), + ("api_scan_configuration", "api_scan_configuration"), + ("engagement", "engagement"), + ("created", "created"), + ("updated", "updated"), + ), + field_labels={ + "name": "Test Name", + }, + ) + + class Meta: + model = Test + fields = ["id", "title", "test_type", "target_start", + "target_end", "notes", "percent_complete", + "engagement", "version", + "branch_tag", "build_id", "commit_hash", + "api_scan_configuration", "scan_type"] + + +class ApiAppAnalysisFilter(DojoFilter): + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Tag name contains") + tags = CharFieldInFilter( + field_name="tags__name", + lookup_expr="in", + help_text="Comma separated list of exact tags (uses OR for multiple values)") + tags__and = CharFieldFilterANDExpression( + field_name="tags__name", + help_text="Comma separated list of exact tags to match with an AND expression") + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", help_text="Not Tag name contains", exclude="True") + not_tags = CharFieldInFilter(field_name="tags__name", lookup_expr="in", + help_text="Comma separated list of exact tags not present on model", exclude="True") + + class Meta: + model = App_Analysis + fields = ["product", "name", "user", "version"] + + +class ApiCredentialsFilter(DojoFilter): + class Meta: + model = Cred_Mapping + fields = "__all__" + + +class EndpointReportFilter(DojoFilter): + protocol = CharFilter(lookup_expr="icontains") + userinfo = CharFilter(lookup_expr="icontains") + host = CharFilter(lookup_expr="icontains") + port = NumberFilter() + path = CharFilter(lookup_expr="icontains") + query = CharFilter(lookup_expr="icontains") + fragment = CharFilter(lookup_expr="icontains") + finding__severity = 
MultipleChoiceFilter(choices=SEVERITY_CHOICES, label="Severity") + finding__mitigated = ReportBooleanFilter(label="Finding Mitigated") + + tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Tag name contains") + + not_tags = ModelMultipleChoiceFilter( + field_name="tags__name", + to_field_name="name", + exclude=True, + queryset=Endpoint.tags.tag_model.objects.all().order_by("name"), + # label='tags', # doesn't work with tagulous, need to set in __init__ below + ) + + not_tag = CharFilter(field_name="tags__name", lookup_expr="icontains", label="Not tag name contains", exclude=True) + + class Meta: + model = Endpoint + exclude = ["product"] + + +class ReportFindingFilterHelper(FilterSet): + title = CharFilter(lookup_expr="icontains", label="Name") + date = DateFromToRangeFilter(field_name="date", label="Date Discovered") + date_recent = DateRangeFilter(field_name="date", label="Relative Date") + severity = MultipleChoiceFilter(choices=SEVERITY_CHOICES) + active = ReportBooleanFilter() + is_mitigated = ReportBooleanFilter() + mitigated = DateRangeFilter(label="Mitigated Date") + verified = ReportBooleanFilter() + false_p = ReportBooleanFilter(label="False Positive") + risk_acceptance = ReportRiskAcceptanceFilter(label="Risk Accepted") + duplicate = ReportBooleanFilter() + out_of_scope = ReportBooleanFilter() + outside_of_sla = FindingSLAFilter(label="Outside of SLA") + file_path = CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + fields=( + ("title", "title"), + ("date", "date"), + ("fix_available", "fix_available"), + ("numerical_severity", "numerical_severity"), + ("epss_score", "epss_score"), + ("epss_percentile", "epss_percentile"), + ("test__engagement__product__name", 
"test__engagement__product__name"), + ), + ) + + class Meta: + model = Finding + # exclude sonarqube issue as by default it will show all without checking permissions + exclude = ["date", "cwe", "url", "description", "mitigation", "impact", + "references", "sonarqube_issue", "duplicate_finding", + "thread_id", "notes", "inherited_tags", "endpoints", + "numerical_severity", "reporter", "last_reviewed", + "jira_creation", "jira_change", "files"] + + def manage_kwargs(self, kwargs): + self.prod_type = None + self.product = None + self.engagement = None + self.test = None + if "prod_type" in kwargs: + self.prod_type = kwargs.pop("prod_type") + if "product" in kwargs: + self.product = kwargs.pop("product") + if "engagement" in kwargs: + self.engagement = kwargs.pop("engagement") + if "test" in kwargs: + self.test = kwargs.pop("test") + + @property + def qs(self): + parent = super().qs + return get_authorized_findings(Permissions.Finding_View, parent) + + +class ReportFindingFilter(ReportFindingFilterHelper, FindingTagFilter): + test__engagement__product = ModelMultipleChoiceFilter( + queryset=Product.objects.none(), label=labels.ASSET_FILTERS_LABEL) + test__engagement__product__prod_type = ModelMultipleChoiceFilter( + queryset=Product_Type.objects.none(), + label=labels.ORG_FILTERS_LABEL) + test__engagement__product__lifecycle = MultipleChoiceFilter(choices=Product.LIFECYCLE_CHOICES, label=labels.ASSET_LIFECYCLE_LABEL) + test__engagement = ModelMultipleChoiceFilter(queryset=Engagement.objects.none(), label="Engagement") + duplicate_finding = ModelChoiceFilter(queryset=Finding.objects.filter(original_finding__isnull=False).distinct()) + + def __init__(self, *args, **kwargs): + self.manage_kwargs(kwargs) + super().__init__(*args, **kwargs) + + # duplicate_finding queryset needs to restricted in line with permissions + # and inline with report scope to avoid a dropdown with 100K entries + duplicate_finding_query_set = self.form.fields["duplicate_finding"].queryset + 
duplicate_finding_query_set = get_authorized_findings(Permissions.Finding_View, duplicate_finding_query_set) + + if self.test: + duplicate_finding_query_set = duplicate_finding_query_set.filter(test=self.test) + del self.form.fields["test__tags"] + del self.form.fields["test__engagement__tags"] + del self.form.fields["test__engagement__product__tags"] + if self.engagement: + duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement=self.engagement) + del self.form.fields["test__engagement__tags"] + del self.form.fields["test__engagement__product__tags"] + elif self.product: + duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product=self.product) + del self.form.fields["test__engagement__product"] + del self.form.fields["test__engagement__product__tags"] + elif self.prod_type: + duplicate_finding_query_set = duplicate_finding_query_set.filter(test__engagement__product__prod_type=self.prod_type) + del self.form.fields["test__engagement__product__prod_type"] + + self.form.fields["duplicate_finding"].queryset = duplicate_finding_query_set + + if "test__engagement__product__prod_type" in self.form.fields: + self.form.fields[ + "test__engagement__product__prod_type"].queryset = get_authorized_product_types(Permissions.Product_Type_View) + if "test__engagement__product" in self.form.fields: + self.form.fields[ + "test__engagement__product"].queryset = get_authorized_products(Permissions.Product_View) + if "test__engagement" in self.form.fields: + self.form.fields["test__engagement"].queryset = get_authorized_engagements(Permissions.Engagement_View) + + +class ReportFindingFilterWithoutObjectLookups(ReportFindingFilterHelper, FindingTagStringFilter): + test__engagement__product__prod_type = NumberFilter(widget=HiddenInput()) + test__engagement__product = NumberFilter(widget=HiddenInput()) + test__engagement = NumberFilter(widget=HiddenInput()) + test = NumberFilter(widget=HiddenInput()) + endpoint = 
NumberFilter(widget=HiddenInput()) + reporter = CharFilter( + field_name="reporter__username", + lookup_expr="iexact", + label="Reporter Username", + help_text="Search for Reporter names that are an exact match") + reporter_contains = CharFilter( + field_name="reporter__username", + lookup_expr="icontains", + label="Reporter Username Contains", + help_text="Search for Reporter names that contain a given pattern") + reviewers = CharFilter( + field_name="reviewers__username", + lookup_expr="iexact", + label="Reviewer Username", + help_text="Search for Reviewer names that are an exact match") + reviewers_contains = CharFilter( + field_name="reviewers__username", + lookup_expr="icontains", + label="Reviewer Username Contains", + help_text="Search for Reviewer usernames that contain a given pattern") + last_reviewed_by = CharFilter( + field_name="last_reviewed_by__username", + lookup_expr="iexact", + label="Last Reviewed By Username", + help_text="Search for Last Reviewed By names that are an exact match") + last_reviewed_by_contains = CharFilter( + field_name="last_reviewed_by__username", + lookup_expr="icontains", + label="Last Reviewed By Username Contains", + help_text="Search for Last Reviewed By usernames that contain a given pattern") + review_requested_by = CharFilter( + field_name="review_requested_by__username", + lookup_expr="iexact", + label="Review Requested By Username", + help_text="Search for Review Requested By names that are an exact match") + review_requested_by_contains = CharFilter( + field_name="review_requested_by__username", + lookup_expr="icontains", + label="Review Requested By Username Contains", + help_text="Search for Review Requested By usernames that contain a given pattern") + mitigated_by = CharFilter( + field_name="mitigated_by__username", + lookup_expr="iexact", + label="Mitigator Username", + help_text="Search for Mitigator names that are an exact match") + mitigated_by_contains = CharFilter( + field_name="mitigated_by__username", + 
lookup_expr="icontains", + label="Mitigator Username Contains", + help_text="Search for Mitigator usernames that contain a given pattern") + defect_review_requested_by = CharFilter( + field_name="defect_review_requested_by__username", + lookup_expr="iexact", + label="Requester of Defect Review Username", + help_text="Search for Requester of Defect Review names that are an exact match") + defect_review_requested_by_contains = CharFilter( + field_name="defect_review_requested_by__username", + lookup_expr="icontains", + label="Requester of Defect Review Username Contains", + help_text="Search for Requester of Defect Review usernames that contain a given pattern") + test__engagement__product__prod_type__name = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="iexact", + label=labels.ORG_FILTERS_NAME_LABEL, + help_text=labels.ORG_FILTERS_NAME_HELP) + test__engagement__product__prod_type__name_contains = CharFilter( + field_name="test__engagement__product__prod_type__name", + lookup_expr="icontains", + label=labels.ORG_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ORG_FILTERS_NAME_CONTAINS_HELP) + test__engagement__product__name = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="iexact", + label=labels.ASSET_FILTERS_NAME_LABEL, + help_text=labels.ASSET_FILTERS_NAME_HELP) + test__engagement__product__name_contains = CharFilter( + field_name="test__engagement__product__name", + lookup_expr="icontains", + label=labels.ASSET_FILTERS_NAME_CONTAINS_LABEL, + help_text=labels.ASSET_FILTERS_NAME_CONTAINS_HELP) + test__engagement__name = CharFilter( + field_name="test__engagement__name", + lookup_expr="iexact", + label="Engagement Name", + help_text="Search for Engagement names that are an exact match") + test__engagement__name_contains = CharFilter( + field_name="test__engagement__name", + lookup_expr="icontains", + label="Engagement name Contains", + help_text="Search for Engagement names that contain a given 
pattern") + test__name = CharFilter( + field_name="test__name", + lookup_expr="iexact", + label="Test Name", + help_text="Search for Test names that are an exact match") + test__name_contains = CharFilter( + field_name="test__name", + lookup_expr="icontains", + label="Test name Contains", + help_text="Search for Test names that contain a given pattern") + + def __init__(self, *args, **kwargs): + self.manage_kwargs(kwargs) + super().__init__(*args, **kwargs) + + product_type_refs = [ + "test__engagement__product__prod_type__name", + "test__engagement__product__prod_type__name_contains", + ] + product_refs = [ + "test__engagement__product__name", + "test__engagement__product__name_contains", + "test__engagement__product__tags", + "test__engagement__product__tags_contains", + "not_test__engagement__product__tags", + "not_test__engagement__product__tags_contains", + ] + engagement_refs = [ + "test__engagement__name", + "test__engagement__name_contains", + "test__engagement__tags", + "test__engagement__tags_contains", + "not_test__engagement__tags", + "not_test__engagement__tags_contains", + ] + test_refs = [ + "test__name", + "test__name_contains", + "test__tags", + "test__tags_contains", + "not_test__tags", + "not_test__tags_contains", + ] + + if self.test: + self.delete_tags_from_form(product_type_refs) + self.delete_tags_from_form(product_refs) + self.delete_tags_from_form(engagement_refs) + self.delete_tags_from_form(test_refs) + elif self.engagement: + self.delete_tags_from_form(product_type_refs) + self.delete_tags_from_form(product_refs) + self.delete_tags_from_form(engagement_refs) + elif self.product: + self.delete_tags_from_form(product_type_refs) + self.delete_tags_from_form(product_refs) + elif self.prod_type: + self.delete_tags_from_form(product_type_refs) + + +class UserFilter(DojoFilter): + first_name = CharFilter(lookup_expr="icontains") + last_name = CharFilter(lookup_expr="icontains") + username = CharFilter(lookup_expr="icontains") + email = 
CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("username", "username"), + ("last_name", "last_name"), + ("first_name", "first_name"), + ("email", "email"), + ("is_active", "is_active"), + ("is_superuser", "is_superuser"), + ("date_joined", "date_joined"), + ("last_login", "last_login"), + ), + field_labels={ + "username": "User Name", + "is_active": "Active", + "is_superuser": "Superuser", + }, + ) + + class Meta: + model = Dojo_User + fields = ["is_superuser", "is_active", "first_name", "last_name", "username", "email"] + + +class GroupFilter(DojoFilter): + name = CharFilter(lookup_expr="icontains") + description = CharFilter(lookup_expr="icontains") + + class Meta: + model = Dojo_Group + fields = ["name", "description"] + exclude = ["users"] + + +# This class is used exclusively by Findings +class TestImportFilter(DojoFilter): + version = CharFilter(field_name="version", lookup_expr="icontains") + version_exact = CharFilter(field_name="version", lookup_expr="iexact", label="Version Exact") + branch_tag = CharFilter(lookup_expr="icontains", label="Branch/Tag") + build_id = CharFilter(lookup_expr="icontains", label="Build ID") + commit_hash = CharFilter(lookup_expr="icontains", label="Commit hash") + + findings_affected = BooleanFilter(field_name="findings_affected", lookup_expr="isnull", exclude=True, label="Findings affected") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("date", "date"), + ("version", "version"), + ("branch_tag", "branch_tag"), + ("build_id", "build_id"), + ("commit_hash", "commit_hash"), + + ), + ) + + class Meta: + model = Test_Import + fields = [] + + +# This class is used exclusively by Findings +class TestImportFindingActionFilter(DojoFilter): + action = MultipleChoiceFilter(choices=IMPORT_ACTIONS) + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("action", "action"), + ), + ) + + class Meta: + model = Test_Import_Finding_Action + fields = 
[] + + +# Used within the TestImport API +class TestImportAPIFilter(DojoFilter): + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("id", "id"), + ("created", "created"), + ("modified", "modified"), + ("version", "version"), + ("branch_tag", "branch_tag"), + ("build_id", "build_id"), + ("commit_hash", "commit_hash"), + + ), + ) + + class Meta: + model = Test_Import + fields = ["test", + "findings_affected", + "version", + "branch_tag", + "build_id", + "commit_hash", + "test_import_finding_action__action", + "test_import_finding_action__finding", + "test_import_finding_action__created"] + + +class LogEntryFilter(DojoFilter): + + action = MultipleChoiceFilter(choices=LogEntry.Action.choices) + actor = ModelMultipleChoiceFilter(queryset=Dojo_User.objects.none()) + timestamp = DateRangeFilter() + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["actor"].queryset = get_authorized_users(Permissions.Product_View) + + class Meta: + model = LogEntry + exclude = ["content_type", "object_pk", "object_id", "object_repr", + "changes", "additional_data", "remote_addr"] + filter_overrides = { + JSONField: { + "filter_class": CharFilter, + "extra": lambda _: { + "lookup_expr": "icontains", + }, + }, + } + + +class PgHistoryFilter(DojoFilter): + + """ + Filter for django-pghistory audit entries. 
+ + This filter works with pghistory event tables that have: + - pgh_created_at: timestamp of the event + - pgh_label: event type (insert/update/delete) + - user: user ID from context + - url: URL from context + - remote_addr: IP address from context + """ + + # Filter by event creation time (equivalent to auditlog timestamp) + pgh_created_at = DateRangeFilter(field_name="pgh_created_at", label="Timestamp") + + # Filter by event type/label + pgh_label = ChoiceFilter( + field_name="pgh_label", + label="Event Type", + choices=[ + ("", "All"), + ("insert", "Insert"), + ("update", "Update"), + ("delete", "Delete"), + ("initial_import", "Initial Import"), + ], + ) + + # Filter by user (from context) + user = ModelChoiceFilter( + field_name="user", + queryset=Dojo_User.objects.none(), + label="User", + empty_label="All Users", + ) + + # Filter by IP address (from context) + remote_addr = CharFilter( + field_name="remote_addr", + lookup_expr="icontains", + label="IP Address Contains", + ) + + # Filter by changes/diff field (JSON field containing what changed) + pgh_diff = CharFilter( + method="filter_pgh_diff_contains", + label="Changes Contains", + help_text="Search for field names or values in the changes (optimized for JSONB, but can be slow)", + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.form.fields["user"].queryset = get_authorized_users(Permissions.Product_View) + + def filter_pgh_diff_contains(self, queryset, name, value): + """ + Custom filter for pgh_diff that uses efficient JSONB operations. + Searches both keys and values in the JSONB field. 
+ """ + if not value: + return queryset + + # Search in both keys and values using JSONB operators + return queryset.filter( + Q(pgh_diff__has_key=value) | # Search in keys: {"severity": [...]} + Q(pgh_diff__has_any_keys=[value]) | # Alternative key search + Q(pgh_diff__contains=f'"{value}"'), # Search in values: ["severity", "other"] + ) + + class Meta: + fields = ["pgh_created_at", "pgh_label", "user", "url", "remote_addr", "pgh_diff"] + exclude = [] + + +class ProductTypeFilter(DojoFilter): + name = CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ), + ) + + class Meta: + model = Product_Type + exclude = [] + include = ("name",) + + +class TestTypeFilter(DojoFilter): + name = CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ), + ) + + class Meta: + model = Test_Type + exclude = [] + include = ("name",) + + +class DevelopmentEnvironmentFilter(DojoFilter): + name = CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ), + ) + + class Meta: + model = Development_Environment + exclude = [] + include = ("name",) + + +class NoteTypesFilter(DojoFilter): + name = CharFilter(lookup_expr="icontains") + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("name", "name"), + ("description", "description"), + ("is_single", "is_single"), + ("is_mandatory", "is_mandatory"), + ), + ) + + class Meta: + model = Note_Type + exclude = [] + include = ("name", "is_single", "description") + +# ============================== +# Defect Dojo Engaegment Surveys +# ============================== + + +class QuestionnaireFilter(FilterSet): + name = CharFilter(lookup_expr="icontains") + description = CharFilter(lookup_expr="icontains") + active = BooleanFilter() + + class Meta: + model = Engagement_Survey + exclude = ["questions"] + + survey_set = FilterSet 
+ + +class QuestionTypeFilter(ChoiceFilter): + def any(self, qs, name): + return qs.all() + + def text_question(self, qs, name): + return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(TextQuestion)) + + def choice_question(self, qs, name): + return qs.filter(polymorphic_ctype=ContentType.objects.get_for_model(ChoiceQuestion)) + + options = { + None: (_("Any"), any), + 1: (_("Text Question"), text_question), + 2: (_("Choice Question"), choice_question), + } + + def __init__(self, *args, **kwargs): + kwargs["choices"] = [ + (key, value[0]) for key, value in six.iteritems(self.options)] + super().__init__(*args, **kwargs) + + def filter(self, qs, value): + try: + value = int(value) + except (ValueError, TypeError): + value = None + return self.options[value][1](self, qs, self.options[value][0]) + + +class ApiUserFilter(filters.FilterSet): + last_login = filters.DateFromToRangeFilter() + date_joined = filters.DateFromToRangeFilter() + is_active = filters.BooleanFilter() + is_superuser = filters.BooleanFilter() + username = filters.CharFilter(lookup_expr="icontains") + first_name = filters.CharFilter(lookup_expr="icontains") + last_name = filters.CharFilter(lookup_expr="icontains") + email = filters.CharFilter(lookup_expr="icontains") + class Meta: + model = User + fields = [ + "id", + "username", + "first_name", + "last_name", + "email", + "is_active", + "is_superuser", + "last_login", + "date_joined", + ] + + o = OrderingFilter( + # tuple-mapping retains order + fields=( + ("username", "username"), + ("last_name", "last_name"), + ("first_name", "first_name"), + ("email", "email"), + ("is_active", "is_active"), + ("is_superuser", "is_superuser"), + ("date_joined", "date_joined"), + ("last_login", "last_login"), + ), + ) + + +with warnings.catch_warnings(action="ignore", category=ManagerInheritanceWarning): + class QuestionFilter(FilterSet): + text = CharFilter(lookup_expr="icontains") + type = QuestionTypeFilter() + + class Meta: + model = Question + 
exclude = ["polymorphic_ctype", "created", "modified", "order"] + + question_set = FilterSet diff --git a/dojo/models.py b/dojo/models.py index 282a8c4d667..2ff1d681142 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -1580,6 +1580,15 @@ def __str__(self): def get_absolute_url(self): return reverse("view_engagement", args=[str(self.id)]) + @property + def engagement_id(self): + if hasattr(settings, 'ENGAGEMENT_ID_FORMAT') and settings.ENGAGEMENT_ID_FORMAT: + try: + return settings.ENGAGEMENT_ID_FORMAT.format(id=self.id) + except Exception: + return str(self.id) + return None + def copy(self): copy = copy_model_util(self) # Save the necessary ManyToMany relationships diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index f13696c586b..ef02fc5defb 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1846,6 +1846,10 @@ def saml2_attrib_map_format(din): # Maximum size of a scan file in MB SCAN_FILE_MAX_SIZE = env("DD_SCAN_FILE_MAX_SIZE") +# Engagement ID format +# Example: "ENG-{id:04d}" +ENGAGEMENT_ID_FORMAT = env("DD_ENGAGEMENT_ID_FORMAT", default="ENG-{id:04d}") + # Apply a severity level to "Security Weaknesses" in Qualys WAS QUALYS_WAS_WEAKNESS_IS_VULN = env("DD_QUALYS_WAS_WEAKNESS_IS_VULN") diff --git a/dojo/templates/dojo/engagement.html b/dojo/templates/dojo/engagement.html index 684b0777a73..a0a99b0e140 100644 --- a/dojo/templates/dojo/engagement.html +++ b/dojo/templates/dojo/engagement.html @@ -1,243 +1,258 @@ -{% extends "base.html" %} -{% load navigation_tags %} -{% load display_tags %} -{% load authorization_tags %} - -{% block content %} - {{ block.super }} -
-
-
-
-

- {{ view }} Engagements - -

-
-
- {% include "dojo/filter_snippet.html" with form=filter_form %} -
-
- {% if engagements %} - -
- {% include "dojo/paging_snippet.html" with page=engagements page_size=True %} -
- -
- - - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - - - {% if system_settings.enable_jira %} - - {% endif %} - - - {% for e in engagements %} - - - - - - - - - - {% if system_settings.enable_jira %} - - {% endif %} - - {% endfor %} -
{% dojo_sort request 'Engagement' 'name' 'asc' %}{% dojo_sort request 'Period' 'target_start' 'asc' %}Status{% dojo_sort request labels.ASSET_LABEL 'product__name' 'asc' %}{% dojo_sort request labels.ORG_LABEL 'product__prod_type__name' %}{% dojo_sort request 'Lead' 'lead__first_name' %}TestsJira
-
-
- {% if e.name %}{{ e.name }}{% endif %} -
- {% include "dojo/snippets/tags.html" with tags=e.tags.all %} -
-
{{ e.target_start }} - {{ e.target_end }} - {% if e.is_overdue and e.status != 'Completed' %} - - {{ e.target_end|overdue }} overdue - - {% endif %} - {{ e.status }} - - {{ e.product.name }} - - {{ e.product|jira_project_tag }} - {% include "dojo/snippets/tags.html" with tags=e.product.tags.all %} - - - {{ e.product.prod_type.name }} - - {{ e.lead.first_name }} {{ e.lead.last_name }} - - {{ e.test_count|default:0 }} - - - {{ e|jira_project_tag }} -
-
-
- {% include "dojo/paging_snippet.html" with page=engagements page_size=True %} -
- {% else %} -
No active engagements
- {% endif %} -
-
- -{% endblock %} -{% block postscript %} - {{ block.super }} - - - {% include "dojo/filter_js_snippet.html" %} -{% endblock %} +{% extends "base.html" %} +{% load navigation_tags %} +{% load display_tags %} +{% load authorization_tags %} + +{% block content %} +{{ block.super }} +
+
+
+
+

+ {{ view }} Engagements + +

+
+
+ {% include "dojo/filter_snippet.html" with form=filter_form %} +
+
+ {% if engagements %} + +
+ {% include "dojo/paging_snippet.html" with page=engagements page_size=True %} +
+ +
+ + + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + + + {% if system_settings.enable_jira %} + + {% endif %} + + + {% for e in engagements %} + + + + + + + + + + + {% if system_settings.enable_jira %} + + {% endif %} + + {% endfor %} +
ID{% dojo_sort request .Engagement. .name. .asc. %}{% dojo_sort request 'Period' 'target_start' 'asc' %}Status{% dojo_sort request labels.ASSET_LABEL 'product__name' 'asc' %}{% dojo_sort request labels.ORG_LABEL 'product__prod_type__name' %}{% dojo_sort request 'Lead' 'lead__first_name' %}TestsJira
+
+
+ {% if e.engagement_id %} + {{ e.engagement_id }} + {% endif %} + + {% if e.name %}{{ e.name }}{% endif %} +
+ {% include "dojo/snippets/tags.html" with tags=e.tags.all %} +
+
{{ e.target_start }} - {{ e.target_end }} + {% if e.is_overdue and e.status != 'Completed' %} + + {{ e.target_end|overdue }} overdue + + {% endif %} + {{ e.status }} + + {{ e.product.name }} + + {{ e.product|jira_project_tag }} + {% include "dojo/snippets/tags.html" with tags=e.product.tags.all %} + + + {{ e.product.prod_type.name }} + + {{ e.lead.first_name }} {{ e.lead.last_name }} + + {{ e.test_count|default:0 }} + + + {{ e|jira_project_tag }} +
+
+
+ {% include "dojo/paging_snippet.html" with page=engagements page_size=True %} +
+ {% else %} +
+
No active engagements
+
+ {% endif %} +
+
+ +{% endblock %} +{% block postscript %} +{{ block.super }} + + +{% include "dojo/filter_js_snippet.html" %} +{% endblock %} \ No newline at end of file diff --git a/dojo/templates/dojo/engagements_all.html b/dojo/templates/dojo/engagements_all.html index ec31cefd2cd..d211fe93f55 100644 --- a/dojo/templates/dojo/engagements_all.html +++ b/dojo/templates/dojo/engagements_all.html @@ -3,184 +3,199 @@ {% load display_tags %} {% load authorization_tags %} {% block content %} - {{ block.super }} -
-
-
-
-

- Engagements - -

-
-
- {% include "dojo/filter_snippet.html" with form=filter_form %} -
+{{ block.super }} +
+
+
+
+

+ Engagements + +

- {% if products %} +
+ {% include "dojo/filter_snippet.html" with form=filter_form %} +
+
+ {% if products %} -
- {% include "dojo/paging_snippet.html" with page=products page_size=True %} -
+
+ {% include "dojo/paging_snippet.html" with page=products page_size=True %} +
-
- - - - - - - - {% if system_settings.enable_jira %} - - {% endif %} - - - - - - - +
+
{{ labels.ASSET_LABEL }}{{ labels.ASSET_LABEL }}Engagement NameJIRAStatusPeriodLeadTestsEngagement
+ + + + + + + + {% if system_settings.enable_jira %} + + {% endif %} + + + + + + + - - {% for p in products %} - {% for e in p.engagement_set.all %} - - - - - - {% if system_settings.enable_jira %} - - {% endif %} - - + {% for p in products %} + {% for e in p.engagement_set.all %} + + - - - - + +
  • + + View Active Findings + +
  • +
  • + + View Active and Verified Findings + +
  • +
  • + + View Mitigated Findings + +
  • +
  • + + View Accepted Findings + +
  • +
  • + + View All Findings + +
  • + +
  • + + Engagement Report + +
  • + {% if e|has_object_permission:"Engagement_Delete" %} + +
  • + + Delete Engagement + +
  • + {% endif %} + + + + + + + + + {% if system_settings.enable_jira %} + + {% endif %} + + + + + + - {% endfor %} - {% endfor %} - -
    {{ labels.ASSET_LABEL }}{{ labels.ASSET_LABEL }}IDEngagement NameJIRAStatusPeriodLeadTestsEngagement
    -
    -
    {{ p.name }} - {% include "dojo/snippets/tags.html" with tags=p.tags.all %} - - {{ p.prod_type.name }} - - {% if e.name %}{{ e.name }}{% endif %} - {% include "dojo/snippets/tags.html" with tags=e.tags.all %} -
    -
    - {{ e|jira_project_tag }} - {{ e.status }} {{ e.target_start }} - {{ e.target_end }} - {% if e.is_overdue and e.active and e.status != 'Completed' %} -
    {{ e.target_end|overdue }} overdue
    +
    +
    +
    {{ e.lead.first_name }} {{ e.lead.last_name }} - - {{ e.test_count }} - - - {% if p|has_object_permission:"Engagement_Add" %} - - Add - + {% if e|has_object_permission:"Import_Scan_Result" %} +
  • + + Import Scan Results + +
  • {% endif %} -
    {{ p.name }} + {% include "dojo/snippets/tags.html" with tags=p.tags.all %} + + {{ p.prod_type.name }} + + {% if e.engagement_id %} + {{ e.engagement_id }} + {% endif %} + + {% if e.name %}{{ e.name }}{% endif %} + {% include "dojo/snippets/tags.html" with tags=e.tags.all %} +
    +
    + {{ e|jira_project_tag }} + {{ e.status }} {{ e.target_start }} - {{ e.target_end }} + {% if e.is_overdue and e.active and e.status != 'Completed' %} + +
    {{ e.target_end|overdue }} overdue
    +
    + {% endif %} +
    {{ e.lead.first_name }} {{ e.lead.last_name }} + + {{ e.test_count }} + + + {% if p|has_object_permission:"Engagement_Add" %} + + Add + + {% endif %} +
    -
    -
    - {% include "dojo/paging_snippet.html" with page=products page_size=True %} -
    - {% else %} -
    No engagements found
    - {% endif %} + {% endfor %} + {% endfor %} + +
    +
    + {% include "dojo/paging_snippet.html" with page=products page_size=True %} +
    + {% else %} +
    +
    No engagements found
    +
    + {% endif %}
    +
    {% endblock %} {% block postscript %} - {{ block.super }} - - + - + - {% include "dojo/filter_js_snippet.html" %} -{% endblock %} + {% endif %} + }); + +{% include "dojo/filter_js_snippet.html" %} +{% endblock %} \ No newline at end of file diff --git a/dojo/templates/dojo/findings_list_snippet.html b/dojo/templates/dojo/findings_list_snippet.html index 31dd287e54b..ae9d37ce04c 100644 --- a/dojo/templates/dojo/findings_list_snippet.html +++ b/dojo/templates/dojo/findings_list_snippet.html @@ -1,1208 +1,1208 @@ -{% load navigation_tags %} -{% load display_tags %} -{% load authorization_tags %} -{% load get_endpoint_status %} -{% load static %} -{% load i18n %} -{% block findings_list %} -
    -
    -
    -
    -

    - {% blocktrans %}{{ filter_name }} Findings{% endblocktrans %} - -

    -
    -
    - {% include "dojo/filter_snippet.html" with form=filtered.form %} -
    -
    - {% if findings %} -
    {% include "dojo/paging_snippet.html" with page=findings page_size=True %}
    - {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} - - {% endif %} -
    - - - - {% block header %} - {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} - - {% endif %} - - - - - - - - - - - - - {% if system_settings.enable_finding_sla %} - - {% endif %} - - - - {% if system_settings.enable_jira %} - {% if jira_project and product_tab or not product_tab %} - - - - {% endif %} - {% endif %} - {% if 'is_finding_groups_enabled'|system_setting_enabled %} - - {% endif %} - {% if show_product_column and product_tab is None %} - - {% endif %} - - - {% if filter_name != 'Closed' %} - - {% endif %} - {% endblock header %} - - - - {% for finding in findings %} - - {% block body %} - {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} - - {% endif %} - - - - - - - - - - - {% if filter_name == 'Closed' %} - - - {% if system_settings.enable_finding_sla %} - - {% endif %} - - - - {% if system_settings.enable_jira %} - {% if jira_project and product_tab or not product_tab %} - - - - {% endif %} - {% endif %} - {% if 'is_finding_groups_enabled'|system_setting_enabled %} - - {% endif %} - {% if show_product_column and product_tab is None %} - - {% endif %} - - - - {% if filter_name != 'Closed' %} - - {% endif %} - {% endblock body %} - - {% endfor %} - -
    - - - {% dojo_sort request 'Severity' 'numerical_severity' %} - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - {% dojo_sort request 'Name' 'title' %} - - {% dojo_sort request 'CWE' 'cwe' %} - - {% trans "Vulnerability Id" %} - - {% trans "EPSS Score" %} - - {% trans "EPSS Percentile" %} - - {% trans "Known Exploited" %} - - {% trans "Used in Ransomware" %} - - {% trans "Date Added to KEV" %} - - {% if filter_name == 'Closed' %} - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - {% dojo_sort request 'Closed Date' 'mitigated' %} - {% else %} - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - {% dojo_sort request 'Date' 'date' %} - {% endif %} - - {% dojo_sort request 'Age' 'date' %} - - {% dojo_sort request 'SLA' 'sla_age_days' %} - - {% trans "Reporter" %} - - {% trans "Found By" %} - - {% trans "Status" %} - - {% trans "Jira" %} - - {% trans "JIRA Age" %} - - {% trans "JIRA Change" %} - - {% trans "Group" %} - - {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} - {% dojo_sort request labels.ASSET_LABEL 'test__engagement__product__name' %} - - {% dojo_sort request 'Service' 'service' %} - - {% dojo_sort request 'Planned Remediation' 'planned_remediation_date' %} - - {% trans "Reviewers" %} -
    -
    - -
    -
    -
    - -
    -
    - - {{ finding.severity_display }} - - - {% if finding.title %} - {{ finding.title|truncatechars_html:60 }} - {% else %} - {{ finding.id }} - {% endif %} - {% if finding.file_path %} - - {% endif %} - {% if finding.endpoints.all %} - - {% endif %} - {% if finding.component_name %} - - {% endif %} - {% if finding.notes.all %} - - - ({{ finding.notes.count }}) - - {% endif %} - {% include "dojo/snippets/tags.html" with tags=finding.tags.all %} - - {% if finding.cwe > 0 %} - - {{ finding.cwe|default:"" }} - - {% endif %} - - {% with finding|first_vulnerability_id as first_vulnerability_id %} - {% if first_vulnerability_id %} - {% if first_vulnerability_id|has_vulnerability_url %} - - {{ first_vulnerability_id }} - - {% else %} - {{ first_vulnerability_id }} - {% endif %} - {% endif %} - {% endwith %} - - {{ finding.epss_score|format_epss }} - - {{ finding.epss_percentile|format_epss }} - - {{ finding.known_exploited|yesno|capfirst }} - - {{ finding.ransomware_used|yesno|capfirst }} - - {{ finding.kev_date|date }} - - {{ finding.mitigated|date }} - {% else %} - - {{ finding.date }} - {% endif %} - - {{ finding.age }} - - {{ finding|finding_sla }} - - {% if finding.reporter.get_full_name and finding.reporter.get_full_name.strip %} - {{ finding.reporter.get_full_name }} - {% else %} - {{ finding.reporter }} - {% endif %} - - {% if finding.found_by %} - {{ finding.found_by.all|join:", " }} - {% else %} - {{ finding.test.test_type }} - {% endif %} - - {{ finding|finding_display_status|safe }} {{ finding|import_history }} - - {% if finding.has_jira_group_issue %} - {{ finding.finding_group | jira_key }} - {% elif finding.has_jira_issue %} - {{ finding | jira_key }} - {% endif %} - - {% if finding.has_jira_group_issue %} - {{ finding.finding_group | jira_creation | timesince }} - {% else %} - {{ finding | jira_creation | timesince }} - {% endif %} - - {% if finding.has_jira_group_issue %} - {{ finding.finding_group | jira_change | timesince }} - {% else %} - {{ finding | 
jira_change | timesince }} - {% endif %} - - {% if finding.has_finding_group %} - {{ finding.finding_group.name }} - {% endif %} - - {{ finding.test.engagement.product }} - - {% if finding.service %}{{ finding.service }}{% endif %} - - {% if finding.planned_remediation_date %}{{ finding.planned_remediation_date }}{% endif %} - - {% if finding.planned_remediation_version %}{{ finding.planned_remediation_version }}{% endif %} - - {% if finding.reviewers %} - {% for reviewer in finding.reviewers.all %} - {{reviewer.get_full_name}} - {% if not forloop.last %}
    {% endif %} - {% endfor %} - {% endif %} -
    -
    -
    - {% include "dojo/paging_snippet.html" with page=findings page_size=True %} -
    - {% else %} -
    -

    - {% trans "No findings found." %} -

    -
    - {% endif %} - -
    -
    -{% endblock %} -{% block postscript %} - - - - - {% include "dojo/filter_js_snippet.html" %} - {% include "dojo/snippets/selectpicker_in_dropdown.html" %} -{% endblock %} +{% load navigation_tags %} +{% load display_tags %} +{% load authorization_tags %} +{% load get_endpoint_status %} +{% load static %} +{% load i18n %} +{% block findings_list %} +
    +
    +
    +
    +

    + {% blocktrans %}{{ filter_name }} Findings{% endblocktrans %} + +

    +
    +
    + {% include "dojo/filter_snippet.html" with form=filtered.form %} +
    +
    + {% if findings %} +
    {% include "dojo/paging_snippet.html" with page=findings page_size=True %}
    + {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} + + {% endif %} +
    + + + + {% block header %} + {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} + + {% endif %} + + + + + + + + + + + + + {% if system_settings.enable_finding_sla %} + + {% endif %} + + + + {% if system_settings.enable_jira %} + {% if jira_project and product_tab or not product_tab %} + + + + {% endif %} + {% endif %} + {% if 'is_finding_groups_enabled'|system_setting_enabled %} + + {% endif %} + {% if show_product_column and product_tab is None %} + + {% endif %} + + + {% if filter_name != 'Closed' %} + + {% endif %} + {% endblock header %} + + + + {% for finding in findings %} + + {% block body %} + {% if not product_tab or product_tab and product_tab.product|has_object_permission:"Finding_Edit" %} + + {% endif %} + + + + + + + + + + + {% if filter_name == 'Closed' %} + + + {% if system_settings.enable_finding_sla %} + + {% endif %} + + + + {% if system_settings.enable_jira %} + {% if jira_project and product_tab or not product_tab %} + + + + {% endif %} + {% endif %} + {% if 'is_finding_groups_enabled'|system_setting_enabled %} + + {% endif %} + {% if show_product_column and product_tab is None %} + + {% endif %} + + + + {% if filter_name != 'Closed' %} + + {% endif %} + {% endblock body %} + + {% endfor %} + +
    + + + {% dojo_sort request 'Severity' 'numerical_severity' %} + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + {% dojo_sort request 'Name' 'title' %} + + {% dojo_sort request 'CWE' 'cwe' %} + + {% trans "Vulnerability Id" %} + + {% trans "EPSS Score" %} + + {% trans "EPSS Percentile" %} + + {% trans "Known Exploited" %} + + {% trans "Used in Ransomware" %} + + {% trans "Date Added to KEV" %} + + {% if filter_name == 'Closed' %} + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + {% dojo_sort request 'Closed Date' 'mitigated' %} + {% else %} + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + {% dojo_sort request 'Date' 'date' %} + {% endif %} + + {% dojo_sort request 'Age' 'date' %} + + {% dojo_sort request 'SLA' 'sla_age_days' %} + + {% trans "Reporter" %} + + {% trans "Found By" %} + + {% trans "Status" %} + + {% trans "Jira" %} + + {% trans "JIRA Age" %} + + {% trans "JIRA Change" %} + + {% trans "Group" %} + + {% comment %} The display field is translated in the function. No need to translate here as well{% endcomment %} + {% dojo_sort request labels.ASSET_LABEL 'test__engagement__product__name' %} + + {% dojo_sort request 'Service' 'service' %} + + {% dojo_sort request 'Planned Remediation' 'planned_remediation_date' %} + + {% trans "Reviewers" %} +
    +
    + +
    +
    +
    + +
    +
    + + {{ finding.severity_display }} + + + {% if finding.title %} + {{ finding.title|truncatechars_html:60 }} + {% else %} + {{ finding.id }} + {% endif %} + {% if finding.file_path %} + + {% endif %} + {% if finding.endpoints.all %} + + {% endif %} + {% if finding.component_name %} + + {% endif %} + {% if finding.notes.all %} + + + ({{ finding.notes.count }}) + + {% endif %} + {% include "dojo/snippets/tags.html" with tags=finding.tags.all %} + + {% if finding.cwe > 0 %} + + {{ finding.cwe|default:"" }} + + {% endif %} + + {% with finding|first_vulnerability_id as first_vulnerability_id %} + {% if first_vulnerability_id %} + {% if first_vulnerability_id|has_vulnerability_url %} + + {{ first_vulnerability_id }} + + {% else %} + {{ first_vulnerability_id }} + {% endif %} + {% endif %} + {% endwith %} + + {{ finding.epss_score|format_epss }} + + {{ finding.epss_percentile|format_epss }} + + {{ finding.known_exploited|yesno|capfirst }} + + {{ finding.ransomware_used|yesno|capfirst }} + + {{ finding.kev_date|date }} + + {{ finding.mitigated|date }} + {% else %} + + {{ finding.date }} + {% endif %} + + {{ finding.age }} + + {{ finding|finding_sla }} + + {% if finding.reporter.get_full_name and finding.reporter.get_full_name.strip %} + {{ finding.reporter.get_full_name }} + {% else %} + {{ finding.reporter }} + {% endif %} + + {% if finding.found_by %} + {{ finding.found_by.all|join:", " }} + {% else %} + {{ finding.test.test_type }} + {% endif %} + + {{ finding|finding_display_status|safe }} {{ finding|import_history }} + + {% if finding.has_jira_group_issue %} + {{ finding.finding_group | jira_key }} + {% elif finding.has_jira_issue %} + {{ finding | jira_key }} + {% endif %} + + {% if finding.has_jira_group_issue %} + {{ finding.finding_group | jira_creation | timesince }} + {% else %} + {{ finding | jira_creation | timesince }} + {% endif %} + + {% if finding.has_jira_group_issue %} + {{ finding.finding_group | jira_change | timesince }} + {% else %} + {{ finding | 
jira_change | timesince }} + {% endif %} + + {% if finding.has_finding_group %} + {{ finding.finding_group.name }} + {% endif %} + + {{ finding.test.engagement.product }} + + {% if finding.service %}{{ finding.service }}{% endif %} + + {% if finding.planned_remediation_date %}{{ finding.planned_remediation_date }}{% endif %} + + {% if finding.planned_remediation_version %}{{ finding.planned_remediation_version }}{% endif %} + + {% if finding.reviewers %} + {% for reviewer in finding.reviewers.all %} + {{reviewer.get_full_name}} + {% if not forloop.last %}
    {% endif %} + {% endfor %} + {% endif %} +
    +
    +
    + {% include "dojo/paging_snippet.html" with page=findings page_size=True %} +
    + {% else %} +
    +

    + {% trans "No findings found." %} +

    +
    + {% endif %} + +
    +
    +{% endblock %} +{% block postscript %} + + + + + {% include "dojo/filter_js_snippet.html" %} + {% include "dojo/snippets/selectpicker_in_dropdown.html" %} +{% endblock %} diff --git a/dojo/templates/dojo/snippets/engagement_list.html b/dojo/templates/dojo/snippets/engagement_list.html index 757b6e222ef..75b6330f635 100644 --- a/dojo/templates/dojo/snippets/engagement_list.html +++ b/dojo/templates/dojo/snippets/engagement_list.html @@ -2,363 +2,379 @@ {% load display_tags %} {% load authorization_tags %}
    -
    -
    -

    {% if status == "open" %}Active{% elif status == "paused" %}Paused {% else %}Closed{% endif %} Engagements ({{ count }}) -

    +
    +
    + {% include "dojo/filter_snippet.html" with form=filter.form %} +
    -
    - {% include "dojo/paging_snippet.html" with page=engs prefix=prefix page_size=True %} -
    - {% if engs %} - - - - - - - - - - {% if system_settings.enable_jira %} - - {% endif %} - - - - - - - {% if status == "paused" or status == "closed" %} - - {% endif %} - - - - {% for eng in engs %} - - - - - - + + + + + + {% if status == "paused" or status == "closed" %} + + {% endif %} + {% endfor %} + +
    NameTypeLeadDateLengthJIRATestsActive (Verified / Fixable)MitigatedAcceptedAllDuplicatesStatus
    -
    -
    - - {{ eng.name|truncatechars_html:35|default:"N/A" }} - {% if eng.version %} - - - {{ eng.version }} - - {% endif %} - {% include "dojo/snippets/tags.html" with tags=eng.tags.all %} - {{ eng.engagement_type }} - {% if eng.lead.get_full_name and eng.lead.get_full_name.strip %} - {{ eng.lead.get_full_name }} - {% else %} - {{ eng.lead |default_if_none:""}} - {% endif %} - - - {{ eng.target_start|date:"jS F" }} {% if eng.target_start|datediff_time:eng.target_end != "1 day" %} - {{ eng.target_end|date:"jS F" }}{% endif %} +
    + {% include "dojo/paging_snippet.html" with page=engs prefix=prefix page_size=True %} +
    + {% if engs %} + + + + + + + + + + {% if system_settings.enable_jira %} + + {% endif %} + + + + + + + {% if status == "paused" or status == "closed" %} + + {% endif %} + + + + {% for eng in engs %} + + - - {% if system_settings.enable_jira %} - - {% endif %} - - - - - - - {% if status == "paused" or status == "closed" %} - + + + + + + {% if system_settings.enable_jira %} + + {% endif %} + - {% endif %} - {% endfor %} - -
    NameTypeLeadDateLengthJIRATestsActive (Verified / Fixable)MitigatedAcceptedAllDuplicatesStatus
    + {{ eng.target_start|datediff_time:eng.target_end }} - {% if status == "open" %} - {% if eng.is_overdue and eng.status != 'Completed' %} - -
    - {{ eng.target_end|overdue }} overdue -
    -
    - {% endif %} - {% if eng.count_tests == 0 %} -   -
    - no tests -
    -
    - {% endif %} - {% if eng.count_findings_all == 0 %} -   -
    - no findings -
    -
    - {% endif %} - {% endif %} -
    - {{ eng|jira_project_tag }} - - - {{ eng.count_findings_open }} ({{ eng.count_findings_open_verified}}/{{ eng.count_findings_fix_available}}){{ eng.count_findings_close }}{{ eng.count_findings_accepted }}{{ eng.count_findings_all }}{{ eng.count_findings_duplicate }} - {% if eng.status == "Blocked" %} - - {% elif eng.status == "On Hold" %} - + + +
  • + {% endif %} + {% if eng|has_object_permission:"Test_Add" %} +
  • + + Add Tests + +
  • + {% endif %} + {% if eng|has_object_permission:"Import_Scan_Result" %} +
  • + + Import Scan Results + +
  • + {% endif %} +
  • +
  • + + View Active Findings + +
  • +
  • + + View Active and Verified Findings + +
  • +
  • + + View Mitigated Findings + +
  • +
  • + + View Accepted Findings + +
  • +
  • + + View All Findings + +
  • +
  • +
  • + + Engagement Report + +
  • + {% if eng|has_object_permission:"Engagement_Delete" %} +
  • +
  • + + Delete Engagement + +
  • + {% endif %} + + + +
    + + {{ eng.name|truncatechars_html:35|default:"N/A" }} + {% if eng.version %} + + + {{ eng.version }} + + {% endif %} + {% include "dojo/snippets/tags.html" with tags=eng.tags.all %} + {{ eng.engagement_type }} + {% if eng.lead.get_full_name and eng.lead.get_full_name.strip %} + {{ eng.lead.get_full_name }} + {% else %} + {{ eng.lead |default_if_none:""}} + {% endif %} + + + {{ eng.target_start|date:"jS F" }} {% if eng.target_start|datediff_time:eng.target_end != "1 day" %} - {{ + eng.target_end|date:"jS F" }}{% endif %} + + {{ eng.target_start|datediff_time:eng.target_end }} + {% if status == "open" %} + {% if eng.is_overdue and eng.status != 'Completed' %} + +
    + {{ eng.target_end|overdue }} overdue +
    +
    + {% endif %} + {% if eng.count_tests == 0 %} +   +
    + no tests +
    +
    + {% endif %} + {% if eng.count_findings_all == 0 %} +   +
    + no findings +
    +
    + {% endif %} + {% endif %} +
    + {{ eng|jira_project_tag }} + +
    - {% else %} -
    + {% else %} + {{ eng.count_tests }} + {% endif %} - {% endif %} +
    {{ eng.count_findings_open }} ({{ + eng.count_findings_open_verified}}/{{ eng.count_findings_fix_available}}){{ eng.count_findings_close }}{{ eng.count_findings_accepted }}{{ eng.count_findings_all }}{{ eng.count_findings_duplicate }} + {% if eng.status == "Blocked" %} + + {% elif eng.status == "On Hold" %} + + {% else %} + + {% endif %} + {{ eng.status }} + +
    + {% else %} +
    +

    No {% if status == "open" %}active{% elif status == "paused" %}paused{% else %}closed{% + endif %} engagements found.

    +
    + {% endif %} -
    - {% include "dojo/paging_snippet.html" with page=engs prefix=prefix page_size=True %} -
    +
    + {% include "dojo/paging_snippet.html" with page=engs prefix=prefix page_size=True %}
    +
    {% block postscript %} - {% endblock %} + +{% endblock %} \ No newline at end of file diff --git a/dojo/templates/dojo/view_eng.html b/dojo/templates/dojo/view_eng.html index ab09dadb7c5..7c6e3be7366 100644 --- a/dojo/templates/dojo/view_eng.html +++ b/dojo/templates/dojo/view_eng.html @@ -5,1084 +5,1175 @@ {% load authorization_tags %} {% load static %} {% block add_styles %} - .tooltip-inner { - max-width: 350px; - } +.tooltip-inner { +max-width: 350px; +} {% endblock %} {% block content %} -
    -
    -
    -
    -
    -

    - Description -

    - +
    +
    +
    + {% if eng.description %} + {{ eng.description|markdown_render }} + {% else %} + There is no description. + {% endif %} +
    +
    + {% if eng.preset %} +
    +
    +
    +
    +
    +

    + Engagement Presets {{ eng.preset.title|truncatechars_html:60 }} +

    + {% if eng.product|has_object_permission:"Product_Edit" %} + + {% endif %} +
    +
    +
    + + + + + + + + + + + + + +
    Test TypeNetwork
    + {% if preset_test_type.count > 1 %} + {% for test in preset_test_type %} + {{test.name}}{%if not forloop.last%},{%endif%} + {% endfor %} {% else %} - - Reopen Engagement - + {{ preset_test_type.0.name }} {% endif %} - - {% endif %} -
  • - - Report - -
  • -
  • - - Add To Calendar - -
  • -
  • - - View History - -
  • - {% if eng|has_object_permission:"Engagement_Edit" %} - -
  • - {% if eng.test_strategy %} - View Test - Strategy +
  • + {% if network.count > 1 %} + {% for net in network %} + {{ net.location }}{%if not forloop.last%},{%endif%} + {% endfor %} {% else %} - Add a Test Strategy + {{ network.0.location }} {% endif %} - - {% if threat != 'none' %} +
    +
    +
    + {% if eng.preset.notes %} + Notes: {{ eng.preset.notes|markdown_render }} + {% else %} + No test notes found. + {% endif %} + {% if eng.preset.scope %} + Scope: {{ eng.preset.scope|markdown_render }} + {% else %} + Testing scope not specified. + {% endif %} +
    +
    +
    +
    + {% endif %} +
    +
    +
    +
    +
    +

    + Tests ({{tests.paginator.count}}) {{ eng.id|get_severity_count:"engagement" + }} + +

    -
    -
    - {% if eng.description %} - {{ eng.description|markdown_render }} - {% else %} - There is no description. - {% endif %} -
    -
    - {% if eng.preset %} -
    -
    -
    -
    -
    -

    - Engagement Presets {{ eng.preset.title|truncatechars_html:60 }} -

    - {% if eng.product|has_object_permission:"Product_Edit" %} -
    -
    -
    -
    -
    -

    Risk Acceptance - {% if eng.product.enable_full_risk_acceptance %} - {% if eng|has_object_permission:"Risk_Acceptance" %} - - +

    +
    +
    +
    +
    +

    Risk Acceptance + {% if eng.product.enable_full_risk_acceptance %} + {% if eng|has_object_permission:"Risk_Acceptance" %} + + + {% endif %} + {% endif %} +

    +
    + {% if risks_accepted %} +
    + + + + {% block risk_acceptance_header %} + + + + + + + + + + + {% endblock risk_acceptance_header %} + + + + {% for risk_acceptance in risks_accepted %} + + {% block risk_acceptances %} + + + + + + + + + {% if risk_acceptance.filename %} + + {% else %} + {% endif %} - {% endif %} - - - {% if risks_accepted %} + + {% endblock risk_acceptances %} + + {% endfor %} + +
    DateAccepted ByNameDecisionExpirationFindingsProofOwner
    +
      + +
    +
    {{ + risk_acceptance.created|date }}{{ risk_acceptance.accepted_by }}{{ + risk_acceptance.name }} + {{ risk_acceptance.get_decision_display|default_if_none:"" }} + {% if risk_acceptance.decision_details %} +   + {% endif %} + + {% if risk_acceptance.expiration_date %} + {{ risk_acceptance.expiration_date|date }} + {% else %} + Never + {% endif %} + {{ + risk_acceptance.accepted_findings_count }}Yes +   + No{{ risk_acceptance.owner.get_full_name }}
    +
    + {% else %} +
    + No Risk Acceptances found. +
    + {% endif %} +
    +
    +
    + {% block global_risk_acceptances %}{% endblock %} +
    +
    +

    Additional Features + +

    +
    +
    + {% if eng.engagement_type == "Interactive" and system_settings.enable_checklists %} +
    +
    +

    Checklist +   + + + {% if eng|has_object_permission:"Engagement_Edit" %} + {% if check %} + + + {% else %} + + + {% endif %} + {% endif %} +

    +
    +
    + {% if check %} +
    - +
    - {% block risk_acceptance_header %} - - - - - - - - - - - {% endblock risk_acceptance_header %} + + + + + + + + - {% for risk_acceptance in risks_accepted %} - - {% block risk_acceptances %} - - - - - - - - - {% if risk_acceptance.filename %} - - {% else %} - - {% endif %} - - {% endblock risk_acceptances %} - - {% endfor %} + + + + + + + + + +
    DateAccepted ByNameDecisionExpirationFindingsProofOwnerSessionEncryptionConfigurationAuthenticationAuthorizationData InputSensitive DataOther
    -
      - -
    -
    {{ risk_acceptance.created|date }}{{ risk_acceptance.accepted_by }}{{ risk_acceptance.name }} - {{ risk_acceptance.get_decision_display|default_if_none:"" }} - {% if risk_acceptance.decision_details %} -   - {% endif %} - - {% if risk_acceptance.expiration_date %} - {{ risk_acceptance.expiration_date|date }} - {% else %} - Never - {% endif %} - {{ risk_acceptance.accepted_findings_count }}Yes -   - No{{ risk_acceptance.owner.get_full_name }}
    {{ + check.session_management }}{{ + check.encryption_crypto }}{{ + check.configuration_management }}{{ + check.authentication }}{{ + check.authorization_and_access_control }}{{ + check.data_input_sanitization_validation }}{{ + check.sensitive_data }}{{ + check.other }}
    +
    {% else %} +
    - No Risk Acceptances found. + Checklist has not been completed.
    +
    {% endif %}
    -
    - {% block global_risk_acceptances %}{% endblock %} -
    -
    -

    Additional Features - -

    -
    -
    - {% if eng.engagement_type == "Interactive" and system_settings.enable_checklists %} -
    -
    -

    Checklist -   - + {% endif %} + {% if system_settings.enable_questionnaires %} +
    +
    +

    Questionnaires +   + - {% if eng|has_object_permission:"Engagement_Edit" %} - {% if check %} - - - {% else %} - - - {% endif %} - {% endif %} -

    + {% if eng|has_object_permission:"Engagement_Edit" %} + {% add_surveys eng %} + {% endif %} +

    +
    +
    +
    +
    +
    + {% show_surveys eng users %} +
    -
    - {% if check %} -
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - -
    SessionEncryptionConfigurationAuthenticationAuthorizationData InputSensitive DataOther
    {{ check.session_management }}{{ check.encryption_crypto }}{{ check.configuration_management }}{{ check.authentication }}{{ check.authorization_and_access_control }}{{ check.data_input_sanitization_validation }}{{ check.sensitive_data }}{{ check.other }}
    + - {% endif %} - {% if system_settings.enable_questionnaires %} +
    +
    + {% endif %} +
    +
    +

    Notes + + +

    +
    +
    + {% if eng|has_object_permission:"Note_Add" %} +
    + {% csrf_token %} + {% include "dojo/form_fields.html" with form=form %} +
    +
    + +
    +
    +
    + {% endif %}
    -

    Questionnaires -   - - - {% if eng|has_object_permission:"Engagement_Edit" %} - {% add_surveys eng %} - {% endif %} +

    Note Log + +

    -
    -
    -
    -
    - {% show_surveys eng users %} -
    -
    -