diff --git a/.github/renovate.json b/.github/renovate.json
index ad75c1a2b51..dbf852063ea 100644
--- a/.github/renovate.json
+++ b/.github/renovate.json
@@ -1,21 +1,38 @@
 {
   "extends": [
-    "config:base"
+    "config:recommended"
   ],
   "dependencyDashboard": true,
   "dependencyDashboardApproval": false,
-  "baseBranches": ["dev"],
+  "baseBranchPatterns": ["dev"],
   "rebaseWhen": "conflicted",
   "separateMinorPatch": true,
-  "ignorePaths": ["requirements.txt", "requirements-lint.txt", "components/package.json", "components/package-lock.json", "dojo/components/yarn.lock", "dojo/components/package.json", "Dockerfile**"],
+  "ignorePaths": [
+    "requirements.txt",
+    "requirements-lint.txt",
+    "components/package.json",
+    "components/package-lock.json",
+    "dojo/components/yarn.lock",
+    "dojo/components/package.json",
+    "Dockerfile**"
+  ],
   "ignoreDeps": [],
   "packageRules": [{
-    "packagePatterns": ["*"],
-    "commitMessageExtra": "from {{currentVersion}} to {{#if isMajor}}v{{{newMajor}}}{{else}}{{#if isSingleVersion}}v{{{toVersion}}}{{else}}{{{newValue}}}{{/if}}{{/if}}",
+    "matchPackageNames": ["*"],
+    "commitMessageExtra": "from {{currentVersion}} to {{#if isMajor}}v{{{newMajor}}}{{else}}{{#if isSingleVersion}}v{{{newVersion}}}{{else}}{{{newValue}}}{{/if}}{{/if}}",
     "commitMessageSuffix": "({{packageFile}})",
     "labels": ["dependencies"]
   }],
-  "registryAliases": {
-    "bitnami": "https://charts.bitnami.com/bitnami"
-  }
+  "customManagers": [
+    {
+      "customType": "regex",
+      "managerFilePatterns": [
+        "/^.github/workflows//"
+      ],
+      "matchStrings": [
+        "\\w*:\\s[\"']?(?<currentValue>\\S*[^\"']?)[\"']?\\s#\\s*renovate:\\s*datasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?\\s"
+      ],
+      "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}"
+    }
+  ]
 }
diff --git a/.github/workflows/build-docker-images-for-testing.yml b/.github/workflows/build-docker-images-for-testing.yml
index 1d9eab75e39..53e44b5e6a9 100644
--- a/.github/workflows/build-docker-images-for-testing.yml
+++ b/.github/workflows/build-docker-images-for-testing.yml
@@ -40,7 +40,7 @@ jobs:
           echo $GITHUB_ENV
 
       - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          persist-credentials: false
diff --git a/.github/workflows/detect-merge-conflicts.yaml b/.github/workflows/detect-merge-conflicts.yaml
index f3bdda58562..3b8a791d4a6 100644
--- a/.github/workflows/detect-merge-conflicts.yaml
+++ b/.github/workflows/detect-merge-conflicts.yaml
@@ -7,7 +7,7 @@ on:
       - master
       - bugfix
       - release/*
-
+
   pull_request_target:
     types: [synchronize]
@@ -16,6 +16,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: check if prs are conflicted
+        # we experience a high error rate, so we allow this step to fail while still letting the check turn green on the PR
+        continue-on-error: true
        uses: eps1lon/actions-label-merge-conflict@1df065ebe6e3310545d4f4c4e862e43bdca146f0 # v3.0.3
        with:
          dirtyLabel: "conflicts-detected"
diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index 5a749e0946f..dbe202e1c0c 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -15,13 +15,13 @@ jobs:
      - name: Setup Hugo
        uses: peaceiris/actions-hugo@75d2e84710de30f6ff7268e08f310b60ef14033f # v3.0.0
        with:
-          hugo-version: '0.140.1'
+          hugo-version: '0.140.1' # renovate: datasource=github-releases depName=gohugoio/hugo versioning=loose
          extended: true
 
      - name: Setup Node
        uses: 
actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
        with:
-          node-version: '22.20.0'
+          node-version: '22.20.0' # TODO: Renovate helper might not be needed here - needs to be fully tested
 
      - name: Cache dependencies
        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
diff --git a/.github/workflows/helm-docs-updates.yml b/.github/workflows/helm-docs-updates.yml
index 99677da43f0..0d70215e146 100644
--- a/.github/workflows/helm-docs-updates.yml
+++ b/.github/workflows/helm-docs-updates.yml
@@ -13,7 +13,7 @@ jobs:
   docs_updates:
     name: Update documentation
     runs-on: ubuntu-latest
-    if: startsWith(github.head_ref, 'renovate/') or startsWith(github.head_ref, 'dependabot/')
+    if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/')
     steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml
index dc30f685793..475a0e1715a 100644
--- a/.github/workflows/k8s-tests.yml
+++ b/.github/workflows/k8s-tests.yml
@@ -27,7 +27,7 @@ jobs:
 # are tested (https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions)
          - databases: pgsql
            brokers: redis
-            k8s: 'v1.34.0'
+            k8s: 'v1.34.0' # renovate: datasource=github-releases depName=kubernetes/kubernetes versioning=loose
            os: debian
    steps:
      - name: Checkout
@@ -36,7 +36,7 @@ jobs:
      - name: Setup Minikube
        uses: manusa/actions-setup-minikube@b589f2d61bf96695c546929c72b38563e856059d # v2.14.0
        with:
-          minikube version: 'v1.37.0'
+          minikube version: 'v1.37.0' # renovate: datasource=github-releases depName=kubernetes/minikube versioning=loose
          kubernetes version: ${{ matrix.k8s }}
          driver: docker
          start args: '--addons=ingress --cni calico'
diff --git a/.github/workflows/renovate.yaml b/.github/workflows/renovate.yaml
new file mode 100644
index 00000000000..0b9ee77e1c7
--- /dev/null
+++ b/.github/workflows/renovate.yaml
@@ -0,0 +1,24 @@
+name: "Renovate validation"
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - dev
+      - master
+      - bugfix
+      - release/*
+
+jobs:
+  main:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
+        with:
+          persist-credentials: false
+
+      - name: validate
+        uses: suzuki-shunsuke/github-action-renovate-config-validator@c22827f47f4f4a5364bdba19e1fe36907ef1318e # v1.1.1
+        with:
+          strict: "true"
+          validator_version: 41.146.0 # renovate: datasource=github-releases depName=renovatebot/renovate
diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml
index 598ebf995d4..740780b704a 100644
--- a/.github/workflows/shellcheck.yml
+++ b/.github/workflows/shellcheck.yml
@@ -4,8 +4,8 @@ on:
   pull_request:
 env:
   SHELLCHECK_REPO: 'koalaman/shellcheck'
-  SHELLCHECK_VERSION: 'v0.9.0'
-  SHELLCHECK_SHA: '038fd81de6b7e20cc651571362683853670cdc71'
+  SHELLCHECK_VERSION: 'v0.9.0' # renovate: datasource=github-releases depName=koalaman/shellcheck versioning=loose
+  SHELLCHECK_SHA: '038fd81de6b7e20cc651571362683853670cdc71' # Renovate is not currently configured to update the hash - it needs to be updated manually for now
 jobs:
   shellcheck:
     runs-on: ubuntu-latest
diff --git a/.github/workflows/slack-pr-reminder.yml b/.github/workflows/slack-pr-reminder.yml
index fc7657e9148..55123816851 100644
--- a/.github/workflows/slack-pr-reminder.yml
+++ b/.github/workflows/slack-pr-reminder.yml
@@ -11,7 +11,7 @@ jobs:
    if: github.repository == 'DefectDojo/django-DefectDojo' # Notify 
only in core repo, not in forks - it would just fail in fork steps: - name: Notify reviewers in Slack - uses: DefectDojo-Inc/notify-pr-reviewers-action@master + uses: DefectDojo-Inc/notify-pr-reviewers-action@be26734e06338b41be6e70ce96027a51aa9ba9c6 # master with: owner: "DefectDojo" repository: "django-DefectDojo" diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index 934602cf7f8..f7e9199ab67 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: - python-version: 3.13 + python-version: 3.13 # Renovate helper is not needed here - name: Configure Helm repos run: |- @@ -34,8 +34,8 @@ jobs: - name: Set up chart-testing uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0 with: - yamale_version: 4.0.4 - yamllint_version: 1.35.1 + yamale_version: 4.0.4 # renovate: datasource=pypi depName=yamale versioning=semver + yamllint_version: 1.35.1 # renovate: datasource=pypi depName=yamllint versioning=semver - name: Determine target branch id: ct-branch-target diff --git a/.github/workflows/validate_docs_build.yml b/.github/workflows/validate_docs_build.yml index 223fa2a2a0c..c64f2a8f41c 100644 --- a/.github/workflows/validate_docs_build.yml +++ b/.github/workflows/validate_docs_build.yml @@ -12,13 +12,13 @@ jobs: - name: Setup Hugo uses: peaceiris/actions-hugo@75d2e84710de30f6ff7268e08f310b60ef14033f # v3.0.0 with: - hugo-version: '0.140.1' + hugo-version: '0.140.1' # renovate: datasource=github-releases depName=gohugoio/hugo versioning=loose extended: true - name: Setup Node uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 with: - node-version: '22.20.0' + node-version: '22.20.0' # TODO: Renovate helper might not be needed here - needs to be fully tested - name: Cache dependencies uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 diff --git a/components/package.json b/components/package.json index f9b97fa55a4..cbbde92364d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.51.1", + "version": "2.51.2", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/docs/assets/images/cvssv4_vector_builder.png b/docs/assets/images/cvssv4_vector_builder.png new file mode 100644 index 00000000000..57106ba4688 Binary files /dev/null and b/docs/assets/images/cvssv4_vector_builder.png differ diff --git a/docs/assets/images/hash_code_id_field.png b/docs/assets/images/hash_code_id_field.png new file mode 100644 index 00000000000..af767a68493 Binary files /dev/null and b/docs/assets/images/hash_code_id_field.png differ diff --git a/docs/assets/images/hash_code_status_column.png b/docs/assets/images/hash_code_status_column.png new file mode 100644 index 00000000000..c2e4a06c3be Binary files /dev/null and b/docs/assets/images/hash_code_status_column.png differ diff --git a/docs/assets/images/pro_cvss_vector_and_score.png b/docs/assets/images/pro_cvss_vector_and_score.png new file mode 100644 index 00000000000..e8121c5f15a Binary files /dev/null and b/docs/assets/images/pro_cvss_vector_and_score.png differ diff --git a/docs/assets/images/quick_report.png b/docs/assets/images/quick_report.png new file mode 100644 index 00000000000..54682e1fb4a Binary files /dev/null and b/docs/assets/images/quick_report.png differ diff --git 
a/docs/assets/images/webhook-state-transition-flow.png b/docs/assets/images/webhook-state-transition-flow.png
new file mode 100644
index 00000000000..48752b590a9
Binary files /dev/null and b/docs/assets/images/webhook-state-transition-flow.png differ
diff --git a/docs/content/en/about_defectdojo/pro_features.md b/docs/content/en/about_defectdojo/pro_features.md
index 50ef4e9c0de..9f6fa3110eb 100644
--- a/docs/content/en/about_defectdojo/pro_features.md
+++ b/docs/content/en/about_defectdojo/pro_features.md
@@ -54,6 +54,7 @@ See our [Connectors Guide](/en/connecting_your_tools/connectors/about_connectors
 Supported tools for Connectors include:
 
+* Anchore
 * AWS Security Hub
 * BurpSuite
 * Checkmarx ONE
diff --git a/docs/content/en/changelog/changelog.md b/docs/content/en/changelog/changelog.md
index 5cdd06d0262..e978214f0ac 100644
--- a/docs/content/en/changelog/changelog.md
+++ b/docs/content/en/changelog/changelog.md
@@ -8,28 +8,53 @@ Here are the release notes for **DefectDojo Pro (Cloud Version)**. These release
 
 For Open Source release notes, please see the [Releases page on GitHub](https://github.com/DefectDojo/django-DefectDojo/releases), or alternatively consult the Open Source [upgrade notes](/en/open_source/upgrading/upgrading_guide/).
 
+## Oct 2025: v2.51
+
+### Oct 14, 2025: v2.51.1
+
+* **(Pro UI)** Added the Finding Quick Report feature. Quick Report lets users render an HTML report of the Findings currently displayed in a Findings table.
+
+![image](images/quick_report.png)
+
+* **(Pro UI)** Added vector builder and calculator to the Edit Finding form, for CVSSv3 and CVSSv4. You can build vector strings using the 🛠️ button next to the CVSSv3 / CVSSv4 string entry on the Edit Finding form.
+
+Click the calculator button to render a score based on the vector string.
+
+![image](images/pro_cvss_vector_and_score.png)
+![image](images/cvssv4_vector_builder.png)
+
+* **(Pro UI)** Added Similar Findings view on Findings when enabled in System Settings.
+* **(Pro UI)** File names (for attached artifacts) can now be edited directly in the UI.
+* **(Pro UI)** Redirected users to Home after a successful Support Inquiry submission.
+
+### Oct 6, 2025: v2.51.0
+
+No significant Pro changes are present in this release.
+
 ## Sept 2025: v2.50
 
-### Sept 22, 2025: v2.50.4
+#### Sept 29, 2025: v2.50.4
 
-* **(Pro UI)** Changes Engagement Deduplication form label and help text
-* **(Pro UI)** Adds toggle for MCP (for superusers only)
+* **(MCP)** Added MCP toggle for Superusers only.
+* **(Pro UI)** Bypassed endpoint validation on Edit Finding form when Endpoints have not changed.
+* **(Pro UI)** Collapsed additional fields in the Universal Parser preview for cleaner display.
+* **(Pro UI)** Updated Engagement Deduplication form label and help text for clarity.
 
-### Sept 15, 2025: v2.50.3
+#### Sept 22, 2025: v2.50.3
 
 * **(Pro UI)** Added support for [CVSSv4.0](https://www.first.org/cvss/v4-0/) vector strings.
 
-### Sept 15, 2025: v2.50.2
+#### Sept 15, 2025: v2.50.2
 
 * **(Pro UI)** Added Any/All status filtering. Filtering by status allows you to apply either AND (inner join) logic, or OR (outer join) logic to the filter.
 * **(Pro UI)** Added Contact Support form for On-Premise installs. 
-### Sept 9, 2025: v2.50.1
+#### Sept 9, 2025: v2.50.1
 
 * **(Tools)** Removed CSV limit for Qualys HackerGuardian
 * **(SSO)** Removed Force Password Reset for users created via SSO
 
-### Sept 2, 2025: v2.50.0
+#### Sept 2, 2025: v2.50.0
 
 * **(Pro UI)** "Date During" filter has been added to the UI, allowing users to filter by a range of dates
 * **(Pro UI)** Vulnerability ID column can now be sorted, however the sorting only considers the **first** vulnerability ID.
@@ -40,7 +65,7 @@ For Open Source release notes, please see the [Releases page on GitHub](https://
 The Pro UI has been significantly reorganized, with changes to page organization.
 
 ![image](images/pro_ui_249.png)
 
-### August 25: 2.49.3
+#### August 25: 2.49.3
 
 [Integrations](/en/share_your_findings/integrations/) has been added to DefectDojo Pro, adding Jira-style integrations for Azure DevOps, GitHub and GitLab boards.
diff --git a/docs/content/en/connecting_your_tools/connectors/about_connectors.md b/docs/content/en/connecting_your_tools/connectors/about_connectors.md
index 990a855b971..d21670167fe 100644
--- a/docs/content/en/connecting_your_tools/connectors/about_connectors.md
+++ b/docs/content/en/connecting_your_tools/connectors/about_connectors.md
@@ -25,6 +25,7 @@ But everyone needs a starting point, and that's where Connectors come in. Connec
 We currently support Connectors for the following tools, with more on the way:
 
+* **Anchore**
 * **AWS Security Hub**
 * **BurpSuite**
 * **Checkmarx ONE**
diff --git a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md
index 6133b8d42a6..5ef730db7d1 100644
--- a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md
+++ b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md
@@ -20,6 +20,17 @@ Whenever possible, we recommend creating a new 'DefectDojo Bot' account within y
 # **Supported Connectors**
 
+## **Anchore**
+
+The Anchore connector uses a user's API token to pull data from Anchore Enterprise. Products will be mapped and discovered based on "Applications", which are composed of multiple Images in Anchore - see [Anchore Enterprise Documentation](https://docs.anchore.com/current/docs/sbom_management/application_groups/application_management_anchorectl/) for more information.
+
+#### Connector Mappings
+
+1. Enter the Anchore URL in the **Location** field: this is the URL where you access Anchore Enterprise.
+2. Enter a valid API Key in the **Secret** field. This is the API key associated with your Anchore service account.
+
+See the official [Anchore documentation](https://docs.anchore.com/current/docs/) for more information on creating a token for Anchore.
+
 ## **AWS Security Hub**
 
 The AWS Security Hub connector uses an AWS access key to interact with the Security Hub APIs.
diff --git a/docs/content/en/connecting_your_tools/parsers/_index.md b/docs/content/en/connecting_your_tools/parsers/_index.md
index d8b9937b669..031d47990b4 100644
--- a/docs/content/en/connecting_your_tools/parsers/_index.md
+++ b/docs/content/en/connecting_your_tools/parsers/_index.md
@@ -17,7 +17,7 @@ DefectDojo can parse data from 180+ security reports and counting. 
 | [Connectors](../connectors/about_connectors): supported tools | [Smart Upload](../import_scan_files/smart_upload/): supported tools |
 | --- | --- |
-| AWS Security Hub, BurpSuite, Checkmarx ONE, Dependency-Track, Probely, Semgrep, SonarQube, Snyk, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable, Wiz |
+| Anchore, AWS Security Hub, BurpSuite, Checkmarx ONE, Dependency-Track, Probely, Semgrep, SonarQube, Snyk, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable, Wiz |
 
 # All Supported Tools
diff --git a/docs/content/en/connecting_your_tools/parsers/file/wiz.md b/docs/content/en/connecting_your_tools/parsers/file/wiz.md
index 64f589a54a2..771d316d062 100644
--- a/docs/content/en/connecting_your_tools/parsers/file/wiz.md
+++ b/docs/content/en/connecting_your_tools/parsers/file/wiz.md
@@ -1,14 +1,16 @@
 ---
 title: "Wiz Scanner Parser"
 toc_hide: true
-weight: 1
 ---
-# Wiz Scanner Parser Documentation
+The [Wiz](https://www.wiz.io/) parser for DefectDojo supports imports from both Wiz Scanner Standard and SCA (Software Composition Analysis) .csv output from Wiz.io. This document details how each format's fields map to DefectDojo Finding fields, which fields are left unmapped, and where each field is parsed in the code, for easier troubleshooting and analysis.
 
-## Overview
+⚠️ **DefectDojo Pro** users can also automatically create Findings directly from Wiz using the Wiz Connector. See our [Connectors documentation](/en/connecting_your_tools/connectors/about_connectors/) for more details.
 
-The [Wiz](https://www.wiz.io/) parser for DefectDojo supports imports from both Wiz Scanner Standard and SCA (Software Composition Analysis) .csv output from Wiz.io. This document details the parsing of both formats into DefectDojo field mappings, unmapped fields, and location of each field's parsing code for easier troubleshooting and analysis.
+## Link To Tool
+
+- [Wiz.io](https://www.wiz.io/)
+- [Wiz Documentation](https://docs.wiz.io/)
 
 ## Supported File Types
 
@@ -22,59 +24,76 @@ To generate these files, export the findings from the Wiz platform by:
 - Standard Format: Select "Export to CSV" option from the Issues view in the Wiz.io platform
 - SCA Format: Select "Export to CSV" option from the Vulnerability view in the Wiz.io platform
 
-## Standard Format CSV (WizParserByTitle)
+### Sample Scan Data
+
+Sample Wiz Scanner scans can be found in the [sample scan data folder](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/wiz).
+
+### Default Deduplication Hashcode Fields
+By default, DefectDojo identifies duplicate Findings using these [hashcode fields](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/about_deduplication/):
+
+- title
+- description
+- severity
+
+## Mapped Fields Dictionary
+
+### Standard Format CSV
 
-### Total Fields in Standard Format CSV
+This format applies the `WizParserByTitle` parser class. A simplified sketch of the row-to-Finding mapping follows.
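+
+The snippet below is a rough, hypothetical sketch (the real implementation lives in `dojo/tools/wiz` and handles every field listed below) of how `WizParserByTitle` turns one standard-format CSV row into Finding fields:
+
+```python
+import csv
+import io
+
+
+def sketch_parse_standard_csv(content: str) -> list[dict]:
+    """Simplified sketch: map each Wiz standard-format CSV row to Finding fields."""
+    findings = []
+    for row in csv.DictReader(io.StringIO(content)):
+        findings.append({
+            "title": row.get("Title"),
+            "date": row.get("Created At"),  # the real parser converts this via parse_wiz_datetime()
+            "severity": row.get("Severity", "").lower().capitalize(),
+            "mitigation": row.get("Remediation Recommendation"),
+            "unique_id_from_tool": row.get("Issue ID"),  # used for deduplication
+            # all remaining columns are appended to the description as "<Field>: <value>" lines
+        })
+    return findings
+```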
+ +#### Total Fields in Standard Format CSV - Total data fields: 32 - Total data fields parsed: 32 - Total data fields NOT parsed: 0 -### Standard Format Field Mapping Details - -| CSV Field # | CSV Field | Finding Field | Parser Line # | Notes | -| ----------- | -------------------------- | ------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------- | -| 1 | Created At | date | 68 | Parsed using the parse_wiz_datetime function to convert to datetime object | -| 2 | Title | title | 67 | Direct mapping to Finding title | -| 3 | Severity | severity | 69 | Converted to lowercase then capitalized to match DefectDojo's severity format | -| 4 | Status | active, is_mitigated, mitigated | 65 | Converted through WizcliParsers.convert_status function to determine active status and mitigation status | -| 5 | Description | description (partial) | 79-81 | Added to description with "Description:" prefix | -| 6 | Resource Type | description (partial) | 79-81 | Added to description with "Resource Type:" prefix | -| 7 | Resource external ID | description (partial) | 79-81 | Added to description with "Resource external ID:" prefix | -| 8 | Subscription ID | description (partial) | 79-81 | Added to description with "Subscription ID:" prefix | -| 9 | Project IDs | description (partial) | 79-81 | Added to description with "Project IDs:" prefix | -| 10 | Project Names | description (partial) | 79-81 | Added to description with "Project Names:" prefix | -| 11 | Resolved Time | mitigated | 71-74 | Used to set mitigated timestamp if finding is marked as mitigated | -| 12 | Resolution | mitigation (partial) | 62-63 | Added to mitigation text with "Resolution:" prefix | -| 13 | Control ID | description (partial) | 79-81 | Added to description with "Control ID:" prefix | -| 14 | Resource Name | description (partial) | 79-81 | Added to description with "Resource Name:" prefix | -| 15 | Resource Region | description (partial) | 79-81 | Added to description with "Resource Region:" prefix | -| 16 | Resource Status | description (partial) | 79-81 | Added to description with "Resource Status:" prefix | -| 17 | Resource Platform | description (partial) | 79-81 | Added to description with "Resource Platform:" prefix | -| 18 | Resource OS | description (partial) | 79-81 | Added to description with "Resource OS:" prefix | -| 19 | Resource original JSON | description (partial) | 79-81 | Added to description with "Resource original JSON:" prefix | -| 20 | Issue ID | unique_id_from_tool | 85 | Used as unique identifier for the finding | -| 21 | Resource vertex ID | description (partial) | 79-81 | Added to description with "Resource vertex ID:" prefix | -| 22 | Ticket URLs | description (partial) | 79-81 | Added to description with "Ticket URLs:" prefix | -| 23 | Note | description (partial) | 79-81 | Added to description with "Note:" prefix | -| 24 | Due At | description (partial) | 79-81 | Added to description with "Due At:" prefix | -| 25 | Remediation Recommendation | mitigation | 61 | Direct mapping to mitigation field | -| 26 | Subscription Name | description (partial) | 79-81 | Added to description with "Subscription Name:" prefix | -| 27 | Wiz URL | description (partial) | 79-81 | Added to description with "Wiz URL:" prefix | -| 28 | Cloud Provider URL | description (partial) | 79-81 | Added to description with "Cloud Provider URL:" prefix | -| 29 | Resource Tags | description (partial) | 79-81 | Added to description with 
"Resource Tags:" prefix | -| 30 | Kubernetes Cluster | description (partial) | 79-81 | Added to description with "Kubernetes Cluster:" prefix | -| 31 | Kubernetes Namespace | description (partial) | 79-81 | Added to description with "Kubernetes Namespace:" prefix | -| 32 | Container Service | description (partial) | 79-81 | Added to description with "Container Service:" prefix | - -### Additional Finding Field Settings (Standard Format) +#### Standard Format Field Mapping Details + +| CSV Field | Finding Field | Parser Line # | Notes | +| ---------------------------- | ------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------- | +| `Created At` | date | 68 | Parsed using the parse_wiz_datetime function to convert to datetime object | +| `Title` | title | 67 | Direct mapping to Finding title | +| `Severity` | severity | 69 | Converted to lowercase then capitalized to match DefectDojo's severity format | +| `Status` | active, is_mitigated, mitigated | 65 | Converted through WizcliParsers.convert_status function to determine active status and mitigation status | +| `Description` | description (partial) | 79-81 | Added to description with "Description:" prefix | +| `Resource Type` | description (partial) | 79-81 | Added to description with "Resource Type:" prefix | +| `Resource external ID` | description (partial) | 79-81 | Added to description with "Resource external ID:" prefix | +| `Subscription ID` | description (partial) | 79-81 | Added to description with "Subscription ID:" prefix | +| `Project IDs` | description (partial) | 79-81 | Added to description with "Project IDs:" prefix | +| `Project Names` | description (partial) | 79-81 | Added to description with "Project Names:" prefix | +| `Resolved Time` | mitigated | 71-74 | Used to set mitigated timestamp if finding is marked as mitigated | +| `Resolution` | mitigation (partial) | 62-63 | Added to mitigation text with "Resolution:" prefix | +| `Control ID` | description (partial) | 79-81 | Added to description with "Control ID:" prefix | +| `Resource Name` | description (partial) | 79-81 | Added to description with "Resource Name:" prefix | +| `Resource Region` | description (partial) | 79-81 | Added to description with "Resource Region:" prefix | +| `Resource Status` | description (partial) | 79-81 | Added to description with "Resource Status:" prefix | +| `Resource Platform` | description (partial) | 79-81 | Added to description with "Resource Platform:" prefix | +| `Resource OS` | description (partial) | 79-81 | Added to description with "Resource OS:" prefix | +| `Resource original JSON` | description (partial) | 79-81 | Added to description with "Resource original JSON:" prefix | +| `Issue ID` | unique_id_from_tool | 85 | Used as unique identifier for the finding | +| `Resource vertex ID` | description (partial) | 79-81 | Added to description with "Resource vertex ID:" prefix | +| `Ticket URLs` | description (partial) | 79-81 | Added to description with "Ticket URLs:" prefix | +| `Note` | description (partial) | 79-81 | Added to description with "Note:" prefix | +| `Due At` | description (partial) | 79-81 | Added to description with "Due At:" prefix | +| `Remediation Recommendation` | mitigation | 61 | Direct mapping to mitigation field | +| `Subscription Name` | description (partial) | 79-81 | Added to description with "Subscription Name:" prefix | +| `Wiz URL` | description (partial) | 79-81 | Added to description with "Wiz URL:" 
prefix | +| `Cloud Provider URL` | description (partial) | 79-81 | Added to description with "Cloud Provider URL:" prefix | +| `Resource Tags` | description (partial) | 79-81 | Added to description with "Resource Tags:" prefix | +| `Kubernetes Cluster` | description (partial) | 79-81 | Added to description with "Kubernetes Cluster:" prefix | +| `Kubernetes Namespace` | description (partial) | 79-81 | Added to description with "Kubernetes Namespace:" prefix | +| `Container Service` | description (partial) | 79-81 | Added to description with "Container Service:" prefix | + +#### Additional Finding Field Settings (Standard Format) | Finding Field | Default Value | Parser Line # | Notes | | --------------- | ------------- | ------------- | ----------------------------- | | static_finding | False | 84 | Set to False for all findings | | dynamic_finding | True | 84 | Set to True for all findings | -## SCA Format (WizParserByDetailedName) +### SCA Format + +This format applies the `WizParserByDetailedName` parser class. ### Total Fields in SCA CSV @@ -82,53 +101,53 @@ To generate these files, export the findings from the Wiz platform by: - Total data fields parsed: 36 - Total data fields NOT parsed: 5 -### SCA Format Field Mapping Details - -| CSV Field # | CSV Field | Finding Field | Parser Line # | Notes | -| ----------- | ------------------------------------------- | ------------------------------ | ------------- | ---------------------------------------------------------------------------------- | -| 1 | ID | unique_id_from_tool | 182 | Used as unique identifier for the finding | -| 2 | WizURL | description | 150-154 | Added to description with "Wiz URL" prefix | -| 3 | Name | title, vulnerability_ids | 169, 182-184 | Used in title format as vulnerability ID and added to vulnerability_ids list | -| 4 | CVSSSeverity | Not parsed | - | Not used in mapping | -| 5 | HasExploit | description | 150-154 | Added to description with "Has Exploit" prefix | -| 6 | HasCisaKevExploit | description | 150-154 | Added to description with "Has Cisa Kev Exploit" prefix | -| 7 | FindingStatus | active, is_mitigated | 180 | Mapped through convert_status function to determine active state | -| 8 | VendorSeverity | severity | 181 | Mapped through \_validate_severities to convert to DefectDojo severity format | -| 9 | FirstDetected | date | 185 | Parsed into date object using date_parser | -| 10 | LastDetected | Not parsed | - | Not used in mapping | -| 11 | ResolvedAt | Not parsed | - | Not used in mapping | -| 12 | ResolutionReason | Not parsed | - | Not used in mapping | -| 13 | Remediation | mitigation | 155-159 | Added to mitigation with "Remediation" prefix | -| 14 | LocationPath | description, mitigation | 150-159 | Added to both description and mitigation with "Location Path" prefix | -| 15 | DetailedName | title, component_name | 169, 183 | Used in title format and mapped to component_name | -| 16 | Version | description, component_version | 150-154, 184 | Added to description with "Version" prefix and mapped to component_version | -| 17 | FixedVersion | mitigation | 155-159 | Added to mitigation with "Fixed Version" prefix | -| 18 | DetectionMethod | description | 150-154 | Added to description with "Detection Method" prefix | -| 19 | Link | description | 150-154 | Added to description with "Link" prefix | -| 20 | Projects | description | 150-154 | Added to description with "Projects" prefix | -| 21 | AssetID | description | 150-154 | Added to description with "Asset ID" prefix | -| 22 | AssetName | 
description | 150-154 | Added to description with "Asset Name" prefix | -| 23 | AssetRegion | description | 150-154 | Added to description with "Asset Region" prefix | -| 24 | ProviderUniqueId | description | 150-154 | Added to description with "Provider Unique Id" prefix | -| 25 | CloudProviderURL | description | 150-154 | Added to description with "Cloud Provider URL" prefix | -| 26 | CloudPlatform | description | 150-154 | Added to description with "Cloud Platform" prefix | -| 27 | Status | Not parsed | - | Not directly used (FindingStatus is used instead) | -| 28 | SubscriptionExternalId | description | 150-154 | Added to description with "Subscription External Id" prefix | -| 29 | SubscriptionId | description | 150-154 | Added to description with "Subscription Id" prefix | -| 30 | SubscriptionName | description | 150-154 | Added to description with "Subscription Name" prefix | -| 31 | Tags | unsaved_tags | 186 | Parsed into tags list using \_parse_tags function | -| 32 | ExecutionControllers | description | 150-154 | Added to description with "Execution Controllers" prefix | -| 33 | ExecutionControllersSubscriptionExternalIds | description | 150-154 | Added to description with "Execution Controllers Subscription External Ids" prefix | -| 34 | ExecutionControllersSubscriptionNames | description | 150-154 | Added to description with "Execution Controllers Subscription Names" prefix | -| 35 | CriticalRelatedIssuesCount | Not parsed | - | Not used in mapping | -| 36 | HighRelatedIssuesCount | Not parsed | - | Not used in mapping | -| 37 | MediumRelatedIssuesCount | Not parsed | - | Not used in mapping | -| 38 | LowRelatedIssuesCount | Not parsed | - | Not used in mapping | -| 39 | InfoRelatedIssuesCount | Not parsed | - | Not used in mapping | -| 40 | OperatingSystem | description | 150-154 | Added to description with "Operating System" prefix | -| 41 | IpAddresses | description | 150-154 | Added to description with "Ip Addresses" prefix | - -### Additional Finding Field Settings (SCA Format) +#### SCA Format Field Mapping Details + +| CSV Field | Finding Field | Parser Line # | Notes | +| --------------------------------------------- | ------------------------------ | ------------- | ---------------------------------------------------------------------------------- | +| `ID` | unique_id_from_tool | 182 | Used as unique identifier for the finding | +| `WizURL` | description | 150-154 | Added to description with "Wiz URL" prefix | +| `Name` | title, vulnerability_ids | 169, 182-184 | Used in title format as vulnerability ID and added to vulnerability_ids list | +| `CVSSSeverity` | Not parsed | - | Not used in mapping | +| `HasExploit` | description | 150-154 | Added to description with "Has Exploit" prefix | +| `HasCisaKevExploit` | description | 150-154 | Added to description with "Has Cisa Kev Exploit" prefix | +| `FindingStatus` | active, is_mitigated | 180 | Mapped through convert_status function to determine active state | +| `VendorSeverity` | severity | 181 | Mapped through _validate_severities to convert to DefectDojo severity format | +| `FirstDetected` | date | 185 | Parsed into date object using date_parser | +| `LastDetected` | Not parsed | - | Not used in mapping | +| `ResolvedAt` | Not parsed | - | Not used in mapping | +| `ResolutionReason` | Not parsed | - | Not used in mapping | +| `Remediation` | mitigation | 155-159 | Added to mitigation with "Remediation" prefix | +| `LocationPath` | description, mitigation | 150-159 | Added to both description and mitigation with 
"Location Path" prefix | +| `DetailedName` | title, component_name | 169, 183 | Used in title format and mapped to component_name | +| `Version` | description, component_version | 150-154, 184 | Added to description with "Version" prefix and mapped to component_version | +| `FixedVersion` | mitigation | 155-159 | Added to mitigation with "Fixed Version" prefix | +| `DetectionMethod` | description | 150-154 | Added to description with "Detection Method" prefix | +| `Link` | description | 150-154 | Added to description with "Link" prefix | +| `Projects` | description | 150-154 | Added to description with "Projects" prefix | +| `AssetID` | description | 150-154 | Added to description with "Asset ID" prefix | +| `AssetName` | description | 150-154 | Added to description with "Asset Name" prefix | +| `AssetRegion` | description | 150-154 | Added to description with "Asset Region" prefix | +| `ProviderUniqueId` | description | 150-154 | Added to description with "Provider Unique Id" prefix | +| `CloudProviderURL` | description | 150-154 | Added to description with "Cloud Provider URL" prefix | +| `CloudPlatform` | description | 150-154 | Added to description with "Cloud Platform" prefix | +| `Status` | Not parsed | - | Not directly used (FindingStatus is used instead) | +| `SubscriptionExternalId` | description | 150-154 | Added to description with "Subscription External Id" prefix | +| `SubscriptionId` | description | 150-154 | Added to description with "Subscription Id" prefix | +| `SubscriptionName` | description | 150-154 | Added to description with "Subscription Name" prefix | +| `Tags` | unsaved_tags | 186 | Parsed into tags list using _parse_tags function | +| `ExecutionControllers` | description | 150-154 | Added to description with "Execution Controllers" prefix | +| `ExecutionControllersSubscriptionExternalIds` | description | 150-154 | Added to description with "Execution Controllers Subscription External Ids" prefix | +| `ExecutionControllersSubscriptionNames` | description | 150-154 | Added to description with "Execution Controllers Subscription Names" prefix | +| `CriticalRelatedIssuesCount` | Not parsed | - | Not used in mapping | +| `HighRelatedIssuesCount` | Not parsed | - | Not used in mapping | +| `MediumRelatedIssuesCount` | Not parsed | - | Not used in mapping | +| `LowRelatedIssuesCount` | Not parsed | - | Not used in mapping | +| `InfoRelatedIssuesCount` | Not parsed | - | Not used in mapping | +| `OperatingSystem` | description | 150-154 | Added to description with "Operating System" prefix | +| `IpAddresses` | description | 150-154 | Added to description with "Ip Addresses" prefix | + +#### Additional Finding Field Settings (SCA Format) | Finding Field | Default Value | Parser Line # | Notes | | -------------- | ------------- | ------------- | ----------------------------------- | @@ -137,51 +156,38 @@ To generate these files, export the findings from the Wiz platform by: ## Special Processing Notes -### Date Processing +#### Date Processing - Parser uses function `parse_wiz_datetime()` (lines 207-246) to handle different date formats from Wiz - Handles both ISO8601 and custom Wiz timestamp formats -### Status Conversion +#### Status Conversion - Both parser formats use `WizcliParsers.convert_status()` function to determine finding status (active, mitigated, etc.) 
 - Standard format - if a finding is mitigated, the Resolved Time is used as the mitigated timestamp
 
-### Description Construction
+#### Description Construction
 
 - Most CSV fields maintain field name as a prefix when added to the Finding description
 - Description generated by iterating through predefined list of fields and adding data if present
 
-### Title Format
+#### Title Format
 
 - Standard format: Used directly from the "Title" field
 - SCA format: Combines package name (DetailedName) and vulnerability ID (Name) in format "{package_name}: {vulnerability_id}"
 
-### Mitigation Construction
+#### Mitigation Construction
 
 - Standard format: Primary source is "Remediation Recommendation" field with optional "Resolution" field
 - SCA format: Combines "Remediation", "LocationPath", and "FixedVersion" fields
 
-### Deduplication
+#### Deduplication
 
 - Both formats use the respective ID field as the unique_id_from_tool for deduplication
 
-### Tags Handling (SCA Format)
+#### Tags Handling (SCA Format)
 
 - "Tags" field is parsed from a JSON string format into a list of tag strings in format "key: value" (lines 186, 193-201)
 
-### Sample Scan Data
-
-Sample Wiz Scanner scans can be found in the [sample scan data folder](https://github.com/DefectDojo/django-DefectDojo/tree/master/unittests/scans/wiz).
-
-## Link To Tool
-
-- [Wiz.io](https://www.wiz.io/)
-- [Wiz Documentation](https://docs.wiz.io/)
-
-### Default Deduplication Hashcode Fields
-By default, DefectDojo identifies duplicate Findings using these [hashcode fields](https://docs.defectdojo.com/en/working_with_findings/finding_deduplication/about_deduplication/):
-
-- title
-- description
-- severity
+### Source Code
+Source code for the Wiz parser can be found on [GitHub](https://github.com/DefectDojo/django-DefectDojo/tree/cba7d81c98e040dc0a16032e82fd92f786b1dbd9/dojo/tools/wiz).
\ No newline at end of file
diff --git a/docs/content/en/open_source/notification_webhooks/how_to.md b/docs/content/en/open_source/notification_webhooks/how_to.md
index b65776bd97f..759d73bc0db 100644
--- a/docs/content/en/open_source/notification_webhooks/how_to.md
+++ b/docs/content/en/open_source/notification_webhooks/how_to.md
@@ -10,30 +10,10 @@ Webhooks are HTTP requests coming from the DefectDojo instance towards a user-de
 
 It is not unusual that in some cases a webhook can not be delivered. It is usually connected to network issues, server misconfiguration, or running upgrades on the server. DefectDojo needs to react to these outages. It might temporarily or permanently disable related endpoints. The following graph shows how it might change the status of the webhook definition based on HTTP responses (or manual user interaction).
 
-```kroki {type=mermaid}
-flowchart TD
-
-    START{{Endpoint created}}
-    ALL{All states}
-    STATUS_ACTIVE([STATUS_ACTIVE])
-    STATUS_INACTIVE_TMP
-    STATUS_INACTIVE_PERMANENT
-    STATUS_ACTIVE_TMP([STATUS_ACTIVE_TMP])
-    END{{Endpoint removed}}
-
-    START ==> STATUS_ACTIVE
-    STATUS_ACTIVE --HTTP 200 or 201 --> STATUS_ACTIVE
-    STATUS_ACTIVE --HTTP 5xx<br>or HTTP 429<br>or Timeout--> STATUS_INACTIVE_TMP
-    STATUS_ACTIVE --Any HTTP 4xx response<br>or any other HTTP response<br>or non-HTTP error--> STATUS_INACTIVE_PERMANENT
-    STATUS_INACTIVE_TMP -.After 60s.-> STATUS_ACTIVE_TMP
-    STATUS_ACTIVE_TMP --HTTP 5xx<br>or HTTP 429<br>or Timeout<br>within 24h<br>from the first error-->STATUS_INACTIVE_TMP
-    STATUS_ACTIVE_TMP -.After 24h.-> STATUS_ACTIVE
-    STATUS_ACTIVE_TMP --HTTP 200 or 201 --> STATUS_ACTIVE_TMP
-    STATUS_ACTIVE_TMP --HTTP 5xx<br>or HTTP 429<br>or Timeout<br>within 24h from the first error<br>or any other HTTP response or error--> STATUS_INACTIVE_PERMANENT
-    ALL ==Activation by user==> STATUS_ACTIVE
-    ALL ==Deactivation by user==> STATUS_INACTIVE_PERMANENT
-    ALL ==Removal of endpoint by user==> END
-```
+
+
+
+![image](images/webhook-state-transition-flow.png)
 
 Notes:
 
diff --git a/docs/content/en/open_source/notification_webhooks/transition-state b/docs/content/en/open_source/notification_webhooks/transition-state
new file mode 100644
index 00000000000..2d27360bbf6
--- /dev/null
+++ b/docs/content/en/open_source/notification_webhooks/transition-state
@@ -0,0 +1,24 @@
+```kroki {type=mermaid}
+flowchart TD
+
+    START{{Endpoint created}}
+    ALL{All states}
+    STATUS_ACTIVE([STATUS_ACTIVE])
+    STATUS_INACTIVE_TMP
+    STATUS_INACTIVE_PERMANENT
+    STATUS_ACTIVE_TMP([STATUS_ACTIVE_TMP])
+    END{{Endpoint removed}}
+
+    START ==> STATUS_ACTIVE
+    STATUS_ACTIVE --HTTP 200 or 201 --> STATUS_ACTIVE
+    STATUS_ACTIVE --HTTP 5xx<br>or HTTP 429<br>or Timeout--> STATUS_INACTIVE_TMP
+    STATUS_ACTIVE --Any HTTP 4xx response<br>or any other HTTP response<br>or non-HTTP error--> STATUS_INACTIVE_PERMANENT
+    STATUS_INACTIVE_TMP -.After 60s.-> STATUS_ACTIVE_TMP
+    STATUS_ACTIVE_TMP --HTTP 5xx<br>or HTTP 429<br>or Timeout<br>within 24h<br>from the first error-->STATUS_INACTIVE_TMP
+    STATUS_ACTIVE_TMP -.After 24h.-> STATUS_ACTIVE
+    STATUS_ACTIVE_TMP --HTTP 200 or 201 --> STATUS_ACTIVE_TMP
+    STATUS_ACTIVE_TMP --HTTP 5xx<br>or HTTP 429<br>or Timeout<br>within 24h from the first error<br>or any other HTTP response or error--> STATUS_INACTIVE_PERMANENT
+    ALL ==Activation by user==> STATUS_ACTIVE
+    ALL ==Deactivation by user==> STATUS_INACTIVE_PERMANENT
+    ALL ==Removal of endpoint by user==> END
+```
\ No newline at end of file
diff --git a/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md b/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md
new file mode 100644
index 00000000000..f0efc473081
--- /dev/null
+++ b/docs/content/en/working_with_findings/finding_deduplication/deduplication_algorithms.md
@@ -0,0 +1,67 @@
+---
+title: "Deduplication Algorithms"
+description: "How DefectDojo identifies duplicates: Unique ID, Hash Code, Unique ID or Hash Code, Legacy"
+weight: 3
+---
+
+## Overview
+
+DefectDojo supports four deduplication algorithms that can be selected per parser (test type):
+
+- **Unique ID From Tool**: Uses the scanner-provided unique identifier.
+- **Hash Code**: Uses a configured set of fields to compute a hash.
+- **Unique ID From Tool or Hash Code**: Prefer the tool’s unique ID; fall back to hash when no matching unique ID is found.
+- **Legacy**: Historical algorithm with multiple conditions; only available in the Open Source version.
+
+Algorithm selection per parser is controlled by `DEDUPLICATION_ALGORITHM_PER_PARSER` (see the [OS tuning page](deduplication_tuning_os) for configuration details).
+
+## How endpoints are assessed per algorithm
+
+Endpoints can influence deduplication in different ways depending on the algorithm and configuration.
+
+### Unique ID From Tool
+
+- Deduplication uses `unique_id_from_tool` (or `vuln_id_from_tool`).
+- **Endpoints are ignored** for duplicate matching.
+- A finding’s hash may still be calculated for other features, but it does not affect deduplication under this algorithm.
+
+### Hash Code
+
+- Deduplication uses a hash computed from fields specified by `HASHCODE_FIELDS_PER_SCANNER` for the given parser.
+- The hash also includes fields from `HASH_CODE_FIELDS_ALWAYS` (see Service field section below).
+- Endpoints can affect deduplication in two ways:
+  - If the scanner’s hash fields include `endpoints`, they are part of the hash and must match accordingly.
+  - If the scanner’s hash fields do not include `endpoints`, optional endpoint-based matching can be enabled via `DEDUPE_ALGO_ENDPOINT_FIELDS` (OS setting). When configured:
+    - Set it to an empty list `[]` to ignore endpoints entirely.
+    - Set it to a list of endpoint attributes (e.g. `["host", "port"]`). If at least one endpoint pair between the two findings matches on all listed attributes, deduplication can occur (see the sketch below).
+
+### Unique ID From Tool or Hash Code
+
+- Intended flow:
+  1) Try to deduplicate using the tool’s unique ID (endpoints ignored on this path).
+  2) If no match by unique ID, fall back to the Hash Code path.
+- When falling back to hash code, endpoint behavior is identical to the Hash Code algorithm.
+
+### Legacy (OS only)
+
+- Deduplication considers multiple attributes including endpoints.
+- Behavior differs for static vs dynamic findings:
+  - **Static findings**: The new finding must contain all endpoints of the original. Extra endpoints on the new finding are allowed.
+  - **Dynamic findings**: Endpoints must strictly match (commonly by host and port); differing endpoints prevent deduplication.
+- If there are no endpoints and both `file_path` and `line` are empty, deduplication typically does not occur.
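+
+To make the endpoint comparison above concrete, here is a minimal sketch of the matching rule (a hypothetical helper for illustration, not the actual DefectDojo implementation):
+
+```python
+def endpoints_allow_dedupe(new_endpoints, existing_endpoints, fields=("host", "path")):
+    """Dedupe is allowed when at least one endpoint pair matches on ALL configured fields.
+
+    An empty `fields` sequence mirrors DEDUPE_ALGO_ENDPOINT_FIELDS = [], i.e. endpoints
+    place no constraint on deduplication at all.
+    """
+    if not fields:
+        return True
+    return any(
+        all(getattr(new_ep, f, None) == getattr(old_ep, f, None) for f in fields)
+        for new_ep in new_endpoints
+        for old_ep in existing_endpoints
+    )
+```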
+
+## Background processing
+
+- Dedupe is triggered on import/reimport and during certain updates; it runs via Celery in the background.
+
+## Service field and its impact
+
+- By default, `HASH_CODE_FIELDS_ALWAYS = ["service"]`, meaning the `service` associated with a finding is appended to the hash for all scanners.
+- Practical implications:
+  - Two otherwise identical findings with different `service` values will produce different hashes and will not deduplicate under hash-based paths.
+  - During import/reimport, the `Service` field entered in the UI can override the parser-provided service. Changing it can change the hash and therefore affect deduplication outcomes.
+  - If you want service to have no impact on deduplication, configure `HASH_CODE_FIELDS_ALWAYS` accordingly (see the OS tuning page). Removing `service` from the always-included list will stop it from affecting hashes.
+
+See also: the [Open Source tuning guide](deduplication_tuning_os) for configuration details and examples.
+
+
diff --git a/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md b/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md
new file mode 100644
index 00000000000..162b683d4c0
--- /dev/null
+++ b/docs/content/en/working_with_findings/finding_deduplication/deduplication_tuning_os.md
@@ -0,0 +1,147 @@
+---
+title: "Deduplication Tuning (Open Source)"
+description: "Configure deduplication in DefectDojo Open Source: algorithms, hash fields, endpoints, and service"
+weight: 5
+---
+
+This page explains how to tune deduplication in the Open Source (OS) edition of DefectDojo. For a visual, feature-rich tuning UI, see the Pro documentation. The OS edition uses settings files and environment variables.
+
+See also: [Configuration](../../open_source/installation/configuration) for details on environment variables and `local_settings.py` overrides.
+
+## What you can configure
+
+- **Algorithm per parser**: Choose one of Unique ID From Tool, Hash Code, Unique ID From Tool or Hash Code, or Legacy (OS only).
+- **Hash fields per scanner**: Decide which fields contribute to the hash for each parser.
+- **Allow null CWE**: Control whether a missing/zero CWE is acceptable when hashing.
+- **Endpoint consideration**: Optionally use endpoints for deduplication when they’re not part of the hash.
+- **Always-included fields**: Add fields (e.g., `service`) to all hashes regardless of per-scanner settings.
+
+## Key settings (defaults shown)
+
+All defaults are defined in `dojo/settings/settings.dist.py`. Override via environment or `local_settings.py`.
+
+### Algorithm per parser
+
+- Setting: `DEDUPLICATION_ALGORITHM_PER_PARSER`
+- Values per parser: one of `unique_id_from_tool`, `hash_code`, `unique_id_from_tool_or_hash_code`, `legacy`.
+- Example (env variable JSON string):
+
+```bash
+DD_DEDUPLICATION_ALGORITHM_PER_PARSER='{"Trivy Scan": "hash_code", "Veracode Scan": "unique_id_from_tool_or_hash_code"}'
+```
+
+### Hash fields per scanner
+
+- Setting: `HASHCODE_FIELDS_PER_SCANNER`
+- Example default for Trivy in OS:
+
+```python
+# dojo/settings/settings.dist.py, lines 1318-1321
+    "Trivy Operator Scan": ["title", "severity", "vulnerability_ids", "description"],
+    "Trivy Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"],
+    "TFSec Scan": ["severity", "vuln_id_from_tool", "file_path", "line"],
+    "Snyk Scan": ["vuln_id_from_tool", "file_path", "component_name", "component_version"],
+```
+
+- Override example (env variable JSON string):
+
+```bash
+DD_HASHCODE_FIELDS_PER_SCANNER='{"ZAP Scan":["title","cwe","severity"],"Trivy Scan":["title","severity","vulnerability_ids","description"]}'
+```
+
+### Allow null CWE per scanner
+
+- Setting: `HASHCODE_ALLOWS_NULL_CWE`
+- Controls per parser whether a null/zero CWE is acceptable in hashing. If False and the finding has `cwe = 0`, the hash falls back to the legacy computation for that finding.
+
+### Always-included fields in hash
+
+- Setting: `HASH_CODE_FIELDS_ALWAYS`
+- Default: `["service"]`
+- Impact: Appended to the hash for every scanner. Removing `service` here stops it from affecting hashes across the board.
+
+```python
+# dojo/settings/settings.dist.py, lines 1464-1466
+# Adding fields to the hash_code calculation regardless of the previous settings
+HASH_CODE_FIELDS_ALWAYS = ["service"]
+```
+
+### Optional endpoint-based dedupe
+
+- Setting: `DEDUPE_ALGO_ENDPOINT_FIELDS`
+- Default: `["host", "path"]`
+- Purpose: If endpoints are not part of the hash fields, you can still require a minimal endpoint match to deduplicate. If the list is empty `[]`, endpoints are ignored on the dedupe path.
+
+```python
+# dojo/settings/settings.dist.py, lines 1491-1499
+# Allows to deduplicate with endpoints if endpoints is not included in the hashcode.
+# Possible values are: scheme, host, port, path, query, fragment, userinfo, and user.
+# If a finding has more than one endpoint, only one endpoint pair must match to mark the finding as duplicate.
+DEDUPE_ALGO_ENDPOINT_FIELDS = ["host", "path"]
+```
+
+## Endpoints: how to tune
+
+Endpoints can affect deduplication via two mechanisms:
+
+1) Include `endpoints` in `HASHCODE_FIELDS_PER_SCANNER` for a parser. Then endpoints are part of the hash and must match exactly according to the parser’s hashing rules.
+2) If endpoints are not in the hash fields, use `DEDUPE_ALGO_ENDPOINT_FIELDS` to specify attributes to compare. Examples:
+   - `[]`: endpoints are ignored for dedupe.
+   - `["host"]`: findings dedupe if any endpoint pair matches by host.
+   - `["host", "port"]`: findings dedupe if any endpoint pair matches by host AND port.
+
+Notes:
+
+- For the Legacy algorithm, static vs dynamic findings have different endpoint matching rules (see the algorithms page). The `DEDUPE_ALGO_ENDPOINT_FIELDS` setting applies to the hash-code path, not the Legacy algorithm’s intrinsic logic.
+- For `unique_id_from_tool` (ID-based) matching, endpoints are ignored for the dedupe decision.
+
+## Service field: dedupe and reimport
+
+- With default `HASH_CODE_FIELDS_ALWAYS = ["service"]`, the `service` field is appended to the hash. Two otherwise equal findings with different `service` values will not dedupe on hash-based paths.
+- During import via UI/API, the `Service` input can override the parser-provided service. Changing it changes the hash and can alter dedupe behavior and reimport matching.
+- If you want dedupe independent of service, remove `service` from `HASH_CODE_FIELDS_ALWAYS` or leave the `Service` field empty during import.
+
+## After changing deduplication settings
+
+- Changes to dedupe configuration (e.g., `HASHCODE_FIELDS_PER_SCANNER`, `HASH_CODE_FIELDS_ALWAYS`, `DEDUPLICATION_ALGORITHM_PER_PARSER`) are not automatically applied retroactively. To re-evaluate existing findings you must run the management command below.
+
+Run inside the uwsgi container. Example (hash codes only, no dedupe):
+
+```bash
+docker compose exec uwsgi /bin/bash -c "python manage.py dedupe --hash_code_only"
+```
+
+Help/usage:
+
+```
+options:
+  --parser PARSER    List of parsers for which hash_code needs recomputing
+                     (defaults to all parsers)
+  --hash_code_only   Only compute hash codes
+  --dedupe_only      Only run deduplication
+  --dedupe_sync      Run dedupe in the foreground, default false
+```
+
+If you submit dedupe to Celery (without `--dedupe_sync`), allow time for tasks to complete before evaluating results.
+
+## Where to configure
+
+- Prefer environment variables in deployments. For local development or advanced overrides, use `local_settings.py`.
+- See `configuration.md` for details on how to set environment variables and configure local overrides.
+
+## Troubleshooting
+
+To help troubleshoot deduplication, use the following tools:
+
+- Observe log output in the `dojo.specific-loggers.deduplication` category. This is a class-independent logger that outputs details about the deduplication process and settings when processing findings.
+- Observe the `unique_id_from_tool` and `hash_code` values by hovering over the `ID` field or `Status` column:
+
+![Unique ID from Tool and Hash Code on the View Finding page](images/hash_code_id_field.png)
+
+![Unique ID from Tool and Hash Code on the Finding List Status Column](images/hash_code_status_column.png)
+
+## Related documentation
+
+- [Deduplication Algorithms](deduplication_algorithms): conceptual overview and endpoint behavior.
+- [Avoiding duplicates via reimport](avoiding_duplicates_via_reimport).
+
+
diff --git a/dojo/__init__.py b/dojo/__init__.py
index 7f55bf358b3..2e2f6c6c559 100644
--- a/dojo/__init__.py
+++ b/dojo/__init__.py
@@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.51.1" +__version__ = "2.51.2" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py index e0aa2ce8dc4..5de0698edee 100644 --- a/dojo/api_v2/serializers.py +++ b/dojo/api_v2/serializers.py @@ -536,13 +536,13 @@ def to_representation(self, instance): return ret def update(self, instance, validated_data): + permissions_in_payload = None new_configuration_permissions = None if ( "user_permissions" in validated_data ): # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set( - validated_data.pop("user_permissions"), - ) + permissions_in_payload = validated_data.pop("user_permissions") + new_configuration_permissions = set(permissions_in_payload) instance = super().update(instance, validated_data) @@ -563,6 +563,10 @@ def update(self, instance, validated_data): ) instance.user_permissions.set(new_permissions) + # Clear all configuration permissions if an empty list is provided + if isinstance(permissions_in_payload, list) and len(permissions_in_payload) == 0: + instance.user_permissions.clear() + return instance def create(self, validated_data): @@ -695,14 +699,14 @@ def create(self, validated_data): return instance def update(self, instance, validated_data): + permissions_in_payload = None new_configuration_permissions = None if ( "auth_group" in validated_data and "permissions" in validated_data["auth_group"] ): # This field was renamed from "configuration_permissions" in the meantime - new_configuration_permissions = set( - validated_data.pop("auth_group")["permissions"], - ) + permissions_in_payload = validated_data.pop("auth_group")["permissions"] + new_configuration_permissions = set(permissions_in_payload) instance = super().update(instance, validated_data) @@ -723,6 +727,10 @@ def update(self, instance, validated_data): ) instance.auth_group.permissions.set(new_permissions) + # Clear all configuration permissions if an empty list is provided + if isinstance(permissions_in_payload, list) and len(permissions_in_payload) == 0: + instance.auth_group.permissions.clear() + return instance diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 65591bfb6af..126ac2dee56 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -11,7 +11,8 @@ from django.contrib.auth.models import Permission from django.core.exceptions import ValidationError from django.db import IntegrityError -from django.http import FileResponse, Http404, HttpResponse +from django.db.models.query import QuerySet as DjangoQuerySet +from django.http import FileResponse, HttpResponse from django.shortcuts import get_object_or_404 from django.utils import timezone from django_filters.rest_framework import DjangoFilterBackend @@ -2814,16 +2815,19 @@ def report_generate(request, obj, options): ), ) - elif type(obj).__name__ == "CastTaggedQuerySet": + elif isinstance(obj, DjangoQuerySet): + # Support any Django QuerySet (including Tagulous CastTaggedQuerySet) findings = report_finding_filter_class( request.GET, queryset=prefetch_related_findings_for_report(obj).distinct(), ) report_name = "Finding" - else: - raise Http404 + obj_type = type(obj).__name__ + msg = f"Report cannot be generated for object of type {obj_type}" + logger.warning(msg) + raise ValidationError(msg) result = { "product_type": product_type, diff --git a/dojo/filters.py b/dojo/filters.py index 
55e4978d6c4..ebd8b023fb8 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -354,6 +354,34 @@ def __init__(self, *args, **kwargs):
         if exclude:
             self.form.fields[field].label = "Not " + self.form.fields[field].label
 
+    def filter_queryset(self, queryset):
+        qs = super().filter_queryset(queryset)
+        if hasattr(self, "form") and hasattr(self.form, "cleaned_data"):
+            for name, f in self.filters.items():
+                field_name = getattr(f, "field_name", "") or ""
+                # Only apply distinct for tag lookups that can duplicate base rows
+                if "tags__name" in field_name:
+                    value = self.form.cleaned_data.get(name, None)
+                    if value not in (None, "", [], (), {}):
+                        lookup_expr = getattr(f, "lookup_expr", None)
+                        is_exclude = getattr(f, "exclude", False)
+                        needs_distinct = (
+                            is_exclude
+                            or lookup_expr in {
+                                "in",
+                                "contains",
+                                "icontains",
+                                "startswith",
+                                "istartswith",
+                                "endswith",
+                                "iendswith",
+                            }
+                        )
+                        # exact/iexact typically won't duplicate rows
+                        if needs_distinct:
+                            return qs.distinct()
+        return qs
+
 
 def get_tags_model_from_field_name(field):
     exclude = False
diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py
index dd78ccbce69..95021e9575c 100644
--- a/dojo/finding/helper.py
+++ b/dojo/finding/helper.py
@@ -628,13 +628,16 @@ def engagement_post_delete(sender, instance, **kwargs):
 def fix_loop_duplicates():
     """Due to bugs in the past, and even currently when under high parallel load, there can be transitive duplicates."""
     """ i.e. A -> B -> C. This can lead to problems when deleting findings, performing deduplication, etc. """
-    candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).order_by("-id")
+    # Build base queryset without selecting full rows to minimize memory
+    loop_qs = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False)
 
-    loop_count = len(candidates)
+    # Use COUNT(*) at the DB instead of materializing the queryset
+    loop_count = loop_qs.count()
 
     if loop_count > 0:
-        deduplicationLogger.info(f"Identified {len(candidates)} Findings with Loops")
-        for find_id in candidates.values_list("id", flat=True):
+        deduplicationLogger.info(f"Identified {loop_count} Findings with Loops")
+        # Stream IDs only in descending order to avoid loading full Finding rows
+        for find_id in loop_qs.order_by("-id").values_list("id", flat=True).iterator(chunk_size=1000):
             removeLoop(find_id, 50)
 
     new_originals = Finding.objects.filter(duplicate_finding__isnull=True, duplicate=True)
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index 913d62c2361..c3fa891aeb9 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -27,7 +27,6 @@
 from dojo.finding.helper import ACCEPTED_FINDINGS_QUERY, CLOSED_FINDINGS_QUERY, OPEN_FINDINGS_QUERY
 from dojo.finding.queries import get_authorized_findings
 from dojo.models import Endpoint_Status, Finding, Product_Type
-from dojo.product.queries import get_authorized_products
 from dojo.utils import (
     get_system_setting,
     queryset_check,
@@ -107,23 +106,49 @@ def finding_queries(
     monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between)
     weekly_counts = query_counts_for_period(MetricsPeriod.WEEK, weeks_between)
 
-    top_ten = get_authorized_products(Permissions.Product_View)
+    # Build Top 10 from all authorized Findings (not date-limited) to avoid empty lists due to date window
+    findings_for_top_ten = all_authorized_findings
+    if len(prod_type) > 0:
+        findings_for_top_ten = findings_for_top_ten.filter(
+            test__engagement__product__prod_type__in=prod_type,
+        )
     if
get_system_setting("enforce_verified_status", True) or get_system_setting("enforce_verified_status_metrics", True): - top_ten = top_ten.filter(engagement__test__finding__verified=True) - - top_ten = top_ten.filter(engagement__test__finding__false_p=False, - engagement__test__finding__duplicate=False, - engagement__test__finding__out_of_scope=False, - engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=("Critical", "High", "Medium", "Low"), - prod_type__in=prod_type) - - top_ten = severity_count( - top_ten, "annotate", "engagement__test__finding__severity", - ).order_by( + findings_for_top_ten = findings_for_top_ten.filter(verified=True) + + findings_for_top_ten = findings_for_top_ten.filter( + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated__isnull=True, + active=True, + risk_accepted=False, + severity__in=("Critical", "High", "Medium", "Low"), + ) + + # Group by product id/name and count findings by severity + top_ten = findings_for_top_ten.values( + product_id=F("test__engagement__product__id"), + product_name=F("test__engagement__product__name"), + ) + top_ten = severity_count(top_ten, "annotate", "severity").order_by( "-critical", "-high", "-medium", "-low", )[:10] + # Remap keys to match template expectations (id/name) + top_ten = [ + { + "id": row.get("product_id"), + "name": row.get("product_name"), + "critical": row.get("critical"), + "high": row.get("high"), + "medium": row.get("medium"), + "low": row.get("low"), + "info": row.get("info"), + "total": row.get("total"), + } + for row in top_ten + ] + return { "all": filtered_findings, "closed": closed_filtered_findings, @@ -217,19 +242,39 @@ def endpoint_queries( monthly_counts = query_counts_for_period(MetricsPeriod.MONTH, months_between) weekly_counts = query_counts_for_period(MetricsPeriod.WEEK, weeks_between) - top_ten = get_authorized_products(Permissions.Product_View) - top_ten = top_ten.filter(engagement__test__finding__status_finding__mitigated=False, - engagement__test__finding__status_finding__false_positive=False, - engagement__test__finding__status_finding__out_of_scope=False, - engagement__test__finding__status_finding__risk_accepted=False, - engagement__test__finding__severity__in=("Critical", "High", "Medium", "Low"), - prod_type__in=prod_type) - - top_ten = severity_count( - top_ten, "annotate", "engagement__test__finding__severity", - ).order_by( + # Build Top 10 from Findings related to the open Endpoint_Status queryset + findings_for_top_ten = findings_queryset(endpoints_qs).filter( + false_p=False, + duplicate=False, + out_of_scope=False, + risk_accepted=False, + severity__in=("Critical", "High", "Medium", "Low"), + ) + if len(prod_type) > 0: + findings_for_top_ten = findings_for_top_ten.filter( + test__engagement__product__prod_type__in=prod_type, + ) + + top_ten = findings_for_top_ten.values( + product_id=F("test__engagement__product__id"), + product_name=F("test__engagement__product__name"), + ) + top_ten = severity_count(top_ten, "annotate", "severity").order_by( "-critical", "-high", "-medium", "-low", )[:10] + top_ten = [ + { + "id": row.get("product_id"), + "name": row.get("product_name"), + "critical": row.get("critical"), + "high": row.get("high"), + "medium": row.get("medium"), + "low": row.get("low"), + "info": row.get("info"), + "total": row.get("total"), + } + for row in top_ten + ] return { "all": endpoints, diff --git a/dojo/metrics/views.py b/dojo/metrics/views.py index 42e045eeb98..88788660b1c 100644 --- a/dojo/metrics/views.py +++ 
b/dojo/metrics/views.py @@ -33,7 +33,7 @@ identify_view, severity_count, ) -from dojo.models import Dojo_User, Finding, Product, Product_Type, Risk_Acceptance +from dojo.models import Dojo_User, Finding, Product_Type, Risk_Acceptance from dojo.product.queries import get_authorized_products from dojo.product_type.queries import get_authorized_product_types from dojo.utils import ( @@ -355,15 +355,20 @@ def product_type_counts(request): "reporter").order_by( "numerical_severity") - top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date, - engagement__test__finding__verified=True, - engagement__test__finding__false_p=False, - engagement__test__finding__duplicate=False, - engagement__test__finding__out_of_scope=False, - engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=( - "Critical", "High", "Medium", "Low"), - prod_type=pt) + # Build Top 10 from Findings for this product type + top_ten = Finding.objects.filter( + date__lte=end_date, + verified=True, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated__isnull=True, + severity__in=("Critical", "High", "Medium", "Low"), + test__engagement__product__prod_type=pt, + ).values( + name=F("test__engagement__product__name"), + ) + top_ten = severity_count(top_ten, "annotate", "severity").order_by("-critical", "-high", "-medium", "-low")[:10] else: overall_in_pt = Finding.objects.filter(date__lt=end_date, false_p=False, @@ -400,16 +405,20 @@ def product_type_counts(request): "reporter").order_by( "numerical_severity") - top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date, - engagement__test__finding__false_p=False, - engagement__test__finding__duplicate=False, - engagement__test__finding__out_of_scope=False, - engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=( - "Critical", "High", "Medium", "Low"), - prod_type=pt) - - top_ten = severity_count(top_ten, "annotate", "engagement__test__finding__severity").order_by("-critical", "-high", "-medium", "-low")[:10] + top_ten = Finding.objects.filter( + date__lte=end_date, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated__isnull=True, + severity__in=("Critical", "High", "Medium", "Low"), + test__engagement__product__prod_type=pt, + ).values( + name=F("test__engagement__product__name"), + ) + top_ten = severity_count(top_ten, "annotate", "severity").order_by("-critical", "-high", "-medium", "-low")[:10] + + # top_ten already annotated above using Findings-based grouping cip = {"S0": 0, "S1": 0, @@ -557,15 +566,21 @@ def product_tag_counts(request): "reporter").order_by( "numerical_severity") - top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date, - engagement__test__finding__verified=True, - engagement__test__finding__false_p=False, - engagement__test__finding__duplicate=False, - engagement__test__finding__out_of_scope=False, - engagement__test__finding__mitigated__isnull=True, - engagement__test__finding__severity__in=( - "Critical", "High", "Medium", "Low"), - tags__name=pt, engagement__product__in=prods) + # Build Top 10 from Findings for this product tag + top_ten = Finding.objects.filter( + date__lte=end_date, + verified=True, + false_p=False, + duplicate=False, + out_of_scope=False, + mitigated__isnull=True, + severity__in=("Critical", "High", "Medium", "Low"), + test__engagement__product__tags__name=pt, + test__engagement__product__in=prods, + ).values( + name=F("test__engagement__product__name"), + ) + 
top_ten = severity_count(top_ten, "annotate", "severity").order_by("-critical", "-high", "-medium", "-low")[:10]
     else:
         overall_in_pt = Finding.objects.filter(date__lt=end_date,
                                                false_p=False,
@@ -605,16 +620,21 @@
                 "reporter").order_by(
                 "numerical_severity")
 
-            top_ten = Product.objects.filter(engagement__test__finding__date__lte=end_date,
-                                             engagement__test__finding__false_p=False,
-                                             engagement__test__finding__duplicate=False,
-                                             engagement__test__finding__out_of_scope=False,
-                                             engagement__test__finding__mitigated__isnull=True,
-                                             engagement__test__finding__severity__in=(
-                                                 "Critical", "High", "Medium", "Low"),
-                                             tags__name=pt, engagement__product__in=prods)
-
-            top_ten = severity_count(top_ten, "annotate", "engagement__test__finding__severity").order_by("-critical", "-high", "-medium", "-low")[:10]
+            top_ten = Finding.objects.filter(
+                date__lte=end_date,
+                false_p=False,
+                duplicate=False,
+                out_of_scope=False,
+                mitigated__isnull=True,
+                severity__in=("Critical", "High", "Medium", "Low"),
+                test__engagement__product__tags__name=pt,
+                test__engagement__product__in=prods,
+            ).values(
+                name=F("test__engagement__product__name"),
+            )
+            top_ten = severity_count(top_ten, "annotate", "severity").order_by("-critical", "-high", "-medium", "-low")[:10]
+
+            # top_ten already annotated above using Findings-based grouping
 
     cip = {"S0": 0,
            "S1": 0,
diff --git a/dojo/models.py b/dojo/models.py
index 741f630fb92..8eb0f45f719 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -2231,7 +2231,9 @@ def hash_code_fields(self):
     else:
         deduplicationLogger.debug("Section HASHCODE_FIELDS_PER_SCANNER not found in settings.dist.py")
 
-    deduplicationLogger.debug(f"HASHCODE_FIELDS_PER_SCANNER is: {hashCodeFields}")
+    hash_code_fields_always = getattr(settings, "HASH_CODE_FIELDS_ALWAYS", [])
+    deduplicationLogger.debug(f"HASHCODE_FIELDS_PER_SCANNER is: {hashCodeFields} + HASH_CODE_FIELDS_ALWAYS: {hash_code_fields_always}")
+
     return hashCodeFields
 
 @property
@@ -2935,6 +2937,13 @@ def compute_hash_code(self):
                 # Generically use the finding attribute having the same name, converts to str in case it's integer
                 fields_to_hash += str(getattr(self, hashcodeField))
                 deduplicationLogger.debug(hashcodeField + " : " + str(getattr(self, hashcodeField)))
+
+        # Log the hash_code fields that are always included (but are not part of the hash_code_fields list as they are inserted downstream in self.hash_fields)
+        hash_code_fields_always = getattr(settings, "HASH_CODE_FIELDS_ALWAYS", [])
+        for hashcodeField in hash_code_fields_always:
+            if getattr(self, hashcodeField):
+                deduplicationLogger.debug(hashcodeField + " : " + str(getattr(self, hashcodeField)))
+
         deduplicationLogger.debug("compute_hash_code - fields_to_hash = " + fields_to_hash)
         return self.hash_fields(fields_to_hash)
 
diff --git a/dojo/templates/dojo/findings_list_snippet.html b/dojo/templates/dojo/findings_list_snippet.html
index c6d9331b1ce..fd9ec39d07e 100644
--- a/dojo/templates/dojo/findings_list_snippet.html
+++ b/dojo/templates/dojo/findings_list_snippet.html
@@ -664,7 +664,7 @@

title="Test: {{ finding.test }}">{{ finding.test.test_type }} {% endif %} - + {{ finding|finding_display_status|safe }} {{ finding|import_history }} {% if system_settings.enable_jira %} diff --git a/dojo/templates/dojo/report_builder.html b/dojo/templates/dojo/report_builder.html index 5efbe164b17..1d850c57e34 100644 --- a/dojo/templates/dojo/report_builder.html +++ b/dojo/templates/dojo/report_builder.html @@ -23,7 +23,7 @@

Report Format

@@ -83,8 +83,19 @@

Available Widgets

} }); - if ($('.in-use-widgets ul#sortable2 li').length > 0) { - $('a.run_report').removeClass('disabled'); + if ($('.in-use-widgets ul#sortable2 li').not('.report-options').length > 0) { + var $run = $('a.run_report'); + $run.removeClass('disabled') + .css('pointer-events', '') + .attr('title', 'Run the report') + .attr('data-original-title', 'Run the report'); + } + else { + var $run = $('a.run_report'); + $run.addClass('disabled') + .css('pointer-events', 'auto') + .attr('title', 'Add at least one section from "Available Widgets" before running.') + .attr('data-original-title', 'Add at least one section from "Available Widgets" before running.'); } } @@ -148,6 +159,13 @@

Available Widgets

function runReport(event) { var valid = true; + // Require at least one content widget (exclude report options) + if ($('.in-use-widgets ul#sortable2 li').not('.report-options').length === 0) { + alert('Please add at least one section from "Available Widgets" before running.'); + event.preventDefault(); + return; + } + $('.in-use-widgets .form-control').not('#finding-list .form-control') .not('#endpoint-list .form-control').not('#wysiwyg-content .form-control') .not('.bs-searchbox .form-control').not('div').each(function () { @@ -185,8 +203,7 @@

Available Widgets

event.preventDefault(); } - {% block report_functions %} - {% endblock %} + // placeholder for report widget scripts injected by Django blocks $(function () { $(".available-widgets > ul").sortable({ handle: "div.panel div.panel-heading", @@ -218,11 +235,21 @@

Available Widgets

remove: function (event, ui) { ui.item.find('[data-toggle="tooltip"]').tooltip('hide'); - if ($('.in-use-widgets ul#sortable2 li').length > 0) { - $('a.run_report').removeClass('disabled'); + if ($('.in-use-widgets ul#sortable2 li').not('.report-options').length > 0) { + var $run = $('a.run_report'); + $run.removeClass('disabled') + .css('pointer-events', '') + .attr('title', 'Run the report') + .attr('data-original-title', 'Run the report') + .tooltip('fixTitle'); } else { - $('a.run_report').addClass('disabled'); + var $run = $('a.run_report'); + $run.addClass('disabled') + .css('pointer-events', 'auto') + .attr('title', 'Add at least one section from "Available Widgets" before running.') + .attr('data-original-title', 'Add at least one section from "Available Widgets" before running.') + .tooltip('fixTitle'); } }, receive: function(event, ui) { @@ -235,11 +262,21 @@

Available Widgets

ui.item.find('[data-toggle="tooltip"]').tooltip(); ui.item.find('[data-toggle="tooltip"]').tooltip('hide'); - if ($('.in-use-widgets ul#sortable2 li').length > 0) { - $('a.run_report').removeClass('disabled'); + if ($('.in-use-widgets ul#sortable2 li').not('.report-options').length > 0) { + var $run = $('a.run_report'); + $run.removeClass('disabled') + .css('pointer-events', '') + .attr('title', 'Run the report') + .attr('data-original-title', 'Run the report') + .tooltip('fixTitle'); } else { - $('a.run_report').addClass('disabled'); + var $run = $('a.run_report'); + $run.addClass('disabled') + .css('pointer-events', 'auto') + .attr('title', 'Add at least one section from "Available Widgets" before running.') + .attr('data-original-title', 'Add at least one section from "Available Widgets" before running.') + .tooltip('fixTitle'); } if (ui.item.attr('class') === 'wysiwyg-content') { @@ -339,7 +376,7 @@

Available Widgets

retrieveReportData("{% url 'report_endpoints' %}" + $a.attr('href'), $a.closest('li.endpoint-list')); }) - $('[data-toggle="tooltip"]').tooltip() + $('[data-toggle="tooltip"]').tooltip({container: 'body', placement: 'top'}) $(document).on('click', '.in-use-widgets .panel-available-widget .panel-heading', function (event) { $(this).siblings('.panel-body').slideToggle(); diff --git a/dojo/templates/dojo/view_finding.html b/dojo/templates/dojo/view_finding.html index 9001cbcd9cb..a992a22d401 100755 --- a/dojo/templates/dojo/view_finding.html +++ b/dojo/templates/dojo/view_finding.html @@ -285,7 +285,7 @@

{% block header_body %} - {{ finding.id }} + {{ finding.id }} {% if finding.severity %} @@ -1209,7 +1209,7 @@

Credential var i = $($(this).find('i').get(0)); i.toggleClass('glyphicon-chevron-up').toggleClass('glyphicon-chevron-down'); }) - + // Configure tooltips for CVSS vectors - try multiple approaches $(document).on('shown.bs.tooltip shown.bs.popover', function() { $('.tooltip-inner, .popover-content').css({ @@ -1221,7 +1221,7 @@

Credential 'max-width': '1200px !important' }); }); - + // Force tooltip configuration $('.has-popover').each(function() { $(this).on('mouseenter', function() { diff --git a/dojo/templates/dojo/view_test.html b/dojo/templates/dojo/view_test.html index 824cad760b7..677a586b239 100644 --- a/dojo/templates/dojo/view_test.html +++ b/dojo/templates/dojo/view_test.html @@ -36,7 +36,7 @@

id="dropdownMenu1" data-toggle="dropdown" aria-expanded="true" - aria-label="Test options" + aria-label="Test options" > @@ -697,7 +697,7 @@

-
+
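
For reference, the `local_settings.py` override mentioned in the deduplication docs at the top of this diff could look like the minimal sketch below. This assumes `local_settings.py` is composed into DefectDojo's settings module (split-settings style), so the dictionaries referenced already exist when these lines run; `"Some Scanner"` is a placeholder parser name, and the field list and algorithm value are illustrative assumptions, not recommendations.

```python
# local_settings.py -- minimal sketch, not a drop-in config.
# Assumes this file is merged into dojo/settings/settings.py, so
# HASHCODE_FIELDS_PER_SCANNER and DEDUPLICATION_ALGORITHM_PER_PARSER
# are already defined at this point. "Some Scanner" is a placeholder.

# Make the hash code independent of the service field (see docs above).
HASH_CODE_FIELDS_ALWAYS = []

# Hypothetical per-scanner override of the fields used to compute hash_code.
HASHCODE_FIELDS_PER_SCANNER["Some Scanner"] = ["title", "file_path", "line"]

# Hypothetical per-parser dedupe algorithm override.
DEDUPLICATION_ALGORITHM_PER_PARSER["Some Scanner"] = "hash_code"
```

After any such change, hash codes on existing findings are stale until `python manage.py dedupe --hash_code_only` (or a full `dedupe`) is re-run as described in the docs section above.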