From e0d33041e9380ba11d622741577112bf404c636e Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 22 Dec 2025 17:29:15 +0000 Subject: [PATCH 01/13] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 12 ++++-------- helm/defectdojo/README.md | 2 +- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/components/package.json b/components/package.json index 385f6754f56..d9500b421b6 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.53.4", + "version": "2.54.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 515736d9964..119538cc717 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.53.4" +appVersion: "2.54.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.9.4 +version: 1.9.5-dev icon: https://defectdojo.com/hubfs/DefectDojo_favicon.png maintainers: - name: madchap @@ -33,9 +33,5 @@ dependencies: # - kind: security # description: Critical bug annotations: - artifacthub.io/prerelease: "false" - artifacthub.io/changes: | - - kind: fixed - description: Drop 'replicas' when HPA is in place - - kind: changed - description: Bump DefectDojo to 2.53.4 + artifacthub.io/prerelease: "true" + artifacthub.io/changes: "" diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md index 48c668d9eed..e749100dd98 100644 --- a/helm/defectdojo/README.md +++ b/helm/defectdojo/README.md @@ -511,7 +511,7 @@ The HELM schema will be generated for you. # General information about chart values -![Version: 1.9.4](https://img.shields.io/badge/Version-1.9.4-informational?style=flat-square) ![AppVersion: 2.53.4](https://img.shields.io/badge/AppVersion-2.53.4-informational?style=flat-square) +![Version: 1.9.5-dev](https://img.shields.io/badge/Version-1.9.5--dev-informational?style=flat-square) ![AppVersion: 2.54.0-dev](https://img.shields.io/badge/AppVersion-2.54.0--dev-informational?style=flat-square) A Helm chart for Kubernetes to install DefectDojo From 7af3db24856ac017efad867694670b2492f6f3e6 Mon Sep 17 00:00:00 2001 From: Jino Tesauro <53376807+Jino-T@users.noreply.github.com> Date: Wed, 24 Dec 2025 21:04:10 -0600 Subject: [PATCH 02/13] Added relevant test name to "close old findings" comment (#13930) * Added relevant test name to close old findings comment * Added test URL to close old findings comment * Better handling for close old findings comments --- dojo/importers/default_importer.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py index 3030e7f4e4e..5a14728043f 100644 --- a/dojo/importers/default_importer.py +++ b/dojo/importers/default_importer.py @@ -18,7 +18,7 @@ Test_Import, ) from dojo.notifications.helper import create_notification -from dojo.utils import perform_product_grading +from dojo.utils import get_full_url, perform_product_grading from dojo.validators import clean_tags logger = logging.getLogger(__name__) @@ -365,11 +365,13 @@ def close_old_findings( old_findings = old_findings.filter(Q(service__isnull=True) | Q(service__exact="")) # Update the status of the findings and any endpoints for old_finding in old_findings: + url = str(get_full_url(reverse("view_test", args=(self.test.id,)))) + test_title = str(self.test.title) self.mitigate_finding(
old_finding, ( - "This finding has been automatically closed " - "as it is not present anymore in recent scans." + 'This Finding has been automatically closed by the Test: \n "' + test_title + '"\n' + url + + "\n\nThis is because this Finding is not present anymore in recent scans." ), finding_groups_enabled=self.findings_groups_enabled, product_grading_option=False, From cbb00969ab38d95155abbd904c9d9cdc9412af35 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Thu, 25 Dec 2025 04:21:09 +0100 Subject: [PATCH 03/13] feat(docker): Manage images via renovate (not dependabot) (#13953) --- .github/dependabot.yml | 7 ------- .github/renovate.json | 3 +-- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 1031d9f6101..ac4c006630e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -64,10 +64,3 @@ updates: versions: - ">= 4.a" - "< 5" -- package-ecosystem: docker - directory: "/" - schedule: - interval: weekly - open-pull-requests-limit: 10 - target-branch: dev - diff --git a/.github/renovate.json b/.github/renovate.json index 9ba0161dc13..62177863787 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -13,8 +13,7 @@ "components/package.json", "components/package-lock.json", "dojo/components/yarn.lock", - "dojo/components/package.json", - "Dockerfile**" + "dojo/components/package.json" ], "ignoreDeps": [], "packageRules": [{ From 4eb752e4cc231ccb24c081be7aff62565420d249 Mon Sep 17 00:00:00 2001 From: Ross E Esposito Date: Mon, 29 Dec 2025 02:16:23 -0600 Subject: [PATCH 04/13] Increasing timeouts for unit tests (#13849) * Increasing timeouts for unit tests * fix timeouts --------- Co-authored-by: Valentijn Scholten --- .github/workflows/rest-framework-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rest-framework-tests.yml b/.github/workflows/rest-framework-tests.yml index 591f9cabf27..7f857b4c05c 100644 --- a/.github/workflows/rest-framework-tests.yml +++ b/.github/workflows/rest-framework-tests.yml @@ -58,7 +58,7 @@ jobs: # no celery or initializer needed for unit tests - name: Unit tests - timeout-minutes: 15 + timeout-minutes: 20 run: docker compose up --no-deps --exit-code-from uwsgi uwsgi env: DJANGO_VERSION: ${{ matrix.os }} From 3e1565499da01a0c53b837d2e825c88cfa125186 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:52:21 +0100 Subject: [PATCH 05/13] cyclonedx scan: handle missing description (#13963) --- dojo/tools/cyclonedx/xml_parser.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/dojo/tools/cyclonedx/xml_parser.py b/dojo/tools/cyclonedx/xml_parser.py index bbad86899e2..d4850971cec 100644 --- a/dojo/tools/cyclonedx/xml_parser.py +++ b/dojo/tools/cyclonedx/xml_parser.py @@ -194,6 +194,15 @@ def _manage_vulnerability_xml( "b:ratings/b:rating/b:severity", namespaces=ns, ) severity = Cyclonedxhelper().fix_severity(severity) + # by the schema, only id is mandatory, even the severity and description are + # optional + if not description: + description = "\n".join( + [ + f"**Id:** {vuln_id}", + f"**Severity:** {severity}", + ], + ) references = "" for advisory in vulnerability.findall( "b:advisories/b:advisory", namespaces=ns, From 031c94c342ace2ecb2f88a05b22e6cc03a2ba108 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:52:59 +0100 Subject: [PATCH 06/13] Fix cross-scanner deduplication endpoint parsing (#10215) (#13964) Fix incorrect endpoint parsing when 
endpoints lack a protocol (scheme). When endpoints are converted to strings without a protocol, hyperlink.parse() misinterprets the hostname as the scheme, causing deduplication to fail. This fix normalizes endpoint strings by prepending '//' if '://' is missing, replicating the behavior from dojo/endpoint/utils.py line 265. Fixes #10215 --- dojo/finding/deduplication.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/dojo/finding/deduplication.py b/dojo/finding/deduplication.py index d11c66b17ba..14e4d33477c 100644 --- a/dojo/finding/deduplication.py +++ b/dojo/finding/deduplication.py @@ -196,7 +196,18 @@ def is_deduplication_on_engagement_mismatch(new_finding, to_duplicate_finding): def get_endpoints_as_url(finding): - return [hyperlink.parse(str(e)) for e in finding.endpoints.all()] + # Fix for https://github.com/DefectDojo/django-DefectDojo/issues/10215 + # When endpoints lack a protocol (scheme), str(e) returns a string like "10.20.197.218:6379" + # without the "//" prefix. hyperlink.parse() then misinterprets the hostname as the scheme. + # We replicate the behavior from dojo/endpoint/utils.py line 265: prepend "//" if "://" is missing + # to ensure hyperlink.parse() correctly identifies host, port, and path components. + urls = [] + for e in finding.endpoints.all(): + endpoint_str = str(e) + if "://" not in endpoint_str: + endpoint_str = "//" + endpoint_str + urls.append(hyperlink.parse(endpoint_str)) + return urls def are_urls_equal(url1, url2, fields): From b6481ba640d23c9959c370460f4b828d8df62f23 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:53:13 +0100 Subject: [PATCH 07/13] Fix Tenable CSV import fails with 'Version of CPE not implemented' (#13967) - Add exception handling around CPE parsing in TenableCSVParser - Log unsupported CPE versions at DEBUG level instead of crashing - Allows import to continue when encountering unsupported CPE formats - Fixes issue #11243 --- dojo/tools/tenable/csv_format.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py index 0421776bb78..838419a0962 100644 --- a/dojo/tools/tenable/csv_format.py +++ b/dojo/tools/tenable/csv_format.py @@ -228,17 +228,23 @@ def get_findings(self, filename: str, test: Test): LOGGER.debug( "more than one CPE for a finding. NOT supported by Nessus CSV parser", ) - cpe_decoded = CPE(detected_cpe[0]) - find.component_name = ( - cpe_decoded.get_product()[0] - if len(cpe_decoded.get_product()) > 0 - else None - ) - find.component_version = ( - cpe_decoded.get_version()[0] - if len(cpe_decoded.get_version()) > 0 - else None - ) + try: + cpe_decoded = CPE(detected_cpe[0]) + find.component_name = ( + cpe_decoded.get_product()[0] + if len(cpe_decoded.get_product()) > 0 + else None + ) + find.component_version = ( + cpe_decoded.get_version()[0] + if len(cpe_decoded.get_version()) > 0 + else None + ) + except Exception as e: + LOGGER.debug( + f"Failed to parse CPE '{detected_cpe[0]}': {e}. 
" + "Skipping component_name and component_version.", + ) find.unsaved_endpoints = [] find.unsaved_vulnerability_ids = [] From 6d830c158d7c648dcdab46b71850dbdea94e9e90 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:53:36 +0100 Subject: [PATCH 08/13] Fix FileUpload.copy() to prevent title length exceeding 100 chars (#13968) Fixes #11314 When copying a FileUpload, the copy() method appends ' - clone-{hash}' (17 characters) to the title without checking if it would exceed the database max_length constraint of 100 characters. This causes a DataError when copying tests with files that have long names. The fix truncates the original title before appending the clone suffix to ensure the total length never exceeds 100 characters. --- dojo/models.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dojo/models.py b/dojo/models.py index d0572d370b7..0628c33165d 100644 --- a/dojo/models.py +++ b/dojo/models.py @@ -803,7 +803,12 @@ def delete(self, *args, **kwargs): def copy(self): copy = copy_model_util(self) # Add unique modifier to file name - copy.title = f"{self.title} - clone-{str(uuid4())[:8]}" + # Truncate title to ensure it doesn't exceed max_length (100) when appending suffix + # Suffix " - clone-{8 chars}" is 17 characters, so truncate to 83 chars + clone_suffix = f" - clone-{str(uuid4())[:8]}" + max_title_length = 100 - len(clone_suffix) + truncated_title = self.title[:max_title_length] if len(self.title) > max_title_length else self.title + copy.title = f"{truncated_title}{clone_suffix}" # Create new unique file name current_url = self.file.url _, current_full_filename = current_url.rsplit("/", 1) From ed6390b35a13030a21eda6954b682299ba01f0b2 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:54:57 +0100 Subject: [PATCH 09/13] Fix: Populate vulnerability_id field in BlackDuck Binary Analysis parser (#13973) * Fix Tenable CSV import fails with 'Version of CPE not implemented' - Add exception handling around CPE parsing in TenableCSVParser - Log unsupported CPE versions at DEBUG level instead of crashing - Allows import to continue when encountering unsupported CPE formats - Fixes issue #11243 * Fix: Populate vulnerability_id field in BlackDuck Binary Analysis parser - Add unsaved_vulnerability_ids assignment when CVE is present - This ensures the vulnerability_id field is populated for de-duplication - Fixes #12442 * Test: Add assertions for vulnerability_id field in BlackDuck Binary Analysis parser tests - Verify unsaved_vulnerability_ids is populated with CVE value - Add specific assertion for single vuln test case - Add general assertion for multiple vulns test case - Related to #12442 --- dojo/tools/blackduck_binary_analysis/parser.py | 3 +++ unittests/tools/test_blackduck_binary_analysis_parser.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py index 1a2087487b2..0db2d700a64 100644 --- a/dojo/tools/blackduck_binary_analysis/parser.py +++ b/dojo/tools/blackduck_binary_analysis/parser.py @@ -104,6 +104,9 @@ def ingest_findings(self, sorted_findings, test): finding.fix_available = True else: finding.fix_available = False + # Add vulnerability ID for de-duplication + if cve: + finding.unsaved_vulnerability_ids = [str(cve)] findings[unique_finding_key] = finding return list(findings.values()) diff --git a/unittests/tools/test_blackduck_binary_analysis_parser.py 
b/unittests/tools/test_blackduck_binary_analysis_parser.py index 4d74c8914b7..5aa198216c1 100644 --- a/unittests/tools/test_blackduck_binary_analysis_parser.py +++ b/unittests/tools/test_blackduck_binary_analysis_parser.py @@ -38,6 +38,9 @@ def test_parse_one_vuln(self): self.assertIsNotNone(finding.vuln_id_from_tool) self.assertEqual("CVE-2023-45853", finding.vuln_id_from_tool) self.assertIsNotNone(finding.unique_id_from_tool) + # Verify vulnerability_id is populated for de-duplication + self.assertIsNotNone(finding.unsaved_vulnerability_ids) + self.assertEqual(["CVE-2023-45853"], finding.unsaved_vulnerability_ids) def test_parse_many_vulns(self): with (get_unit_tests_scans_path("blackduck_binary_analysis") / "many_vulns.csv").open(encoding="utf-8") as testfile: @@ -53,3 +56,6 @@ def test_parse_many_vulns(self): self.assertIsNotNone(finding.file_path) self.assertIsNotNone(finding.vuln_id_from_tool) self.assertIsNotNone(finding.unique_id_from_tool) + # Verify vulnerability_id is populated for de-duplication + self.assertIsNotNone(finding.unsaved_vulnerability_ids) + self.assertGreater(len(finding.unsaved_vulnerability_ids), 0) From 609b024ab8f06fd8fbd1ec984d223c5524fad7f1 Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:56:46 +0100 Subject: [PATCH 10/13] Add test_type mismatch validation during reimport (#10219) (#13975) * Fix test_type mismatch validation during reimport (#10219) - Add validation in consolidate_dynamic_tests to detect test_type mismatches during reimport - Raise ValidationError with descriptive message when test_type doesn't match - Validation occurs before any findings are processed or deduplication starts - Add test cases for matching test_type, mismatched test_type, and initial import scenarios - Create test data files for generic parser with different test types Fixes #10219 * fixes * add docs --- .../product_hierarchy.md | 37 ++- dojo/importers/base_importer.py | 30 ++- unittests/scans/generic/generic_no_type.json | 13 + .../scans/generic/generic_test_type_1.json | 14 ++ .../scans/generic/generic_test_type_2.json | 14 ++ .../generic_test_type_equals_scan_type.json | 14 ++ unittests/test_importers_importer.py | 233 ++++++++++++++++++ 7 files changed, 345 insertions(+), 10 deletions(-) create mode 100644 unittests/scans/generic/generic_no_type.json create mode 100644 unittests/scans/generic/generic_test_type_1.json create mode 100644 unittests/scans/generic/generic_test_type_2.json create mode 100644 unittests/scans/generic/generic_test_type_equals_scan_type.json diff --git a/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md b/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md index d2105b75ac5..093ba1d90e0 100644 --- a/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md +++ b/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md @@ -25,9 +25,9 @@ Product Types can have Role\-Based Access Control rules applied, which limit tea #### What can a Product Type represent? -* If a particular software project has many distinct deployments or versions, it may be worth creating a single Product Type which covers the scope of the entire project, and having each version exist as individual Products. 
+* If a particular software project has many distinct deployments or versions, it may be worth creating a single Product Type which covers the scope of the entire project, and having each version exist as individual Products. ​ -* You also might consider using Product Types to represent stages in your software development process: one Product Type for 'In Development', one Product Type for 'In Production', etc. +* You also might consider using Product Types to represent stages in your software development process: one Product Type for 'In Development', one Product Type for 'In Production', etc. ​ * Ultimately, it's your decision how you wish to organize your Products, and what you Product Type to represent. Your DefectDojo hierarchy may need to change to fit your security teams' needs. @@ -58,11 +58,11 @@ The following scenarios are good reasons to consider creating a separate DefectD * "**ExampleProduct 1\.0**" uses completely different software components from "**ExampleProduct 2\.0**", and both versions are actively supported by your company. * The team assigned to work on "**ExampleProduct version A**" is different than the product team assigned to work on "**ExampleProduct version B**", and needs to have different security permissions assigned as a result. -These variations within a single Product can also be handled at the Engagement level. Note that Engagements don't have access control in the way Products and Product Types do. +These variations within a single Product can also be handled at the Engagement level. Note that Engagements don't have access control in the way Products and Product Types do. ## **Engagements** -Once a Product is set up, you can begin creating and scheduling Engagements. Engagements are meant to represent moments in time when testing is taking place, and contain one or more **Tests**. +Once a Product is set up, you can begin creating and scheduling Engagements. Engagements are meant to represent moments in time when testing is taking place, and contain one or more **Tests**. Engagements always have: @@ -72,12 +72,12 @@ Engagements always have: * an assigned **Testing Lead** * an associated **Product** -There are two types of Engagement: **Interactive** and **CI/CD**. +There are two types of Engagement: **Interactive** and **CI/CD**. * An **Interactive Engagement** is typically run by an engineer. Interactive Engagements are focused on testing the application while the app is running, using an automated test, human tester, or any activity “interacting” with the application functionality. See [OWASP's definition of IAST](https://owasp.org/www-project-devsecops-guideline/latest/02c-Interactive-Application-Security-Testing#:~:text=Interactive%20Application%20Security%20Testing,interacting%E2%80%9D%20with%20the%20application%20functionality.). * A **CI/CD Engagement** is for automated integration with a CI/CD pipeline. CI/CD Engagements are meant to import data as an automated action, triggered by a step in the release process. -Engagements can be tracked using DefectDojo's **Calendar** view. +Engagements can be tracked using DefectDojo's **Calendar** view. #### What can an Engagement represent? @@ -91,7 +91,7 @@ If you have a planned testing effort scheduled, an Engagement offers you a place * **Test:** Nessus Scan Results (March 12\) * **Test:** NPM Scan Audit Results (March 12\) -* **Test:** Snyk Scan Results (March 12\) +* **Test:** Snyk Scan Results (March 12\) ​ You can also organize CI/CD Test results within an Engagement. 
These kinds of Engagements are 'Open\-Ended' meaning that they don't have a date, and will instead add additional data each time the associated CI/CD actions are run. @@ -137,6 +137,29 @@ The following Test Types appear in the "Scan Type" dropdown when creating a new Non-parser Test Types should be used when you need to manually create findings that require remediation but don't originate from automated scanner output. +#### **Parser-based Test Types** + +Parser-based test types can be categorized by how their test type name is determined: + +- **Fixed Test Type Names**: The test type name is predefined and known before import (e.g., "ZAP Scan", "Nessus Scan"). + +- **Report-Defined Test Type Names**: The test type name is extracted from the scan report content at import time. + +Examples include: + - **Generic Findings Import**: Creates test types based on the `type` field in JSON reports + - **SARIF**: Creates test types based on tool names in the SARIF report (e.g., "Dockle Scan (SARIF)") + - **OpenReports**: Creates separate test types per source found in the report + +**Report-Defined Test Type Naming Rules:** +- If the report's `type` field equals the scan type → uses scan type directly (e.g., "Generic Findings Import") +- If the report's `type` field differs → creates "{type} Scan ({scan_type})" format (e.g., "Tool1 Scan (Generic Findings Import)") +- If no `type` field is provided → uses scan type directly + +**Important Considerations:** +- Report-defined test types are automatically created when a new type is detected during import or reimport. +- For reimports, the test type name must match exactly - mismatches will raise a validation error +- Deduplication settings (`HASHCODE_FIELDS_PER_SCANNER`) use test type names as keys, so report-defined names must be configured accordingly if you want custom deduplication behavior + #### **How do Tests interact with each other?** Tests take your testing data and group it into Findings. Generally, security teams will be running the same testing effort repeatedly, and Tests in DefectDojo allow you to handle this process in an elegant way. 
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py index 03bb801e32f..380fa24e4e0 100644 --- a/dojo/importers/base_importer.py +++ b/dojo/importers/base_importer.py @@ -205,10 +205,34 @@ def consolidate_dynamic_tests(self, tests: list[Test]) -> list[Finding]: if not self.test: # Determine if we should use a custom test type name if test_raw.type: - test_type_name = f"{tests[0].type} Scan" - if test_type_name != self.scan_type: - test_type_name = f"{test_type_name} ({self.scan_type})" + # If test_raw.type equals scan_type, use scan_type directly + if test_raw.type == self.scan_type: + test_type_name = self.scan_type + else: + test_type_name = f"{tests[0].type} Scan" + if test_type_name != self.scan_type: + test_type_name = f"{test_type_name} ({self.scan_type})" self.test = self.create_test(test_type_name) + else: + # During reimport, validate that the test_type matches + # Calculate the expected test_type_name from the incoming report + expected_test_type_name = self.scan_type + if test_raw.type: + # If test_raw.type equals scan_type, use scan_type directly + if test_raw.type == self.scan_type: + expected_test_type_name = self.scan_type + else: + expected_test_type_name = f"{test_raw.type} Scan" + if expected_test_type_name != self.scan_type: + expected_test_type_name = f"{expected_test_type_name} ({self.scan_type})" + # Compare with existing test's test_type name + if self.test.test_type.name != expected_test_type_name: + msg = ( + f"Test type mismatch: Test {self.test.id} has test_type '{self.test.test_type.name}', " + f"but the report contains test_type '{expected_test_type_name}'. " + f"Reimport with matching test_type or create a new test." + ) + raise ValidationError(msg) # This part change the name of the Test # we get it from the data of the parser # Update the test and test type with meta from the raw test diff --git a/unittests/scans/generic/generic_no_type.json b/unittests/scans/generic/generic_no_type.json new file mode 100644 index 00000000000..73a9ab85bd9 --- /dev/null +++ b/unittests/scans/generic/generic_no_type.json @@ -0,0 +1,13 @@ +{ + "name": "Test Without Type", + "findings": [ + { + "title": "Test Finding Without Type", + "description": "This is a test finding without type field", + "severity": "Medium", + "active": true, + "verified": true + } + ] +} + diff --git a/unittests/scans/generic/generic_test_type_1.json b/unittests/scans/generic/generic_test_type_1.json new file mode 100644 index 00000000000..39e20b3a0c8 --- /dev/null +++ b/unittests/scans/generic/generic_test_type_1.json @@ -0,0 +1,14 @@ +{ + "name": "Test Tool1", + "type": "Tool1", + "findings": [ + { + "title": "Test Finding 1", + "description": "This is a test finding for Tool1", + "severity": "High", + "active": true, + "verified": true + } + ] +} + diff --git a/unittests/scans/generic/generic_test_type_2.json b/unittests/scans/generic/generic_test_type_2.json new file mode 100644 index 00000000000..29bf7112463 --- /dev/null +++ b/unittests/scans/generic/generic_test_type_2.json @@ -0,0 +1,14 @@ +{ + "name": "Test Tool2", + "type": "Tool2", + "findings": [ + { + "title": "Test Finding 2", + "description": "This is a test finding for Tool2", + "severity": "Medium", + "active": true, + "verified": true + } + ] +} + diff --git a/unittests/scans/generic/generic_test_type_equals_scan_type.json b/unittests/scans/generic/generic_test_type_equals_scan_type.json new file mode 100644 index 00000000000..3d65051de53 --- /dev/null +++ 
b/unittests/scans/generic/generic_test_type_equals_scan_type.json @@ -0,0 +1,14 @@ +{ + "name": "Test With Type Equal To Scan Type", + "type": "Generic Findings Import", + "findings": [ + { + "title": "Test Finding With Type Equal To Scan Type", + "description": "This is a test finding with type equal to scan_type", + "severity": "High", + "active": true, + "verified": true + } + ] +} + diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py index cc5fb342df7..4077648c812 100644 --- a/unittests/test_importers_importer.py +++ b/unittests/test_importers_importer.py @@ -2,11 +2,13 @@ import uuid from unittest.mock import patch +from django.core.exceptions import ValidationError from django.utils import timezone from rest_framework.authtoken.models import Token from rest_framework.test import APIClient from dojo.importers.default_importer import DefaultImporter +from dojo.importers.default_reimporter import DefaultReImporter from dojo.models import Development_Environment, Engagement, Finding, Product, Product_Type, Test, User from dojo.tools.gitlab_sast.parser import GitlabSastParser from dojo.tools.sarif.parser import SarifParser @@ -148,6 +150,237 @@ def test_import_scan_without_test_scan_type(self): self.assertEqual(1, len_new_findings) self.assertEqual(0, len_closed_findings) + def test_import_generic_with_custom_test_type(self): + """Test Case 4: Initial import (should not trigger validation, should create new test)""" + generic_test_type_1 = get_unit_tests_scans_path("generic") / "generic_test_type_1.json" + with generic_test_type_1.open(encoding="utf-8") as scan: + scan_type = "Generic Findings Import" + user, _ = User.objects.get_or_create(username="admin") + product_type, _ = Product_Type.objects.get_or_create(name="test_generic") + product, _ = Product.objects.get_or_create( + name="TestGenericImporter", + prod_type=product_type, + ) + engagement, _ = Engagement.objects.get_or_create( + name="Test Generic Engagement", + product=product, + target_start=timezone.now(), + target_end=timezone.now(), + ) + environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "engagement": engagement, + "close_old_findings": False, + } + importer = DefaultImporter(**import_options) + test, _, len_new_findings, len_closed_findings, _, _, _ = importer.process_scan(scan) + # Verify test is created successfully + self.assertIsNotNone(test) + # Verify test_type is set correctly based on report's type field + self.assertEqual("Tool1 Scan (Generic Findings Import)", test.test_type.name) + self.assertEqual(1, len_new_findings) + self.assertEqual(0, len_closed_findings) + + def test_reimport_generic_with_matching_test_type(self): + """Test Case 1: Reimport with matching test_type (should succeed)""" + generic_test_type_1 = get_unit_tests_scans_path("generic") / "generic_test_type_1.json" + with generic_test_type_1.open(encoding="utf-8") as scan: + scan_type = "Generic Findings Import" + user, _ = User.objects.get_or_create(username="admin") + product_type, _ = Product_Type.objects.get_or_create(name="test_generic_reimport") + product, _ = Product.objects.get_or_create( + name="TestGenericReimport", + prod_type=product_type, + ) + engagement, _ = Engagement.objects.get_or_create( + name="Test Generic Reimport Engagement", + product=product, + 
target_start=timezone.now(), + target_end=timezone.now(), + ) + environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "engagement": engagement, + "close_old_findings": False, + } + # Initial import + importer = DefaultImporter(**import_options) + test, _, _, _, _, _, _ = importer.process_scan(scan) + original_test_type_name = test.test_type.name + self.assertEqual("Tool1 Scan (Generic Findings Import)", original_test_type_name) + + # Reimport with same test_type + reimport_options = { + "test": test, + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "close_old_findings": False, + } + reimporter = DefaultReImporter(**reimport_options) + # Reset file pointer for reimport + scan.seek(0) + test_after_reimport, _, _, _, _, _, _ = reimporter.process_scan(scan) + # Verify reimport succeeds without ValidationError + self.assertEqual(test.id, test_after_reimport.id) + # Verify test_type remains unchanged + test.refresh_from_db() + self.assertEqual(original_test_type_name, test.test_type.name) + + def test_reimport_generic_with_different_test_type(self): + """Test Case 2: Reimport with different test_type (should fail with ValidationError)""" + generic_test_type_1 = get_unit_tests_scans_path("generic") / "generic_test_type_1.json" + generic_test_type_2 = get_unit_tests_scans_path("generic") / "generic_test_type_2.json" + with generic_test_type_1.open(encoding="utf-8") as scan: + scan_type = "Generic Findings Import" + user, _ = User.objects.get_or_create(username="admin") + product_type, _ = Product_Type.objects.get_or_create(name="test_generic_mismatch") + product, _ = Product.objects.get_or_create( + name="TestGenericMismatch", + prod_type=product_type, + ) + engagement, _ = Engagement.objects.get_or_create( + name="Test Generic Mismatch Engagement", + product=product, + target_start=timezone.now(), + target_end=timezone.now(), + ) + environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "engagement": engagement, + "close_old_findings": False, + } + # Initial import with Tool1 + importer = DefaultImporter(**import_options) + test, _, _, _, _, _, _ = importer.process_scan(scan) + original_test_type_name = test.test_type.name + self.assertEqual("Tool1 Scan (Generic Findings Import)", original_test_type_name) + original_finding_count = test.finding_set.count() + + # Attempt to reimport with Tool2 (different test_type) + reimport_options = { + "test": test, + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "close_old_findings": False, + } + reimporter = DefaultReImporter(**reimport_options) + # Reset file pointer and use different file + with generic_test_type_2.open(encoding="utf-8") as scan2: + # Verify ValidationError is raised with appropriate message + with self.assertRaises(ValidationError) as context: + reimporter.process_scan(scan2) + error_message = str(context.exception) + 
self.assertIn("Test type mismatch", error_message) + self.assertIn("Tool1 Scan (Generic Findings Import)", error_message) + self.assertIn("Tool2 Scan (Generic Findings Import)", error_message) + self.assertIn(str(test.id), error_message) + + # Verify no findings are processed/updated + test.refresh_from_db() + self.assertEqual(original_finding_count, test.finding_set.count()) + # Verify test_type remains unchanged + self.assertEqual(original_test_type_name, test.test_type.name) + + def test_reimport_generic_type_equals_scan_type(self): + """Test reimport when type field equals scan_type (should succeed)""" + generic_no_type = get_unit_tests_scans_path("generic") / "generic_no_type.json" + generic_test_type_equals_scan_type = get_unit_tests_scans_path("generic") / "generic_test_type_equals_scan_type.json" + with generic_no_type.open(encoding="utf-8") as scan: + scan_type = "Generic Findings Import" + user, _ = User.objects.get_or_create(username="admin") + product_type, _ = Product_Type.objects.get_or_create(name="test_generic_type_equals_scan_type") + product, _ = Product.objects.get_or_create( + name="TestGenericTypeEqualsScanType", + prod_type=product_type, + ) + engagement, _ = Engagement.objects.get_or_create( + name="Test Generic Type Equals Scan Type Engagement", + product=product, + target_start=timezone.now(), + target_end=timezone.now(), + ) + environment, _ = Development_Environment.objects.get_or_create(name="Development") + import_options = { + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "engagement": engagement, + "close_old_findings": False, + } + # Initial import without type field + importer = DefaultImporter(**import_options) + test, _, _, _, _, _, _ = importer.process_scan(scan) + original_test_type_name = test.test_type.name + # Should create test_type as just scan_type (no type field) + self.assertEqual("Generic Findings Import", original_test_type_name) + + # Reimport with type field equal to scan_type + reimport_options = { + "test": test, + "user": user, + "lead": user, + "scan_date": None, + "environment": environment, + "minimum_severity": "Info", + "active": True, + "verified": True, + "scan_type": scan_type, + "close_old_findings": False, + } + reimporter = DefaultReImporter(**reimport_options) + # Use file with type field equal to scan_type + with generic_test_type_equals_scan_type.open(encoding="utf-8") as scan2: + # Should succeed without ValidationError + test_after_reimport, _, len_new_findings, _, _, _, _ = reimporter.process_scan(scan2) + # Verify reimport succeeds + self.assertEqual(test.id, test_after_reimport.id) + # Verify test_type remains unchanged (should still be "Generic Findings Import") + test.refresh_from_db() + self.assertEqual("Generic Findings Import", test.test_type.name) + # Verify findings were processed + self.assertGreater(len_new_findings, 0) + class FlexibleImportTestAPI(DojoAPITestCase): def __init__(self, *args, **kwargs): From ae696cb3cd4f07d3ea1898a9e9cf86269c7db98c Mon Sep 17 00:00:00 2001 From: valentijnscholten Date: Mon, 29 Dec 2025 16:57:49 +0100 Subject: [PATCH 11/13] Fix JIRA form processing logic to not skip pushing new findings when finding_jira_sync is enabled (#13983) * Fix JIRA form processing logic * ruff --- dojo/finding/views.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 76aec7ed405..4e7100014af 100644 
--- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -968,8 +968,9 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic logger.debug("jform.jira_issue: %s", context["jform"].cleaned_data.get("jira_issue")) logger.debug(JFORM_PUSH_TO_JIRA_MESSAGE, context["jform"].cleaned_data.get("push_to_jira")) # can't use helper as when push_all_jira_issues is True, the checkbox gets disabled and is always false + push_to_jira_checkbox = context["jform"].cleaned_data.get("push_to_jira") push_all_jira_issues = jira_helper.is_push_all_issues(finding) - push_to_jira = push_all_jira_issues or context["jform"].cleaned_data.get("push_to_jira") + push_to_jira = push_all_jira_issues or push_to_jira_checkbox or jira_helper.is_keep_in_sync_with_jira(finding) logger.debug("push_to_jira: %s", push_to_jira) logger.debug("push_all_jira_issues: %s", push_all_jira_issues) logger.debug("has_jira_group_issue: %s", finding.has_jira_group_issue) @@ -996,12 +997,6 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic jira_helper.finding_link_jira(request, finding, new_jira_issue_key) jira_message = "Linked a JIRA issue successfully." # any existing finding should be updated - jira_instance = jira_helper.get_jira_instance(finding) - push_to_jira = ( - push_to_jira - and not (push_to_jira and finding.finding_group) - and (finding.has_jira_issue or (jira_instance and jira_instance.finding_jira_sync)) - ) # Determine if a message should be added if jira_message: messages.add_message( From 681114440a0ca3de3c5d7223cde1111bf0b041d4 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 29 Dec 2025 16:11:00 +0000 Subject: [PATCH 12/13] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 8 ++++---- helm/defectdojo/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/components/package.json b/components/package.json index d9500b421b6..0057e739952 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.54.0-dev", + "version": "2.53.5", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 894a4f111d5..f31e0202149 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.53.4" +__version__ = "2.53.5" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 119538cc717..68c97d8282c 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.54.0-dev" +appVersion: "2.53.5" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.9.5-dev +version: 1.9.5 icon: https://defectdojo.com/hubfs/DefectDojo_favicon.png maintainers: - name: madchap @@ -33,5 +33,5 @@ dependencies: # - kind: security # description: Critical bug annotations: - artifacthub.io/prerelease: "true" - artifacthub.io/changes: "" + artifacthub.io/prerelease: "false" + artifacthub.io/changes: "- kind: changed\n description: Bump DefectDojo to 2.53.5\n" diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md index e749100dd98..be21c4353a6 100644 --- a/helm/defectdojo/README.md +++ b/helm/defectdojo/README.md @@ -511,7 +511,7 @@ The HELM schema will be generated for you. # General information about chart values -![Version: 1.9.5-dev](https://img.shields.io/badge/Version-1.9.5--dev-informational?style=flat-square) ![AppVersion: 2.54.0-dev](https://img.shields.io/badge/AppVersion-2.54.0--dev-informational?style=flat-square) +![Version: 1.9.5](https://img.shields.io/badge/Version-1.9.5-informational?style=flat-square) ![AppVersion: 2.53.5](https://img.shields.io/badge/AppVersion-2.53.5-informational?style=flat-square) A Helm chart for Kubernetes to install DefectDojo From d799696d9f4754d81e76f560437d04192bb2dd4b Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 29 Dec 2025 17:13:00 +0000 Subject: [PATCH 13/13] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 8 ++++---- helm/defectdojo/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/components/package.json b/components/package.json index 0057e739952..d9500b421b6 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.53.5", + "version": "2.54.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index f31e0202149..7337d10b9c1 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.53.5" +__version__ = "2.54.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 68c97d8282c..ff1d2b213f8 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.53.5" +appVersion: "2.54.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.9.5 +version: 1.9.6-dev icon: https://defectdojo.com/hubfs/DefectDojo_favicon.png maintainers: - name: madchap @@ -33,5 +33,5 @@ dependencies: # - kind: security # description: Critical bug annotations: - artifacthub.io/prerelease: "false" - artifacthub.io/changes: "- kind: changed\n description: Bump DefectDojo to 2.53.5\n" + artifacthub.io/prerelease: "true" + artifacthub.io/changes: "" diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md index be21c4353a6..31bc3123ec8 100644 --- a/helm/defectdojo/README.md +++ b/helm/defectdojo/README.md @@ -511,7 +511,7 @@ The HELM schema will be generated for you. # General information about chart values -![Version: 1.9.5](https://img.shields.io/badge/Version-1.9.5-informational?style=flat-square) ![AppVersion: 2.53.5](https://img.shields.io/badge/AppVersion-2.53.5-informational?style=flat-square) +![Version: 1.9.6-dev](https://img.shields.io/badge/Version-1.9.6--dev-informational?style=flat-square) ![AppVersion: 2.54.0-dev](https://img.shields.io/badge/AppVersion-2.54.0--dev-informational?style=flat-square) A Helm chart for Kubernetes to install DefectDojo