diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index caa4cef95df..c18fd647608 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -4,6 +4,7 @@
 from datetime import datetime
 from pathlib import Path
 
+import pghistory
 import tagulous
 from crum import get_current_user
 from dateutil.relativedelta import relativedelta
@@ -2530,7 +2531,17 @@ def perform_create(self, serializer):
         if jira_project := (jira_helper.get_jira_project(jira_driver) if jira_driver else None):
             push_to_jira = push_to_jira or jira_project.push_all_issues
 
+        # Add pghistory context for audit trail (adds to existing middleware context).
+        # The /api/vue/ path prefix identifies the Pro UI.
+        source = "import_vue" if "/api/vue/" in self.request.path else "import_api"
+        pghistory.context(
+            source=source,
+            scan_type=serializer.validated_data.get("scan_type"),
+        )
         serializer.save(push_to_jira=push_to_jira)
+        # Add test_id to the pghistory context now that the test is created
+        if test_id := serializer.data.get("test"):
+            pghistory.context(test_id=test_id)
 
     def get_queryset(self):
         return get_authorized_tests(Permissions.Import_Scan_Result)
@@ -2678,7 +2689,22 @@ def perform_create(self, serializer):
         if jira_project := (jira_helper.get_jira_project(jira_driver) if jira_driver else None):
             push_to_jira = push_to_jira or jira_project.push_all_issues
         logger.debug("push_to_jira: %s", push_to_jira)
+        # Add pghistory context for audit trail (adds to existing middleware context).
+        # For reimport, the test may already exist or be created during save.
+        test_id = test.id if test else serializer.validated_data.get("test", {})
+        if hasattr(test_id, "id"):
+            test_id = test_id.id
+        # The /api/vue/ path prefix identifies the Pro UI.
+        source = "reimport_vue" if "/api/vue/" in self.request.path else "reimport_api"
+        pghistory.context(
+            source=source,
+            test_id=test_id if isinstance(test_id, int) else None,
+            scan_type=serializer.validated_data.get("scan_type"),
+        )
         serializer.save(push_to_jira=push_to_jira)
+        # Update test_id if it wasn't available before the save
+        if test_id_from_response := serializer.data.get("test"):
+            pghistory.context(test_id=test_id_from_response)
 
 
 # Authorization: configuration
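Both hunks lean on pghistory's merge behavior: calling `pghistory.context(...)` as a plain function adds keys to whatever context is already active, here the one opened by the middleware. A minimal sketch of how the metadata layers up (values are hypothetical):

```python
import pghistory

with pghistory.context(user=42, url="/api/v2/import-scan/"):  # middleware
    pghistory.context(source="import_api", scan_type="Trivy Scan")  # view
    pghistory.context(test_id=17)  # after serializer.save()
    # All tracked writes in this request share one pgh_context row whose
    # metadata now contains user, url, source, scan_type, and test_id.
```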
+ """ + + def __call__(self, *args, **kwargs): + # Import here to avoid circular imports during Celery startup + from dojo.pghistory_utils import get_pghistory_context_manager # noqa: PLC0415 + + # Extract context from kwargs (won't be passed to task function) + pgh_context = kwargs.pop("_pgh_context", None) + + with get_pghistory_context_manager(pgh_context): + return super().__call__(*args, **kwargs) + + +app = Celery("dojo", task_cls=PgHistoryTask) # Using a string here means the worker will not have to # pickle the object when using Windows. diff --git a/dojo/decorators.py b/dojo/decorators.py index bba9efe234c..91f6934b719 100644 --- a/dojo/decorators.py +++ b/dojo/decorators.py @@ -83,10 +83,17 @@ def dojo_async_task(func=None, *, signature=False): def decorator(func): @wraps(func) def __wrapper__(*args, **kwargs): + from dojo.pghistory_utils import get_serializable_pghistory_context # noqa: PLC0415 circular import from dojo.utils import get_current_user # noqa: PLC0415 circular import + user = get_current_user() kwargs["async_user"] = user + # Capture pghistory context to pass to Celery worker + # The PgHistoryTask base class will apply this context in the worker + if pgh_context := get_serializable_pghistory_context(): + kwargs["_pgh_context"] = pgh_context + dojo_async_task_counter.incr( func.__name__, args=args, diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index a726f514421..9ecdabfdd9b 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -10,6 +10,7 @@ from tempfile import NamedTemporaryFile from time import strftime +import pghistory from django.conf import settings from django.contrib import messages from django.contrib.admin.utils import NestedObjects @@ -1138,10 +1139,18 @@ def post( if form_error := self.process_form(request, context.get("form"), context): add_error_message_to_response(form_error) return self.failure_redirect(request, context) + # Add pghistory context for audit trail (adds to existing middleware context) + pghistory.context( + source="import", + scan_type=context.get("scan_type"), + ) # Kick off the import process if import_error := self.import_findings(context): add_error_message_to_response(import_error) return self.failure_redirect(request, context) + # Add test_id to pghistory context now that test is created + if test := context.get("test"): + pghistory.context(test_id=test.id) # Process the credential form if form_error := self.process_credentials_form(request, context.get("cred_form"), context): add_error_message_to_response(form_error) diff --git a/dojo/finding/views.py b/dojo/finding/views.py index c6caa802027..0d05d24fde1 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -9,6 +9,7 @@ from itertools import chain from pathlib import Path +import pghistory from django.conf import settings from django.contrib import messages from django.core import serializers @@ -2557,6 +2558,11 @@ def finding_bulk_update_all(request, pid=None): logger.debug("bulk 20") finding_to_update = request.POST.getlist("finding_to_update") + # Add pghistory context for audit trail (adds to existing middleware context) + pghistory.context( + source="bulk_edit", + finding_count=len(finding_to_update), + ) finds = Finding.objects.filter(id__in=finding_to_update).order_by("id") total_find_count = finds.count() prods = set(find.test.engagement.product for find in finds) # noqa: C401 diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py index d30681bef27..ebcc9616d4a 100644 --- a/dojo/jira_link/views.py +++ 
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index a726f514421..9ecdabfdd9b 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -10,6 +10,7 @@
 from tempfile import NamedTemporaryFile
 from time import strftime
 
+import pghistory
 from django.conf import settings
 from django.contrib import messages
 from django.contrib.admin.utils import NestedObjects
@@ -1138,10 +1139,18 @@ def post(
         if form_error := self.process_form(request, context.get("form"), context):
             add_error_message_to_response(form_error)
             return self.failure_redirect(request, context)
+        # Add pghistory context for audit trail (adds to existing middleware context)
+        pghistory.context(
+            source="import",
+            scan_type=context.get("scan_type"),
+        )
         # Kick off the import process
         if import_error := self.import_findings(context):
             add_error_message_to_response(import_error)
             return self.failure_redirect(request, context)
+        # Add test_id to the pghistory context now that the test is created
+        if test := context.get("test"):
+            pghistory.context(test_id=test.id)
         # Process the credential form
         if form_error := self.process_credentials_form(request, context.get("cred_form"), context):
             add_error_message_to_response(form_error)
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index c6caa802027..0d05d24fde1 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -9,6 +9,7 @@
 from itertools import chain
 from pathlib import Path
 
+import pghistory
 from django.conf import settings
 from django.contrib import messages
 from django.core import serializers
@@ -2557,6 +2558,11 @@ def finding_bulk_update_all(request, pid=None):
         logger.debug("bulk 20")
 
     finding_to_update = request.POST.getlist("finding_to_update")
+    # Add pghistory context for audit trail (adds to existing middleware context)
+    pghistory.context(
+        source="bulk_edit",
+        finding_count=len(finding_to_update),
+    )
     finds = Finding.objects.filter(id__in=finding_to_update).order_by("id")
     total_find_count = finds.count()
     prods = set(find.test.engagement.product for find in finds)  # noqa: C401
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index d30681bef27..ebcc9616d4a 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -5,6 +5,7 @@
 import re
 
 # Third party imports
+import pghistory
 from django.contrib import messages
 from django.contrib.admin.utils import NestedObjects
 from django.core.exceptions import PermissionDenied
@@ -85,85 +86,98 @@ def webhook(request, secret=None):
     if request.content_type != "application/json":
         return webhook_responser_handler("debug", "only application/json supported")
     # Time to process the request
+    # Parse the JSON first to get the webhook event type for context
     try:
         parsed = json.loads(request.body.decode("utf-8"))
-        # Check if the events supplied are supported
-        if parsed.get("webhookEvent") not in {"comment_created", "jira:issue_updated"}:
-            return webhook_responser_handler("info", f"Unrecognized JIRA webhook event received: {parsed.get('webhookEvent')}")
-
-        if parsed.get("webhookEvent") == "jira:issue_updated":
-            # xml examples at the end of file
-            jid = parsed["issue"]["id"]
-            # This may raise a 404, but it will be handled in the exception response
-            try:
-                jissue = JIRA_Issue.objects.get(jira_id=jid)
-            except JIRA_Instance.DoesNotExist:
-                return webhook_responser_handler("info", f"JIRA issue {jid} is not linked to a DefectDojo Finding")
-            findings = None
-            # Determine what type of object we will be working with
-            if jissue.finding:
-                logger.debug(f"Received issue update for {jissue.jira_key} for finding {jissue.finding.id}")
-                findings = [jissue.finding]
-            elif jissue.finding_group:
-                logger.debug(f"Received issue update for {jissue.jira_key} for finding group {jissue.finding_group}")
-                findings = jissue.finding_group.findings.all()
-            elif jissue.engagement:
-                return webhook_responser_handler("debug", "Update for engagement ignored")
-            else:
-                return webhook_responser_handler("info", f"Received issue update for {jissue.jira_key} for unknown object")
-            # Process the assignee if present
-            assignee = parsed["issue"]["fields"].get("assignee")
-            assignee_name = "Jira User"
-            if assignee is not None:
-                # First look for the 'name' field. If not present, try 'displayName'. Else put None
-                assignee_name = assignee.get("name", assignee.get("displayName"))
-
-            # "resolution":{
-            #     "self":"http://www.testjira.com/rest/api/2/resolution/11",
-            #     "id":"11",
-            #     "description":"Cancelled by the customer.",
-            #     "name":"Cancelled"
-            # },
-
-            # or
-            # "resolution": null
-
-            # or
-            # "resolution": "None"
-
-            resolution = parsed["issue"]["fields"]["resolution"]
-            resolution = resolution if resolution and resolution != "None" else None
-            resolution_id = resolution["id"] if resolution else None
-            resolution_name = resolution["name"] if resolution else None
-            jira_now = parse_datetime(parsed["issue"]["fields"]["updated"])
-
-            if findings:
-                for finding in findings:
-                    jira_helper.process_resolution_from_jira(finding, resolution_id, resolution_name, assignee_name, jira_now, jissue, finding_group=jissue.finding_group)
-            # Check for any comment that could have come along with the resolution
-            if (error_response := check_for_and_create_comment(parsed)) is not None:
-                return error_response
-
-        if parsed.get("webhookEvent") == "comment_created":
-            if (error_response := check_for_and_create_comment(parsed)) is not None:
-                return error_response
     except Exception as e:
-        # Check if the issue is originally a 404
-        if isinstance(e, Http404):
-            return webhook_responser_handler("debug", str(e))
-        # Try to get a little more information on the exact exception
+        return webhook_responser_handler("debug", f"Failed to parse JSON: {e}")
+
+    # Check if the events supplied are supported
+    if parsed.get("webhookEvent") not in {"comment_created", "jira:issue_updated"}:
+        return webhook_responser_handler("info", f"Unrecognized JIRA webhook event received: {parsed.get('webhookEvent')}")
+
+    # Wrap processing with pghistory context for audit trail.
+    # JIRA webhooks don't have a user session, so we create a new context.
+    with pghistory.context(
+        source="jira_webhook",
+        jira_event=parsed.get("webhookEvent"),
+    ):
         try:
-            message = (
-                f"Original Exception: {e}\n"
-                f"jira webhook body parsed:\n{json.dumps(parsed, indent=4)}"
-            )
-        except Exception:
-            message = (
-                f"Original Exception: {e}\n"
-                f"jira webhook body :\n{request.body.decode('utf-8')}"
-            )
-        return webhook_responser_handler("debug", message)
+            if parsed.get("webhookEvent") == "jira:issue_updated":
+                # xml examples at the end of file
+                jid = parsed["issue"]["id"]
+                # This may raise a 404, but it will be handled in the exception response
+                try:
+                    jissue = JIRA_Issue.objects.get(jira_id=jid)
+                except JIRA_Issue.DoesNotExist:
+                    return webhook_responser_handler("info", f"JIRA issue {jid} is not linked to a DefectDojo Finding")
+                # Add jira_key to the context now that we have it
+                pghistory.context(jira_key=jissue.jira_key)
+                findings = None
+                # Determine what type of object we will be working with
+                if jissue.finding:
+                    logger.debug(f"Received issue update for {jissue.jira_key} for finding {jissue.finding.id}")
+                    findings = [jissue.finding]
+                elif jissue.finding_group:
+                    logger.debug(f"Received issue update for {jissue.jira_key} for finding group {jissue.finding_group}")
+                    findings = jissue.finding_group.findings.all()
+                elif jissue.engagement:
+                    return webhook_responser_handler("debug", "Update for engagement ignored")
+                else:
+                    return webhook_responser_handler("info", f"Received issue update for {jissue.jira_key} for unknown object")
+                # Process the assignee if present
+                assignee = parsed["issue"]["fields"].get("assignee")
+                assignee_name = "Jira User"
+                if assignee is not None:
+                    # First look for the 'name' field. If not present, try 'displayName'. Else put None
+                    assignee_name = assignee.get("name", assignee.get("displayName"))
+
+                # "resolution":{
+                #     "self":"http://www.testjira.com/rest/api/2/resolution/11",
+                #     "id":"11",
+                #     "description":"Cancelled by the customer.",
+                #     "name":"Cancelled"
+                # },
+
+                # or
+                # "resolution": null
+
+                # or
+                # "resolution": "None"
+
+                resolution = parsed["issue"]["fields"]["resolution"]
+                resolution = resolution if resolution and resolution != "None" else None
+                resolution_id = resolution["id"] if resolution else None
+                resolution_name = resolution["name"] if resolution else None
+                jira_now = parse_datetime(parsed["issue"]["fields"]["updated"])
+
+                if findings:
+                    for finding in findings:
+                        jira_helper.process_resolution_from_jira(finding, resolution_id, resolution_name, assignee_name, jira_now, jissue, finding_group=jissue.finding_group)
+                # Check for any comment that could have come along with the resolution
+                if (error_response := check_for_and_create_comment(parsed)) is not None:
+                    return error_response
+
+            if parsed.get("webhookEvent") == "comment_created":
+                if (error_response := check_for_and_create_comment(parsed)) is not None:
+                    return error_response
+
+        except Exception as e:
+            # Check if the issue is originally a 404
+            if isinstance(e, Http404):
+                return webhook_responser_handler("debug", str(e))
+            # Try to get a little more information on the exact exception
+            try:
+                message = (
+                    f"Original Exception: {e}\n"
+                    f"jira webhook body parsed:\n{json.dumps(parsed, indent=4)}"
+                )
+            except Exception:
+                message = (
+                    f"Original Exception: {e}\n"
+                    f"jira webhook body :\n{request.body.decode('utf-8')}"
+                )
+            return webhook_responser_handler("debug", message)
 
     return webhook_responser_handler("No logging here", "Success!")
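Unlike the view changes above, the webhook has no authenticated request context to extend, so the patch opens its own context and merges additional keys as they become known. A condensed sketch of that pattern (the jira_key value is hypothetical):

```python
import pghistory

with pghistory.context(source="jira_webhook", jira_event="jira:issue_updated"):
    # ... resolve the JIRA_Issue from the payload ...
    pghistory.context(jira_key="EXAMPLE-123")  # hypothetical key, merged in
    # Tracked writes below record source, jira_event, and jira_key together.
```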
diff --git a/dojo/management/commands/dedupe.py b/dojo/management/commands/dedupe.py
index a8e0a538cfe..3eddccdc45d 100644
--- a/dojo/management/commands/dedupe.py
+++ b/dojo/management/commands/dedupe.py
@@ -1,5 +1,6 @@
 import logging
 
+import pghistory
 from django.conf import settings
 from django.core.management.base import BaseCommand
 from django.db.models import Prefetch
@@ -64,6 +65,21 @@ def handle(self, *args, **options):
         dedupe_sync = options["dedupe_sync"]
         dedupe_batch_mode = options.get("dedupe_batch_mode", True)  # Default to True (batch mode enabled)
 
+        # Wrap with pghistory context for audit trail
+        with pghistory.context(
+            source="dedupe_command",
+            dedupe_sync=dedupe_sync,
+        ):
+            self._run_dedupe(
+                restrict_to_parsers=restrict_to_parsers,
+                hash_code_only=hash_code_only,
+                dedupe_only=dedupe_only,
+                dedupe_sync=dedupe_sync,
+                dedupe_batch_mode=dedupe_batch_mode,
+            )
+
+    def _run_dedupe(self, *, restrict_to_parsers, hash_code_only, dedupe_only, dedupe_sync, dedupe_batch_mode):
+        """Internal method to run the dedupe logic within pghistory context."""
         if restrict_to_parsers is not None:
             findings = Finding.objects.filter(test__test_type__name__in=restrict_to_parsers).exclude(duplicate=True)
             logger.info("######## Will process only parsers %s and %d findings ########", *restrict_to_parsers, findings.count())
diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py
index c8ce694015d..a6a49e9256b 100644
--- a/dojo/management/commands/jira_status_reconciliation.py
+++ b/dojo/management/commands/jira_status_reconciliation.py
@@ -1,5 +1,6 @@
 import logging
 
+import pghistory
 from dateutil.relativedelta import relativedelta
 from django.conf import settings
 from django.core.management.base import BaseCommand
@@ -216,10 +217,9 @@ def add_arguments(self, parser):
         parser.add_argument("--dryrun", action="store_true", help="Only print actions to be performed, but make no modifications.")
 
     def handle(self, *args, **options):
-        # mode = options['mode']
-        # product = options['product']
-        # engagement = options['engagement']
-        # daysback = options['daysback']
-        # dryrun = options['dryrun']
-
-        return jira_status_reconciliation(*args, **options)
+        # Wrap with pghistory context for audit trail
+        with pghistory.context(
+            source="jira_reconciliation",
+            mode=options.get("mode", "reconcile"),
+        ):
+            return jira_status_reconciliation(*args, **options)
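Both management commands follow the same shape: wrap the whole run in one context so every tracked write is attributable to the command invocation. A toy command illustrating the pattern (command name and options are hypothetical):

```python
import pghistory
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    def handle(self, *args, **options):
        with pghistory.context(source="my_command", dry_run=options.get("dryrun", False)):
            # Every tracked-model write in this block shares one
            # pgh_context row labeled with the command's metadata.
            ...
```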
diff --git a/dojo/pghistory_models.py b/dojo/pghistory_models.py
index 936bd939c60..0902c775ec1 100644
--- a/dojo/pghistory_models.py
+++ b/dojo/pghistory_models.py
@@ -22,10 +22,15 @@ class DojoEvents(pghistory.models.Events):
     as regular model fields instead of accessing nested JSON data.
     """
 
+    # Middleware-provided fields
     user = pghistory.ProxyField("pgh_context__user", models.IntegerField(null=True))
     url = pghistory.ProxyField("pgh_context__url", models.TextField(null=True))
     remote_addr = pghistory.ProxyField("pgh_context__remote_addr", models.CharField(max_length=45, null=True))
 
+    # Process identification fields
+    source = pghistory.ProxyField("pgh_context__source", models.CharField(max_length=50, null=True))
+    scan_type = pghistory.ProxyField("pgh_context__scan_type", models.CharField(max_length=100, null=True))
+
     class Meta:
         proxy = True
         app_label = "dojo"
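With source and scan_type exposed as proxy fields, audit queries can filter on process metadata without digging into the context JSON. A sketch of the kind of query this enables (filter values are hypothetical):

```python
from dojo.pghistory_models import DojoEvents

# All events written by API-driven imports of a given scan type:
events = DojoEvents.objects.filter(source="import_api", scan_type="Trivy Scan")
for event in events[:10]:
    print(event.pgh_created_at, event.pgh_label, event.user, event.url)
```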
+ + """ + self.context_data = context_data + self._pre_execute_hook = None + self._owns_context = False + + def __enter__(self): + if not self.context_data: + return None + + from django.db import connection # noqa: PLC0415 + + context_id = uuid.UUID(self.context_data["id"]) + metadata = self.context_data["metadata"] + + # Only create a new context if one doesn't already exist + if not hasattr(pghistory_runtime._tracker, "value"): + self._pre_execute_hook = connection.execute_wrapper( + pghistory_runtime._inject_history_context, + ) + self._pre_execute_hook.__enter__() + pghistory_runtime._tracker.value = pghistory_runtime.Context( + id=context_id, + metadata=metadata, + ) + self._owns_context = True + else: + # Context already exists, just merge metadata + pghistory_runtime._tracker.value.metadata.update(metadata) + + return pghistory_runtime._tracker.value + + def __exit__(self, *exc): + if self._owns_context and self._pre_execute_hook: + delattr(pghistory_runtime._tracker, "value") + self._pre_execute_hook.__exit__(*exc) + + +def get_pghistory_context_manager(context_data): + """ + Return appropriate context manager for the given context data. + + Returns PgHistoryContextFromTask if context_data is provided, + otherwise returns a no-op nullcontext. + """ + if context_data: + return PgHistoryContextFromTask(context_data) + return nullcontext() diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py index 1aa09e82669..1ce170deabb 100644 --- a/dojo/risk_acceptance/helper.py +++ b/dojo/risk_acceptance/helper.py @@ -1,6 +1,7 @@ import logging from contextlib import suppress +import pghistory from dateutil.relativedelta import relativedelta from django.core.exceptions import PermissionDenied from django.urls import reverse @@ -175,39 +176,42 @@ def expiration_handler(*args, **kwargs): If configured also sends a JIRA comment in both case to each jira issue. This is per finding. 
""" - try: - system_settings = System_Settings.objects.get() - except System_Settings.DoesNotExist: - logger.warning("Unable to get system_settings, skipping risk acceptance expiration job") + # Wrap with pghistory context for audit trail + with pghistory.context(source="risk_acceptance_expiration"): + try: + system_settings = System_Settings.objects.get() + except System_Settings.DoesNotExist: + logger.warning("Unable to get system_settings, skipping risk acceptance expiration job") + return - risk_acceptances = get_expired_risk_acceptances_to_handle() + risk_acceptances = get_expired_risk_acceptances_to_handle() - logger.info("expiring %i risk acceptances that are past expiration date", len(risk_acceptances)) - for risk_acceptance in risk_acceptances: - expire_now(risk_acceptance) - # notification created by expire_now code + logger.info("expiring %i risk acceptances that are past expiration date", len(risk_acceptances)) + for risk_acceptance in risk_acceptances: + expire_now(risk_acceptance) + # notification created by expire_now code - heads_up_days = system_settings.risk_acceptance_notify_before_expiration - if heads_up_days > 0: - risk_acceptances = get_almost_expired_risk_acceptances_to_handle(heads_up_days) + heads_up_days = system_settings.risk_acceptance_notify_before_expiration + if heads_up_days > 0: + risk_acceptances = get_almost_expired_risk_acceptances_to_handle(heads_up_days) - logger.info("notifying for %i risk acceptances that are expiring within %i days", len(risk_acceptances), heads_up_days) - for risk_acceptance in risk_acceptances: - logger.debug("notifying for risk acceptance %i:%s with %i findings", risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) + logger.info("notifying for %i risk acceptances that are expiring within %i days", len(risk_acceptances), heads_up_days) + for risk_acceptance in risk_acceptances: + logger.debug("notifying for risk acceptance %i:%s with %i findings", risk_acceptance.id, risk_acceptance, len(risk_acceptance.accepted_findings.all())) - notification_title = "Risk acceptance with " + str(len(risk_acceptance.accepted_findings.all())) + " accepted findings will expire on " + \ - timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y") + " for " + \ - str(risk_acceptance.engagement.product) + ": " + str(risk_acceptance.engagement.name) + notification_title = "Risk acceptance with " + str(len(risk_acceptance.accepted_findings.all())) + " accepted findings will expire on " + \ + timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y") + " for " + \ + str(risk_acceptance.engagement.product) + ": " + str(risk_acceptance.engagement.name) - create_notification(event="risk_acceptance_expiration", title=notification_title, risk_acceptance=risk_acceptance, - accepted_findings=risk_acceptance.accepted_findings.all(), engagement=risk_acceptance.engagement, - product=risk_acceptance.engagement.product, - url=reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))) + create_notification(event="risk_acceptance_expiration", title=notification_title, risk_acceptance=risk_acceptance, + accepted_findings=risk_acceptance.accepted_findings.all(), engagement=risk_acceptance.engagement, + product=risk_acceptance.engagement.product, + url=reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))) - post_jira_comments(risk_acceptance, risk_acceptance.accepted_findings.all(), expiration_warning_message_creator, heads_up_days) + 
diff --git a/dojo/tasks.py b/dojo/tasks.py
index f74ffbd6389..d02040fa5b3 100644
--- a/dojo/tasks.py
+++ b/dojo/tasks.py
@@ -1,6 +1,7 @@
 import logging
 from datetime import timedelta
 
+import pghistory
 from celery.utils.log import get_task_logger
 from django.apps import apps
 from django.conf import settings
@@ -99,6 +100,13 @@ def flush_auditlog(*args, **kwargs):
 
 @app.task(bind=True)
 def async_dupe_delete(*args, **kwargs):
+    # Wrap with pghistory context for audit trail
+    with pghistory.context(source="dupe_delete_task"):
+        _async_dupe_delete_impl()
+
+
+def _async_dupe_delete_impl():
+    """Internal implementation of async_dupe_delete within pghistory context."""
     try:
         system_settings = System_Settings.objects.get()
         enabled = system_settings.delete_duplicates
@@ -182,12 +190,19 @@ def async_sla_compute_and_notify_task(*args, **kwargs):
 
 @app.task
 def jira_status_reconciliation_task(*args, **kwargs):
-    return jira_status_reconciliation(*args, **kwargs)
+    # Wrap with pghistory context for audit trail
+    with pghistory.context(
+        source="jira_reconciliation",
+        mode=kwargs.get("mode", "reconcile"),
+    ):
+        return jira_status_reconciliation(*args, **kwargs)
 
 
 @app.task
 def fix_loop_duplicates_task(*args, **kwargs):
-    return fix_loop_duplicates()
+    # Wrap with pghistory context for audit trail
+    with pghistory.context(source="fix_loop_duplicates"):
+        return fix_loop_duplicates()
 
 
 @app.task
diff --git a/dojo/test/views.py b/dojo/test/views.py
index d2bf11092e7..4e6c6353be6 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -6,6 +6,7 @@
 from datetime import datetime, timedelta
 from functools import reduce
 
+import pghistory
 from django.contrib import messages
 from django.contrib.admin.utils import NestedObjects
 from django.core.exceptions import ValidationError
@@ -1075,6 +1076,12 @@ def post(
         if form_error := self.process_form(request, context.get("form"), context):
             add_error_message_to_response(form_error)
             return self.failure_redirect(request, context)
+        # Add pghistory context for audit trail (adds to existing middleware context)
+        pghistory.context(
+            source="reimport",
+            test_id=context.get("test").id,
+            scan_type=context.get("scan_type"),
+        )
         # Kick off the import process
         if import_error := self.reimport_findings(context):
             add_error_message_to_response(import_error)
diff --git a/dojo/tools/tool_issue_updater.py b/dojo/tools/tool_issue_updater.py
index add5beb54a5..fd203edebea 100644
--- a/dojo/tools/tool_issue_updater.py
+++ b/dojo/tools/tool_issue_updater.py
@@ -1,3 +1,5 @@
+import pghistory
+
 from dojo.celery import app
 from dojo.decorators import dojo_async_task, dojo_model_from_id, dojo_model_to_id
 from dojo.tools.api_sonarqube.parser import SCAN_SONARQUBE_API
@@ -30,8 +32,9 @@ def tool_issue_updater(finding, *args, **kwargs):
 
 @dojo_async_task
 @app.task
 def update_findings_from_source_issues(**kwargs):
-    findings = SonarQubeApiUpdaterFromSource().get_findings_to_update()
-
-    for finding in findings:
-        SonarQubeApiUpdaterFromSource().update(finding)
+    # Wrap with pghistory context for audit trail
+    with pghistory.context(source="sonarqube_sync"):
+        findings = SonarQubeApiUpdaterFromSource().get_findings_to_update()
+
+        for finding in findings:
+            SonarQubeApiUpdaterFromSource().update(finding)