From 346cc663ecf828d7c9677bee28d3978ed67f8f52 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Wed, 13 Aug 2025 18:41:26 +0200 Subject: [PATCH 1/2] feat(helm): Improve docs, add schema --- .github/workflows/test-helm-chart.yml | 53 +- helm/defectdojo/README.md.gotmpl | 522 +++++++++++++++ helm/defectdojo/values.schema.json | 893 ++++++++++++++++++++++++++ readme-docs/KUBERNETES.md | 216 ++++++- 4 files changed, 1677 insertions(+), 7 deletions(-) create mode 100644 helm/defectdojo/README.md.gotmpl create mode 100644 helm/defectdojo/values.schema.json diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index bb2cc2e8272..e807102f72e 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -10,7 +10,7 @@ on: jobs: lint: - name: Lint chart + name: Lint chart (version) runs-on: ubuntu-latest steps: - name: Checkout @@ -72,3 +72,54 @@ jobs: # - name: Run chart-testing (install) # run: ct install --config ct.yaml --target-branch ${{ env.ct-branch }} --helm-extra-args '--set createSecret=true --set createRabbitMqSecret=true --set createPostgresqlSecret=true --set timeout=900' # if: env.changed == 'true' + + docs_generation: + name: Update documentation + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Run helm-docs + uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 + with: + fail-on-diff: true + chart-search-root: "helm/defectdojo" + + generate_schema: + name: Update schema + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Generate values schema json + uses: losisin/helm-values-schema-json-action@28b1b33dcd9bd10bb8157627566f3971313a8872 # v2.0.4 + with: + fail-on-diff: true + working-directory: "helm/defectdojo" + useHelmDocs: true + values: 
values.yaml + + lint_format: + name: Lint chart (format) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0 + + - name: Configure Helm repos + run: |- + helm repo add bitnami https://charts.bitnami.com/bitnami + helm dependency list ./helm/defectdojo + helm dependency update ./helm/defectdojo + + - name: Lint + run: |- + helm lint ./helm/defectdojo --strict diff --git a/helm/defectdojo/README.md.gotmpl b/helm/defectdojo/README.md.gotmpl new file mode 100644 index 00000000000..48969816512 --- /dev/null +++ b/helm/defectdojo/README.md.gotmpl @@ -0,0 +1,522 @@ +# DefectDojo on Kubernetes + +DefectDojo Kubernetes utilizes [Helm](https://helm.sh/), a +package manager for Kubernetes. Helm Charts help you define, install, and +upgrade even the most complex Kubernetes application. + +For development purposes, +[minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) +and [Helm](https://helm.sh/) can be installed locally by following +this [guide](https://helm.sh/docs/using_helm/#installing-helm). + +## Supported Kubernetes Versions + +The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and the oldest supported [version from AWS](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). + +## Helm chart + +Starting with version 1.14.0, a helm chart will be pushed onto the `helm-charts` branch during the release process. Don't look for a chart museum, we're leveraging the "raw" capabilities of GitHub at this time. 
+ +To use it, you can add our repo. + +``` +$ helm repo add defectdojo 'https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/helm-charts' + +$ helm repo update +``` + +You should now be able to see the chart. + +``` +$ helm search repo defectdojo +NAME CHART VERSION APP VERSION DESCRIPTION +defectdojo/defectdojo 1.6.153 2.39.0 A Helm chart for Kubernetes to install DefectDojo +``` + +## Kubernetes Local Quickstart + +Requirements: + +1. Helm installed locally +2. Minikube installed locally +3. Latest cloned copy of DefectDojo + +```zsh +git clone https://github.com/DefectDojo/django-DefectDojo +cd django-DefectDojo + +minikube start +minikube addons enable ingress +``` + +Helm >= v3 + +```zsh +helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo update +``` + +Then pull the dependent charts: + +```zsh +helm dependency update ./helm/defectdojo +``` + +Now, install the helm chart into minikube. + +If you have setup an ingress controller: + +```zsh +DJANGO_INGRESS_ENABLED=true +``` + +else: + +```zsh +DJANGO_INGRESS_ENABLED=false +``` + +If you have configured TLS: + +```zsh +DJANGO_INGRESS_ACTIVATE_TLS=true +``` + +else: + +```zsh +DJANGO_INGRESS_ACTIVATE_TLS=false +``` + +Warning: Use the `createSecret*=true` flags only upon first install. For re-installs, see `§Re-install the chart` + +Helm >= v3: + +```zsh +helm install \ + defectdojo \ + ./helm/defectdojo \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true +``` + +It usually takes up to a minute for the services to startup and the +status of the containers can be viewed by starting up `minikube dashboard`. +Note: If the containers are not cached locally the services will start once the +containers have been pulled locally. 
+ +To be able to access DefectDojo, set up an ingress or access the service +directly by running the following command: + +```zsh +kubectl port-forward --namespace=default \ +service/defectdojo-django 8080:80 +``` + +As you set your host value to defectdojo.default.minikube.local, make sure that +it resolves to the localhost IP address, e.g. by adding the following two lines +to /etc/hosts: + +```zsh +::1 defectdojo.default.minikube.local +127.0.0.1 defectdojo.default.minikube.local +``` + +To find out the password, run the following command: + +```zsh +echo "DefectDojo admin password: $(kubectl \ + get secret defectdojo \ + --namespace=default \ + --output jsonpath='{.data.DD_ADMIN_PASSWORD}' \ + | base64 --decode)" +``` + +To access DefectDojo, go to . +Log in with username admin and the password from the previous command. + +### Minikube with locally built containers + +If testing containers locally, then set the imagePullPolicy to Never, +which ensures containers are not pulled from Docker hub. + +Use the same commands as before but add: + +```zsh + --set imagePullPolicy=Never +``` + +### Installing from a private registry + +If you have stored your images in a private registry, you can install defectdojo chart with (helm 3). + +- First create a secret named "defectdojoregistrykey" based on the credentials that can pull from the registry: see https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +- Then install the chart with the same commands as before but adding: + +```zsh + --set repositoryPrefix= \ + --set imagePullSecrets=defectdojoregistrykey +``` + +### Build Images Locally + +```zsh +# Build images +docker build -t defectdojo/defectdojo-django -f Dockerfile.django . +docker build -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . 
+``` + +```zsh +# Build images behind proxy +docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-django -f Dockerfile.django . +docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . +``` + +### Upgrade the chart + +If you want to change the kubernetes configuration or use an updated docker image (evolution of defectDojo code), upgrade the application: + +``` +kubectl delete job defectdojo-initializer +helm upgrade defectdojo ./helm/defectdojo/ \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} +``` + +### Re-install the chart + +In case of an issue or in any other situation where you need to re-install the chart, you can do it and re-use the same secrets. + +**Note: With postgresql you'll keep the same database (more information below)** + +```zsh +# helm 3 +helm uninstall defectdojo +helm install \ + defectdojo \ + ./helm/defectdojo \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} +``` + +## Kubernetes Production + +When running defectdojo in production, make sure that you understand the full setup and always have a backup. + +### Encryption to Kubernetes + +Optionally, for TLS locally, you need to install a TLS certificate into your Kubernetes cluster. +For development purposes, you can create your own certificate authority as +described [here](https://github.com/hendrikhalkow/k8s-docs/blob/master/tls.md). + +```zsh +# https://kubernetes.io/docs/concepts/services-networking/ingress/#tls +# Create a TLS secret called minikube-tls as mentioned above, e.g. 
+K8S_NAMESPACE="default" +TLS_CERT_DOMAIN="${K8S_NAMESPACE}.minikube.local" +kubectl --namespace "${K8S_NAMESPACE}" create secret tls defectdojo-tls \ + --key <(openssl rsa \ + -in "${CA_DIR}/private/${TLS_CERT_DOMAIN}.key.pem" \ + -passin "pass:${TLS_CERT_PASSWORD}") \ + --cert <(cat \ + "${CA_DIR}/certs/${TLS_CERT_DOMAIN}.cert.pem" \ + "${CA_DIR}/chain.pem") +``` + +### Encryption in Kubernetes and End-to-End Encryption + +With the TLS certificate from your Kubernetes cluster all traffic to your cluster is encrypted, but the traffic in your cluster is still unencrypted. + +If you want to encrypt the traffic to the nginx server you can use the option `--set nginx.tls.enabled=true` and `--set nginx.tls.generateCertificate=true` to generate a self-signed certificate and use the https config. The option to add your own pregenerated certificate is generally possible but not implemented in the helm chart yet. + +Be aware that the traffic to the database and celery broker is unencrypted at the moment. + +### Media persistent volume + +By default, DefectDojo helm installation doesn't support persistent storage for storing images (dynamically uploaded by users). By default, it uses emptyDir, which is ephemeral by its nature and doesn't support multiple replicas of django pods, so it should not be used for production. 
+ +To enable persistence of the media storage that supports R/W many, a backend storage that supports it (like S3, NFS, glusterfs, etc.) should be used: + +```bash +mediaPersistentVolume: + enabled: true + # any name + name: media + # could be emptyDir (not for production) or pvc + type: pvc + # there are two options to create pvc 1) when you want the chart to create pvc for you, set django.mediaPersistentVolume.persistentVolumeClaim.create to true and do not specify anything for django.mediaPersistentVolume.PersistentVolumeClaim.name 2) when you want to create pvc outside the chart, pass the pvc name via django.mediaPersistentVolume.PersistentVolumeClaim.name and ensure django.mediaPersistentVolume.PersistentVolumeClaim.create is set to false + persistentVolumeClaim: + create: true + name: + size: 5Gi + accessModes: + - ReadWriteMany + storageClassName: +``` + +In the example above, we want the media content to be preserved to `pvc` as `persistentVolumeClaim` k8s resource and what we are basically doing is enabling the pvc to be created conditionally if the user wants to create it using the chart (in this case the pvc name 'defectdojo-media' will be inherited from template file used to deploy the pvc). By default the volume type is emptyDir which does not require a pvc. But when the type is set to pvc then we need a kubernetes Persistent Volume Claim and this is where the django.mediaPersistentVolume.persistentVolumeClaim.name comes into play. + +The accessMode is set to ReadWriteMany by default to accommodate using more than one replica. Ensure the storage supports ReadWriteMany before setting this option, otherwise set accessMode to ReadWriteOnce. + +NOTE: The PersistentVolume needs to be prepared before the helm installation/update is triggered. 
+ +For more details on how to create a proper PVC, see this [example](https://github.com/DefectDojo/Community-Contribs/tree/master/persistent-media) + +### Installation + +**Important:** If you choose to create the secret on your own, you will need to create a secret named `defectdojo` containing the following fields: + +- DD_ADMIN_PASSWORD +- DD_SECRET_KEY +- DD_CREDENTIAL_AES_256_KEY +- METRICS_HTTP_AUTH_PASSWORD + +These fields are required to get the stack running. + +```zsh +# Install Helm chart. Choose a host name that matches the certificate above +helm install \ + defectdojo \ + ./helm/defectdojo \ + --namespace="${K8S_NAMESPACE}" \ + --set host="defectdojo.${TLS_CERT_DOMAIN}" \ + --set django.ingress.secretName="minikube-tls" \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true + +# For high availability deploy multiple instances of Django, Celery and Redis +helm install \ + defectdojo \ + ./helm/defectdojo \ + --namespace="${K8S_NAMESPACE}" \ + --set host="defectdojo.${TLS_CERT_DOMAIN}" \ + --set django.ingress.secretName="minikube-tls" \ + --set django.replicas=3 \ + --set celery.worker.replicas=3 \ + --set redis.replicas=3 \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true + +# Run highly available PostgreSQL cluster +# for production environment. 
+helm install \ + defectdojo \ + ./helm/defectdojo \ + --namespace="${K8S_NAMESPACE}" \ + --set host="defectdojo.${TLS_CERT_DOMAIN}" \ + --set django.replicas=3 \ + --set celery.worker.replicas=3 \ + --set redis.replicas=3 \ + --set django.ingress.secretName="minikube-tls" \ + --set postgresql.enabled=true \ + --set postgresql.replication.enabled=true \ + --set postgresql.replication.slaveReplicas=3 \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true + +# Note: If you run `helm install defectdojo` before, you will get an error +# message like `Error: release defectdojo failed: secrets "defectdojo" already +# exists`. This is because the secret is kept across installations. +# To prevent recreating the secret, add `--set createSecret=false` to your +# command. + +# Run test. +helm test defectdojo + +# Navigate to https://defectdojo.${TLS_CERT_DOMAIN}. +``` + +### Prometheus metrics + +It's possible to enable the Nginx prometheus exporter by setting `--set monitoring.enabled=true` and `--set monitoring.prometheus.enabled=true`. This adds the Nginx exporter sidecar and the standard Prometheus pod annotations to the django deployment. + +## Useful stuff + +### Setting your own domain + +The `siteUrl` in values.yaml controls what domain is configured in Django, and also what the celery workers will put as links in Jira tickets for example. +Set this to your `https://<yourdomain>` in values.yaml + +### Multiple Hostnames + +Django requires a list of all hostnames that are valid for requests. +You can add additional hostnames via helm or values file as an array. +This helps if you have a local service submitting reports to defectDojo using +the namespace name (say defectdojo.scans) instead of the TLD name used in a browser. 
+ +In your helm install simply pass them as a defined array, for example: + +`--set "alternativeHosts={defectdojo.default,localhost,defectdojo.example.com}"` + +This will also work with shell inserted variables: + +`--set "alternativeHosts={defectdojo.${TLS_CERT_DOMAIN},localhost}"` + +You will still need to set a host value as well. + +### Using an existing redis setup with redis-sentinel + +If you want to use a redis-sentinel setup as the Celery broker, you will need to set the following. + +1. Set redis.scheme to "sentinel" in values.yaml +2. Set two additional extraEnv vars specifying the sentinel master name and port in values.yaml + +```yaml +celery: + broker: 'redis' + +redis: + redisServer: 'PutYourRedisSentinelAddress' + scheme: 'sentinel' + +extraEnv: + - name: DD_CELERY_BROKER_TRANSPORT_OPTIONS + value: '{"master_name": "mymaster"}' + - name: 'DD_CELERY_BROKER_PORT' + value: '26379' +``` + +### How to use an external PostgreSQL DB with Defectdojo + +#### Step 1: Create a Namespace for DefectDojo + +To begin, create a dedicated namespace for DefectDojo to isolate its resources: +`kubectl create ns defectdojo` + +#### Step 2: Create a Secret for PostgreSQL Credentials + +Set up a Kubernetes Secret to securely store the PostgreSQL user password and database connection URL, which are essential for establishing a secure connection between DefectDojo and your PostgreSQL instance. Apply the secret using the following command: `kubectl apply -f secret.yaml -n defectdojo`. This secret will be referenced within the `extraEnv` section of the DefectDojo Helm values file. 
+ +Sample secret template (replace the placeholders with your PostgreSQL credentials): + +```YAML +apiVersion: v1 +kind: Secret +metadata: + name: defectdojo-postgresql-specific +type: Opaque +stringData: # I chose stringData for better visualization of the credentials for debugging + password: +``` + +#### Step 2.5: Install PostgreSQL (Optional) + +If you need to simulate a PostgreSQL database external to DefectDojo, you can install PostgreSQL using the following Helm command: + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo update +helm install defectdojo-postgresql bitnami/postgresql -n defectdojo -f postgresql/values.yaml +``` + +Sample `values.yaml` file for PostgreSQL configuration: + +```YAML +auth: + username: defectdojo + password: + postgresPassword: + database: defectdojo +primary: + persistence: + size: 10Gi +``` + +#### Step 3: Modify DefectDojo helm values + +Before installing the DefectDojo Helm chart, it's important to customize the `values.yaml` file. 
Key areas to modify include specifying the PostgreSQL connection details & the extraEnv block: + +```yaml +postgresql: + enabled: false # Disable the creation of the database in the cluster + postgresServer: "127.0.0.1" # Required to skip certain tests not useful on external instances + auth: + username: defectdojo # your database user + database: defectdojo # your database name + secretKeys: + adminPasswordKey: password # the name of the field containing the password value + userPasswordKey: password # the name of the field containing the password value + replicationPasswordKey: password # the name of the field containing the password value + existingSecret: defectdojo-postgresql-specific # the secret containing your database password + +extraEnv: +# Overwrite the database endpoint +- name: DD_DATABASE_HOST + value: +# Overwrite the database port +- name: DD_DATABASE_PORT + value: +``` + +#### Step 4: Deploy DefectDojo + +After modifying the `values.yaml` file as needed, deploy DefectDojo using Helm. This command also generates the required secrets for the DefectDojo admin UI and Redis: + +```bash +helm install defectdojo defectdojo -f values.yaml -n defectdojo --set createSecret=true --set createRedisSecret=true +``` + + +**NOTE**: It is important to highlight that this setup can also be utilized for achieving high availability (HA) in PostgreSQL. By placing a load balancer in front of the PostgreSQL cluster, read and write requests can be efficiently routed to the appropriate primary or standby servers as needed. 
+ + +### kubectl commands + +```zsh +# View logs of a specific pod +kubectl logs $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -f + +# Open a shell in a specific pod +kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -- /bin/bash +# Or: +kubectl exec defectdojo-django- -c uwsgi -it /bin/sh + +# Open a Python shell in a specific pod +kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -- python manage.py shell +``` + +### Clean up Kubernetes + +Helm >= v3 + +``` +helm uninstall defectdojo +``` + +To remove persistent objects not removed by uninstall (this will remove any database): + +``` +kubectl delete secrets defectdojo defectdojo-redis-specific defectdojo-postgresql-specific +kubectl delete serviceAccount defectdojo +kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 +``` + + +# General information about chart values + +{{ template "chart.deprecationWarning" . }} + +{{ template "chart.badgesSection" . }} + +{{ template "chart.description" . }} + +{{ template "chart.homepageLine" . }} + +{{ template "chart.maintainersSection" . }} + +{{ template "chart.sourcesSection" . }} + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . 
}} diff --git a/helm/defectdojo/values.schema.json b/helm/defectdojo/values.schema.json new file mode 100644 index 00000000000..967b67b61a1 --- /dev/null +++ b/helm/defectdojo/values.schema.json @@ -0,0 +1,893 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "admin": { + "type": "object", + "properties": { + "credentialAes256Key": { + "type": "null" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "mail": { + "type": "string" + }, + "metricsHttpAuthPassword": { + "type": "null" + }, + "password": { + "type": "null" + }, + "secretKey": { + "type": "null" + }, + "user": { + "type": "string" + } + } + }, + "annotations": { + "type": "object" + }, + "celery": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "beat": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "nodeSelector": { + "type": "object" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "tolerations": { + "type": "array" + } + } + }, + "broker": { + "type": "string" + }, + "brokerHost": { + "type": "string" + }, + "extraVolumes": { + "type": "array" + }, + "logLevel": { + "type": "string" + }, + "worker": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "appSettings": { + "type": "object", + "properties": { + "poolType": { + "type": "string" + } + } + }, + "nodeSelector": { + "type": "object" + }, + "replicas": { + "type": "integer" + }, + "resources": { + "type": "object", + "properties": { + "limits": { 
+ "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "tolerations": { + "type": "array" + } + } + } + } + }, + "cloudsql": { + "type": "object", + "properties": { + "enable_iam_login": { + "type": "boolean" + }, + "enabled": { + "type": "boolean" + }, + "image": { + "type": "object", + "properties": { + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "instance": { + "type": "string" + }, + "use_private_ip": { + "type": "boolean" + }, + "verbose": { + "type": "boolean" + } + } + }, + "createPostgresqlSecret": { + "type": "boolean" + }, + "createRedisSecret": { + "type": "boolean" + }, + "createSecret": { + "type": "boolean" + }, + "dbMigrationChecker": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + } + } + }, + "django": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "extraVolumes": { + "type": "array" + }, + "ingress": { + "type": "object", + "properties": { + "activateTLS": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "ingressClassName": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, + "mediaPersistentVolume": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "fsGroup": { + "type": "integer" + }, + "name": { + "type": 
"string" + }, + "persistentVolumeClaim": { + "type": "object", + "properties": { + "accessModes": { + "type": "array", + "items": { + "type": "string" + } + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "null" + }, + "size": { + "type": "string" + }, + "storageClassName": { + "type": "null" + } + } + }, + "type": { + "type": "string" + } + } + }, + "nginx": { + "type": "object", + "properties": { + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "tls": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "generateCertificate": { + "type": "boolean" + } + } + } + } + }, + "nodeSelector": { + "type": "object" + }, + "replicas": { + "type": "integer" + }, + "service": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "type": { + "type": "string" + } + } + }, + "strategy": { + "type": "object" + }, + "tolerations": { + "type": "array" + }, + "uwsgi": { + "type": "object", + "properties": { + "appSettings": { + "type": "object", + "properties": { + "processes": { + "type": "integer" + }, + "threads": { + "type": "integer" + } + } + }, + "certificates": { + "type": "object", + "properties": { + "certFileName": { + "type": "string" + }, + "certMountPath": { + "type": "string" + }, + "configName": { + "type": "string" + }, + "enabled": { + "type": "boolean" + } + } + }, + "enableDebug": { + "type": "boolean" + }, + "livenessProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + 
"timeoutSeconds": { + "type": "integer" + } + } + }, + "readinessProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "startupProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + } + } + } + } + }, + "extraConfigs": { + "type": "object" + }, + "gke": { + "type": "object", + "properties": { + "useGKEIngress": { + "type": "boolean" + }, + "useManagedCertificate": { + "type": "boolean" + }, + "workloadIdentityEmail": { + "type": "string" + } + } + }, + "host": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "initializer": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "annotations": { + "type": "object" + }, + "extraVolumes": { + "type": "array" + }, + "jobAnnotations": { + "type": "object" + }, + "keepSeconds": { + "type": "integer" + }, + "labels": { + "type": "object" + }, + "nodeSelector": { + "type": "object" + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + 
"type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "run": { + "type": "boolean" + }, + "staticName": { + "type": "boolean" + } + } + }, + "monitoring": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "prometheus": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + } + } + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "egress": { + "type": "array" + }, + "enabled": { + "type": "boolean" + }, + "ingress": { + "type": "array" + }, + "ingressExtend": { + "type": "array" + } + } + }, + "podLabels": { + "type": "object" + }, + "postgresql": { + "type": "object", + "properties": { + "architecture": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "database": { + "type": "string" + }, + "existingSecret": { + "type": "string" + }, + "password": { + "type": "string" + }, + "secretKeys": { + "type": "object", + "properties": { + "adminPasswordKey": { + "type": "string" + }, + "replicationPasswordKey": { + "type": "string" + }, + "userPasswordKey": { + "type": "string" + } + } + }, + "username": { + "type": "string" + } + } + }, + "enabled": { + "type": "boolean" + }, + "primary": { + "type": "object", + "properties": { + "affinity": { + "type": "object" + }, + "containerSecurityContext": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + } + }, + "name": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "persistence": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "podSecurityContext": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "fsGroup": { + "type": "integer" + } + } + }, + "service": { + 
"type": "object", + "properties": { + "ports": { + "type": "object", + "properties": { + "postgresql": { + "type": "integer" + } + } + } + } + } + } + }, + "shmVolume": { + "type": "object", + "properties": { + "chmod": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "containerSecurityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + } + } + }, + "enabled": { + "type": "boolean" + } + } + } + } + }, + "redis": { + "type": "object", + "properties": { + "architecture": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "existingSecret": { + "type": "string" + }, + "existingSecretPasswordKey": { + "type": "string" + }, + "password": { + "type": "string" + } + } + }, + "enabled": { + "type": "boolean" + }, + "scheme": { + "type": "string" + }, + "transportEncryption": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "params": { + "type": "string" + } + } + } + } + }, + "repositoryPrefix": { + "type": "string" + }, + "secrets": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + } + } + }, + "securityContext": { + "type": "object", + "properties": { + "djangoSecurityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + } + } + }, + "enabled": { + "type": "boolean" + }, + "nginxSecurityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + } + } + } + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "labels": { + "type": "object" + } + } + }, + "tag": { + "type": "string" + }, + "tests": { + "type": "object", + "properties": { + "unitTests": { + "type": "object", + "properties": { + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", 
+ "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + } + } + } + } + } + } +} diff --git a/readme-docs/KUBERNETES.md b/readme-docs/KUBERNETES.md index 1d61ae01aee..a013a49d3a1 100644 --- a/readme-docs/KUBERNETES.md +++ b/readme-docs/KUBERNETES.md @@ -393,8 +393,8 @@ Sample secret template (replace the placeholders with your PostgreSQL credential ```YAML apiversion: v1 kind: Secret -metadata: - name: defectdojo-postgresql-specific +metadata: + name: defectdojo-postgresql-specific type: Opaque stringData: # I chose stringData for better visualization of the credentials for debugging password: @@ -405,7 +405,7 @@ stringData: # I chose stringData for better visualization of the credentials fo If you need to simulate a PostgreSQL database external to DefectDojo, you can install PostgreSQL using the following Helm command: ```bash -helm repo add bitnami https://charts.bitnami.com/bitnami +helm repo add bitnami https://charts.bitnami.com/bitnami helm repo update helm install defectdojo-postgresql bitnami/postgresql -n defectdojo -f postgresql/values.yaml ``` @@ -434,7 +434,7 @@ postgresql: auth: username: defectdojo # your database user database: defectdojo # your database name - secretKeys: + secretKeys: adminPasswordKey: password # the name of the field containing the password value userPasswordKey: password # the name of the field containing the password value replicationPasswordKey: password # the name of the field containing the password value @@ -457,10 +457,8 @@ After modifying the `values.yaml` file as needed, deploy DefectDojo using Helm. 
helm install defectdojo defectdojo -f values.yaml -n defectdojo --set createSecret=true --set createRedisSecret=true ``` - **NOTE**: It is important to highlight that this setup can also be utilized for achieving high availability (HA) in PostgreSQL. By placing a load balancer in front of the PostgreSQL cluster, read and write requests can be efficiently routed to the appropriate primary or standby servers as needed. - ### kubectl commands ```zsh @@ -494,3 +492,209 @@ kubectl delete secrets defectdojo defectdojo-redis-specific defectdojo-postgresq kubectl delete serviceAccount defectdojo kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 ``` + + +# General information about chart values + +![Version: 1.6.202-dev](https://img.shields.io/badge/Version-1.6.202--dev-informational?style=flat-square) ![AppVersion: 2.50.0-dev](https://img.shields.io/badge/AppVersion-2.50.0--dev-informational?style=flat-square) + +A Helm chart for Kubernetes to install DefectDojo + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| madchap | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | postgresql | ~16.7.0 | +| https://charts.bitnami.com/bitnami | redis | ~19.6.0 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| admin.credentialAes256Key | string | `nil` | | +| admin.firstName | string | `"Administrator"` | | +| admin.lastName | string | `"User"` | | +| admin.mail | string | `"admin@defectdojo.local"` | | +| admin.metricsHttpAuthPassword | string | `nil` | | +| admin.password | string | `nil` | | +| admin.secretKey | string | `nil` | | +| admin.user | string | `"admin"` | | +| annotations | object | `{}` | | +| celery.annotations | object | `{}` | | +| celery.beat.affinity | object | `{}` | | +| celery.beat.annotations | object | `{}` | | +| celery.beat.nodeSelector | object | `{}` | | +| celery.beat.replicas | int | 
`1` | | +| celery.beat.resources.limits.cpu | string | `"2000m"` | | +| celery.beat.resources.limits.memory | string | `"256Mi"` | | +| celery.beat.resources.requests.cpu | string | `"100m"` | | +| celery.beat.resources.requests.memory | string | `"128Mi"` | | +| celery.beat.tolerations | list | `[]` | | +| celery.broker | string | `"redis"` | | +| celery.brokerHost | string | `""` | | +| celery.extraVolumes | list | `[]` | | +| celery.logLevel | string | `"INFO"` | | +| celery.worker.affinity | object | `{}` | | +| celery.worker.annotations | object | `{}` | | +| celery.worker.appSettings.poolType | string | `"solo"` | | +| celery.worker.nodeSelector | object | `{}` | | +| celery.worker.replicas | int | `1` | | +| celery.worker.resources.limits.cpu | string | `"2000m"` | | +| celery.worker.resources.limits.memory | string | `"512Mi"` | | +| celery.worker.resources.requests.cpu | string | `"100m"` | | +| celery.worker.resources.requests.memory | string | `"128Mi"` | | +| celery.worker.tolerations | list | `[]` | | +| cloudsql.enable_iam_login | bool | `false` | | +| cloudsql.enabled | bool | `false` | | +| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | | +| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | | +| cloudsql.image.tag | string | `"1.37.8"` | | +| cloudsql.instance | string | `""` | | +| cloudsql.use_private_ip | bool | `false` | | +| cloudsql.verbose | bool | `true` | | +| createPostgresqlSecret | bool | `false` | | +| createRedisSecret | bool | `false` | | +| createSecret | bool | `false` | | +| dbMigrationChecker.enabled | bool | `true` | | +| dbMigrationChecker.resources.limits.cpu | string | `"200m"` | | +| dbMigrationChecker.resources.limits.memory | string | `"200Mi"` | | +| dbMigrationChecker.resources.requests.cpu | string | `"100m"` | | +| dbMigrationChecker.resources.requests.memory | string | `"100Mi"` | | +| django.affinity | object | `{}` | | +| django.annotations | object | `{}` | | +| django.extraVolumes 
| list | `[]` | | +| django.ingress.activateTLS | bool | `true` | | +| django.ingress.annotations | object | `{}` | | +| django.ingress.enabled | bool | `true` | | +| django.ingress.ingressClassName | string | `""` | | +| django.ingress.secretName | string | `"defectdojo-tls"` | | +| django.mediaPersistentVolume.enabled | bool | `true` | | +| django.mediaPersistentVolume.fsGroup | int | `1001` | | +| django.mediaPersistentVolume.name | string | `"media"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.accessModes[0] | string | `"ReadWriteMany"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.create | bool | `false` | | +| django.mediaPersistentVolume.persistentVolumeClaim.name | string | `nil` | | +| django.mediaPersistentVolume.persistentVolumeClaim.size | string | `"5Gi"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.storageClassName | string | `nil` | | +| django.mediaPersistentVolume.type | string | `"emptyDir"` | | +| django.nginx.resources.limits.cpu | string | `"2000m"` | | +| django.nginx.resources.limits.memory | string | `"256Mi"` | | +| django.nginx.resources.requests.cpu | string | `"100m"` | | +| django.nginx.resources.requests.memory | string | `"128Mi"` | | +| django.nginx.tls.enabled | bool | `false` | | +| django.nginx.tls.generateCertificate | bool | `false` | | +| django.nodeSelector | object | `{}` | | +| django.replicas | int | `1` | | +| django.service.annotations | object | `{}` | | +| django.service.type | string | `""` | | +| django.strategy | object | `{}` | | +| django.tolerations | list | `[]` | | +| django.uwsgi.appSettings.processes | int | `2` | | +| django.uwsgi.appSettings.threads | int | `2` | | +| django.uwsgi.certificates.certFileName | string | `"ca.crt"` | | +| django.uwsgi.certificates.certMountPath | string | `"/certs/"` | | +| django.uwsgi.certificates.configName | string | `"defectdojo-ca-certs"` | | +| django.uwsgi.certificates.enabled | bool | `false` | | +| django.uwsgi.enableDebug | bool 
| `false` | | +| django.uwsgi.livenessProbe.enabled | bool | `true` | | +| django.uwsgi.livenessProbe.failureThreshold | int | `6` | | +| django.uwsgi.livenessProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.livenessProbe.periodSeconds | int | `10` | | +| django.uwsgi.livenessProbe.successThreshold | int | `1` | | +| django.uwsgi.livenessProbe.timeoutSeconds | int | `5` | | +| django.uwsgi.readinessProbe.enabled | bool | `true` | | +| django.uwsgi.readinessProbe.failureThreshold | int | `6` | | +| django.uwsgi.readinessProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.readinessProbe.periodSeconds | int | `10` | | +| django.uwsgi.readinessProbe.successThreshold | int | `1` | | +| django.uwsgi.readinessProbe.timeoutSeconds | int | `5` | | +| django.uwsgi.resources.limits.cpu | string | `"2000m"` | | +| django.uwsgi.resources.limits.memory | string | `"512Mi"` | | +| django.uwsgi.resources.requests.cpu | string | `"100m"` | | +| django.uwsgi.resources.requests.memory | string | `"256Mi"` | | +| django.uwsgi.startupProbe.enabled | bool | `true` | | +| django.uwsgi.startupProbe.failureThreshold | int | `30` | | +| django.uwsgi.startupProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.startupProbe.periodSeconds | int | `5` | | +| django.uwsgi.startupProbe.successThreshold | int | `1` | | +| django.uwsgi.startupProbe.timeoutSeconds | int | `1` | | +| extraConfigs | object | `{}` | | +| gke.useGKEIngress | bool | `false` | | +| gke.useManagedCertificate | bool | `false` | | +| gke.workloadIdentityEmail | string | `""` | | +| host | string | `"defectdojo.default.minikube.local"` | | +| imagePullPolicy | string | `"Always"` | | +| initializer.affinity | object | `{}` | | +| initializer.annotations | object | `{}` | | +| initializer.extraVolumes | list | `[]` | | +| initializer.jobAnnotations | object | `{}` | | +| initializer.keepSeconds | int | `60` | | +| initializer.labels | object | `{}` | | +| initializer.nodeSelector | object | `{}` | | +| 
initializer.resources.limits.cpu | string | `"2000m"` | | +| initializer.resources.limits.memory | string | `"512Mi"` | | +| initializer.resources.requests.cpu | string | `"100m"` | | +| initializer.resources.requests.memory | string | `"256Mi"` | | +| initializer.run | bool | `true` | | +| initializer.staticName | bool | `false` | | +| monitoring.enabled | bool | `false` | You can also specify value comments like this | +| monitoring.prometheus.enabled | bool | `false` | | +| monitoring.prometheus.image | string | `"nginx/nginx-prometheus-exporter:1.4.2"` | | +| monitoring.prometheus.imagePullPolicy | string | `"IfNotPresent"` | | +| networkPolicy.annotations | object | `{}` | | +| networkPolicy.egress | list | `[]` | | +| networkPolicy.enabled | bool | `false` | | +| networkPolicy.ingress | list | `[]` | | +| networkPolicy.ingressExtend | list | `[]` | | +| podLabels | object | `{}` | | +| postgresql.architecture | string | `"standalone"` | | +| postgresql.auth.database | string | `"defectdojo"` | | +| postgresql.auth.existingSecret | string | `"defectdojo-postgresql-specific"` | | +| postgresql.auth.password | string | `""` | | +| postgresql.auth.secretKeys.adminPasswordKey | string | `"postgresql-postgres-password"` | | +| postgresql.auth.secretKeys.replicationPasswordKey | string | `"postgresql-replication-password"` | | +| postgresql.auth.secretKeys.userPasswordKey | string | `"postgresql-password"` | | +| postgresql.auth.username | string | `"defectdojo"` | | +| postgresql.enabled | bool | `true` | | +| postgresql.primary.affinity | object | `{}` | | +| postgresql.primary.containerSecurityContext.enabled | bool | `true` | | +| postgresql.primary.containerSecurityContext.runAsUser | int | `1001` | | +| postgresql.primary.name | string | `"primary"` | | +| postgresql.primary.nodeSelector | object | `{}` | | +| postgresql.primary.persistence.enabled | bool | `true` | | +| postgresql.primary.podSecurityContext.enabled | bool | `true` | | +| 
postgresql.primary.podSecurityContext.fsGroup | int | `1001` | | +| postgresql.primary.service.ports.postgresql | int | `5432` | | +| postgresql.shmVolume.chmod.enabled | bool | `false` | | +| postgresql.volumePermissions.containerSecurityContext.runAsUser | int | `1001` | | +| postgresql.volumePermissions.enabled | bool | `false` | | +| redis.architecture | string | `"standalone"` | | +| redis.auth.existingSecret | string | `"defectdojo-redis-specific"` | | +| redis.auth.existingSecretPasswordKey | string | `"redis-password"` | | +| redis.auth.password | string | `""` | | +| redis.enabled | bool | `true` | | +| redis.scheme | string | `"redis"` | | +| redis.transportEncryption.enabled | bool | `false` | | +| redis.transportEncryption.params | string | `""` | | +| repositoryPrefix | string | `"defectdojo"` | | +| secrets.annotations | object | `{}` | | +| securityContext.djangoSecurityContext.runAsUser | int | `1001` | | +| securityContext.enabled | bool | `true` | | +| securityContext.nginxSecurityContext.runAsUser | int | `1001` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.labels | object | `{}` | | +| tag | string | `"latest"` | | +| tests.unitTests.resources.limits.cpu | string | `"500m"` | | +| tests.unitTests.resources.limits.memory | string | `"512Mi"` | | +| tests.unitTests.resources.requests.cpu | string | `"100m"` | | +| tests.unitTests.resources.requests.memory | string | `"128Mi"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) From cf73e1c1ceef2fff279d4d2197aba68598ce7bb0 Mon Sep 17 00:00:00 2001 From: kiblik <5609770+kiblik@users.noreply.github.com> Date: Sat, 6 Sep 2025 13:00:11 +0200 Subject: [PATCH 2/2] Swap helm/defectdojo/README.md & readme-docs/KUBERNETES.md --- .github/workflows/test-helm-chart.yml | 3 +- helm/defectdojo/.helmignore | 2 +- 
helm/defectdojo/README.md | 728 +++++++++++++++++++++++++- helm/defectdojo/README.md.gotmpl | 5 - helm/defectdojo/values.schema.json | 122 ++++- helm/defectdojo/values.yaml | 3 + readme-docs/KUBERNETES.md | 701 +------------------------ 7 files changed, 844 insertions(+), 720 deletions(-) mode change 120000 => 100644 helm/defectdojo/README.md diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index e807102f72e..dd2d5daed48 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -80,6 +80,7 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # If this step is failing, run `helm-docs --chart-search-root helm/defectdojo` - name: Run helm-docs uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 with: @@ -93,6 +94,7 @@ jobs: - name: Checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + # If this step is failing, run `helm schema --use-helm-docs` in `helm/defectdojo` - name: Generate values schema json uses: losisin/helm-values-schema-json-action@28b1b33dcd9bd10bb8157627566f3971313a8872 # v2.0.4 with: @@ -116,7 +118,6 @@ jobs: - name: Configure Helm repos run: |- - helm repo add bitnami https://charts.bitnami.com/bitnami helm dependency list ./helm/defectdojo helm dependency update ./helm/defectdojo diff --git a/helm/defectdojo/.helmignore b/helm/defectdojo/.helmignore index 70909a86d60..55abfe74368 100644 --- a/helm/defectdojo/.helmignore +++ b/helm/defectdojo/.helmignore @@ -20,4 +20,4 @@ .idea/ *.tmproj .vscode/ -README.md +README.md.gotmpl diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md deleted file mode 120000 index 5c0dd98ed0f..00000000000 --- a/helm/defectdojo/README.md +++ /dev/null @@ -1 +0,0 @@ -../../readme-docs/KUBERNETES.md \ No newline at end of file diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md new file mode 100644 index 
00000000000..d20a3064ffb --- /dev/null +++ b/helm/defectdojo/README.md @@ -0,0 +1,727 @@ +# DefectDojo on Kubernetes + +DefectDojo Kubernetes utilizes [Helm](https://helm.sh/), a +package manager for Kubernetes. Helm Charts help you define, install, and +upgrade even the most complex Kubernetes application. + +For development purposes, +[minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) +and [Helm](https://helm.sh/) can be installed locally by following +this [guide](https://helm.sh/docs/using_helm/#installing-helm). + +## Supported Kubernetes Versions + +The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and the oldest supported [version from AWS](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). + +## Helm chart + +Starting with version 1.14.0, a helm chart will be pushed onto the `helm-charts` branch during the release process. Don't look for a chart museum, we're leveraging the "raw" capabilities of GitHub at this time. + +To use it, you can add our repo. + +``` +$ helm repo add defectdojo 'https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/helm-charts' + +$ helm repo update +``` + +You should now be able to see the chart. + +``` +$ helm search repo defectdojo +NAME CHART VERSION APP VERSION DESCRIPTION +defectdojo/defectdojo 1.6.153 2.39.0 A Helm chart for Kubernetes to install DefectDojo +``` + +## Kubernetes Local Quickstart + +Requirements: + +1. Helm installed locally +2. Minikube installed locally +3. 
Latest cloned copy of DefectDojo + +```zsh +git clone https://github.com/DefectDojo/django-DefectDojo +cd django-DefectDojo + +minikube start +minikube addons enable ingress +``` + +Helm >= v3 + +Then pull the dependent charts: + +```zsh +helm dependency update ./helm/defectdojo +``` + +Now, install the helm chart into minikube. + +If you have setup an ingress controller: + +```zsh +DJANGO_INGRESS_ENABLED=true +``` + +else: + +```zsh +DJANGO_INGRESS_ENABLED=false +``` + +If you have configured TLS: + +```zsh +DJANGO_INGRESS_ACTIVATE_TLS=true +``` + +else: + +```zsh +DJANGO_INGRESS_ACTIVATE_TLS=false +``` + +Warning: Use the `createSecret*=true` flags only upon first install. For re-installs, see `§Re-install the chart` + +Helm >= v3: + +```zsh +helm install \ + defectdojo \ + ./helm/defectdojo \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true +``` + +It usually takes up to a minute for the services to startup and the +status of the containers can be viewed by starting up `minikube dashboard`. +Note: If the containers are not cached locally the services will start once the +containers have been pulled locally. + +To be able to access DefectDojo, set up an ingress or access the service +directly by running the following command: + +```zsh +kubectl port-forward --namespace=default \ +service/defectdojo-django 8080:80 +``` + +As you set your host value to defectdojo.default.minikube.local, make sure that +it resolves to the localhost IP address, e.g. 
by adding the following two lines +to /etc/hosts: + +```zsh +::1 defectdojo.default.minikube.local +127.0.0.1 defectdojo.default.minikube.local +``` + +To find out the password, run the following command: + +```zsh +echo "DefectDojo admin password: $(kubectl \ + get secret defectdojo \ + --namespace=default \ + --output jsonpath='{.data.DD_ADMIN_PASSWORD}' \ + | base64 --decode)" +``` + +To access DefectDojo, go to . +Log in with username admin and the password from the previous command. + +### Minikube with locally built containers + +If testing containers locally, then set the imagePullPolicy to Never, +which ensures containers are not pulled from Docker hub. + +Use the same commands as before but add: + +```zsh + --set imagePullPolicy=Never +``` + +### Installing from a private registry + +If you have stored your images in a private registry, you can install defectdojo chart with (helm 3). + +- First create a secret named "defectdojoregistrykey" based on the credentials that can pull from the registry: see https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +- Then install the chart with the same commands as before but adding: + +```zsh + --set repositoryPrefix= \ + --set imagePullSecrets=defectdojoregistrykey +``` + +### Build Images Locally + +```zsh +# Build images +docker build -t defectdojo/defectdojo-django -f Dockerfile.django . +docker build -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . +``` + +```zsh +# Build images behind proxy +docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-django -f Dockerfile.django . +docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . 
+``` + +### Upgrade the chart + +If you want to change kubernetes configuration of use an updated docker image (evolution of defectDojo code), upgrade the application: + +``` +kubectl delete job defectdojo-initializer +helm upgrade defectdojo ./helm/defectdojo/ \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} +``` + +### Re-install the chart + +In case of issue or in any other situation where you need to re-install the chart, you can do it and re-use the same secrets. + +**Note: With postgresql you'll keep the same database (more information below)** + +```zsh +# helm 3 +helm uninstall defectdojo +helm install \ + defectdojo \ + ./helm/defectdojo \ + --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ + --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} +``` + +## Kubernetes Production + +When running defectdojo in production be aware that you understood the full setup and always have a backup. + +### Encryption to Kubernetes + +Optionally, for TLS locally, you need to install a TLS certificate into your Kubernetes cluster. +For development purposes, you can create your own certificate authority as +described [here](https://github.com/hendrikhalkow/k8s-docs/blob/master/tls.md). + +```zsh +# https://kubernetes.io/docs/concepts/services-networking/ingress/#tls +# Create a TLS secret called minikube-tls as mentioned above, e.g. 
+K8S_NAMESPACE="default"
+TLS_CERT_DOMAIN="${K8S_NAMESPACE}.minikube.local"
+kubectl --namespace "${K8S_NAMESPACE}" create secret tls defectdojo-tls \
+  --key <(openssl rsa \
+    -in "${CA_DIR}/private/${TLS_CERT_DOMAIN}.key.pem" \
+    -passin "pass:${TLS_CERT_PASSWORD}") \
+  --cert <(cat \
+    "${CA_DIR}/certs/${TLS_CERT_DOMAIN}.cert.pem" \
+    "${CA_DIR}/chain.pem")
+```
+
+### Encryption in Kubernetes and End-to-End Encryption
+
+With the TLS certificate from your Kubernetes cluster, all traffic to your cluster is encrypted, but the traffic in your cluster is still unencrypted.
+
+If you want to encrypt the traffic to the nginx server, you can use the options `--set nginx.tls.enabled=true` and `--set nginx.tls.generateCertificate=true` to generate a self-signed certificate and use the https config. The option to add your own pregenerated certificate is generally possible but not implemented in the helm chart yet.
+
+Be aware that the traffic to the database and the celery broker is unencrypted at the moment.
+
+### Media persistent volume
+
+By default, the DefectDojo helm installation doesn't support persistent storage for storing images (dynamically uploaded by users). By default, it uses emptyDir, which is ephemeral by its nature and doesn't support multiple replicas of django pods, so it should not be used in production.
+
+To enable persistence of the media storage that supports R/W many, a backend storage such as S3, NFS or glusterfs should be used:
+
+```yaml
+mediaPersistentVolume:
+  enabled: true
+  # any name
+  name: media
+  # could be emptyDir (not for production) or pvc
+  type: pvc
+  # there are two options to create pvc 1) when you want the chart to create pvc for you, set django.mediaPersistentVolume.persistentVolumeClaim.create to true and do not specify anything for django.mediaPersistentVolume.persistentVolumeClaim.name 2) when you want to create pvc outside the chart, pass the pvc name via django.mediaPersistentVolume.persistentVolumeClaim.name and ensure django.mediaPersistentVolume.persistentVolumeClaim.create is set to false
+  persistentVolumeClaim:
+    create: true
+    name:
+    size: 5Gi
+    accessModes:
+    - ReadWriteMany
+    storageClassName:
+```
+
+In the example above, we want the media content to be preserved to `pvc` as a `persistentVolumeClaim` k8s resource; what we are basically doing is enabling the pvc to be created conditionally if the user wants to create it using the chart (in this case the pvc name 'defectdojo-media' will be inherited from the template file used to deploy the pvc). By default the volume type is emptyDir, which does not require a pvc. But when the type is set to pvc, we need a kubernetes Persistent Volume Claim, and this is where django.mediaPersistentVolume.persistentVolumeClaim.name comes into play.
+
+The accessMode is set to ReadWriteMany by default to accommodate using more than one replica. Ensure the storage supports ReadWriteMany before setting this option, otherwise set accessMode to ReadWriteOnce.
+
+NOTE: The PersistentVolume needs to be prepared in advance, before the helm installation/update is triggered.
+
+For more detail on how to create a proper PVC see the [example](https://github.com/DefectDojo/Community-Contribs/tree/master/persistent-media)
+
+### Installation
+
+**Important:** If you choose to create the secret on your own, you will need to create a secret named `defectdojo` containing the following fields:
+
+- DD_ADMIN_PASSWORD
+- DD_SECRET_KEY
+- DD_CREDENTIAL_AES_256_KEY
+- METRICS_HTTP_AUTH_PASSWORD
+
+These fields are required to get the stack running.
+
+```zsh
+# Install Helm chart. Choose a host name that matches the certificate above
+helm install \
+  defectdojo \
+  ./helm/defectdojo \
+  --namespace="${K8S_NAMESPACE}" \
+  --set host="defectdojo.${TLS_CERT_DOMAIN}" \
+  --set django.ingress.secretName="minikube-tls" \
+  --set createSecret=true \
+  --set createRedisSecret=true \
+  --set createPostgresqlSecret=true
+
+# For high availability deploy multiple instances of Django, Celery and Redis
+helm install \
+  defectdojo \
+  ./helm/defectdojo \
+  --namespace="${K8S_NAMESPACE}" \
+  --set host="defectdojo.${TLS_CERT_DOMAIN}" \
+  --set django.ingress.secretName="minikube-tls" \
+  --set django.replicas=3 \
+  --set celery.worker.replicas=3 \
+  --set redis.replicas=3 \
+  --set createSecret=true \
+  --set createRedisSecret=true \
+  --set createPostgresqlSecret=true
+
+# Run highly available PostgreSQL cluster
+# for production environment.
+helm install \ + defectdojo \ + ./helm/defectdojo \ + --namespace="${K8S_NAMESPACE}" \ + --set host="defectdojo.${TLS_CERT_DOMAIN}" \ + --set django.replicas=3 \ + --set celery.worker.replicas=3 \ + --set redis.replicas=3 \ + --set django.ingress.secretName="minikube-tls" \ + --set postgresql.enabled=true \ + --set postgresql.replication.enabled=true \ + --set postgresql.replication.slaveReplicas=3 \ + --set createSecret=true \ + --set createRedisSecret=true \ + --set createPostgresqlSecret=true + +# Note: If you run `helm install defectdojo before, you will get an error +# message like `Error: release defectdojo failed: secrets "defectdojo" already +# exists`. This is because the secret is kept across installations. +# To prevent recreating the secret, add --set createSecret=false` to your +# command. + +# Run test. +helm test defectdojo + +# Navigate to . +``` + +### Prometheus metrics + +It's possible to enable Nginx prometheus exporter by setting `--set monitoring.enabled=true` and `--set monitoring.prometheus.enabled=true`. This adds the Nginx exporter sidecar and the standard Prometheus pod annotations to django deployment. + +## Useful stuff + +### Setting your own domain + +The `siteUrl` in values.yaml controls what domain is configured in Django, and also what the celery workers will put as links in Jira tickets for example. +Set this to your `https://` in values.yaml + +### Multiple Hostnames + +Django requires a list of all hostnames that are valid for requests. +You can add additional hostnames via helm or values file as an array. +This helps if you have a local service submitting reports to defectDojo using +the namespace name (say defectdojo.scans) instead of the TLD name used in a browser. 
+ +In your helm install simply pass them as a defined array, for example: + +`--set "alternativeHosts={defectdojo.default,localhost,defectdojo.example.com}"` + +This will also work with shell inserted variables: + +`--set "alternativeHosts={defectdojo.${TLS_CERT_DOMAIN},localhost}"` + +You will still need to set a host value as well. + +### Using an existing redis setup with redis-sentinel + +If you want to use a redis-sentinel setup as the Celery broker, you will need to set the following. + +1. Set redis.scheme to "sentinel" in values.yaml +2. Set two additional extraEnv vars specifying the sentinel master name and port in values.yaml + +```yaml +celery: + broker: 'redis' + +redis: + redisServer: 'PutYourRedisSentinelAddress' + scheme: 'sentinel' + +extraEnv: + - name: DD_CELERY_BROKER_TRANSPORT_OPTIONS + value: '{"master_name": "mymaster"}' + - name: 'DD_CELERY_BROKER_PORT' + value: '26379' +``` + +### How to use an external PostgreSQL DB with Defectdojo + +#### Step 1: Create a Namespace for DefectDojo + +To begin, create a dedicated namespace for DefectDojo to isolate its resources: +`kubectl create ns defectdojo` + +#### Step 2: Create a Secret for PostgreSQL Credentials + +Set up a Kubernetes Secret to securely store the PostgreSQL user password and database connection URL, which are essential for establishing a secure connection between DefectDojo and your PostgreSQL instance. Apply the secret using the following command: `kubectl apply -f secret.yaml -n defectdojo`. This secret will be referenced within the `extraEnv` section of the DefectDojo Helm values file. 
+
+Sample secret template (replace the placeholders with your PostgreSQL credentials):
+
+```YAML
+apiVersion: v1
+kind: Secret
+metadata:
+  name: defectdojo-postgresql-specific
+type: Opaque
+stringData: # I chose stringData for better visualization of the credentials for debugging
+  password:
+```
+
+#### Step 2.5: Install PostgreSQL (Optional)
+
+If you need to simulate a PostgreSQL database external to DefectDojo, you can install PostgreSQL using the following Helm command:
+
+```bash
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm repo update
+helm install defectdojo-postgresql bitnami/postgresql -n defectdojo -f postgresql/values.yaml
+```
+
+Sample `values.yaml` file for PostgreSQL configuration:
+
+```YAML
+auth:
+  username: defectdojo
+  password:
+  postgresPassword:
+  database: defectdojo
+primary:
+  persistence:
+    size: 10Gi
+```
+
+#### Step 3: Modify DefectDojo helm values
+
+Before installing the DefectDojo Helm chart, it's important to customize the `values.yaml` file.
Key areas to modify include specifying the PostgreSQL connection details & the extraEnv block: + +```yaml +postgresql: + enabled: false # Disable the creation of the database in the cluster + postgresServer: "127.0.0.1" # Required to skip certain tests not useful on external instances + auth: + username: defectdojo # your database user + database: defectdojo # your database name + secretKeys: + adminPasswordKey: password # the name of the field containing the password value + userPasswordKey: password # the name of the field containing the password value + replicationPasswordKey: password # the name of the field containing the password value + existingSecret: defectdojo-postgresql-specific # the secret containing your database password + +extraEnv: +# Overwrite the database endpoint +- name: DD_DATABASE_HOST + value: +# Overwrite the database port +- name: DD_DATABASE_PORT + value: +``` + +#### Step 4: Deploy DefectDojo + +After modifying the `values.yaml` file as needed, deploy DefectDojo using Helm. This command also generates the required secrets for the DefectDojo admin UI and Redis: + +```bash +helm install defectdojo defectdojo -f values.yaml -n defectdojo --set createSecret=true --set createRedisSecret=true +``` + +**NOTE**: It is important to highlight that this setup can also be utilized for achieving high availability (HA) in PostgreSQL. By placing a load balancer in front of the PostgreSQL cluster, read and write requests can be efficiently routed to the appropriate primary or standby servers as needed. 
+ +### kubectl commands + +```zsh +# View logs of a specific pod +kubectl logs $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -f + +# Open a shell in a specific pod +kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -- /bin/bash +# Or: +kubectl exec defectdojo-django- -c uwsgi -it /bin/sh + +# Open a Python shell in a specific pod +kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ + -o jsonpath="{.items[0].metadata.name}") -- python manage.py shell +``` + +### Clean up Kubernetes + +Helm >= v3 + +``` +helm uninstall defectdojo +``` + +To remove persistent objects not removed by uninstall (this will remove any database): + +``` +kubectl delete secrets defectdojo defectdojo-redis-specific defectdojo-postgresql-specific +kubectl delete serviceAccount defectdojo +kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 +``` + +# General information about chart values + +![Version: 1.7.0-dev](https://img.shields.io/badge/Version-1.7.0--dev-informational?style=flat-square) ![AppVersion: 2.51.0-dev](https://img.shields.io/badge/AppVersion-2.51.0--dev-informational?style=flat-square) + +A Helm chart for Kubernetes to install DefectDojo + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| madchap | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| oci://us-docker.pkg.dev/os-public-container-registry/defectdojo | postgresql | ~16.7.0 | +| oci://us-docker.pkg.dev/os-public-container-registry/defectdojo | redis | ~19.6.4 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| admin.credentialAes256Key | string | `nil` | | +| admin.firstName | string | `"Administrator"` | | +| admin.lastName | string | `"User"` | | +| admin.mail | string | `"admin@defectdojo.local"` | | +| 
admin.metricsHttpAuthPassword | string | `nil` | | +| admin.password | string | `nil` | | +| admin.secretKey | string | `nil` | | +| admin.user | string | `"admin"` | | +| annotations | object | `{}` | | +| celery.annotations | object | `{}` | | +| celery.beat.affinity | object | `{}` | | +| celery.beat.annotations | object | `{}` | | +| celery.beat.extraEnv | list | `[]` | | +| celery.beat.extraInitContainers | list | `[]` | | +| celery.beat.extraVolumeMounts | list | `[]` | | +| celery.beat.extraVolumes | list | `[]` | | +| celery.beat.livenessProbe | object | `{}` | | +| celery.beat.nodeSelector | object | `{}` | | +| celery.beat.podAnnotations | object | `{}` | | +| celery.beat.readinessProbe | object | `{}` | | +| celery.beat.replicas | int | `1` | | +| celery.beat.resources.limits.cpu | string | `"2000m"` | | +| celery.beat.resources.limits.memory | string | `"256Mi"` | | +| celery.beat.resources.requests.cpu | string | `"100m"` | | +| celery.beat.resources.requests.memory | string | `"128Mi"` | | +| celery.beat.startupProbe | object | `{}` | | +| celery.beat.tolerations | list | `[]` | | +| celery.broker | string | `"redis"` | | +| celery.logLevel | string | `"INFO"` | | +| celery.worker.affinity | object | `{}` | | +| celery.worker.annotations | object | `{}` | | +| celery.worker.appSettings.poolType | string | `"solo"` | | +| celery.worker.extraEnv | list | `[]` | | +| celery.worker.extraInitContainers | list | `[]` | | +| celery.worker.extraVolumeMounts | list | `[]` | | +| celery.worker.extraVolumes | list | `[]` | | +| celery.worker.livenessProbe | object | `{}` | | +| celery.worker.nodeSelector | object | `{}` | | +| celery.worker.podAnnotations | object | `{}` | | +| celery.worker.readinessProbe | object | `{}` | | +| celery.worker.replicas | int | `1` | | +| celery.worker.resources.limits.cpu | string | `"2000m"` | | +| celery.worker.resources.limits.memory | string | `"512Mi"` | | +| celery.worker.resources.requests.cpu | string | `"100m"` | | +| 
celery.worker.resources.requests.memory | string | `"128Mi"` | | +| celery.worker.startupProbe | object | `{}` | | +| celery.worker.tolerations | list | `[]` | | +| cloudsql.enable_iam_login | bool | `false` | | +| cloudsql.enabled | bool | `false` | | +| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | | +| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | | +| cloudsql.image.tag | string | `"1.37.9"` | | +| cloudsql.instance | string | `""` | | +| cloudsql.use_private_ip | bool | `false` | | +| cloudsql.verbose | bool | `true` | | +| createPostgresqlSecret | bool | `false` | | +| createRedisSecret | bool | `false` | | +| createSecret | bool | `false` | | +| dbMigrationChecker.enabled | bool | `true` | | +| dbMigrationChecker.resources.limits.cpu | string | `"200m"` | | +| dbMigrationChecker.resources.limits.memory | string | `"200Mi"` | | +| dbMigrationChecker.resources.requests.cpu | string | `"100m"` | | +| dbMigrationChecker.resources.requests.memory | string | `"100Mi"` | | +| disableHooks | bool | `false` | | +| django.affinity | object | `{}` | | +| django.annotations | object | `{}` | | +| django.extraInitContainers | list | `[]` | | +| django.extraVolumes | list | `[]` | | +| django.ingress.activateTLS | bool | `true` | | +| django.ingress.annotations | object | `{}` | | +| django.ingress.enabled | bool | `true` | | +| django.ingress.ingressClassName | string | `""` | | +| django.ingress.secretName | string | `"defectdojo-tls"` | | +| django.mediaPersistentVolume.enabled | bool | `true` | | +| django.mediaPersistentVolume.fsGroup | int | `1001` | | +| django.mediaPersistentVolume.name | string | `"media"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.accessModes[0] | string | `"ReadWriteMany"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.create | bool | `false` | | +| django.mediaPersistentVolume.persistentVolumeClaim.name | string | `nil` | | +| 
django.mediaPersistentVolume.persistentVolumeClaim.size | string | `"5Gi"` | | +| django.mediaPersistentVolume.persistentVolumeClaim.storageClassName | string | `nil` | | +| django.mediaPersistentVolume.type | string | `"emptyDir"` | | +| django.nginx.extraEnv | list | `[]` | | +| django.nginx.extraVolumeMounts | list | `[]` | | +| django.nginx.resources.limits.cpu | string | `"2000m"` | | +| django.nginx.resources.limits.memory | string | `"256Mi"` | | +| django.nginx.resources.requests.cpu | string | `"100m"` | | +| django.nginx.resources.requests.memory | string | `"128Mi"` | | +| django.nginx.tls.enabled | bool | `false` | | +| django.nginx.tls.generateCertificate | bool | `false` | | +| django.nodeSelector | object | `{}` | | +| django.replicas | int | `1` | | +| django.service.annotations | object | `{}` | | +| django.service.type | string | `""` | | +| django.strategy | object | `{}` | | +| django.tolerations | list | `[]` | | +| django.uwsgi.appSettings.processes | int | `2` | | +| django.uwsgi.appSettings.threads | int | `2` | | +| django.uwsgi.certificates.certFileName | string | `"ca.crt"` | | +| django.uwsgi.certificates.certMountPath | string | `"/certs/"` | | +| django.uwsgi.certificates.configName | string | `"defectdojo-ca-certs"` | | +| django.uwsgi.certificates.enabled | bool | `false` | | +| django.uwsgi.enableDebug | bool | `false` | | +| django.uwsgi.extraEnv | list | `[]` | | +| django.uwsgi.extraVolumeMounts | list | `[]` | | +| django.uwsgi.livenessProbe.enabled | bool | `true` | | +| django.uwsgi.livenessProbe.failureThreshold | int | `6` | | +| django.uwsgi.livenessProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.livenessProbe.periodSeconds | int | `10` | | +| django.uwsgi.livenessProbe.successThreshold | int | `1` | | +| django.uwsgi.livenessProbe.timeoutSeconds | int | `5` | | +| django.uwsgi.readinessProbe.enabled | bool | `true` | | +| django.uwsgi.readinessProbe.failureThreshold | int | `6` | | +| 
django.uwsgi.readinessProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.readinessProbe.periodSeconds | int | `10` | | +| django.uwsgi.readinessProbe.successThreshold | int | `1` | | +| django.uwsgi.readinessProbe.timeoutSeconds | int | `5` | | +| django.uwsgi.resources.limits.cpu | string | `"2000m"` | | +| django.uwsgi.resources.limits.memory | string | `"512Mi"` | | +| django.uwsgi.resources.requests.cpu | string | `"100m"` | | +| django.uwsgi.resources.requests.memory | string | `"256Mi"` | | +| django.uwsgi.startupProbe.enabled | bool | `true` | | +| django.uwsgi.startupProbe.failureThreshold | int | `30` | | +| django.uwsgi.startupProbe.initialDelaySeconds | int | `0` | | +| django.uwsgi.startupProbe.periodSeconds | int | `5` | | +| django.uwsgi.startupProbe.successThreshold | int | `1` | | +| django.uwsgi.startupProbe.timeoutSeconds | int | `1` | | +| extraConfigs | object | `{}` | | +| extraLabels | object | `{}` | | +| gke.useGKEIngress | bool | `false` | | +| gke.useManagedCertificate | bool | `false` | | +| gke.workloadIdentityEmail | string | `""` | | +| host | string | `"defectdojo.default.minikube.local"` | | +| imagePullPolicy | string | `"Always"` | | +| imagePullSecrets | string | `nil` | | +| initializer.affinity | object | `{}` | | +| initializer.annotations | object | `{}` | | +| initializer.extraEnv | list | `[]` | | +| initializer.extraVolumeMounts | list | `[]` | | +| initializer.extraVolumes | list | `[]` | | +| initializer.jobAnnotations | object | `{}` | | +| initializer.keepSeconds | int | `60` | | +| initializer.labels | object | `{}` | | +| initializer.nodeSelector | object | `{}` | | +| initializer.resources.limits.cpu | string | `"2000m"` | | +| initializer.resources.limits.memory | string | `"512Mi"` | | +| initializer.resources.requests.cpu | string | `"100m"` | | +| initializer.resources.requests.memory | string | `"256Mi"` | | +| initializer.run | bool | `true` | | +| initializer.staticName | bool | `false` | | +| 
initializer.tolerations | list | `[]` | | +| monitoring.enabled | bool | `false` | | +| monitoring.prometheus.enabled | bool | `false` | | +| monitoring.prometheus.image | string | `"nginx/nginx-prometheus-exporter:1.4.2"` | | +| monitoring.prometheus.imagePullPolicy | string | `"IfNotPresent"` | | +| networkPolicy.annotations | object | `{}` | | +| networkPolicy.egress | list | `[]` | | +| networkPolicy.enabled | bool | `false` | | +| networkPolicy.ingress | list | `[]` | | +| networkPolicy.ingressExtend | list | `[]` | | +| podLabels | object | `{}` | | +| postgresServer | string | `nil` | | +| postgresql.architecture | string | `"standalone"` | | +| postgresql.auth.database | string | `"defectdojo"` | | +| postgresql.auth.existingSecret | string | `"defectdojo-postgresql-specific"` | | +| postgresql.auth.password | string | `""` | | +| postgresql.auth.secretKeys.adminPasswordKey | string | `"postgresql-postgres-password"` | | +| postgresql.auth.secretKeys.replicationPasswordKey | string | `"postgresql-replication-password"` | | +| postgresql.auth.secretKeys.userPasswordKey | string | `"postgresql-password"` | | +| postgresql.auth.username | string | `"defectdojo"` | | +| postgresql.enabled | bool | `true` | | +| postgresql.primary.affinity | object | `{}` | | +| postgresql.primary.containerSecurityContext.enabled | bool | `true` | | +| postgresql.primary.containerSecurityContext.runAsUser | int | `1001` | | +| postgresql.primary.name | string | `"primary"` | | +| postgresql.primary.nodeSelector | object | `{}` | | +| postgresql.primary.persistence.enabled | bool | `true` | | +| postgresql.primary.podSecurityContext.enabled | bool | `true` | | +| postgresql.primary.podSecurityContext.fsGroup | int | `1001` | | +| postgresql.primary.service.ports.postgresql | int | `5432` | | +| postgresql.shmVolume.chmod.enabled | bool | `false` | | +| postgresql.volumePermissions.containerSecurityContext.runAsUser | int | `1001` | | +| postgresql.volumePermissions.enabled | bool 
| `false` | | +| redis.architecture | string | `"standalone"` | | +| redis.auth.existingSecret | string | `"defectdojo-redis-specific"` | | +| redis.auth.existingSecretPasswordKey | string | `"redis-password"` | | +| redis.auth.password | string | `""` | | +| redis.enabled | bool | `true` | | +| redis.sentinel.enabled | bool | `false` | | +| redis.tls.enabled | bool | `false` | | +| redisParams | string | `""` | | +| redisServer | string | `nil` | | +| repositoryPrefix | string | `"defectdojo"` | | +| revisionHistoryLimit | int | `10` | | +| secrets.annotations | object | `{}` | | +| securityContext.djangoSecurityContext.runAsUser | int | `1001` | | +| securityContext.enabled | bool | `true` | | +| securityContext.nginxSecurityContext.runAsUser | int | `1001` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.labels | object | `{}` | | +| tag | string | `"latest"` | | +| tests.unitTests.resources.limits.cpu | string | `"500m"` | | +| tests.unitTests.resources.limits.memory | string | `"512Mi"` | | +| tests.unitTests.resources.requests.cpu | string | `"100m"` | | +| tests.unitTests.resources.requests.memory | string | `"128Mi"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/helm/defectdojo/README.md.gotmpl b/helm/defectdojo/README.md.gotmpl index 48969816512..9583a95d167 100644 --- a/helm/defectdojo/README.md.gotmpl +++ b/helm/defectdojo/README.md.gotmpl @@ -51,11 +51,6 @@ minikube addons enable ingress Helm >= v3 -```zsh -helm repo add bitnami https://charts.bitnami.com/bitnami -helm repo update -``` - Then pull the dependent charts: ```zsh diff --git a/helm/defectdojo/values.schema.json b/helm/defectdojo/values.schema.json index 967b67b61a1..06b105c9d66 100644 --- a/helm/defectdojo/values.schema.json +++ b/helm/defectdojo/values.schema.json @@ -49,9 +49,30 @@ 
"annotations": { "type": "object" }, + "extraEnv": { + "type": "array" + }, + "extraInitContainers": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "livenessProbe": { + "type": "object" + }, "nodeSelector": { "type": "object" }, + "podAnnotations": { + "type": "object" + }, + "readinessProbe": { + "type": "object" + }, "replicas": { "type": "integer" }, @@ -82,6 +103,9 @@ } } }, + "startupProbe": { + "type": "object" + }, "tolerations": { "type": "array" } @@ -90,12 +114,6 @@ "broker": { "type": "string" }, - "brokerHost": { - "type": "string" - }, - "extraVolumes": { - "type": "array" - }, "logLevel": { "type": "string" }, @@ -116,9 +134,30 @@ } } }, + "extraEnv": { + "type": "array" + }, + "extraInitContainers": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "livenessProbe": { + "type": "object" + }, "nodeSelector": { "type": "object" }, + "podAnnotations": { + "type": "object" + }, + "readinessProbe": { + "type": "object" + }, "replicas": { "type": "integer" }, @@ -149,6 +188,9 @@ } } }, + "startupProbe": { + "type": "object" + }, "tolerations": { "type": "array" } @@ -234,6 +276,9 @@ } } }, + "disableHooks": { + "type": "boolean" + }, "django": { "type": "object", "properties": { @@ -243,6 +288,9 @@ "annotations": { "type": "object" }, + "extraInitContainers": { + "type": "array" + }, "extraVolumes": { "type": "array" }, @@ -309,6 +357,12 @@ "nginx": { "type": "object", "properties": { + "extraEnv": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, "resources": { "type": "object", "properties": { @@ -406,6 +460,12 @@ "enableDebug": { "type": "boolean" }, + "extraEnv": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, "livenessProbe": { "type": "object", "properties": { @@ -509,6 +569,9 @@ "extraConfigs": { "type": "object" }, + "extraLabels": { + "type": "object" + }, "gke": { 
"type": "object", "properties": { @@ -529,6 +592,12 @@ "imagePullPolicy": { "type": "string" }, + "imagePullSecrets": { + "type": [ + "string", + "null" + ] + }, "initializer": { "type": "object", "properties": { @@ -538,6 +607,12 @@ "annotations": { "type": "object" }, + "extraEnv": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, "extraVolumes": { "type": "array" }, @@ -585,6 +660,9 @@ }, "staticName": { "type": "boolean" + }, + "tolerations": { + "type": "array" } } }, @@ -633,6 +711,12 @@ "podLabels": { "type": "object" }, + "postgresServer": { + "type": [ + "string", + "null" + ] + }, "postgresql": { "type": "object", "properties": { @@ -784,25 +868,39 @@ "enabled": { "type": "boolean" }, - "scheme": { - "type": "string" + "sentinel": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } }, - "transportEncryption": { + "tls": { "type": "object", "properties": { "enabled": { "type": "boolean" - }, - "params": { - "type": "string" } } } } }, + "redisParams": { + "type": "string" + }, + "redisServer": { + "type": [ + "string", + "null" + ] + }, "repositoryPrefix": { "type": "string" }, + "revisionHistoryLimit": { + "type": "integer" + }, "secrets": { "type": "object", "properties": { diff --git a/helm/defectdojo/values.yaml b/helm/defectdojo/values.yaml index ec4eecd601d..28d91208f0d 100644 --- a/helm/defectdojo/values.yaml +++ b/helm/defectdojo/values.yaml @@ -67,6 +67,7 @@ imagePullPolicy: Always repositoryPrefix: defectdojo # When using a private registry, name of the secret that holds the registry secret (eg deploy token from gitlab-ci project) # Create secrets as: kubectl create secret docker-registry defectdojoregistrykey --docker-username=registry_username --docker-password=registry_password --docker-server='https://index.docker.io/v1/' +# @schema type:[string, null] imagePullSecrets: ~ tag: latest @@ -524,10 +525,12 @@ extraConfigs: {} # External database support. 
# # To use an external Redis instance, set `redis.enabled` to false and set the address here: +# @schema type:[string, null] redisServer: ~ # Parameters attached to the redis connection string, defaults to "ssl_cert_reqs=optional" if `redis.tls.enabled` redisParams: "" # # To use an external PostgreSQL instance (like CloudSQL), set `postgresql.enabled` to false, # set items in `postgresql.auth` part for authentication, and set the address here: +# @schema type:[string, null] postgresServer: ~ diff --git a/readme-docs/KUBERNETES.md b/readme-docs/KUBERNETES.md index a013a49d3a1..3ee4305cb62 100644 --- a/readme-docs/KUBERNETES.md +++ b/readme-docs/KUBERNETES.md @@ -1,700 +1 @@ -# DefectDojo on Kubernetes - -DefectDojo Kubernetes utilizes [Helm](https://helm.sh/), a -package manager for Kubernetes. Helm Charts help you define, install, and -upgrade even the most complex Kubernetes application. - -For development purposes, -[minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) -and [Helm](https://helm.sh/) can be installed locally by following -this [guide](https://helm.sh/docs/using_helm/#installing-helm). - -## Supported Kubernetes Versions - -The tests cover the deployment on the lastest [kubernetes version](https://kubernetes.io/releases/) and the oldest supported [version from AWS](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html#available-versions). The assumption is that version in between do not have significant differences. Current tested versions can looks up in the [github k8s workflow](https://github.com/DefectDojo/django-DefectDojo/blob/master/.github/workflows/k8s-tests.yml). - -## Helm chart - -Starting with version 1.14.0, a helm chart will be pushed onto the `helm-charts` branch during the release process. Don't look for a chart museum, we're leveraging the "raw" capabilities of GitHub at this time. - -To use it, you can add our repo. 
- -``` -$ helm repo add defectdojo 'https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/helm-charts' - -$ helm repo update -``` - -You should now be able to see the chart. - -``` -$ helm search repo defectdojo -NAME CHART VERSION APP VERSION DESCRIPTION -defectdojo/defectdojo 1.6.153 2.39.0 A Helm chart for Kubernetes to install DefectDojo -``` - -## Kubernetes Local Quickstart - -Requirements: - -1. Helm installed locally -2. Minikube installed locally -3. Latest cloned copy of DefectDojo - -```zsh -git clone https://github.com/DefectDojo/django-DefectDojo -cd django-DefectDojo - -minikube start -minikube addons enable ingress -``` - -Helm >= v3 - -Then pull the dependent charts: - -```zsh -helm dependency update ./helm/defectdojo -``` - -Now, install the helm chart into minikube. - -If you have setup an ingress controller: - -```zsh -DJANGO_INGRESS_ENABLED=true -``` - -else: - -```zsh -DJANGO_INGRESS_ENABLED=false -``` - -If you have configured TLS: - -```zsh -DJANGO_INGRESS_ACTIVATE_TLS=true -``` - -else: - -```zsh -DJANGO_INGRESS_ACTIVATE_TLS=false -``` - -Warning: Use the `createSecret*=true` flags only upon first install. For re-installs, see `§Re-install the chart` - -Helm >= v3: - -```zsh -helm install \ - defectdojo \ - ./helm/defectdojo \ - --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ - --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} \ - --set createSecret=true \ - --set createRedisSecret=true \ - --set createPostgresqlSecret=true -``` - -It usually takes up to a minute for the services to startup and the -status of the containers can be viewed by starting up `minikube dashboard`. -Note: If the containers are not cached locally the services will start once the -containers have been pulled locally. 
- -To be able to access DefectDojo, set up an ingress or access the service -directly by running the following command: - -```zsh -kubectl port-forward --namespace=default \ -service/defectdojo-django 8080:80 -``` - -As you set your host value to defectdojo.default.minikube.local, make sure that -it resolves to the localhost IP address, e.g. by adding the following two lines -to /etc/hosts: - -```zsh -::1 defectdojo.default.minikube.local -127.0.0.1 defectdojo.default.minikube.local -``` - -To find out the password, run the following command: - -```zsh -echo "DefectDojo admin password: $(kubectl \ - get secret defectdojo \ - --namespace=default \ - --output jsonpath='{.data.DD_ADMIN_PASSWORD}' \ - | base64 --decode)" -``` - -To access DefectDojo, go to . -Log in with username admin and the password from the previous command. - -### Minikube with locally built containers - -If testing containers locally, then set the imagePullPolicy to Never, -which ensures containers are not pulled from Docker hub. - -Use the same commands as before but add: - -```zsh - --set imagePullPolicy=Never -``` - -### Installing from a private registry - -If you have stored your images in a private registry, you can install defectdojo chart with (helm 3). - -- First create a secret named "defectdojoregistrykey" based on the credentials that can pull from the registry: see https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ -- Then install the chart with the same commands as before but adding: - -```zsh - --set repositoryPrefix= \ - --set imagePullSecrets=defectdojoregistrykey -``` - -### Build Images Locally - -```zsh -# Build images -docker build -t defectdojo/defectdojo-django -f Dockerfile.django . -docker build -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . 
-``` - -```zsh -# Build images behind proxy -docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-django -f Dockerfile.django . -docker build --build-arg http_proxy=http://myproxy.com:8080 --build-arg https_proxy=http://myproxy.com:8080 -t defectdojo/defectdojo-nginx -f Dockerfile.nginx . -``` - -### Upgrade the chart - -If you want to change kubernetes configuration of use an updated docker image (evolution of defectDojo code), upgrade the application: - -``` -kubectl delete job defectdojo-initializer -helm upgrade defectdojo ./helm/defectdojo/ \ - --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ - --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} -``` - -### Re-install the chart - -In case of issue or in any other situation where you need to re-install the chart, you can do it and re-use the same secrets. - -**Note: With postgresql you'll keep the same database (more information below)** - -```zsh -# helm 3 -helm uninstall defectdojo -helm install \ - defectdojo \ - ./helm/defectdojo \ - --set django.ingress.enabled=${DJANGO_INGRESS_ENABLED} \ - --set django.ingress.activateTLS=${DJANGO_INGRESS_ACTIVATE_TLS} -``` - -## Kubernetes Production - -When running defectdojo in production be aware that you understood the full setup and always have a backup. - -### Encryption to Kubernetes - -Optionally, for TLS locally, you need to install a TLS certificate into your Kubernetes cluster. -For development purposes, you can create your own certificate authority as -described [here](https://github.com/hendrikhalkow/k8s-docs/blob/master/tls.md). - -```zsh -# https://kubernetes.io/docs/concepts/services-networking/ingress/#tls -# Create a TLS secret called minikube-tls as mentioned above, e.g. 
-K8S_NAMESPACE="default" -TLS_CERT_DOMAIN="${K8S_NAMESPACE}.minikube.local" -kubectl --namespace "${K8S_NAMESPACE}" create secret tls defectdojo-tls \ - --key <(openssl rsa \ - -in "${CA_DIR}/private/${TLS_CERT_DOMAIN}.key.pem" \ - -passin "pass:${TLS_CERT_PASSWORD}") \ - --cert <(cat \ - "${CA_DIR}/certs/${TLS_CERT_DOMAIN}.cert.pem" \ - "${CA_DIR}/chain.pem") -``` - -### Encryption in Kubernetes and End-to-End Encryption - -With the TLS certificate from your Kubernetes cluster all traffic to you cluster is encrypted, but the traffic in your cluster is still unencrypted. - -If you want to encrypt the traffic to the nginx server you can use the option `--set nginx.tls.enabled=true` and `--set nginx.tls.generateCertificate=true` to generate a self signed certificate and use the https config. The option to add you own pregenerated certificate is generelly possible but not implemented in the helm chart yet. - -Be aware that the traffic to the database and celery broker are unencrypted at the moment. - -### Media persistent volume - -By default, DefectDojo helm installation doesn't support persistent storage for storing images (dynamically uploaded by users). By default, it uses emptyDir, which is ephemeral by its nature and doesn't support multiple replicas of django pods, so should not be in use for production. 
- -To enable persistence of the media storage that supports R/W many, should be in use as backend storage like S3, NFS, glusterfs, etc - -```bash -mediaPersistentVolume: - enabled: true - # any name - name: media - # could be emptyDir (not for production) or pvc - type: pvc - # there are two options to create pvc 1) when you want the chart to create pvc for you, set django.mediaPersistentVolume.persistentVolumeClaim.create to true and do not specify anything for django.mediaPersistentVolume.PersistentVolumeClaim.name 2) when you want to create pvc outside the chart, pass the pvc name via django.mediaPersistentVolume.PersistentVolumeClaim.name and ensure django.mediaPersistentVolume.PersistentVolumeClaim.create is set to false - persistentVolumeClaim: - create: true - name: - size: 5Gi - accessModes: - - ReadWriteMany - storageClassName: -``` - -In the example above, we want the media content to be preserved to `pvc` as `persistentVolumeClaim` k8s resource and what we are basically doing is enabling the pvc to be created conditionally if the user wants to create it using the chart (in this case the pvc name 'defectdojo-media' will be inherited from template file used to deploy the pvc). By default the volume type is emptyDir which does not require a pvc. But when the type is set to pvc then we need a kubernetes Persistent Volume Claim and this is where the django.mediaPersistentVolume.persistentVolumeClaim.name comes into play. - -The accessMode is set to ReadWriteMany by default to accommodate using more than one replica. Ensure storage support ReadWriteMany before setting this option, otherwise set accessMode to ReadWriteOnce. - -NOTE: PersistrentVolume needs to be prepared in front before helm installation/update is triggered. 
- -For more detail how how to create proper PVC see [example](https://github.com/DefectDojo/Community-Contribs/tree/master/persistent-media) - -### Installation - -**Important:** If you choose to create the secret on your own, you will need to create a secret named `defectdojo` and containing the following fields: - -- DD_ADMIN_PASSWORD -- DD_SECRET_KEY -- DD_CREDENTIAL_AES_256_KEY -- METRICS_HTTP_AUTH_PASSWORD - -Theses fields are required to get the stack running. - -```zsh -# Install Helm chart. Choose a host name that matches the certificate above -helm install \ - defectdojo \ - ./helm/defectdojo \ - --namespace="${K8S_NAMESPACE}" \ - --set host="defectdojo.${TLS_CERT_DOMAIN}" \ - --set django.ingress.secretName="minikube-tls" \ - --set createSecret=true \ - --set createRedisSecret=true \ - --set createPostgresqlSecret=true - -# For high availability deploy multiple instances of Django, Celery and Redis -helm install \ - defectdojo \ - ./helm/defectdojo \ - --namespace="${K8S_NAMESPACE}" \ - --set host="defectdojo.${TLS_CERT_DOMAIN}" \ - --set django.ingress.secretName="minikube-tls" \ - --set django.replicas=3 \ - --set celery.worker.replicas=3 \ - --set redis.replicas=3 \ - --set createSecret=true \ - --set createRedisSecret=true \ - --set createPostgresqlSecret=true - -# Run highly available PostgreSQL cluster -# for production environment. 
-helm install \ - defectdojo \ - ./helm/defectdojo \ - --namespace="${K8S_NAMESPACE}" \ - --set host="defectdojo.${TLS_CERT_DOMAIN}" \ - --set django.replicas=3 \ - --set celery.worker.replicas=3 \ - --set redis.replicas=3 \ - --set django.ingress.secretName="minikube-tls" \ - --set postgresql.enabled=true \ - --set postgresql.replication.enabled=true \ - --set postgresql.replication.slaveReplicas=3 \ - --set createSecret=true \ - --set createRedisSecret=true \ - --set createPostgresqlSecret=true - -# Note: If you run `helm install defectdojo before, you will get an error -# message like `Error: release defectdojo failed: secrets "defectdojo" already -# exists`. This is because the secret is kept across installations. -# To prevent recreating the secret, add --set createSecret=false` to your -# command. - -# Run test. -helm test defectdojo - -# Navigate to . -``` - -### Prometheus metrics - -It's possible to enable Nginx prometheus exporter by setting `--set monitoring.enabled=true` and `--set monitoring.prometheus.enabled=true`. This adds the Nginx exporter sidecar and the standard Prometheus pod annotations to django deployment. - -## Useful stuff - -### Setting your own domain - -The `siteUrl` in values.yaml controls what domain is configured in Django, and also what the celery workers will put as links in Jira tickets for example. -Set this to your `https://` in values.yaml - -### Multiple Hostnames - -Django requires a list of all hostnames that are valid for requests. -You can add additional hostnames via helm or values file as an array. -This helps if you have a local service submitting reports to defectDojo using -the namespace name (say defectdojo.scans) instead of the TLD name used in a browser. 
- -In your helm install simply pass them as a defined array, for example: - -`--set "alternativeHosts={defectdojo.default,localhost,defectdojo.example.com}"` - -This will also work with shell inserted variables: - -`--set "alternativeHosts={defectdojo.${TLS_CERT_DOMAIN},localhost}"` - -You will still need to set a host value as well. - -### Using an existing redis setup with redis-sentinel - -If you want to use a redis-sentinel setup as the Celery broker, you will need to set the following. - -1. Set redis.scheme to "sentinel" in values.yaml -2. Set two additional extraEnv vars specifying the sentinel master name and port in values.yaml - -```yaml -celery: - broker: 'redis' - -redis: - redisServer: 'PutYourRedisSentinelAddress' - scheme: 'sentinel' - -extraEnv: - - name: DD_CELERY_BROKER_TRANSPORT_OPTIONS - value: '{"master_name": "mymaster"}' - - name: 'DD_CELERY_BROKER_PORT' - value: '26379' -``` - -### How to use an external PostgreSQL DB with Defectdojo - -#### Step 1: Create a Namespace for DefectDojo - -To begin, create a dedicated namespace for DefectDojo to isolate its resources: -`kubectl create ns defectdojo` - -#### Step 2: Create a Secret for PostgreSQL Credentials - -Set up a Kubernetes Secret to securely store the PostgreSQL user password and database connection URL, which are essential for establishing a secure connection between DefectDojo and your PostgreSQL instance. Apply the secret using the following command: `kubectl apply -f secret.yaml -n defectdojo`. This secret will be referenced within the `extraEnv` section of the DefectDojo Helm values file. 
- -Sample secret template (replace the placeholders with your PostgreSQL credentials): - -```YAML -apiversion: v1 -kind: Secret -metadata: - name: defectdojo-postgresql-specific -type: Opaque -stringData: # I chose stringData for better visualization of the credentials for debugging - password: -``` - -#### Step 2.5: Install PostgreSQL (Optional) - -If you need to simulate a PostgreSQL database external to DefectDojo, you can install PostgreSQL using the following Helm command: - -```bash -helm repo add bitnami https://charts.bitnami.com/bitnami -helm repo update -helm install defectdojo-postgresql bitnami/postgresql -n defectdojo -f postgresql/values.yaml -``` - -Sample `values.yaml` file for PostgreSQL configuration: - -```YAML -auth: -  username: defectdojo -  password: -  postgresPassword: -  database: defectdojo -  primary: -    persistence: -    size: 10Gi -``` - -#### Step 3: Modify DefectDojo helm values - -Before installing the DefectDojo Helm chart, it's important to customize the `values.yaml` file. 
Key areas to modify include specifying the PostgreSQL connection details & the extraEnv block: - -```yaml -postgresql: - enabled: false # Disable the creation of the database in the cluster - postgresServer: "127.0.0.1" # Required to skip certain tests not useful on external instances - auth: - username: defectdojo # your database user - database: defectdojo # your database name - secretKeys: - adminPasswordKey: password # the name of the field containing the password value - userPasswordKey: password # the name of the field containing the password value - replicationPasswordKey: password # the name of the field containing the password value - existingSecret: defectdojo-postgresql-specific # the secret containing your database password - -extraEnv: -# Overwrite the database endpoint -- name: DD_DATABASE_HOST - value: -# Overwrite the database port -- name: DD_DATABASE_PORT - value: -``` - -#### Step 4: Deploy DefectDojo - -After modifying the `values.yaml` file as needed, deploy DefectDojo using Helm. This command also generates the required secrets for the DefectDojo admin UI and Redis: - -```bash -helm install defectdojo defectdojo -f values.yaml -n defectdojo --set createSecret=true --set createRedisSecret=true -``` - -**NOTE**: It is important to highlight that this setup can also be utilized for achieving high availability (HA) in PostgreSQL. By placing a load balancer in front of the PostgreSQL cluster, read and write requests can be efficiently routed to the appropriate primary or standby servers as needed. 
- -### kubectl commands - -```zsh -# View logs of a specific pod -kubectl logs $(kubectl get pod --selector=defectdojo.org/component=${POD} \ - -o jsonpath="{.items[0].metadata.name}") -f - -# Open a shell in a specific pod -kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ - -o jsonpath="{.items[0].metadata.name}") -- /bin/bash -# Or: -kubectl exec defectdojo-django- -c uwsgi -it /bin/sh - -# Open a Python shell in a specific pod -kubectl exec -it $(kubectl get pod --selector=defectdojo.org/component=${POD} \ - -o jsonpath="{.items[0].metadata.name}") -- python manage.py shell -``` - -### Clean up Kubernetes - -Helm >= v3 - -``` -helm uninstall defectdojo -``` - -To remove persistent objects not removed by uninstall (this will remove any database): - -``` -kubectl delete secrets defectdojo defectdojo-redis-specific defectdojo-postgresql-specific -kubectl delete serviceAccount defectdojo -kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 -``` - - -# General information about chart values - -![Version: 1.6.202-dev](https://img.shields.io/badge/Version-1.6.202--dev-informational?style=flat-square) ![AppVersion: 2.50.0-dev](https://img.shields.io/badge/AppVersion-2.50.0--dev-informational?style=flat-square) - -A Helm chart for Kubernetes to install DefectDojo - -## Maintainers - -| Name | Email | Url | -| ---- | ------ | --- | -| madchap | | | - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://charts.bitnami.com/bitnami | postgresql | ~16.7.0 | -| https://charts.bitnami.com/bitnami | redis | ~19.6.0 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| admin.credentialAes256Key | string | `nil` | | -| admin.firstName | string | `"Administrator"` | | -| admin.lastName | string | `"User"` | | -| admin.mail | string | `"admin@defectdojo.local"` | | -| admin.metricsHttpAuthPassword | string | `nil` | | -| admin.password | string 
| `nil` | | -| admin.secretKey | string | `nil` | | -| admin.user | string | `"admin"` | | -| annotations | object | `{}` | | -| celery.annotations | object | `{}` | | -| celery.beat.affinity | object | `{}` | | -| celery.beat.annotations | object | `{}` | | -| celery.beat.nodeSelector | object | `{}` | | -| celery.beat.replicas | int | `1` | | -| celery.beat.resources.limits.cpu | string | `"2000m"` | | -| celery.beat.resources.limits.memory | string | `"256Mi"` | | -| celery.beat.resources.requests.cpu | string | `"100m"` | | -| celery.beat.resources.requests.memory | string | `"128Mi"` | | -| celery.beat.tolerations | list | `[]` | | -| celery.broker | string | `"redis"` | | -| celery.brokerHost | string | `""` | | -| celery.extraVolumes | list | `[]` | | -| celery.logLevel | string | `"INFO"` | | -| celery.worker.affinity | object | `{}` | | -| celery.worker.annotations | object | `{}` | | -| celery.worker.appSettings.poolType | string | `"solo"` | | -| celery.worker.nodeSelector | object | `{}` | | -| celery.worker.replicas | int | `1` | | -| celery.worker.resources.limits.cpu | string | `"2000m"` | | -| celery.worker.resources.limits.memory | string | `"512Mi"` | | -| celery.worker.resources.requests.cpu | string | `"100m"` | | -| celery.worker.resources.requests.memory | string | `"128Mi"` | | -| celery.worker.tolerations | list | `[]` | | -| cloudsql.enable_iam_login | bool | `false` | | -| cloudsql.enabled | bool | `false` | | -| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | | -| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | | -| cloudsql.image.tag | string | `"1.37.8"` | | -| cloudsql.instance | string | `""` | | -| cloudsql.use_private_ip | bool | `false` | | -| cloudsql.verbose | bool | `true` | | -| createPostgresqlSecret | bool | `false` | | -| createRedisSecret | bool | `false` | | -| createSecret | bool | `false` | | -| dbMigrationChecker.enabled | bool | `true` | | -| dbMigrationChecker.resources.limits.cpu | 
string | `"200m"` | | -| dbMigrationChecker.resources.limits.memory | string | `"200Mi"` | | -| dbMigrationChecker.resources.requests.cpu | string | `"100m"` | | -| dbMigrationChecker.resources.requests.memory | string | `"100Mi"` | | -| django.affinity | object | `{}` | | -| django.annotations | object | `{}` | | -| django.extraVolumes | list | `[]` | | -| django.ingress.activateTLS | bool | `true` | | -| django.ingress.annotations | object | `{}` | | -| django.ingress.enabled | bool | `true` | | -| django.ingress.ingressClassName | string | `""` | | -| django.ingress.secretName | string | `"defectdojo-tls"` | | -| django.mediaPersistentVolume.enabled | bool | `true` | | -| django.mediaPersistentVolume.fsGroup | int | `1001` | | -| django.mediaPersistentVolume.name | string | `"media"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.accessModes[0] | string | `"ReadWriteMany"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.create | bool | `false` | | -| django.mediaPersistentVolume.persistentVolumeClaim.name | string | `nil` | | -| django.mediaPersistentVolume.persistentVolumeClaim.size | string | `"5Gi"` | | -| django.mediaPersistentVolume.persistentVolumeClaim.storageClassName | string | `nil` | | -| django.mediaPersistentVolume.type | string | `"emptyDir"` | | -| django.nginx.resources.limits.cpu | string | `"2000m"` | | -| django.nginx.resources.limits.memory | string | `"256Mi"` | | -| django.nginx.resources.requests.cpu | string | `"100m"` | | -| django.nginx.resources.requests.memory | string | `"128Mi"` | | -| django.nginx.tls.enabled | bool | `false` | | -| django.nginx.tls.generateCertificate | bool | `false` | | -| django.nodeSelector | object | `{}` | | -| django.replicas | int | `1` | | -| django.service.annotations | object | `{}` | | -| django.service.type | string | `""` | | -| django.strategy | object | `{}` | | -| django.tolerations | list | `[]` | | -| django.uwsgi.appSettings.processes | int | `2` | | -| 
django.uwsgi.appSettings.threads | int | `2` | | -| django.uwsgi.certificates.certFileName | string | `"ca.crt"` | | -| django.uwsgi.certificates.certMountPath | string | `"/certs/"` | | -| django.uwsgi.certificates.configName | string | `"defectdojo-ca-certs"` | | -| django.uwsgi.certificates.enabled | bool | `false` | | -| django.uwsgi.enableDebug | bool | `false` | | -| django.uwsgi.livenessProbe.enabled | bool | `true` | | -| django.uwsgi.livenessProbe.failureThreshold | int | `6` | | -| django.uwsgi.livenessProbe.initialDelaySeconds | int | `0` | | -| django.uwsgi.livenessProbe.periodSeconds | int | `10` | | -| django.uwsgi.livenessProbe.successThreshold | int | `1` | | -| django.uwsgi.livenessProbe.timeoutSeconds | int | `5` | | -| django.uwsgi.readinessProbe.enabled | bool | `true` | | -| django.uwsgi.readinessProbe.failureThreshold | int | `6` | | -| django.uwsgi.readinessProbe.initialDelaySeconds | int | `0` | | -| django.uwsgi.readinessProbe.periodSeconds | int | `10` | | -| django.uwsgi.readinessProbe.successThreshold | int | `1` | | -| django.uwsgi.readinessProbe.timeoutSeconds | int | `5` | | -| django.uwsgi.resources.limits.cpu | string | `"2000m"` | | -| django.uwsgi.resources.limits.memory | string | `"512Mi"` | | -| django.uwsgi.resources.requests.cpu | string | `"100m"` | | -| django.uwsgi.resources.requests.memory | string | `"256Mi"` | | -| django.uwsgi.startupProbe.enabled | bool | `true` | | -| django.uwsgi.startupProbe.failureThreshold | int | `30` | | -| django.uwsgi.startupProbe.initialDelaySeconds | int | `0` | | -| django.uwsgi.startupProbe.periodSeconds | int | `5` | | -| django.uwsgi.startupProbe.successThreshold | int | `1` | | -| django.uwsgi.startupProbe.timeoutSeconds | int | `1` | | -| extraConfigs | object | `{}` | | -| gke.useGKEIngress | bool | `false` | | -| gke.useManagedCertificate | bool | `false` | | -| gke.workloadIdentityEmail | string | `""` | | -| host | string | `"defectdojo.default.minikube.local"` | | -| 
imagePullPolicy | string | `"Always"` | | -| initializer.affinity | object | `{}` | | -| initializer.annotations | object | `{}` | | -| initializer.extraVolumes | list | `[]` | | -| initializer.jobAnnotations | object | `{}` | | -| initializer.keepSeconds | int | `60` | | -| initializer.labels | object | `{}` | | -| initializer.nodeSelector | object | `{}` | | -| initializer.resources.limits.cpu | string | `"2000m"` | | -| initializer.resources.limits.memory | string | `"512Mi"` | | -| initializer.resources.requests.cpu | string | `"100m"` | | -| initializer.resources.requests.memory | string | `"256Mi"` | | -| initializer.run | bool | `true` | | -| initializer.staticName | bool | `false` | | -| monitoring.enabled | bool | `false` | You can also specify value comments like this | -| monitoring.prometheus.enabled | bool | `false` | | -| monitoring.prometheus.image | string | `"nginx/nginx-prometheus-exporter:1.4.2"` | | -| monitoring.prometheus.imagePullPolicy | string | `"IfNotPresent"` | | -| networkPolicy.annotations | object | `{}` | | -| networkPolicy.egress | list | `[]` | | -| networkPolicy.enabled | bool | `false` | | -| networkPolicy.ingress | list | `[]` | | -| networkPolicy.ingressExtend | list | `[]` | | -| podLabels | object | `{}` | | -| postgresql.architecture | string | `"standalone"` | | -| postgresql.auth.database | string | `"defectdojo"` | | -| postgresql.auth.existingSecret | string | `"defectdojo-postgresql-specific"` | | -| postgresql.auth.password | string | `""` | | -| postgresql.auth.secretKeys.adminPasswordKey | string | `"postgresql-postgres-password"` | | -| postgresql.auth.secretKeys.replicationPasswordKey | string | `"postgresql-replication-password"` | | -| postgresql.auth.secretKeys.userPasswordKey | string | `"postgresql-password"` | | -| postgresql.auth.username | string | `"defectdojo"` | | -| postgresql.enabled | bool | `true` | | -| postgresql.primary.affinity | object | `{}` | | -| 
postgresql.primary.containerSecurityContext.enabled | bool | `true` | | -| postgresql.primary.containerSecurityContext.runAsUser | int | `1001` | | -| postgresql.primary.name | string | `"primary"` | | -| postgresql.primary.nodeSelector | object | `{}` | | -| postgresql.primary.persistence.enabled | bool | `true` | | -| postgresql.primary.podSecurityContext.enabled | bool | `true` | | -| postgresql.primary.podSecurityContext.fsGroup | int | `1001` | | -| postgresql.primary.service.ports.postgresql | int | `5432` | | -| postgresql.shmVolume.chmod.enabled | bool | `false` | | -| postgresql.volumePermissions.containerSecurityContext.runAsUser | int | `1001` | | -| postgresql.volumePermissions.enabled | bool | `false` | | -| redis.architecture | string | `"standalone"` | | -| redis.auth.existingSecret | string | `"defectdojo-redis-specific"` | | -| redis.auth.existingSecretPasswordKey | string | `"redis-password"` | | -| redis.auth.password | string | `""` | | -| redis.enabled | bool | `true` | | -| redis.scheme | string | `"redis"` | | -| redis.transportEncryption.enabled | bool | `false` | | -| redis.transportEncryption.params | string | `""` | | -| repositoryPrefix | string | `"defectdojo"` | | -| secrets.annotations | object | `{}` | | -| securityContext.djangoSecurityContext.runAsUser | int | `1001` | | -| securityContext.enabled | bool | `true` | | -| securityContext.nginxSecurityContext.runAsUser | int | `1001` | | -| serviceAccount.annotations | object | `{}` | | -| serviceAccount.create | bool | `true` | | -| serviceAccount.labels | object | `{}` | | -| tag | string | `"latest"` | | -| tests.unitTests.resources.limits.cpu | string | `"500m"` | | -| tests.unitTests.resources.limits.memory | string | `"512Mi"` | | -| tests.unitTests.resources.requests.cpu | string | `"100m"` | | -| tests.unitTests.resources.requests.memory | string | `"128Mi"` | | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs 
v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
+Full documentation is located in the [Helm chart README](../helm/defectdojo/README.md).