diff --git a/helm/charts/hydra-distributed/.helmignore b/helm/charts/hydra-distributed/.helmignore
new file mode 100644
index 000000000..b066297f1
--- /dev/null
+++ b/helm/charts/hydra-distributed/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+# NOTE: a bare "*.txt" pattern is omitted here on purpose: it would exclude templates/NOTES.txt from the packaged chart
\ No newline at end of file
diff --git a/helm/charts/hydra-distributed/Chart.lock b/helm/charts/hydra-distributed/Chart.lock
new file mode 100644
index 000000000..dc277bf77
--- /dev/null
+++ b/helm/charts/hydra-distributed/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: ory-commons
+ repository: file://../ory-commons
+ version: 0.1.0
+- name: hydra-maester
+ repository: file://../hydra-maester
+ version: 0.50.3
+digest: sha256:18facc7a0739ed1f51746992bbeced44d84b8e0727b89b33f48dad914f4ecebb
+generated: "2024-12-04T11:40:27.919030591Z"
diff --git a/helm/charts/hydra-distributed/Chart.yaml b/helm/charts/hydra-distributed/Chart.yaml
new file mode 100644
index 000000000..fcdeca3fa
--- /dev/null
+++ b/helm/charts/hydra-distributed/Chart.yaml
@@ -0,0 +1,34 @@
+apiVersion: v2
+appVersion: "v2.2.0"
+description:
+ A Helm chart for deploying ORY Hydra in Kubernetes with a distributed layout.
+name: hydra
+icon: https://raw.githubusercontent.com/ory/docs/master/docs/static/img/logo-hydra.svg
+version: 0.50.3
+keywords:
+ - oauth2
+ - openid-connect
+ - openid
+ - oidc
+ - op
+ - api-security
+ - security
+home: https://www.ory.sh/
+sources:
+ - https://github.com/ory/hydra
+ - https://github.com/ory/k8s
+maintainers: # (optional)
+ - name: ORY Team
+ email: hi@ory.sh
+ url: https://www.ory.sh/
+type: application
+dependencies:
+ - name: ory-commons
+ version: 0.1.0
+ repository: file://../ory-commons
+ alias: ory
+ - name: hydra-maester
+ version: 0.50.3
+ condition: maester.enabled
+ alias: hydra-maester
+ repository: file://../hydra-maester
diff --git a/helm/charts/hydra-distributed/README.md b/helm/charts/hydra-distributed/README.md
new file mode 100644
index 000000000..2048a2a17
--- /dev/null
+++ b/helm/charts/hydra-distributed/README.md
@@ -0,0 +1,209 @@
+# hydra-distributed
+
+![Version: 0.50.3](https://img.shields.io/badge/Version-0.50.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.2.0](https://img.shields.io/badge/AppVersion-v2.2.0-informational?style=flat-square)
+
+A Helm chart for deploying ORY Hydra in Kubernetes with a distributed layout. Essentially the same as the Hydra chart
+but the admin and public components are created as separate deployments.
+
+**Homepage:**
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| ORY Team | | |
+
+## Source Code
+
+*
+*
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| file://../hydra-maester | hydra-maester(hydra-maester) | 0.50.3 |
+| file://../ory-commons | ory(ory-commons) | 0.1.0 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| affinity | object | `{}` | |
+| configmap.hashSumEnabled | bool | `true` | switch to false to prevent checksum annotations being maintained and propagated to the pods |
+| cronjob.janitor.affinity | object | `{}` | Configure node affinity |
+| cronjob.janitor.annotations | object | `{}` | Set custom cron job level annotations |
+| cronjob.janitor.automountServiceAccountToken | bool | `true` | Set automounting of the SA token |
+| cronjob.janitor.customArgs | list | `[]` | Configure the arguments of the entrypoint, overriding the default value |
+| cronjob.janitor.customCommand | list | `[]` | Configure a custom entrypoint, overriding the default value |
+| cronjob.janitor.extraContainers | string | `""` | If you want to add extra sidecar containers. |
+| cronjob.janitor.extraEnv | list | `[]` | Array of extra envs to be passed to the cronjob. This takes precedence over deployment variables. Kubernetes format is expected. Value is processed with Helm `tpl` - name: FOO value: BAR |
+| cronjob.janitor.extraInitContainers | string | `""` | If you want to add extra init containers. These are processed before the migration init container. |
+| cronjob.janitor.extraVolumeMounts | list | `[]` | |
+| cronjob.janitor.extraVolumes | list | `[]` | If you want to mount external volume |
+| cronjob.janitor.labels | object | `{}` | Set custom cron job level labels |
+| cronjob.janitor.nodeSelector | object | `{}` | Configure node labels for pod assignment |
+| cronjob.janitor.podMetadata | object | `{"annotations":{},"labels":{}}` | Specify pod metadata, this metadata is added directly to the pod, and not higher objects |
+| cronjob.janitor.podMetadata.annotations | object | `{}` | Extra pod level annotations |
+| cronjob.janitor.podMetadata.labels | object | `{}` | Extra pod level labels |
+| cronjob.janitor.podSecurityContext | object | `{}` | |
+| cronjob.janitor.resources | object | `{"limits":{},"requests":{}}` | We usually recommend not to specify default resources and to leave this as a conscious choice for the user. This also increases chances charts run on environments with little resources, such as Minikube. If you do want to specify resources, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: cpu: 100m memory: 128Mi requests: cpu: 100m memory: 128Mi |
+| cronjob.janitor.schedule | string | `"0 */1 * * *"` | Configure how often the cron job is run |
+| cronjob.janitor.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":100}` | Configure the containers' SecurityContext for the janitor cronjob |
+| cronjob.janitor.serviceAccount | object | `{"annotations":{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"0"},"create":true,"name":""}` | Specify the serviceAccountName value. In some situations it is needed to provide specific permissions to Hydra deployments, like for example installing Hydra on a cluster with a PodSecurityPolicy and Istio. Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment. |
+| cronjob.janitor.serviceAccount.annotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"0"}` | Annotations to add to the service account |
+| cronjob.janitor.serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| cronjob.janitor.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| cronjob.janitor.tolerations | list | `[]` | Configure node tolerations |
+| deployment.admin | object | `{}` | This field acts as overrides only for the `hydra-admin` deployment object. |
+| deployment.public | object | `{}` | This field acts as overrides only for the `hydra-public` deployment object. |
+| deployment.annotations | object | `{}` | Set custom deployment level annotations |
+| deployment.automigration | object | `{"extraEnv":[]}` | Parameters for the automigration initContainer |
+| deployment.automigration.extraEnv | list | `[]` | Array of extra envs to be passed to the initContainer. Kubernetes format is expected. Value is processed with Helm `tpl` - name: FOO value: BAR |
+| deployment.automountServiceAccountToken | bool | `false` | |
+| deployment.autoscaling | object | `{"behavior":{},"enabled":false,"maxReplicas":3,"minReplicas":1,"targetCPU":{},"targetMemory":{}}` | Configure HPA |
+| deployment.autoscaling.behavior | object | `{}` | Set custom behavior https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior |
+| deployment.customLivenessProbe | object | `{}` | Configure a custom livenessProbe. This overwrites the default object |
+| deployment.customReadinessProbe | object | `{}` | Configure a custom readinessProbe. This overwrites the default object |
+| deployment.customStartupProbe | object | `{}` | Configure a custom startupProbe. This overwrites the default object |
+| deployment.dnsConfig | object | `{}` | Configure pod dnsConfig. |
+| deployment.extraContainers | string | `""` | If you want to add extra sidecar containers. |
+| deployment.extraEnv | list | `[]` | Array of extra envs to be passed to the deployment. Kubernetes format is expected. Value is processed with Helm `tpl` - name: FOO value: BAR |
+| deployment.extraInitContainers | string | `""` | If you want to add extra init containers. These are processed before the migration init container. |
+| deployment.extraVolumeMounts | list | `[]` | |
+| deployment.extraVolumes | list | `[]` | If you want to mount external volume |
+| deployment.initContainerSecurityContext | object | `{}` | |
+| deployment.labels | object | `{}` | Set custom deployment level labels |
+| deployment.lifecycle | object | `{}` | |
+| deployment.livenessProbe | object | `{"failureThreshold":5,"initialDelaySeconds":5,"periodSeconds":10}` | Default probe timers |
+| deployment.nodeSelector | object | `{}` | Node labels for pod assignment. |
+| deployment.podMetadata | object | `{"annotations":{},"labels":{}}` | Specify pod metadata, this metadata is added directly to the pod, and not higher objects |
+| deployment.podMetadata.annotations | object | `{}` | Extra pod level annotations |
+| deployment.podMetadata.labels | object | `{}` | Extra pod level labels |
+| deployment.podSecurityContext.fsGroup | int | `65534` | |
+| deployment.podSecurityContext.fsGroupChangePolicy | string | `"OnRootMismatch"` | |
+| deployment.podSecurityContext.runAsGroup | int | `65534` | |
+| deployment.podSecurityContext.runAsNonRoot | bool | `true` | |
+| deployment.podSecurityContext.runAsUser | int | `65534` | |
+| deployment.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| deployment.readinessProbe | object | `{"failureThreshold":5,"initialDelaySeconds":5,"periodSeconds":10}` | Default probe timers |
+| deployment.resources | object | `{}` | We usually recommend not to specify default resources and to leave this as a conscious choice for the user. This also increases chances charts run on environments with little resources, such as Minikube. If you do want to specify resources, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: cpu: 100m memory: 128Mi requests: cpu: 100m memory: 128Mi |
+| deployment.revisionHistoryLimit | int | `5` | Number of revisions kept in history |
+| deployment.securityContext.allowPrivilegeEscalation | bool | `false` | |
+| deployment.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| deployment.securityContext.privileged | bool | `false` | |
+| deployment.securityContext.readOnlyRootFilesystem | bool | `true` | |
+| deployment.securityContext.runAsGroup | int | `65534` | |
+| deployment.securityContext.runAsNonRoot | bool | `true` | |
+| deployment.securityContext.runAsUser | int | `65534` | |
+| deployment.securityContext.seLinuxOptions.level | string | `"s0:c123,c456"` | |
+| deployment.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| deployment.serviceAccount | object | `{"annotations":{},"create":true,"name":""}` | Specify the serviceAccountName value. In some situations it is needed to provide specific permissions to Hydra deployments, like for example installing Hydra on a cluster with a PodSecurityPolicy and Istio. Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment. |
+| deployment.serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| deployment.serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| deployment.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| deployment.startupProbe | object | `{"failureThreshold":60,"periodSeconds":1,"successThreshold":1,"timeoutSeconds":1}` | Default probe timers |
+| deployment.strategy.rollingUpdate.maxSurge | string | `"25%"` | |
+| deployment.strategy.rollingUpdate.maxUnavailable | string | `"25%"` | |
+| deployment.strategy.type | string | `"RollingUpdate"` | |
+| deployment.terminationGracePeriodSeconds | int | `60` | |
+| deployment.tolerations | list | `[]` | Configure node tolerations. |
+| deployment.topologySpreadConstraints | list | `[]` | Configure pod topologySpreadConstraints. |
+| fullnameOverride | string | `""` | Full chart name override |
+| hydra-maester.adminService.name | string | `""` | The service name value may need to be set if you use `fullnameOverride` for the parent chart |
+| hydra.automigration.customArgs | list | `[]` | Ability to override arguments of the entrypoint. Can be used independently of customCommand eg: - sleep 5; - hydra |
+| hydra.automigration.customCommand | list | `[]` | Ability to override the entrypoint of the automigration container (e.g. to source dynamic secrets or export environment dynamic variables) |
+| hydra.automigration.enabled | bool | `false` | |
+| hydra.automigration.resources | object | `{}` | resource requests and limits for the automigration initcontainer |
+| hydra.automigration.type | string | `"job"` | Configure the way to execute database migration. Possible values: job, initContainer When set to job, the migration will be executed as a job on release or upgrade. When set to initContainer, the migration will be executed when the hydra pod is created Defaults to job |
+| hydra.command | list | `["hydra"]` | Ability to override the entrypoint of hydra container (e.g. to source dynamic secrets or export environment dynamic variables) |
+| hydra.config | object | `{"secrets":{},"serve":{"admin":{"port":4445},"public":{"port":4444},"tls":{"allow_termination_from":["10.0.0.0/8","172.16.0.0/12","192.168.0.0/16"]}},"urls":{"self":{}}}` | The ORY Hydra configuration. For a full list of available settings, check: https://www.ory.sh/docs/hydra/reference/configuration |
+| hydra.config.secrets | object | `{}` | The secrets have to be provided as a string slice, example: system: - "OG5XbmxXa3dYeGplQXpQanYxeEFuRUFa" - "foo bar 123 456 lorem" - "foo bar 123 456 lorem 1" - "foo bar 123 456 lorem 2" - "foo bar 123 456 lorem 3" |
+| hydra.customArgs | list | `[]` | Ability to override arguments of the entrypoint. Can be used independently of customCommand |
+| hydra.dev | bool | `false` | Enable dev mode, not secure in production environments |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"oryd/hydra"` | ORY Hydra image |
+| image.tag | string | `"v2.2.0"` | ORY Hydra version |
+| imagePullSecrets | list | `[]` | Image pull secrets |
+| ingress.admin.annotations | object | `{}` | |
+| ingress.admin.className | string | `""` | |
+| ingress.admin.enabled | bool | `false` | En-/Disable the api ingress. |
+| ingress.admin.hosts[0].host | string | `"admin.hydra.localhost"` | |
+| ingress.admin.hosts[0].paths[0].path | string | `"/"` | |
+| ingress.admin.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | |
+| ingress.public | object | `{"annotations":{},"className":"","enabled":false,"hosts":[{"host":"public.hydra.localhost","paths":[{"path":"/","pathType":"ImplementationSpecific"}]}]}` | Configure ingress for the proxy port. |
+| ingress.public.enabled | bool | `false` | En-/Disable the proxy ingress. |
+| janitor.batchSize | int | `100` | Configure how many records are deleted with each iteration |
+| janitor.cleanupGrants | bool | `false` | Configure if the trust relationships must be cleaned up |
+| janitor.cleanupRequests | bool | `false` | Configure if the consent and authentication requests must be cleaned up |
+| janitor.cleanupTokens | bool | `false` | Configure if the access and refresh tokens must be cleaned up |
+| janitor.enabled | bool | `false` | Enable cleanup of stale database rows by periodically running the janitor command |
+| janitor.limit | int | `10000` | Configure how many records are retrieved from database for deletion |
+| job.annotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"1"}` | If you do want to specify annotations, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'annotations:'. |
+| job.automountServiceAccountToken | bool | `true` | Set automounting of the SA token |
+| job.extraContainers | string | `""` | If you want to add extra sidecar containers. |
+| job.extraEnv | list | `[]` | Array of extra envs to be passed to the job. This takes precedence over deployment variables. Kubernetes format is expected. Value is processed with Helm `tpl` - name: FOO value: BAR |
+| job.extraInitContainers | string | `""` | If you want to add extra init containers. extraInitContainers: | - name: ... image: ... |
+| job.labels | object | `{}` | Set custom deployment level labels |
+| job.lifecycle | string | `""` | If you want to add lifecycle hooks. |
+| job.nodeSelector | object | `{}` | Node labels for pod assignment. |
+| job.podMetadata | object | `{"annotations":{},"labels":{}}` | Specify pod metadata, this metadata is added directly to the pod, and not higher objects |
+| job.podMetadata.annotations | object | `{}` | Extra pod level annotations |
+| job.podMetadata.labels | object | `{}` | Extra pod level labels |
+| job.resources | object | `{}` | resource requests and limits for the automigration job |
+| job.serviceAccount | object | `{"annotations":{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"0"},"create":true,"name":""}` | Specify the serviceAccountName value. In some situations it is needed to provide specific permissions to Hydra deployments, like for example installing Hydra on a cluster with a PodSecurityPolicy and Istio. Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment. |
+| job.serviceAccount.annotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"0"}` | Annotations to add to the service account |
+| job.serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| job.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| job.shareProcessNamespace | bool | `false` | Set sharing process namespace |
+| job.spec.backoffLimit | int | `10` | Set job back off limit |
+| job.tolerations | list | `[]` | Configure node tolerations. |
+| maester.enabled | bool | `true` | |
+| nameOverride | string | `""` | |
+| pdb.enabled | bool | `false` | |
+| pdb.spec.maxUnavailable | string | `""` | |
+| pdb.spec.minAvailable | string | `""` | |
+| priorityClassName | string | `""` | Pod priority https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ |
+| replicaCount | int | `1` | Number of ORY Hydra members |
+| secret.enabled | bool | `true` | switch to false to prevent creating the secret |
+| secret.hashSumEnabled | bool | `true` | switch to false to prevent checksum annotations being maintained and propagated to the pods |
+| secret.nameOverride | string | `""` | Provide custom name of existing secret, or custom name of secret to be created |
+| secret.secretAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation","helm.sh/hook-weight":"0","helm.sh/resource-policy":"keep"}` | Annotations to be added to secret. Annotations are added only when secret is being created. Existing secret will not be modified. |
+| service.admin | object | `{"annotations":{},"enabled":true,"labels":{},"loadBalancerIP":"","metricsPath":"/admin/metrics/prometheus","name":"http","port":4445,"type":"ClusterIP"}` | Configures the Kubernetes service for the api port. |
+| service.admin.annotations | object | `{}` | If you do want to specify annotations, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'annotations:'. |
+| service.admin.enabled | bool | `true` | En-/disable the service |
+| service.admin.loadBalancerIP | string | `""` | The load balancer IP |
+| service.admin.metricsPath | string | `"/admin/metrics/prometheus"` | Path to the metrics endpoint |
+| service.admin.name | string | `"http"` | The service port name. Useful to set a custom service port name if it must follow a scheme (e.g. Istio) |
+| service.admin.port | int | `4445` | The service port |
+| service.admin.type | string | `"ClusterIP"` | The service type |
+| service.public | object | `{"annotations":{},"enabled":true,"labels":{},"loadBalancerIP":"","name":"http","port":4444,"type":"ClusterIP"}` | Configures the Kubernetes service for the proxy port. |
+| service.public.annotations | object | `{}` | If you do want to specify annotations, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'annotations:'. |
+| service.public.enabled | bool | `true` | En-/disable the service |
+| service.public.loadBalancerIP | string | `""` | The load balancer IP |
+| service.public.name | string | `"http"` | The service port name. Useful to set a custom service port name if it must follow a scheme (e.g. Istio) |
+| service.public.port | int | `4444` | The service port |
+| service.public.type | string | `"ClusterIP"` | The service type |
+| serviceMonitor.enabled | bool | `false` | switch to true to enable creating the ServiceMonitor |
+| serviceMonitor.labels | object | `{}` | Provide additional labels to the ServiceMonitor resource metadata |
+| serviceMonitor.scheme | string | `"http"` | HTTP scheme to use for scraping. |
+| serviceMonitor.scrapeInterval | string | `"60s"` | Interval at which metrics should be scraped |
+| serviceMonitor.scrapeTimeout | string | `"30s"` | Timeout after which the scrape is ended |
+| serviceMonitor.tlsConfig | object | `{}` | TLS configuration to use when scraping the endpoint |
+| test.busybox | object | `{"repository":"busybox","tag":1}` | use a busybox image from another repository |
+| test.labels | object | `{}` | Provide additional labels to the test pod |
+| watcher.automountServiceAccountToken | bool | `true` | |
+| watcher.enabled | bool | `false` | |
+| watcher.image | string | `"oryd/k8s-toolbox:0.0.5"` | |
+| watcher.mountFile | string | `""` | Path to mounted file, which will be monitored for changes. eg: /etc/secrets/my-secret/foo |
+| watcher.podMetadata | object | `{"annotations":{},"labels":{}}` | Specify pod metadata, this metadata is added directly to the pod, and not higher objects |
+| watcher.podMetadata.annotations | object | `{}` | Extra pod level annotations |
+| watcher.podMetadata.labels | object | `{}` | Extra pod level labels |
+| watcher.podSecurityContext | object | `{}` | pod securityContext for watcher deployment |
+| watcher.resources | object | `{}` | |
+| watcher.revisionHistoryLimit | int | `5` | Number of revisions kept in history |
+| watcher.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"runAsUser":100,"seccompProfile":{"type":"RuntimeDefault"}}` | container securityContext for watcher deployment |
+| watcher.watchLabelKey | string | `"ory.sh/watcher"` | Label key used for managing applications |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
diff --git a/helm/charts/hydra-distributed/charts/hydra-maester-0.50.3.tgz b/helm/charts/hydra-distributed/charts/hydra-maester-0.50.3.tgz
new file mode 100644
index 000000000..ed2026dc3
Binary files /dev/null and b/helm/charts/hydra-distributed/charts/hydra-maester-0.50.3.tgz differ
diff --git a/helm/charts/hydra-distributed/charts/ory-commons-0.1.0.tgz b/helm/charts/hydra-distributed/charts/ory-commons-0.1.0.tgz
new file mode 100644
index 000000000..91570363f
Binary files /dev/null and b/helm/charts/hydra-distributed/charts/ory-commons-0.1.0.tgz differ
diff --git a/helm/charts/hydra-distributed/files/watch.sh b/helm/charts/hydra-distributed/files/watch.sh
new file mode 100644
index 000000000..cb5b44b93
--- /dev/null
+++ b/helm/charts/hydra-distributed/files/watch.sh
@@ -0,0 +1,17 @@
+set -Eeuo pipefail
+set -x
+
+function rollOut() {
+ DEPLOY=$(kubectl get deploy -n "${NAMESPACE}" -l "${1}" -o name)
+  kubectl set env -n "${NAMESPACE}" ${DEPLOY} sync="$(date "+%Y%m%d-%H%M%S")"
+  kubectl rollout status -n "${NAMESPACE}" ${DEPLOY}
+}
+
+while true; do
+ # After change in the CM the symlink is recreated, so we need to restart the monitor
+ inotifywait --event DELETE_SELF "${WATCH_FILE}" |
+  while read -r path _ file; do
+ echo "---> $path$file modified"
+ rollOut "${LABEL_SELECTOR}"
+ done
+done
diff --git a/helm/charts/hydra-distributed/templates/NOTES.txt b/helm/charts/hydra-distributed/templates/NOTES.txt
new file mode 100644
index 000000000..473a86ddd
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/NOTES.txt
@@ -0,0 +1,97 @@
+{{ include "hydra.check.override.consistency" . }}
+The ORY Hydra HTTP Public API is available via:
+{{- if .Values.ingress.public.enabled }}
+{{- range $host := .Values.ingress.public.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.public.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.public.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hydra.fullname" . }}-public)
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export HYDRA_PUBLIC_URL=http://$NODE_IP:$NODE_PORT
+ curl $HYDRA_PUBLIC_URL/.well-known/openid-configuration
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra token client \
+ --endpoint $HYDRA_PUBLIC_URL \
+ # ...
+
+{{- else if contains "LoadBalancer" .Values.service.public.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hydra.fullname" . }}-public'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hydra.fullname" . }}-public -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ export HYDRA_PUBLIC_URL=http://$SERVICE_IP:{{ .Values.service.public.port }}
+ curl $HYDRA_PUBLIC_URL/.well-known/openid-configuration
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra token client \
+ --endpoint $HYDRA_PUBLIC_URL \
+ # ...
+
+{{- else if contains "ClusterIP" .Values.service.public.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hydra.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:{{ .Values.service.public.port }} to use your application"
+ kubectl port-forward $POD_NAME {{ .Values.service.public.port }}:{{ .Values.hydra.config.serve.public.port }}
+ export HYDRA_PUBLIC_URL=http://127.0.0.1:{{ .Values.service.public.port }}/
+ curl $HYDRA_PUBLIC_URL/.well-known/openid-configuration
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra token client \
+ --endpoint $HYDRA_PUBLIC_URL \
+ # ...
+
+{{- end }}
+
+The ORY Hydra HTTP Admin API is available via:
+{{- if .Values.ingress.admin.enabled }}
+{{- range $host := .Values.ingress.admin.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.admin.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.admin.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hydra.fullname" . }}-admin)
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ export HYDRA_ADMIN_URL=http://$NODE_IP:$NODE_PORT
+ curl $HYDRA_ADMIN_URL/clients
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra clients list \
+ --endpoint $HYDRA_ADMIN_URL
+
+{{- else if contains "LoadBalancer" .Values.service.admin.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hydra.fullname" . }}-admin'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hydra.fullname" . }}-admin -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+ export HYDRA_ADMIN_URL=http://$SERVICE_IP:{{ .Values.service.admin.port }}
+ curl $HYDRA_ADMIN_URL/clients
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra clients list \
+ --endpoint $HYDRA_ADMIN_URL
+
+{{- else if contains "ClusterIP" .Values.service.admin.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hydra.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ echo "Visit http://127.0.0.1:{{ .Values.service.admin.port }} to use your application"
+ kubectl port-forward $POD_NAME {{ .Values.service.admin.port }}:{{ .Values.hydra.config.serve.admin.port }}
+ export HYDRA_ADMIN_URL=http://127.0.0.1:{{ .Values.service.admin.port }}/
+ curl $HYDRA_ADMIN_URL/clients
+
+If you have the ORY Hydra CLI installed locally, you can run commands
+against this endpoint:
+
+ hydra clients list \
+ --endpoint $HYDRA_ADMIN_URL
+
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/_helpers.tpl b/helm/charts/hydra-distributed/templates/_helpers.tpl
new file mode 100644
index 000000000..5b9b24655
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/_helpers.tpl
@@ -0,0 +1,229 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "hydra.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "hydra.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "hydra.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Ensure there is always a way to track down source of the deployment.
+It is unlikely AppVersion will be missing, but we will fallback on the
+chart's version in that case.
+*/}}
+{{- define "hydra.version" -}}
+{{- if .Chart.AppVersion }}
+{{- .Chart.AppVersion -}}
+{{- else -}}
+{{- printf "v%s" .Chart.Version -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "hydra.labels" -}}
+"app.kubernetes.io/name": {{ include "hydra.name" . | quote }}
+"app.kubernetes.io/instance": {{ .Release.Name | quote }}
+"app.kubernetes.io/version": {{ include "hydra.version" . | quote }}
+"app.kubernetes.io/managed-by": {{ .Release.Service | quote }}
+"helm.sh/chart": {{ include "hydra.chart" . | quote }}
+{{- if $.Values.watcher.enabled }}
+{{ printf "\"%s\": \"%s\"" $.Values.watcher.watchLabelKey (include "hydra.name" .) }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Generate the dsn value
+*/}}
+{{- define "hydra.dsn" -}}
+{{- if .Values.demo -}}
+memory
+{{- else if and .Values.secret.nameOverride (not .Values.secret.enabled) -}}
+dsn-loaded-from-env
+{{- else if not (empty (.Values.hydra.config.dsn)) -}}
+{{- .Values.hydra.config.dsn }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Generate the name of the secret resource containing secrets
+*/}}
+{{- define "hydra.secretname" -}}
+{{- if .Values.secret.nameOverride -}}
+{{- .Values.secret.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{ include "hydra.fullname" . }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Generate the secrets.system value
+*/}}
+{{- define "hydra.secrets.system" -}}
+ {{- if (.Values.hydra.config.secrets).system -}}
+ {{- if kindIs "slice" .Values.hydra.config.secrets.system -}}
+ {{- if gt (len .Values.hydra.config.secrets.system) 1 -}}
+ "{{- join "\",\"" .Values.hydra.config.secrets.system -}}"
+ {{- else -}}
+ {{- join "" .Values.hydra.config.secrets.system -}}
+ {{- end -}}
+ {{- else -}}
+ {{- fail "Expected hydra.config.secrets.system to be a list of strings" -}}
+ {{- end -}}
+ {{- else if .Values.demo -}}
+ a-very-insecure-secret-for-checking-out-the-demo
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Generate the secrets.cookie value
+*/}}
+{{- define "hydra.secrets.cookie" -}}
+ {{- if (.Values.hydra.config.secrets).cookie -}}
+ {{- if kindIs "slice" .Values.hydra.config.secrets.cookie -}}
+ {{- if gt (len .Values.hydra.config.secrets.cookie) 1 -}}
+ "{{- join "\",\"" .Values.hydra.config.secrets.cookie -}}"
+ {{- else -}}
+ {{- join "" .Values.hydra.config.secrets.cookie -}}
+ {{- end -}}
+ {{- else -}}
+ {{- fail "Expected hydra.config.secrets.cookie to be a list of strings" -}}
+ {{- end -}}
+ {{- else -}}
+ {{- include "hydra.secrets.system" . }}
+ {{- end -}}
+{{- end -}}
+
+{{/*
+Generate the configmap data, redacting secrets
+*/}}
+{{- define "hydra.configmap" -}}
+{{- $config := omit .Values.hydra.config "dsn" "secrets" -}}
+{{- tpl (toYaml $config) . -}}
+{{- end -}}
+
+{{/*
+Generate the urls.issuer value
+*/}}
+{{- define "hydra.config.urls.issuer" -}}
+{{- if .Values.hydra.config.urls.self.issuer -}}
+{{- .Values.hydra.config.urls.self.issuer }}
+{{- else if .Values.ingress.public.enabled -}}
+{{- $host := index .Values.ingress.public.hosts 0 -}}
+http{{ if $.Values.ingress.public.tls }}s{{ end }}://{{ $host.host }}
+{{- else if contains "ClusterIP" .Values.service.public.type -}}
+http://127.0.0.1:{{ .Values.service.public.port }}/
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check overrides consistency
+*/}}
+{{- define "hydra.check.override.consistency" -}}
+{{- if and .Values.maester.enabled .Values.fullnameOverride -}}
+{{- if not .Values.maester.hydraFullnameOverride -}}
+{{ fail "hydra fullname has been overridden, but the new value has not been provided to maester. Set maester.hydraFullnameOverride" }}
+{{- else if not (eq .Values.maester.hydraFullnameOverride .Values.fullnameOverride) -}}
+{{ fail (tpl "hydra fullname has been overridden, but a different value was provided to maester. {{ .Values.maester.hydraFullnameOverride }} different of {{ .Values.fullnameOverride }}" . ) }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{- define "hydra.utils.joinListWithComma" -}}
+{{- $local := dict "first" true -}}
+{{- range $k, $v := . -}}{{- if not $local.first -}},{{- end -}}{{- $v -}}{{- $_ := set $local "first" false -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "hydra.serviceAccountName" -}}
+{{- if .Values.deployment.serviceAccount.create }}
+{{- default (include "hydra.fullname" .) .Values.deployment.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.deployment.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the service account for the Job to use
+*/}}
+{{- define "hydra.job.serviceAccountName" -}}
+{{- if .Values.job.serviceAccount.create }}
+{{- printf "%s-job" (default (include "hydra.fullname" .) .Values.job.serviceAccount.name) }}
+{{- else }}
+{{- include "hydra.serviceAccountName" . }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the service account for the janitor CronJob to use
+*/}}
+{{- define "hydra.cronjob.janitor.serviceAccountName" -}}
+{{- if .Values.cronjob.janitor.serviceAccount.create }}
+{{- printf "%s-cronjob-janitor" (default (include "hydra.fullname" .) .Values.cronjob.janitor.serviceAccount.name) }}
+{{- else }}
+{{- include "hydra.serviceAccountName" . }}
+{{- end }}
+{{- end }}
+
+{{/*
+Checksum annotations generated from configmaps and secrets
+*/}}
+{{- define "hydra.annotations.checksum" -}}
+{{- if .Values.configmap.hashSumEnabled }}
+checksum/hydra-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+{{- end }}
+{{- if and .Values.secret.enabled .Values.secret.hashSumEnabled }}
+checksum/hydra-secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }}
+{{- end }}
+{{- end }}
+
+{{/*
+Check the migration type value and fail if unexpected
+*/}}
+{{- define "hydra.automigration.typeVerification" -}}
+{{- if and .Values.hydra.automigration.enabled .Values.hydra.automigration.type }}
+ {{- if and (ne .Values.hydra.automigration.type "initContainer") (ne .Values.hydra.automigration.type "job") }}
+ {{- fail "hydra.automigration.type must be either 'initContainer' or 'job'" -}}
+ {{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Common labels for the janitor cron job
+*/}}
+{{- define "hydra.janitor.labels" -}}
+"app.kubernetes.io/name": {{ printf "%s-janitor" (include "hydra.name" .) | quote }}
+"app.kubernetes.io/instance": {{ .Release.Name | quote }}
+"app.kubernetes.io/version": {{ include "hydra.version" . | quote }}
+"app.kubernetes.io/managed-by": {{ .Release.Service | quote }}
+"app.kubernetes.io/component": janitor
+"helm.sh/chart": {{ include "hydra.chart" . | quote }}
+{{- end -}}
diff --git a/helm/charts/hydra-distributed/templates/configmap-automigrate.yaml b/helm/charts/hydra-distributed/templates/configmap-automigrate.yaml
new file mode 100644
index 000000000..6dbcc8854
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/configmap-automigrate.yaml
@@ -0,0 +1,18 @@
+{{- if and ( .Values.hydra.automigration.enabled ) ( eq .Values.hydra.automigration.type "job" ) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "hydra.fullname" . }}-migrate
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ annotations:
+ helm.sh/hook-weight: "0"
+ helm.sh/hook: "pre-install, pre-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+data:
+ "hydra.yaml": |
+ {{- include "hydra.configmap" . | nindent 4 }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/configmap.yaml b/helm/charts/hydra-distributed/templates/configmap.yaml
new file mode 100644
index 000000000..0f84335ec
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "hydra.fullname" . }}
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+data:
+ "hydra.yaml": |
+ {{- include "hydra.configmap" . | nindent 4 }}
diff --git a/helm/charts/hydra-distributed/templates/deployment-admin.yaml b/helm/charts/hydra-distributed/templates/deployment-admin.yaml
new file mode 100644
index 000000000..b6c120b92
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/deployment-admin.yaml
@@ -0,0 +1,245 @@
+{{- include "hydra.automigration.typeVerification" . -}}
+{{- /* deepCopy guards against Sprig merge mutating .Values.deployment.admin in place (matches hpa.yaml) */ -}}
+{{- $deployValues := merge (deepCopy .Values.deployment.admin) (omit .Values.deployment "admin" "public") -}}
+{{- $migrationExtraEnv := ternary $deployValues.automigration.extraEnv $deployValues.extraEnv (not (empty $deployValues.automigration.extraEnv )) -}}
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "hydra.fullname" . }}-admin
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with $deployValues.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: admin
+ annotations:
+ {{- with $deployValues.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+{{- if not $deployValues.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+{{- end }}
+ revisionHistoryLimit: {{ $deployValues.revisionHistoryLimit }}
+ strategy:
+ {{- toYaml $deployValues.strategy | nindent 4 }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: admin
+ template:
+ metadata:
+ labels:
+ {{- include "hydra.labels" . | nindent 8 }}
+ {{- with $deployValues.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podMetadata.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ app.kubernetes.io/component: admin
+ annotations:
+ {{- include "hydra.annotations.checksum" . | nindent 8 -}}
+ {{- with $deployValues.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podMetadata.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "hydra.name" . }}-config-volume
+ configMap:
+ name: {{ include "hydra.fullname" . }}
+ {{- if $deployValues.extraVolumes }}
+ {{- toYaml $deployValues.extraVolumes | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "hydra.serviceAccountName" . }}
+ automountServiceAccountToken: {{ $deployValues.automountServiceAccountToken }}
+ terminationGracePeriodSeconds: {{ $deployValues.terminationGracePeriodSeconds }}
+ containers:
+ - name: {{ .Chart.Name }}-admin
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: {{- toYaml .Values.hydra.command | nindent 12 }}
+ {{- if .Values.hydra.customArgs }}
+ args: {{- toYaml .Values.hydra.customArgs | nindent 12 }}
+ {{- else }}
+ args:
+ - serve
+ - admin
+ {{- if .Values.hydra.dev }}
+ - "--dev"
+ {{- end }}
+ - --config
+ - /etc/config/hydra.yaml
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- if $deployValues.extraVolumeMounts }}
+ {{- toYaml $deployValues.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-admin
+ containerPort: {{ .Values.hydra.config.serve.admin.port }}
+ protocol: TCP
+ livenessProbe:
+ {{- if $deployValues.customLivenessProbe }}
+ {{- toYaml $deployValues.customLivenessProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/alive
+ port: {{ .Values.hydra.config.serve.admin.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.livenessProbe | nindent 12 }}
+ {{- end }}
+ readinessProbe:
+ {{- if $deployValues.customReadinessProbe }}
+ {{- toYaml $deployValues.customReadinessProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/ready
+ port: {{ .Values.hydra.config.serve.admin.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.readinessProbe | nindent 12 }}
+ {{- end }}
+ startupProbe:
+ {{- if $deployValues.customStartupProbe }}
+ {{- toYaml $deployValues.customStartupProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/ready
+ port: {{ .Values.hydra.config.serve.admin.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.startupProbe | nindent 12 }}
+ {{- end }}
+ env:
+ {{- $issuer := include "hydra.config.urls.issuer" . -}}
+ {{- if $issuer }}
+ - name: URLS_SELF_ISSUER
+ value: {{ $issuer | quote }}
+ {{- end }}
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $deployValues.extraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ - name: SECRETS_SYSTEM
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: secretsSystem
+ - name: SECRETS_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: secretsCookie
+ {{- if $deployValues.extraEnv }}
+ {{- tpl (toYaml $deployValues.extraEnv) . | nindent 12 }}
+ {{- end }}
+ resources:
+ {{- toYaml $deployValues.resources | nindent 12 }}
+ {{- if $deployValues.securityContext }}
+ securityContext:
+ {{- toYaml $deployValues.securityContext | nindent 12 }}
+ {{- end }}
+ lifecycle:
+ {{- toYaml $deployValues.lifecycle | nindent 12 }}
+ {{- if $deployValues.extraContainers }}
+ {{- tpl $deployValues.extraContainers . | nindent 8 }}
+ {{- end }}
+ initContainers:
+ {{- if $deployValues.extraInitContainers }}
+ {{- tpl $deployValues.extraInitContainers . | nindent 8 }}
+ {{- end }}
+ {{- if and ( .Values.hydra.automigration.enabled ) ( eq .Values.hydra.automigration.type "initContainer" ) }}
+ - name: {{ .Chart.Name }}-automigrate
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.hydra.automigration.customCommand }}
+ command: {{- toYaml .Values.hydra.automigration.customCommand | nindent 12 }}
+ {{- else }}
+ command: ["hydra"]
+ {{- end }}
+ {{- if .Values.hydra.automigration.customArgs }}
+ args: {{- toYaml .Values.hydra.automigration.customArgs | nindent 12 }}
+ {{- else }}
+ args: ["migrate", "sql", "-e", "--yes", "--config", "/etc/config/hydra.yaml"]
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- with $deployValues.extraVolumeMounts }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ env:
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $migrationExtraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ {{- if $migrationExtraEnv }}
+ {{- tpl (toYaml $migrationExtraEnv) . | nindent 12 }}
+ {{- end }}
+ {{- if .Values.hydra.automigration.resources }}
+ resources:
+ {{- toYaml .Values.hydra.automigration.resources | nindent 12 }}
+ {{- end }}
+ {{- with $deployValues.initContainerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+ {{- with $deployValues.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.dnsConfig }}
+ dnsConfig:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/helm/charts/hydra-distributed/templates/deployment-public.yaml b/helm/charts/hydra-distributed/templates/deployment-public.yaml
new file mode 100644
index 000000000..d95291111
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/deployment-public.yaml
@@ -0,0 +1,244 @@
+{{- /* deepCopy guards against Sprig merge mutating .Values.deployment.public in place (matches hpa.yaml) */ -}}
+{{- $deployValues := merge (deepCopy .Values.deployment.public) (omit .Values.deployment "admin" "public") -}}
+{{- $migrationExtraEnv := ternary $deployValues.automigration.extraEnv $deployValues.extraEnv (not (empty $deployValues.automigration.extraEnv )) -}}
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "hydra.fullname" . }}-public
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with $deployValues.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: public
+ annotations:
+ {{- with $deployValues.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+{{- if not $deployValues.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+{{- end }}
+ revisionHistoryLimit: {{ $deployValues.revisionHistoryLimit }}
+ strategy:
+ {{- toYaml $deployValues.strategy | nindent 4 }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: public
+ template:
+ metadata:
+ labels:
+ {{- include "hydra.labels" . | nindent 8 }}
+ {{- with $deployValues.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podMetadata.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ app.kubernetes.io/component: public
+ annotations:
+ {{- include "hydra.annotations.checksum" . | nindent 8 -}}
+ {{- with $deployValues.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podMetadata.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "hydra.name" . }}-config-volume
+ configMap:
+ name: {{ include "hydra.fullname" . }}
+ {{- if $deployValues.extraVolumes }}
+ {{- toYaml $deployValues.extraVolumes | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "hydra.serviceAccountName" . }}
+ automountServiceAccountToken: {{ $deployValues.automountServiceAccountToken }}
+ terminationGracePeriodSeconds: {{ $deployValues.terminationGracePeriodSeconds }}
+ containers:
+ - name: {{ .Chart.Name }}-public
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: {{- toYaml .Values.hydra.command | nindent 12 }}
+ {{- if .Values.hydra.customArgs }}
+ args: {{- toYaml .Values.hydra.customArgs | nindent 12 }}
+ {{- else }}
+ args:
+ - serve
+ - public
+ {{- if .Values.hydra.dev }}
+ - "--dev"
+ {{- end }}
+ - --config
+ - /etc/config/hydra.yaml
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- if $deployValues.extraVolumeMounts }}
+ {{- toYaml $deployValues.extraVolumeMounts | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: http-public
+ containerPort: {{ .Values.hydra.config.serve.public.port }}
+ protocol: TCP
+ livenessProbe:
+ {{- if $deployValues.customLivenessProbe }}
+ {{- toYaml $deployValues.customLivenessProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/alive
+ port: {{ .Values.hydra.config.serve.public.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.livenessProbe | nindent 12 }}
+ {{- end }}
+ readinessProbe:
+ {{- if $deployValues.customReadinessProbe }}
+ {{- toYaml $deployValues.customReadinessProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/ready
+ port: {{ .Values.hydra.config.serve.public.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.readinessProbe | nindent 12 }}
+ {{- end }}
+ startupProbe:
+ {{- if $deployValues.customStartupProbe }}
+ {{- toYaml $deployValues.customStartupProbe | nindent 12 }}
+ {{- else }}
+ httpGet:
+ path: /health/ready
+ port: {{ .Values.hydra.config.serve.public.port }}
+ httpHeaders:
+ - name: Host
+ value: '127.0.0.1'
+ {{- toYaml $deployValues.startupProbe | nindent 12 }}
+ {{- end }}
+ env:
+ {{- $issuer := include "hydra.config.urls.issuer" . -}}
+ {{- if $issuer }}
+ - name: URLS_SELF_ISSUER
+ value: {{ $issuer | quote }}
+ {{- end }}
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $deployValues.extraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ - name: SECRETS_SYSTEM
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: secretsSystem
+ - name: SECRETS_COOKIE
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: secretsCookie
+ {{- if $deployValues.extraEnv }}
+ {{- tpl (toYaml $deployValues.extraEnv) . | nindent 12 }}
+ {{- end }}
+ resources:
+ {{- toYaml $deployValues.resources | nindent 12 }}
+ {{- if $deployValues.securityContext }}
+ securityContext:
+ {{- toYaml $deployValues.securityContext | nindent 12 }}
+ {{- end }}
+ lifecycle:
+ {{- toYaml $deployValues.lifecycle | nindent 12 }}
+ {{- if $deployValues.extraContainers }}
+ {{- tpl $deployValues.extraContainers . | nindent 8 }}
+ {{- end }}
+ initContainers:
+ {{- if $deployValues.extraInitContainers }}
+ {{- tpl $deployValues.extraInitContainers . | nindent 8 }}
+ {{- end }}
+ {{- if and ( .Values.hydra.automigration.enabled ) ( eq .Values.hydra.automigration.type "initContainer" ) }}
+ - name: {{ .Chart.Name }}-automigrate
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.hydra.automigration.customCommand }}
+ command: {{- toYaml .Values.hydra.automigration.customCommand | nindent 12 }}
+ {{- else }}
+ command: ["hydra"]
+ {{- end }}
+ {{- if .Values.hydra.automigration.customArgs }}
+ args: {{- toYaml .Values.hydra.automigration.customArgs | nindent 12 }}
+ {{- else }}
+ args: ["migrate", "sql", "-e", "--yes", "--config", "/etc/config/hydra.yaml"]
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- with $deployValues.extraVolumeMounts }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ env:
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $migrationExtraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ {{- if $migrationExtraEnv }}
+ {{- tpl (toYaml $migrationExtraEnv) . | nindent 12 }}
+ {{- end }}
+ {{- if .Values.hydra.automigration.resources }}
+ resources:
+ {{- toYaml .Values.hydra.automigration.resources | nindent 12 }}
+ {{- end }}
+ {{- with $deployValues.initContainerSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- end }}
+ {{- if .Values.priorityClassName }}
+ priorityClassName: {{ .Values.priorityClassName }}
+ {{- end }}
+ {{- with $deployValues.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.dnsConfig }}
+ dnsConfig:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/helm/charts/hydra-distributed/templates/deployment-watcher.yaml b/helm/charts/hydra-distributed/templates/deployment-watcher.yaml
new file mode 100644
index 000000000..a0393806f
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/deployment-watcher.yaml
@@ -0,0 +1,77 @@
+{{- if .Values.watcher.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "hydra.fullname" . }}-watcher
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}-watcher
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.deployment.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ annotations:
+ {{- with .Values.deployment.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ revisionHistoryLimit: {{ .Values.watcher.revisionHistoryLimit }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}-watcher
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}-watcher
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.deployment.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.watcher.podMetadata.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ annotations:
+ {{- with .Values.watcher.podMetadata.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ automountServiceAccountToken: {{ .Values.watcher.automountServiceAccountToken }}
+ serviceAccountName: {{ include "hydra.serviceAccountName" . }}-watcher
+ terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }}
+ containers:
+ - name: watcher
+ {{- with .Values.watcher.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ image: {{ .Values.watcher.image }}
+ command:
+ - /bin/bash
+ - -c
+ - |
+ {{- .Files.Get "files/watch.sh" | printf "%s" | nindent 14 }}
+ env:
+ - name: NAMESPACE
+ value: {{ .Release.Namespace | quote }}
+ - name: WATCH_FILE
+ value: {{ .Values.watcher.mountFile | quote }}
+ - name: LABEL_SELECTOR
+ value: '{{ $.Values.watcher.watchLabelKey }}={{ include "hydra.name" . }}'
+ resources:
+ {{- toYaml .Values.watcher.resources | nindent 12 }}
+ volumeMounts:
+ {{- with .Values.deployment.extraVolumeMounts }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.watcher.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ {{- if .Values.deployment.extraVolumes }}
+ {{- toYaml .Values.deployment.extraVolumes | nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/hpa.yaml b/helm/charts/hydra-distributed/templates/hpa.yaml
new file mode 100644
index 000000000..c84d25129
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/hpa.yaml
@@ -0,0 +1,41 @@
+{{- range list "admin" "public" }}
+{{- $deployValues := merge (deepCopy (get $.Values.deployment .)) (omit $.Values.deployment "admin" "public") }}
+{{- if $deployValues.autoscaling.enabled }}
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ {{- if $.Release.Namespace }}
+ namespace: {{ $.Release.Namespace }}
+ {{- end }}
+ name: {{ include "hydra.fullname" $ }}-{{.}}
+ labels:
+ {{- include "hydra.labels" $ | nindent 4 }}
+ app.kubernetes.io/component: {{. | quote}}
+spec:
+ {{- with $deployValues.autoscaling.behavior }}
+ behavior: {{- toYaml . | nindent 4 }}
+ {{- end }}
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: {{ include "hydra.fullname" $ }}-{{.}}
+ minReplicas: {{ $deployValues.autoscaling.minReplicas }}
+ maxReplicas: {{ $deployValues.autoscaling.maxReplicas }}
+ metrics:
+ {{- with $deployValues.autoscaling.targetMemory }}
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with $deployValues.autoscaling.targetCPU}}
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/ingress-admin.yaml b/helm/charts/hydra-distributed/templates/ingress-admin.yaml
new file mode 100644
index 000000000..129ea28cc
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/ingress-admin.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.ingress.admin.enabled -}}
+{{- $fullName := include "hydra.fullname" . -}}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}-admin
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.ingress.admin.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ ingressClassName: {{ .Values.ingress.admin.className }}
+ {{- if .Values.ingress.admin.tls }}
+ tls:
+ {{- range .Values.ingress.admin.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.admin.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if .pathType }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}-admin
+ port:
+ name: {{ $.Values.service.admin.name }}
+ {{- else }}
+ serviceName: {{ $fullName }}-admin
+ servicePort: {{ $.Values.service.admin.name }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/ingress-public.yaml b/helm/charts/hydra-distributed/templates/ingress-public.yaml
new file mode 100644
index 000000000..45d3f2a0e
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/ingress-public.yaml
@@ -0,0 +1,56 @@
+{{- if or .Values.ingress.public.enabled .Values.demo -}}
+{{- $fullName := include "hydra.fullname" . -}}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+ name: {{ $fullName }}-public
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.ingress.public.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.public.className }}
+ ingressClassName: {{ . }}
+ {{- end }}
+ {{- if .Values.ingress.public.tls }}
+ tls:
+ {{- range .Values.ingress.public.tls }}
+ - hosts:
+ {{- range .hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .secretName }}
+ {{- end }}
+ {{- end }}
+ rules:
+ {{- range .Values.ingress.public.hosts }}
+ - host: {{ .host | quote }}
+ http:
+ paths:
+ {{- range .paths }}
+ - path: {{ .path }}
+ {{- if .pathType }}
+ pathType: {{ .pathType }}
+ {{- end }}
+ backend:
+ {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+ service:
+ name: {{ $fullName }}-public
+ port:
+ name: {{ $.Values.service.public.name }}
+ {{- else }}
+ serviceName: {{ $fullName }}-public
+ servicePort: {{ $.Values.service.public.name }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/janitor-cron-job-rbac.yaml b/helm/charts/hydra-distributed/templates/janitor-cron-job-rbac.yaml
new file mode 100644
index 000000000..8a1d92d09
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/janitor-cron-job-rbac.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.cronjob.janitor.serviceAccount.create -}}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "hydra.cronjob.janitor.serviceAccountName" . }}
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.cronjob.janitor.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: false
+{{- end -}}
diff --git a/helm/charts/hydra-distributed/templates/janitor-cron-job.yaml b/helm/charts/hydra-distributed/templates/janitor-cron-job.yaml
new file mode 100644
index 000000000..aa52036fd
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/janitor-cron-job.yaml
@@ -0,0 +1,137 @@
+{{- if .Values.janitor.enabled -}}
+{{- $janitorExtraEnv := ternary .Values.cronjob.janitor.extraEnv .Values.deployment.extraEnv (not (empty .Values.cronjob.janitor.extraEnv )) -}}
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ include "hydra.fullname" . }}-janitor
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.janitor.labels" . | nindent 4 }}
+ {{- with .Values.cronjob.janitor.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ concurrencyPolicy: Forbid
+ schedule: {{ .Values.cronjob.janitor.schedule | quote }}
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ {{- include "hydra.janitor.labels" . | nindent 12 }}
+ {{- with .Values.cronjob.janitor.labels }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.podMetadata.labels }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ annotations:
+ {{- include "hydra.annotations.checksum" . | nindent 12 -}}
+ {{- with .Values.cronjob.janitor.annotations }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.cronjob.janitor.podMetadata.annotations }}
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ spec:
+ restartPolicy: OnFailure
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ serviceAccountName: {{ include "hydra.cronjob.janitor.serviceAccountName" . }}
+ automountServiceAccountToken: {{ .Values.cronjob.janitor.automountServiceAccountToken }}
+ volumes:
+ - name: {{ include "hydra.name" . }}-config-volume
+ configMap:
+ name: {{ include "hydra.fullname" . }}
+ {{- if .Values.cronjob.janitor.extraVolumes }}
+ {{- toYaml .Values.cronjob.janitor.extraVolumes | nindent 12 }}
+ {{- end }}
+ containers:
+ - name: janitor
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- with .Values.cronjob.janitor.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 16 }}
+ {{- end }}
+ {{- if .Values.cronjob.janitor.customCommand }}
+ command: {{- toYaml .Values.cronjob.janitor.customCommand | nindent 14 }}
+ {{- else }}
+ command: ["hydra"]
+ {{- end }}
+ {{- if .Values.cronjob.janitor.customArgs }}
+ args: {{- toYaml .Values.cronjob.janitor.customArgs | nindent 14 }}
+ {{- else }}
+ args:
+ - janitor
+ {{- if .Values.janitor.cleanupGrants }}
+ - --grants
+ {{- end }}
+ {{- if .Values.janitor.cleanupRequests }}
+ - --requests
+ {{- end }}
+ {{- if .Values.janitor.cleanupTokens }}
+ - --tokens
+ {{- end }}
+ - --batch-size
+ - {{ .Values.janitor.batchSize | quote }}
+ - --limit
+ - {{ .Values.janitor.limit | quote }}
+ - --config
+ - /etc/config/hydra.yaml
+ {{- end }}
+ env:
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $janitorExtraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ {{- with $janitorExtraEnv }}
+ {{- toYaml . | nindent 16 }}
+ {{- end }}
+ resources:
+ {{- toYaml .Values.cronjob.janitor.resources | nindent 16 }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- if .Values.cronjob.janitor.extraVolumeMounts }}
+ {{- toYaml .Values.cronjob.janitor.extraVolumeMounts | nindent 16 }}
+ {{- end }}
+ {{- if .Values.cronjob.janitor.extraContainers }}
+ {{- tpl .Values.cronjob.janitor.extraContainers . | nindent 12 }}
+ {{- end }}
+ {{- if .Values.cronjob.janitor.extraInitContainers }}
+ initContainers:
+ {{- tpl .Values.cronjob.janitor.extraInitContainers . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.janitor.affinity }}
+ affinity:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/job-migration.yaml b/helm/charts/hydra-distributed/templates/job-migration.yaml
new file mode 100644
index 000000000..56cf3e812
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/job-migration.yaml
@@ -0,0 +1,125 @@
+{{- include "hydra.automigration.typeVerification" . -}}
+{{- if and ( .Values.hydra.automigration.enabled ) ( eq .Values.hydra.automigration.type "job" ) }}
+{{- $nodeSelector := ternary .Values.job.nodeSelector .Values.deployment.nodeSelector (not (empty .Values.job.nodeSelector )) -}}
+{{- $migrationExtraEnv := ternary .Values.job.extraEnv .Values.deployment.extraEnv (not (empty .Values.job.extraEnv )) -}}
+{{- $resources := ternary .Values.job.resources .Values.hydra.automigration.resources (not (empty .Values.job.resources)) -}}
+
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "hydra.fullname" . }}-automigrate
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.job.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.job.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ template:
+ metadata:
+ annotations:
+ {{- with .Values.job.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.job.podMetadata.annotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: {{ include "hydra.fullname" . }}-automigrate
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.job.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.job.podMetadata.labels }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "hydra.job.serviceAccountName" . }}
+ automountServiceAccountToken: {{ .Values.job.automountServiceAccountToken }}
+ containers:
+ - name: {{ .Chart.Name }}-automigrate
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- if .Values.hydra.automigration.customCommand }}
+ command: {{- toYaml .Values.hydra.automigration.customCommand | nindent 10 }}
+ {{- else }}
+ command: ["hydra"]
+ {{- end }}
+ {{- if .Values.hydra.automigration.customArgs }}
+ args: {{- toYaml .Values.hydra.automigration.customArgs | nindent 10 }}
+ {{- else }}
+ args: ["migrate", "sql", "-e", "--yes", "--config", "/etc/config/hydra.yaml"]
+ {{- end }}
+ env:
+ {{- if not (empty ( include "hydra.dsn" . )) }}
+ {{- if not (include "ory.extraEnvContainsEnvName" (list $migrationExtraEnv "DSN")) }}
+ - name: DSN
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "hydra.secretname" . }}
+ key: dsn
+ {{- end }}
+ {{- end }}
+ {{- with $migrationExtraEnv }}
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- if .Values.job.lifecycle }}
+ lifecycle:
+ {{- tpl .Values.job.lifecycle . | nindent 10 }}
+ {{- end }}
+ {{- with .Values.deployment.securityContext }}
+ securityContext:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ {{- with $resources }}
+ resources:
+ {{- toYaml . | nindent 10 }}
+ {{- end }}
+ volumeMounts:
+ - name: {{ include "hydra.name" . }}-config-volume
+ mountPath: /etc/config
+ readOnly: true
+ {{- if .Values.deployment.extraVolumeMounts }}
+ {{- toYaml .Values.deployment.extraVolumeMounts | nindent 10 }}
+ {{- end }}
+ {{- if .Values.job.extraContainers }}
+ {{- tpl .Values.job.extraContainers . | nindent 6 }}
+ {{- end }}
+ {{- if .Values.job.extraInitContainers }}
+ initContainers:
+ {{- tpl .Values.job.extraInitContainers . | nindent 8 }}
+ {{- end }}
+ restartPolicy: Never
+ {{- with .Values.deployment.podSecurityContext }}
+ securityContext:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ shareProcessNamespace: {{ .Values.job.shareProcessNamespace }}
+ volumes:
+ - name: {{ include "hydra.name" . }}-config-volume
+ configMap:
+ name: {{ include "hydra.fullname" . }}-migrate
+ {{- if .Values.deployment.extraVolumes }}
+ {{- toYaml .Values.deployment.extraVolumes | nindent 8 }}
+ {{- end }}
+ {{- with $nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.job.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ backoffLimit: {{ .Values.job.spec.backoffLimit }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/job-rbac.yaml b/helm/charts/hydra-distributed/templates/job-rbac.yaml
new file mode 100644
index 000000000..96998d8ca
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/job-rbac.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.job.serviceAccount.create -}}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "hydra.job.serviceAccountName" . }}
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.job.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: false
+{{- end -}}
diff --git a/helm/charts/hydra-distributed/templates/pdb.yaml b/helm/charts/hydra-distributed/templates/pdb.yaml
new file mode 100644
index 000000000..32c5df531
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/pdb.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.pdb.enabled -}}
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: {{ include "hydra.fullname" . }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.pdb.spec.maxUnavailable }}
+ maxUnavailable: {{ . }}
+ {{- end }}
+ {{- with .Values.pdb.spec.minAvailable }}
+ minAvailable: {{ . }}
+ {{- end }}
+{{- end -}}
diff --git a/helm/charts/hydra-distributed/templates/rbac-watcher.yaml b/helm/charts/hydra-distributed/templates/rbac-watcher.yaml
new file mode 100644
index 000000000..d68499308
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/rbac-watcher.yaml
@@ -0,0 +1,53 @@
+{{- if .Values.watcher.enabled }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "hydra.serviceAccountName" . }}-watcher
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}-watcher
+ app.kubernetes.io/instance: {{ .Release.Name }}
+automountServiceAccountToken: false
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "hydra.fullname" . }}-watcher
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: ["apps"]
+ resources: ["deployments"]
+ verbs:
+ - list
+ - watch
+ - get
+ - apiGroups: ["apps"]
+ resources: ["deployments"]
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ resourceNames:
+ - {{ include "hydra.fullname" . }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "hydra.fullname" . }}-watcher
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "hydra.fullname" . }}-watcher
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "hydra.serviceAccountName" . }}-watcher
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/rbac.yaml b/helm/charts/hydra-distributed/templates/rbac.yaml
new file mode 100644
index 000000000..319489978
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/rbac.yaml
@@ -0,0 +1,17 @@
+{{- if .Values.deployment.serviceAccount.create -}}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "hydra.serviceAccountName" . }}
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.deployment.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+automountServiceAccountToken: false
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/secrets.yaml b/helm/charts/hydra-distributed/templates/secrets.yaml
new file mode 100644
index 000000000..4220c649d
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/secrets.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.secret.enabled -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "hydra.secretname" . }}
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.secret.secretAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+type: Opaque
+data:
+ # Generate a random secret if the user doesn't give one. User given password has priority
+ secretsSystem: {{ ( include "hydra.secrets.system" . | default ( randAlphaNum 32 )) | required "Value secrets.system can not be empty!" | b64enc | quote }}
+ secretsCookie: {{ ( include "hydra.secrets.cookie" . | default ( randAlphaNum 32 )) | required "Value secrets.cookie can not be empty!" | b64enc | quote }}
+ dsn: {{ include "hydra.dsn" . | b64enc | quote }}
+{{- end -}}
diff --git a/helm/charts/hydra-distributed/templates/service-admin.yaml b/helm/charts/hydra-distributed/templates/service-admin.yaml
new file mode 100644
index 000000000..c14d19f1a
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/service-admin.yaml
@@ -0,0 +1,72 @@
+{{- if .Values.service.admin.enabled -}}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "hydra.fullname" . }}-admin
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.service.admin.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: admin
+ {{- with .Values.service.admin.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.admin.type }}
+ {{- if eq .Values.service.admin.type "LoadBalancer" }}
+ {{- with .Values.service.admin.loadBalancerIP }}
+ loadBalancerIP: {{ . }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.service.admin.port }}
+ targetPort: http-admin
+ protocol: TCP
+ name: {{ .Values.service.admin.name }}
+ selector:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: admin
+{{- if .Values.serviceMonitor.enabled }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: {{ include "hydra.fullname" . }}-admin
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/component: admin
+ {{- include "hydra.labels" . | nindent 4 }}
+{{- with .Values.serviceMonitor.labels }}
+ {{- toYaml . | nindent 4 }}
+{{- end }}
+ {{- with .Values.service.admin.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ endpoints:
+ - path: {{ .Values.service.admin.metricsPath }}
+ port: {{ .Values.service.admin.name }}
+ scheme: {{ .Values.serviceMonitor.scheme }}
+ interval: {{ .Values.serviceMonitor.scrapeInterval }}
+ scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
+ {{- with .Values.serviceMonitor.tlsConfig }}
+ tlsConfig:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: admin
+{{- end -}}
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/service-public.yaml b/helm/charts/hydra-distributed/templates/service-public.yaml
new file mode 100644
index 000000000..ffd96e48a
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/service-public.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.service.public.enabled -}}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "hydra.fullname" . }}-public
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels:
+ {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.service.public.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ app.kubernetes.io/component: public
+ {{- with .Values.service.public.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.public.type }}
+ {{- if eq .Values.service.public.type "LoadBalancer" }}
+ {{- with .Values.service.public.loadBalancerIP }}
+ loadBalancerIP: {{ . }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - port: {{ .Values.service.public.port }}
+ targetPort: http-public
+ protocol: TCP
+ name: {{ .Values.service.public.name }}
+ selector:
+ app.kubernetes.io/name: {{ include "hydra.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: public
+{{- end }}
diff --git a/helm/charts/hydra-distributed/templates/tests/test-connection.yaml b/helm/charts/hydra-distributed/templates/tests/test-connection.yaml
new file mode 100644
index 000000000..db08330a6
--- /dev/null
+++ b/helm/charts/hydra-distributed/templates/tests/test-connection.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "hydra.fullname" . }}-test-connection"
+ {{- if .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+ {{- end }}
+ labels: {{- include "hydra.labels" . | nindent 4 }}
+ {{- with .Values.test.labels }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: healthcheck-ready
+ image: "{{ .Values.test.busybox.repository }}:{{ .Values.test.busybox.tag }}"
+ command: ['wget']
+ args: ['{{ include "hydra.fullname" . }}-admin:{{ .Values.service.admin.port }}/health/ready']
+ restartPolicy: Never
diff --git a/helm/charts/hydra-distributed/values.yaml b/helm/charts/hydra-distributed/values.yaml
new file mode 100644
index 000000000..658a16809
--- /dev/null
+++ b/helm/charts/hydra-distributed/values.yaml
@@ -0,0 +1,674 @@
+# -- Number of ORY Hydra members
+replicaCount: 1
+
+image:
+ # -- ORY Hydra image
+ repository: oryd/hydra
+ # -- ORY Hydra version
+ tag: v2.2.0
+ # -- Image pull policy
+ pullPolicy: IfNotPresent
+
+# -- Image pull secrets
+imagePullSecrets: []
+# Chart name override
+nameOverride: ""
+# -- Full chart name override
+fullnameOverride: ""
+
+# -- Pod priority
+# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+priorityClassName: ""
+
+## -- Configures the Kubernetes service
+service:
+ # -- Configures the Kubernetes service for the proxy port.
+ public:
+ # -- En-/disable the service
+ enabled: true
+ # -- The service type
+ type: ClusterIP
+ # -- The load balancer IP
+ loadBalancerIP: ""
+ # -- The service port
+ port: 4444
+ # -- The service port name. Useful to set a custom service port name if it must follow a scheme (e.g. Istio)
+ name: http
+ # -- If you do want to specify annotations, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ labels: {}
+ # If you do want to specify additional labels, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'labels:'.
+ # e.g. app: hydra
+ # -- Configures the Kubernetes service for the api port.
+ admin:
+ # -- En-/disable the service
+ enabled: true
+ # -- The service type
+ type: ClusterIP
+ # -- The load balancer IP
+ loadBalancerIP: ""
+ # -- The service port
+ port: 4445
+ # -- The service port name. Useful to set a custom service port name if it must follow a scheme (e.g. Istio)
+ name: http
+ # -- If you do want to specify annotations, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'annotations:'.
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ labels: {}
+ # If you do want to specify additional labels, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'labels:'.
+ # e.g. app: hydra
+ # -- Path to the metrics endpoint
+ metricsPath: /admin/metrics/prometheus
+
+## -- Secret management
+secret:
+ # -- switch to false to prevent creating the secret
+ enabled: true
+ # -- Provide custom name of existing secret, or custom name of secret to be created
+ nameOverride: ""
+ # nameOverride: "myCustomSecret"
+ # -- Annotations to be added to secret. Annotations are added only when secret is being created. Existing secret will not be modified.
+ secretAnnotations:
+ # Create the secret before installation, and only then. This saves the secret from regenerating during an upgrade
+ # pre-upgrade is needed to upgrade from 0.7.0 to newer. Can be deleted afterwards.
+ helm.sh/hook-weight: "0"
+ helm.sh/hook: "pre-install, pre-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+ helm.sh/resource-policy: "keep"
+ # -- switch to false to prevent checksum annotations being maintained and propogated to the pods
+ hashSumEnabled: true
+
+## -- Configure ingress
+ingress:
+ # -- Configure ingress for the proxy port.
+ public:
+ # -- En-/Disable the proxy ingress.
+ enabled: false
+ className: ""
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: public.hydra.localhost
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ # tls: []
+ # hosts:
+ # - proxy.hydra.local
+ # - secretName: hydra-proxy-example-tls
+
+ admin:
+ # -- En-/Disable the api ingress.
+ enabled: false
+ className: ""
+ annotations: {}
+ # If you do want to specify annotations, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'annotations:'.
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: admin.hydra.localhost
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+# tls: []
+# hosts:
+# - api.hydra.local
+# - secretName: hydra-api-example-tls
+
+## -- Configure ORY Hydra itself
+hydra:
+ # -- Ability to override the entrypoint of hydra container
+ # (e.g. to source dynamic secrets or export environment dynamic variables)
+ command: ["hydra"]
+ # -- Ability to override arguments of the entrypoint. Can be used independently of customCommand
+ customArgs: []
+ # -- The ORY Hydra configuration. For a full list of available settings, check:
+ # https://www.ory.sh/docs/hydra/reference/configuration
+ config:
+ serve:
+ public:
+ port: 4444
+ admin:
+ port: 4445
+ tls:
+ allow_termination_from:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ # -- The secrets have to be provided as a string slice, example:
+ # system:
+ # - "OG5XbmxXa3dYeGplQXpQanYxeEFuRUFa"
+ # - "foo bar 123 456 lorem"
+ # - "foo bar 123 456 lorem 1"
+ # - "foo bar 123 456 lorem 2"
+ # - "foo bar 123 456 lorem 3"
+ secrets: {}
+
+ urls:
+ self: {}
+
+ # -- Enables database migration
+ automigration:
+ enabled: false
+ # -- Configure the way to execute database migration. Possible values: job, initContainer
+ # When set to job, the migration will be executed as a job on release or upgrade.
+ # When set to initContainer, the migration will be executed when the hydra pod is created
+ # Defaults to job
+ type: job
+ # -- Ability to override the entrypoint of the automigration container
+ # (e.g. to source dynamic secrets or export environment dynamic variables)
+ customCommand: []
+ # -- Ability to override arguments of the entrypoint. Can be used independently of customCommand
+ # eg:
+ # - sleep 5;
+ # - hydra
+ customArgs: []
+ # -- resource requests and limits for the automigration initcontainer
+ resources: {}
+
+ # -- Enable dev mode, not secure in production environments
+ dev: false
+
+## -- Deployment specific config
+deployment:
+ # The admin/public fields act as overrides for the individual deploys.
+ admin: {}
+ public: {}
+
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+
+ # -- We usually recommend not to specify default resources and to leave this as a conscious choice for the user.
+ # This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ resources: {}
+
+ ## -- initContainer securityContext for hydra & migration init
+ initContainerSecurityContext: {}
+
+ ## -- pod securityContext for hydra & migration init
+ podSecurityContext:
+ fsGroupChangePolicy: "OnRootMismatch"
+ runAsNonRoot: true
+ runAsUser: 65534
+ fsGroup: 65534
+ runAsGroup: 65534
+ seccompProfile:
+ type: RuntimeDefault
+
+ ## -- container securityContext for hydra & migration init
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ seccompProfile:
+ type: RuntimeDefault
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 65534
+ runAsGroup: 65534
+ allowPrivilegeEscalation: false
+ privileged: false
+ seLinuxOptions:
+ level: "s0:c123,c456"
+
+ lifecycle: {}
+
+ # -- Set custom deployment level labels
+ labels: {}
+
+ # -- Set custom deployment level annotations
+ annotations: {}
+
+ # -- Specify pod metadata, this metadata is added directly to the pod, and not higher objects
+ podMetadata:
+ # -- Extra pod level labels
+ labels: {}
+ # -- Extra pod level annotations
+ annotations: {}
+
+ # -- Node labels for pod assignment.
+ nodeSelector: {}
+ # If you do want to specify node labels, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'nodeSelector:'.
+ # foo: bar
+
+ # -- Array of extra envs to be passed to the deployment. Kubernetes format is expected. Value is processed with Helm
+ # `tpl`
+ # - name: FOO
+ # value: BAR
+ extraEnv: []
+
+ # -- Parameters for the automigration initContainer
+ automigration:
+ # -- Array of extra envs to be passed to the initContainer. Kubernetes format is expected. Value is processed with
+ # Helm `tpl`
+ # - name: FOO
+ # value: BAR
+ extraEnv: []
+
+ # -- Configure node tolerations.
+ tolerations: []
+
+ # -- Configure pod topologySpreadConstraints.
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/name: hydra
+ # app.kubernetes.io/instance: hydra
+
+ # -- Configure pod dnsConfig.
+ dnsConfig: {}
+ # options:
+ # - name: "ndots"
+ # value: "1"
+
+ # -- Specify the serviceAccountName value.
+ # In some situations it is needed to provide specific permissions to Hydra deployments,
+ # for example when installing Hydra on a cluster with a PodSecurityPolicy and Istio.
+ # Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment.
+ # The service account is created with automountServiceAccountToken disabled; the
+ # deployment opts in separately via deployment.automountServiceAccountToken.
+ # If serviceAccount.name is set and create is false, the named account must
+ # already exist in the release namespace.
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations: {}
+ # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+ # -- If you want to mount external volume
+ extraVolumes: []
+ # - name: my-volume
+ # secret:
+ # secretName: my-secret
+ extraVolumeMounts: []
+ # - name: my-volume
+ # mountPath: /etc/secrets/my-secret
+ # readOnly: true
+
+ # For example, mount a secret containing Certificate root CA to verify database
+ # TLS connection.
+ # extraVolumes:
+ # - name: postgresql-tls
+ # secret:
+ # secretName: postgresql-root-ca
+ # extraVolumeMounts:
+ # - name: postgresql-tls
+ # mountPath: "/etc/postgresql-tls"
+ # readOnly: true
+
+ # -- Configure HPA
+ autoscaling:
+ enabled: false
+ minReplicas: 1
+ maxReplicas: 3
+ targetCPU: {}
+ # type: Utilization
+ # averageUtilization: 80
+ targetMemory: {}
+ # type: Utilization
+ # averageUtilization: 80
+ # -- Set custom behavior
+ # https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior
+ behavior: {}
+
+ # -- Default probe timers
+ livenessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ failureThreshold: 5
+ # -- Default probe timers
+ readinessProbe:
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ failureThreshold: 5
+ # -- Default probe timers
+ startupProbe:
+ failureThreshold: 60
+ successThreshold: 1
+ periodSeconds: 1
+ timeoutSeconds: 1
+
+ automountServiceAccountToken: false
+
+ terminationGracePeriodSeconds: 60
+
+ # -- If you want to add extra init containers. These are processed before the migration init container.
+ extraInitContainers: ""
+ # extraInitContainers: |
+ # - name: ...
+ # image: ...
+
+ # -- If you want to add extra sidecar containers.
+ extraContainers: ""
+ # extraContainers: |
+ # - name: ...
+ # image: ...
+
+ # -- Configure a custom livenessProbe. This overwrites the default object
+ customLivenessProbe: {}
+ # -- Configure a custom readinessProbe. This overwrites the default object
+ customReadinessProbe: {}
+ # -- Configure a custom startupProbe. This overwrites the default object
+ customStartupProbe: {}
+ # -- Number of revisions kept in history
+ revisionHistoryLimit: 5
+
+## -- Values for initialization job
+job:
+ # -- If you do want to specify annotations, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'annotations:'.
+ annotations:
+ helm.sh/hook-weight: "1"
+ helm.sh/hook: "pre-install, pre-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+
+ # -- Set custom deployment level labels
+ labels: {}
+
+ # -- If you want to add extra sidecar containers.
+ extraContainers: ""
+ # extraContainers: |
+ # - name: ...
+ # image: ...
+
+ # -- Array of extra envs to be passed to the job. This takes precedence over deployment variables. Kubernetes format
+ # is expected. Value is processed with Helm `tpl`
+ # - name: FOO
+ # value: BAR
+ extraEnv: []
+
+ # -- Specify pod metadata, this metadata is added directly to the pod, and not higher objects
+ podMetadata:
+ # -- Extra pod level labels
+ labels: {}
+ # -- Extra pod level annotations
+ annotations: {}
+
+ # -- If you want to add extra init containers.
+ # extraInitContainers: |
+ # - name: ...
+ # image: ...
+ extraInitContainers: ""
+
+ # -- Node labels for pod assignment.
+ nodeSelector: {}
+ # If you do want to specify node labels, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'nodeSelector:'.
+ # foo: bar
+
+ # -- resource requests and limits for the automigration job
+ resources: {}
+
+ # -- Configure node tolerations.
+ tolerations: []
+
+ # -- If you want to add lifecycle hooks.
+ lifecycle: ""
+ # lifecycle: |
+ # preStop:
+ # exec:
+ # command: [...]
+
+ # -- Set automounting of the SA token
+ automountServiceAccountToken: true
+
+ # -- Set sharing process namespace
+ shareProcessNamespace: false
+
+ # -- Specify the serviceAccountName value.
+ # In some situations it is needed to provide specific permissions to Hydra deployments
+ # Like for example installing Hydra on a cluster with a PodSecurityPolicy and Istio.
+ # Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment.
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations:
+ helm.sh/hook-weight: "0"
+ helm.sh/hook: "pre-install, pre-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+ # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+ spec:
+ # -- Set job back off limit
+ backoffLimit: 10
+
+## -- Configure node affinity
+affinity: {}
+
+## -- Configures controller setup
+maester:
+ enabled: true
+
+## -- Values for the hydra admin service arguments to hydra-maester
+hydra-maester:
+ adminService:
+ # -- The service name value may need to be set if you use `fullnameOverride` for the parent chart
+ name: ""
+ # -- You only need to set this port if you change the value for `service.admin.port` in the parent chart
+ # port:
+
+## -- Sidecar watcher configuration
+watcher:
+ enabled: false
+ image: oryd/k8s-toolbox:0.0.5
+ # -- Path to mounted file, which will be monitored for changes. eg: /etc/secrets/my-secret/foo
+ mountFile: ""
+ # -- Specify pod metadata, this metadata is added directly to the pod, and not higher objects
+ podMetadata:
+ # -- Extra pod level labels
+ labels: {}
+ # -- Extra pod level annotations
+ annotations: {}
+ # -- Label key used for managing applications
+ watchLabelKey: "ory.sh/watcher"
+ # -- Number of revisions kept in history
+ revisionHistoryLimit: 5
+
+ # -- pod securityContext for watcher deployment
+ podSecurityContext: {}
+ resources: {}
+ automountServiceAccountToken: true
+
+ # -- container securityContext for watcher deployment
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ seccompProfile:
+ type: RuntimeDefault
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 100
+ allowPrivilegeEscalation: false
+ privileged: false
+
+## -- Janitor cron job configuration
+janitor:
+ # -- Enable cleanup of stale database rows by periodically running the janitor command
+ enabled: false
+
+ # -- Configure if the trust relationships must be cleaned up
+ cleanupGrants: false
+
+ # -- Configure if the consent and authentication requests must be cleaned up
+ cleanupRequests: false
+
+ # -- Configure if the access and refresh tokens must be cleaned up
+ cleanupTokens: false
+
+ # -- Configure how many records are deleted with each iteration
+ batchSize: 100
+
+ # -- Configure how many records are retrieved from database for deletion
+ limit: 10000
+
+## -- CronJob configuration
+cronjob:
+ janitor:
+ # -- Configure how often the cron job is run
+ schedule: "0 */1 * * *"
+ # -- Configure a custom entrypoint, overriding the default value
+ customCommand: []
+
+ # -- Configure the arguments of the entrypoint, overriding the default value
+ customArgs: []
+
+ # -- Array of extra envs to be passed to the cronjob. This takes precedence over deployment variables. Kubernetes
+ # format is expected. Value is processed with Helm `tpl`
+ # - name: FOO
+ # value: BAR
+ extraEnv: []
+
+ # -- If you want to add extra init containers. These are processed before the migration init container.
+ extraInitContainers: ""
+ # extraInitContainers: |
+ # - name: ...
+ # image: ...
+
+ # -- If you want to add extra sidecar containers.
+ extraContainers: ""
+ # extraContainers: |
+ # - name: ...
+ # image: ...
+
+ # -- If you want to mount external volume
+ extraVolumes: []
+ # - name: my-volume
+ # secret:
+ # secretName: my-secret
+ extraVolumeMounts: []
+ # - name: my-volume
+ # mountPath: /etc/secrets/my-secret
+ # readOnly: true
+
+ # -- Set custom cron job level labels
+ labels: {}
+
+ # -- Set custom cron job level annotations
+ annotations: {}
+
+ # -- Specify pod metadata, this metadata is added directly to the pod, and not higher objects
+ podMetadata:
+ # -- Extra pod level labels
+ labels: {}
+
+ # -- Extra pod level annotations
+ annotations: {}
+
+ # -- Configure node labels for pod assignment
+ nodeSelector: {}
+
+ # -- Configure node tolerations
+ tolerations: []
+
+ # -- Configure node affinity
+ affinity: {}
+
+ # -- Set automounting of the SA token
+ automountServiceAccountToken: true
+
+ # -- Specify the serviceAccountName value.
+ # In some situations it is needed to provide specific permissions to Hydra deployments
+ # Like for example installing Hydra on a cluster with a PodSecurityPolicy and Istio.
+ # Uncomment if it is needed to provide a ServiceAccount for the Hydra deployment.
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations:
+ helm.sh/hook-weight: "0"
+ helm.sh/hook: "pre-install, pre-upgrade"
+ helm.sh/hook-delete-policy: "before-hook-creation"
+ # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+ # -- Configure the containers' SecurityContext for the janitor cronjob
+ securityContext:
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 100
+ allowPrivilegeEscalation: false
+ privileged: false
+
+ ## -- pod securityContext for the janitor cronjob
+ podSecurityContext: {}
+
+ # -- We usually recommend not to specify default resources and to leave this as a conscious choice for the user.
+ # This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ resources:
+ limits: {}
+ requests: {}
+
+## -- PodDisruptionBudget configuration
+pdb:
+ enabled: false
+ spec:
+ minAvailable: ""
+ maxUnavailable: ""
+
+## -- Parameters for the Prometheus ServiceMonitor objects.
+# Reference: https://docs.openshift.com/container-platform/4.6/rest_api/monitoring_apis/servicemonitor-monitoring-coreos-com-v1.html
+serviceMonitor:
+ # -- switch to true to enable creating the ServiceMonitor
+ enabled: false
+ # -- HTTP scheme to use for scraping.
+ scheme: http
+ # -- Interval at which metrics should be scraped
+ scrapeInterval: 60s
+ # -- Timeout after which the scrape is ended
+ scrapeTimeout: 30s
+ # -- Provide additional labels to the ServiceMonitor resource metadata
+ labels: {}
+ # -- TLS configuration to use when scraping the endpoint
+ tlsConfig: {}
+
+configmap:
+ # -- switch to false to prevent checksum annotations being maintained and propagated to the pods
+ hashSumEnabled: true
+
+test:
+ # -- Provide additional labels to the test pod
+ labels: {}
+ # -- use a busybox image from another repository
+ busybox:
+ repository: busybox
+ tag: "1"