diff --git a/.github/workflows/microk8s.yml b/.github/workflows/microk8s.yml
index 5a050b95605..c9e5f74dd31 100644
--- a/.github/workflows/microk8s.yml
+++ b/.github/workflows/microk8s.yml
@@ -25,7 +25,7 @@ jobs:
       matrix:
         istio: ["true", "false"]
         # add '"pgsql" when supported
-        persistence-backends: ["LDAP","MYSQL"]
+        persistence-backends: ["MYSQL"]
       fail-fast: false
     runs-on: ubuntu-latest
     steps:
diff --git a/automation/docs/generated-cn-docs.sh b/automation/docs/generated-cn-docs.sh
index eb78604a4f9..230817cbd94 100644
--- a/automation/docs/generated-cn-docs.sh
+++ b/automation/docs/generated-cn-docs.sh
@@ -17,18 +17,15 @@ cd ..
 helm-docs "$MAIN_DIRECTORY_LOCATION"/charts/
 rm -rf helmtemp
 echo "Copying Helm chart Readme to helm-chart.md"
-cp "$MAIN_DIRECTORY_LOCATION"/charts/janssen/README.md "$MAIN_DIRECTORY_LOCATION"/docs/admin/reference/kubernetes/helm-chart.md
+cp "$MAIN_DIRECTORY_LOCATION"/charts/janssen/README.md "$MAIN_DIRECTORY_LOCATION"/docs/janssen-server/reference/kubernetes/helm-chart.md
 echo "Adding keywords to helm-chart"
-sed -i '1 s/^/---\ntags:\n - administration\n - reference\n - kubernetes\n---\n/' "$MAIN_DIRECTORY_LOCATION"/docs/admin/reference/kubernetes/helm-chart.md
+sed -i '1 s/^/---\ntags:\n - administration\n - reference\n - kubernetes\n---\n/' "$MAIN_DIRECTORY_LOCATION"/docs/janssen-server/reference/kubernetes/helm-chart.md
 echo "Copying docker-monolith main README.md to compose.md"
-cp "$MAIN_DIRECTORY_LOCATION"/docker-jans-monolith/README.md "$MAIN_DIRECTORY_LOCATION"/docs/admin/install/docker-install/compose.md
+cp "$MAIN_DIRECTORY_LOCATION"/docker-jans-monolith/README.md "$MAIN_DIRECTORY_LOCATION"/docs/janssen-server/install/docker-install/compose.md
 echo "Copying docker images Readme to respective image md"
 # cp docker files main README.md
 docker_images="docker-jans-auth-server docker-jans-certmanager docker-jans-config-api docker-jans-configurator docker-jans-fido2 docker-jans-persistence-loader docker-jans-scim docker-jans-monolith docker-jans-casa docker-jans-link docker-jans-all-in-one"
 for image in $docker_images;do
-  cp "$MAIN_DIRECTORY_LOCATION"/"$image"/README.md "$MAIN_DIRECTORY_LOCATION"/docs/admin/reference/kubernetes/"$image".md
+  cp "$MAIN_DIRECTORY_LOCATION"/"$image"/README.md "$MAIN_DIRECTORY_LOCATION"/docs/janssen-server/reference/kubernetes/"$image".md
 done
-echo "cp docker-opendj main README.md"
-wget https://raw.githubusercontent.com/GluuFederation/docker-opendj/5.0/README.md -O "$MAIN_DIRECTORY_LOCATION"/docs/admin/reference/kubernetes/docker-opendj.md
-sed -i '1 s/^/---\ntags:\n - administration\n - reference\n - kubernetes\n - docker image\n---\n/' "$MAIN_DIRECTORY_LOCATION"/docs/admin/reference/kubernetes/docker-opendj.md
-echo "generated-cn-docs.sh executed successfully!"
\ No newline at end of file
+echo "generated-cn-docs.sh executed successfully!"
diff --git a/automation/rancher-partner-charts/app-readme.md b/automation/rancher-partner-charts/app-readme.md
index 8b2fadd2974..5aa9786ca5b 100644
--- a/automation/rancher-partner-charts/app-readme.md
+++ b/automation/rancher-partner-charts/app-readme.md
@@ -23,7 +23,6 @@ The Janssen Server can be deployed to support the following open standards for a
 - System for Cross-domain Identity Management (SCIM)
 - FIDO Universal 2nd Factor (U2F)
 - FIDO 2.0 / WebAuthn
-- Lightweight Directory Access Protocol (LDAP)
 - Remote Authentication Dial-In User Service (RADIUS)
 
 ### Important notes for installation:
@@ -32,4 +31,4 @@ The Janssen Server can be deployed to support the following open standards for a
 ### Quick install on Rancher UI with Docker single node
 - Install the nginx-ingress-controller chart.
 - Install the OpenEBS chart.
-- Install Janssen chart and specify your persistence as ldap.
+- Install Janssen chart and specify your persistence.
diff --git a/automation/rancher-partner-charts/questions.yaml b/automation/rancher-partner-charts/questions.yaml
index 2c686c1875b..62db44b864d 100644
--- a/automation/rancher-partner-charts/questions.yaml
+++ b/automation/rancher-partner-charts/questions.yaml
@@ -58,37 +58,21 @@ questions:
   type: enum
   group: "Persistence"
   label: Gluu Persistence backend
-  description: "Persistence backend to run Gluu with ldap|couchbase|hybrid|sql|spanner"
+  description: "Persistence backend to run Gluu with couchbase|hybrid|sql|spanner"
   options:
-    - "ldap"
     - "couchbase"
     - "hybrid"
     - "spanner"
     - "sql"
-# LDAP
-- variable: global.opendj.enabled
-  default: false
-  type: boolean
-  group: "Persistence"
-  required: true
-  label: Enable installation of OpenDJ
-  description: "Boolean flag to enable/disable the OpenDJ chart."
-  show_if: "global.cnPersistenceType=ldap||global.cnPersistenceType=hybrid"
-- variable: config.configmap.cnLdapUrl
-  default: "opendj:1636"
-  type: hostname
-  group: "Persistence"
-  required: true
-  label: OpenDJ remote URL
-  description: "OpenDJ remote URL. This must be resolvable by the pods"
-  show_if: "global.opendj.enabled=false&&global.cnPersistenceType=ldap||global.cnPersistenceType=hybrid"
-- variable: config.configmap.cnPersistenceLdapMapping
+
+# Hybrid
+- variable: config.configmap.cnPersistenceHybridMapping
   default: "default"
   required: false
   type: enum
   group: "Persistence"
-  label: Gluu Persistence LDAP mapping
-  description: "Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`."
+  label: Gluu Persistence hybrid mapping
+  description: "Specify data that should be saved in persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`."
options: - "default" - "user" @@ -241,42 +225,6 @@ questions: label: Couchbase password for the restricted user show_if: "global.cnPersistenceType=couchbase||global.cnPersistenceType=hybrid" -# ============================== -# StorageClass and volume group -# ============================== -- variable: global.storageClass.provisioner - default: "microk8s.io/hostpath" - type: string - group: "Volumes" - required: true - label: StorageClass provisioner - show_if: "global.cnPersistenceType=ldap" - subquestions: - - variable: global.storageClass.allowVolumeExpansion - default: true - type: boolean - group: "Volumes" - required: true - label: StorageClass Volume expansion - - variable: global.storageClass.reclaimPolicy - default: "Retain" - type: enum - group: "Volumes" - required: true - label: StorageClass reclaimPolicy - options: - - "Delete" - - "Retain" - - variable: global.storageClass.volumeBindingMode - default: "WaitForFirstConsumer" - type: enum - group: "Volumes" - required: true - options: - - "WaitForFirstConsumer" - - "Immediate" - label: StorageClass volumeBindingMode - # =========== # Cache group # =========== @@ -365,16 +313,6 @@ questions: label: Organization description: "Organization name. Used for certificate creation." - -- variable: config.ldapPassword - default: "Test1234#" - type: password - group: "Configuration" - required: true - label: LDAP password - description: "LDAP admin password if OpenDJ is used for persistence" - show_if: "global.cnPersistenceType=ldap||global.cnPersistenceType=hybrid" - - variable: global.isFqdnRegistered default: true required: true @@ -647,35 +585,6 @@ questions: label: Fido2 image tag group: "Images" show_if: "global.fido2.enabled=true" -# OpenDJ -- variable: opendj.image.repository - required: true - type: string - default: "gluufederation/opendj" - description: "The OpenDJ Image repository" - label: OpenDJ image repo - group: "Images" - show_if: "global.opendj.enabled=true" -- variable: opendj.image.pullPolicy - required: true - type: enum - group: "Images" - default: IfNotPresent - description: "The OpenDJ Image pull policy" - label: OpenDJ imagePullPolicy - options: - - "Always" - - "IfNotPresent" - - "Never" - show_if: "global.opendj.enabled=true" -- variable: opendj.image.tag - required: true - type: string - default: "5.0.0_dev" - description: "The OpenDJ Image tag" - label: OpenDJ image tag - group: "Images" - show_if: "global.opendj.enabled=true" # Persistence - variable: persistence.image.repository required: true @@ -774,15 +683,6 @@ questions: label: Fido2 Replicas description: "Service replica number." show_if: "global.fido2.enabled=true" -# OpenDJ -- variable: opendj.replicas - default: 1 - required: false - type: int - group: "Replicas" - label: OpenDJ Replicas - description: "Service replica number." - show_if: "global.opendj.enabled=true" # SCIM - variable: scim.replicas default: 1 diff --git a/automation/startjanssendemo.sh b/automation/startjanssendemo.sh index 586a80ec633..278e5cef86d 100644 --- a/automation/startjanssendemo.sh +++ b/automation/startjanssendemo.sh @@ -15,10 +15,10 @@ if ! [[ $JANS_FQDN == *"."*"."* ]]; then exit 1 fi if [[ ! "$JANS_PERSISTENCE" ]]; then - read -rp "Enter persistence type [LDAP|MYSQL|PGSQL]: " JANS_PERSISTENCE + read -rp "Enter persistence type [MYSQL|PGSQL]: " JANS_PERSISTENCE fi -if [[ $JANS_PERSISTENCE != "LDAP" ]] && [[ $JANS_PERSISTENCE != "MYSQL" ]] && [[ $JANS_PERSISTENCE != "PGSQL" ]]; then - echo "[E] Incorrect entry. 
Please enter either LDAP, MYSQL or PGSQL" +if [[ $JANS_PERSISTENCE != "MYSQL" ]] && [[ $JANS_PERSISTENCE != "PGSQL" ]]; then + echo "[E] Incorrect entry. Please enter either MYSQL or PGSQL" exit 1 fi @@ -121,38 +121,6 @@ config: EOF fi -ENABLE_LDAP="false" -if [[ $JANS_PERSISTENCE == "LDAP" ]]; then - openssl req \ - -x509 \ - -newkey rsa:2048 \ - -sha256 \ - -days 365 \ - -nodes \ - -keyout opendj.key \ - -out opendj.crt \ - -subj "/CN=$JANS_FQDN" \ - -addext 'subjectAltName=DNS:ldap,DNS:opendj' - - LDAP_CERT_B64=$(base64 opendj.crt -w0) - LDAP_KEY_B64=$(base64 opendj.key -w0) - - rm -f opendj.crt opendj.key - - cat << EOF > override.yaml -config: - countryCode: US - email: support@gluu.org - orgName: Gluu - city: Austin - configmap: - cnLdapCrt: $LDAP_CERT_B64 - cnLdapKey: $LDAP_KEY_B64 -EOF - PERSISTENCE_TYPE="ldap" - ENABLE_LDAP="true" -fi - echo "$EXT_IP $JANS_FQDN" | sudo tee -a /etc/hosts > /dev/null cat << EOF >> override.yaml global: @@ -173,8 +141,6 @@ global: persistenceLogLevel: "$LOG_LEVEL" persistenceDurationLogTarget: "$LOG_TARGET" persistenceDurationLogLevel: "$LOG_LEVEL" - ldapStatsLogTarget: "$LOG_TARGET" - ldapStatsLogLevel: "$LOG_LEVEL" scriptLogTarget: "$LOG_TARGET" scriptLogLevel: "$LOG_LEVEL" auditStatsLogTarget: "$LOG_TARGET" @@ -187,7 +153,7 @@ global: timerLogTarget: "$LOG_TARGET" timerLogLevel: "$LOG_LEVEL" ingress: - casaEnabled: true + casaEnabled: true config-api: appLoggers: configApiLogTarget: "$LOG_TARGET" @@ -211,15 +177,10 @@ global: persistenceLogLevel: "$LOG_LEVEL" persistenceDurationLogTarget: "$LOG_TARGET" persistenceDurationLogLevel: "$LOG_LEVEL" - ldapStatsLogTarget: "$LOG_TARGET" - ldapStatsLogLevel: "$LOG_LEVEL" scriptLogTarget: "$LOG_TARGET" scriptLogLevel: "$LOG_LEVEL" fqdn: $JANS_FQDN lbIp: $EXT_IP - opendj: - # -- Boolean flag to enable/disable the OpenDJ chart. - enabled: $ENABLE_LDAP # -- Nginx ingress definitions chart nginx-ingress: ingress: diff --git a/automation/startjanssenmonolithdemo.sh b/automation/startjanssenmonolithdemo.sh index 5af5405a77f..3dc0dfd1cae 100644 --- a/automation/startjanssenmonolithdemo.sh +++ b/automation/startjanssenmonolithdemo.sh @@ -12,7 +12,7 @@ if [[ ! "$JANS_FQDN" ]]; then read -rp "Enter Hostname [demoexample.jans.io]: " JANS_FQDN fi if [[ ! "$JANS_PERSISTENCE" ]]; then - read -rp "Enter persistence type [LDAP|MYSQL|PGSQL|COUCHBASE[TEST]|SPANNER[TEST]]: " JANS_PERSISTENCE + read -rp "Enter persistence type [MYSQL|PGSQL|COUCHBASE[TEST]|SPANNER[TEST]]: " JANS_PERSISTENCE fi if [[ -z $EXT_IP ]]; then @@ -71,7 +71,6 @@ if [[ "$JANS_BUILD_COMMIT" ]]; then # and use the respective image instead of the default image python3 -c "from pathlib import Path ; import ruamel.yaml ; compose = Path('/tmp/jans/docker-jans-monolith/jans-mysql-compose.yml') ; yaml = ruamel.yaml.YAML() ; data = yaml.load(compose) ; data['services']['jans']['build'] = '.' ; del data['services']['jans']['image'] ; yaml.dump(data, compose)" python3 -c "from pathlib import Path ; import ruamel.yaml ; compose = Path('/tmp/jans/docker-jans-monolith/jans-postgres-compose.yml') ; yaml = ruamel.yaml.YAML() ; data = yaml.load(compose) ; data['services']['jans']['build'] = '.' ; del data['services']['jans']['image'] ; yaml.dump(data, compose)" - python3 -c "from pathlib import Path ; import ruamel.yaml ; compose = Path('/tmp/jans/docker-jans-monolith/jans-ldap-compose.yml') ; yaml = ruamel.yaml.YAML() ; data = yaml.load(compose) ; data['services']['jans']['build'] = '.' 
; del data['services']['jans']['image'] ; yaml.dump(data, compose)" python3 -c "from pathlib import Path ; import ruamel.yaml ; compose = Path('/tmp/jans/docker-jans-monolith/jans-couchbase-compose.yml') ; yaml = ruamel.yaml.YAML() ; data = yaml.load(compose) ; data['services']['jans']['build'] = '.' ; del data['services']['jans']['image'] ; yaml.dump(data, compose)" python3 -c "from pathlib import Path ; import ruamel.yaml ; compose = Path('/tmp/jans/docker-jans-monolith/jans-spanner-compose.yml') ; yaml = ruamel.yaml.YAML() ; data = yaml.load(compose) ; data['services']['jans']['build'] = '.' ; del data['services']['jans']['image'] ; yaml.dump(data, compose)" fi @@ -87,13 +86,11 @@ if [[ $JANS_PERSISTENCE == "MYSQL" ]]; then bash /tmp/jans/docker-jans-monolith/up.sh mysql elif [[ $JANS_PERSISTENCE == "PGSQL" ]]; then bash /tmp/jans/docker-jans-monolith/up.sh postgres -elif [[ $JANS_PERSISTENCE == "LDAP" ]]; then - bash /tmp/jans/docker-jans-monolith/up.sh ldap elif [[ $JANS_PERSISTENCE == "COUCHBASE" ]]; then bash /tmp/jans/docker-jans-monolith/up.sh couchbase elif [[ $JANS_PERSISTENCE == "SPANNER" ]]; then bash /tmp/jans/docker-jans-monolith/up.sh spanner -fi +fi echo "$EXT_IP $JANS_FQDN" | sudo tee -a /etc/hosts > /dev/null jans_status="unhealthy" # run loop for 5 mins diff --git a/charts/janssen-all-in-one/README.md b/charts/janssen-all-in-one/README.md index eec5b95a6be..309daea3d37 100644 --- a/charts/janssen-all-in-one/README.md +++ b/charts/janssen-all-in-one/README.md @@ -29,7 +29,7 @@ Kubernetes: `>=v1.22.0-0` | additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | | adminPassword | string | `"Test1234#"` | Admin password to log in to the UI. | | alb.ingress | bool | `false` | switches the service to Nodeport for ALB ingress | -| auth-server | object | `{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512","enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false}` | Parameters used globally across all services helm charts. 
| +| auth-server | object | `{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512","enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false}` | Parameters used globally across all services helm charts. | | auth-server-key-rotation | object | `{"additionalAnnotations":{},"additionalLabels":{},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","enabled":true,"image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/certmanager","tag":"1.1.6_dev"},"initKeysLife":48,"keysLife":48,"keysPushDelay":0,"keysPushStrategy":"NEWER","keysStrategy":"NEWER","lifecycle":{},"resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Responsible for regenerating auth-keys per x hours | | auth-server-key-rotation.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | | auth-server-key-rotation.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | @@ -56,7 +56,7 @@ Kubernetes: `>=v1.22.0-0` | auth-server-key-rotation.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | | auth-server-key-rotation.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | | auth-server-key-rotation.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | -| auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. 
| +| auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | auth-server.appLoggers.auditStatsLogLevel | string | `"INFO"` | jans-auth_audit.log level | | auth-server.appLoggers.auditStatsLogTarget | string | `"FILE"` | jans-auth_script.log target | | auth-server.appLoggers.authLogLevel | string | `"INFO"` | jans-auth.log level | @@ -64,8 +64,6 @@ Kubernetes: `>=v1.22.0-0` | auth-server.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e auth-server-script ===> 2022-12-20 17:49:55,744 INFO | | auth-server.appLoggers.httpLogLevel | string | `"INFO"` | http_request_response.log level | | auth-server.appLoggers.httpLogTarget | string | `"FILE"` | http_request_response.log target | -| auth-server.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-auth_persistence_ldap_statistics.log level | -| auth-server.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-auth_persistence_ldap_statistics.log target | | auth-server.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-auth_persistence_duration.log level | | auth-server.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-auth_persistence_duration.log target | | auth-server.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-auth_persistence.log level | @@ -137,12 +135,10 @@ Kubernetes: `>=v1.22.0-0` | cnPersistenceType | string | `"sql"` | Persistence backend to run Janssen with couchbase|hybrid|sql|spanner. | | cnPrometheusPort | string | `""` | Port used by Prometheus JMX agent (default to empty string). To enable Prometheus JMX agent, set the value to a number. | | cnSqlPasswordFile | string | `"/etc/jans/conf/sql_password"` | Path to SQL password file | -| config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | config-api.appLoggers.configApiLogLevel | string | `"INFO"` | configapi.log level | | config-api.appLoggers.configApiLogTarget | string | `"STDOUT"` | configapi.log target | | config-api.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. 
i.e config-api_persistence ===> 2022-12-20 17:49:55,744 INFO | -| config-api.appLoggers.ldapStatsLogLevel | string | `"INFO"` | config-api_persistence_ldap_statistics.log level | -| config-api.appLoggers.ldapStatsLogTarget | string | `"FILE"` | config-api_persistence_ldap_statistics.log target | | config-api.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | config-api_persistence_duration.log level | | config-api.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | config-api_persistence_duration.log target | | config-api.appLoggers.persistenceLogLevel | string | `"INFO"` | config-api_persistence.log level | @@ -182,8 +178,6 @@ Kubernetes: `>=v1.22.0-0` | configmap.cnGoogleSpannerDatabaseId | string | `""` | Google Spanner Database ID. Used only when cnPersistenceType is spanner. | | configmap.cnGoogleSpannerInstanceId | string | `""` | Google Spanner ID. Used only when cnPersistenceType is spanner. | | configmap.cnJettyRequestHeaderSize | int | `8192` | Jetty header size in bytes in the auth server | -| configmap.cnLdapCrt | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ certificate string. This must be encoded using base64. | -| configmap.cnLdapKey | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ key string. This must be encoded using base64. | | configmap.cnMaxRamPercent | string | `"75.0"` | Value passed to Java option -XX:MaxRAMPercentage | | configmap.cnMessageType | string | `"DISABLED"` | Message type (one of POSTGRES, REDIS, or DISABLED) | | configmap.cnOpaUrl | string | `"http://opa.opa.svc.cluster.cluster.local:8181/v1"` | URL of OPA server | @@ -302,10 +296,8 @@ Kubernetes: `>=v1.22.0-0` | kcDbPasswordFile | string | `"/etc/jans/conf/kc_db_password"` | Path to file contains password for database access | | lbIp | string | `"22.22.22.22"` | The Loadbalancer IP created by nginx or istio on clouds that provide static IPs. This is not needed if `fqdn` is globally resolvable. | | lifecycle | object | `{}` | | -| link.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| link.appLoggers | object | `{"enableStdoutLogPrefix":"true","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | link.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. 
i.e link-persistence ===> 2022-12-20 17:49:55,744 INFO | -| link.appLoggers.ldapStatsLogLevel | string | `"INFO"` | cacherefresh_persistence_ldap_statistics.log level | -| link.appLoggers.ldapStatsLogTarget | string | `"FILE"` | cacherefresh_persistence_ldap_statistics.log target | | link.appLoggers.linkLogLevel | string | `"INFO"` | cacherefresh.log level | | link.appLoggers.linkLogTarget | string | `"STDOUT"` | cacherefresh.log target | | link.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | cacherefresh_persistence_duration.log level | @@ -343,10 +335,8 @@ Kubernetes: `>=v1.22.0-0` | saml.ingress.samlAdditionalAnnotations | object | `{}` | SAML ingress resource additional annotations. | | saml.ingress.samlLabels | object | `{}` | SAML config ingress resource labels. key app is taken | | saml.samlServiceName | string | `"saml"` | Name of the saml service. Please keep it as default. | -| scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | scim.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. 
i.e jans-scim ===> 2022-12-20 17:49:55,744 INFO | -| scim.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-scim_persistence_ldap_statistics.log level | -| scim.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-scim_persistence_ldap_statistics.log target | | scim.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-scim_persistence_duration.log level | | scim.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-scim_persistence_duration.log target | | scim.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-scim_persistence.log level | @@ -379,4 +369,4 @@ Kubernetes: `>=v1.22.0-0` | volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/janssen-all-in-one/templates/configmap.yaml b/charts/janssen-all-in-one/templates/configmap.yaml index e0b96e2dd01..da13f1372b9 100644 --- a/charts/janssen-all-in-one/templates/configmap.yaml +++ b/charts/janssen-all-in-one/templates/configmap.yaml @@ -96,8 +96,6 @@ data: | replace "persistenceLogLevel" "persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "auditStatsLogTarget" "audit_log_target" @@ -114,8 +112,6 @@ data: | replace "persistenceLogLevel" "persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "enableStdoutLogPrefix" "enable_stdout_log_prefix" @@ -155,7 +151,6 @@ data: {{- end }} {{- if .Values.istio.enabled }} CN_COUCHBASE_TRUSTSTORE_ENABLE: "false" - CN_LDAP_USE_SSL: "false" {{- end }} {{- if .Values.scim.enabled }} CN_SCIM_ENABLED: {{ .Values.scim.enabled | quote }} @@ -168,8 +163,6 @@ data: | replace "persistenceLogLevel" "persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "enableStdoutLogPrefix" "enable_stdout_log_prefix" @@ -205,12 +198,6 @@ data: CN_SQL_PASSWORD_FILE: {{ .Values.cnSqlPasswordFile }} CN_COUCHBASE_PASSWORD_FILE: {{ .Values.cnCouchbasePasswordFile }} CN_COUCHBASE_SUPERUSER_PASSWORD_FILE: {{ .Values.cnCouchbaseSuperuserPasswordFile }} - CN_LDAP_PASSWORD_FILE: {{ .Values.cnLdapPasswordFile }} - CN_LDAP_TRUSTSTORE_PASSWORD_FILE: {{ .Values.cnLdapTruststorePasswordFile }} - CN_LDAP_CERT_FILE: {{ .Values.cnLdapCertFile }} - CN_LDAP_KEY_FILE: {{ .Values.cnLdapKeyFile }} - CN_LDAP_CACERT_FILE: {{ .Values.cnLdapCacertFile }} - CN_LDAP_TRUSTSTORE_FILE: {{ 
.Values.cnLdapTruststoreFile }} CN_CONFIG_API_PLUGINS: {{ index .Values "config-api" "plugins" | quote }} CN_AIO_COMPONENTS: {{ include "janssen-all-in-one.aioComponents" . | fromJsonArray | join "," | quote}} {{- if .Values.saml.enabled }} diff --git a/charts/janssen-all-in-one/templates/secret.yaml b/charts/janssen-all-in-one/templates/secret.yaml index 8d50de27348..e9c440c6f98 100644 --- a/charts/janssen-all-in-one/templates/secret.yaml +++ b/charts/janssen-all-in-one/templates/secret.yaml @@ -36,7 +36,6 @@ stringData: }, "_secret": { "admin_password": {{ .Values.adminPassword | quote }}, - "ldap_password": {{ .Values.adminPassword | quote }}, "redis_password": {{ .Values.redisPassword | quote }}, {{ if or ( eq .Values.cnPersistenceType "sql" ) ( eq .Values.cnPersistenceType "hybrid" ) }} "sql_password": {{ .Values.configmap.cnSqldbUserPassword | quote }}, diff --git a/charts/janssen-all-in-one/values.yaml b/charts/janssen-all-in-one/values.yaml index 9102751a7b6..76f0fe4fcbc 100644 --- a/charts/janssen-all-in-one/values.yaml +++ b/charts/janssen-all-in-one/values.yaml @@ -149,10 +149,6 @@ configmap: cnSecretKubernetesSecret: cn # -- Load balancer address for AWS if the FQDN is not registered. lbAddr: "" - # -- OpenDJ certificate string. This must be encoded using base64. - cnLdapCrt: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= - # -- OpenDJ key string. This must be encoded using base64. - cnLdapKey: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= # -- Quarkus transaction recovery. When using MySQL, there could be issue regarding XA_RECOVER_ADMIN; refer to https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_xa-recover-admin for details. quarkusTransactionEnableRecovery: true # -- Keycloak logging level @@ -218,10 +214,6 @@ auth-server: persistenceDurationLogTarget: "FILE" # -- jans-auth_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- jans-auth_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- jans-auth_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- jans-auth_script.log target scriptLogTarget: "FILE" # -- jans-auth_script.log level @@ -412,10 +404,6 @@ config-api: persistenceDurationLogTarget: "FILE" # -- config-api_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- config-api_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- config-api_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- config-api_script.log target scriptLogTarget: "FILE" # -- config-api_script.log level @@ -490,10 +478,6 @@ scim: persistenceDurationLogTarget: "FILE" # -- jans-scim_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- jans-scim_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- jans-scim_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- jans-scim_script.log target scriptLogTarget: "FILE" # -- jans-scim_script.log level @@ -533,10 +517,6 @@ link: persistenceDurationLogTarget: "FILE" # -- cacherefresh_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- cacherefresh_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- cacherefresh_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- cacherefresh_script.log target scriptLogTarget: "FILE" # -- cacherefresh_script.log level diff --git a/charts/janssen/Chart.yaml b/charts/janssen/Chart.yaml index a5797da04d5..4e82a324403 100644 --- a/charts/janssen/Chart.yaml +++ 
b/charts/janssen/Chart.yaml @@ -12,8 +12,6 @@ annotations: image: ghcr.io/janssenproject/jans/config-api:1.1.6_dev - name: fido2 image: ghcr.io/janssenproject/jans/fido2:1.1.6_dev - - name: opendj - image: gluufederation/opendj:5.0.0_dev - name: persistence image: ghcr.io/janssenproject/jans/persistence-loader:1.1.6_dev - name: casa @@ -58,10 +56,6 @@ dependencies: condition: global.config-api.enabled version: 1.1.6-dev - - name: opendj - condition: global.opendj.enabled - version: 1.1.6-dev - - name: auth-server condition: global.auth-server.enabled version: 1.1.6-dev diff --git a/charts/janssen/README.md b/charts/janssen/README.md index 95586c5f81e..1f4b09588c0 100644 --- a/charts/janssen/README.md +++ b/charts/janssen/README.md @@ -33,7 +33,6 @@ Kubernetes: `>=v1.22.0-0` | | kc-scheduler | 1.1.6-dev | | | link | 1.1.6-dev | | | nginx-ingress | 1.1.6-dev | -| | opendj | 1.1.6-dev | | | persistence | 1.1.6-dev | | | saml | 1.1.6-dev | | | scim | 1.1.6-dev | @@ -125,7 +124,7 @@ Kubernetes: `>=v1.22.0-0` | casa.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | | casa.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | | casa.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | -| config | object | `{"additionalAnnotations":{},"additionalLabels":{},"adminPassword":"Test1234#","city":"Austin","configmap":{"cnAwsAccessKeyId":"","cnAwsDefaultRegion":"us-west-1","cnAwsProfile":"janssen","cnAwsSecretAccessKey":"","cnAwsSecretsEndpointUrl":"","cnAwsSecretsNamePrefix":"janssen","cnAwsSecretsReplicaRegions":[],"cnCacheType":"NATIVE_PERSISTENCE","cnConfigKubernetesConfigMap":"cn","cnCouchbaseBucketPrefix":"jans","cnCouchbaseCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnCouchbaseIndexNumReplica":0,"cnCouchbasePassword":"P@ssw0rd","cnCouchbaseSuperUser":"admin","cnCouchbaseSuperUserPassword":"Test1234#","cnCouchbaseUrl":"cbjanssen.default.svc.cluster.local","cnCouchbaseUser":"janssen","cnGoogleProjectId":"google-project-to-save-config-and-secrets-to","cnGoogleSecretManagerServiceAccount":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnGoogleSecretNamePrefix":"janssen","cnGoogleSecretVersionId":"latest","cnGoogleSpannerDatabaseId":"","cnGoogleSpannerInstanceId":"","cnJettyRequestHeaderSize":8192,"cnLdapCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnLdapKey":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnLdapUrl":"opendj:1636","cnMaxRamPercent":"75.0","cnMessageType":"DISABLED","cnOpaUrl":"http://opa.opa.svc.cluster.cluster.local:8181/v1","cnPersistenceHybridMapping":"{}","cnRedisSentinelGroup":"","cnRedisSslTruststore":"","cnRedisType":"STANDALONE","cnRedisUrl":"redis.redis.svc.cluster.local:6379","cnRedisUseSsl":false,"cnScimProtectionMode":"OAUTH","cnSecretKubernetesSecret":"cn","cnSqlDbDialect":"mysql","cnSqlDbHost":"my-release-mysql.default.svc.cluster.local","cnSqlDbName":"jans","cnSqlDbPort":3306,"cnSqlDbSchema":"","cnSqlDbTimezone":"UTC","cnSqlDbUser":"jans","cnSqldbUserPassword":"Test1234#","cnVaultAddr":"http://localhost:8200","cnVaultAppRolePath":"approle","cnVaultKvPath":"secret","cnVaultNamespace":"","cnVaultPrefix":"jans","cnVaultRoleId":"","cnVaultRoleIdFile":"/etc/certs/vault_role_id","cnVaultSecretId":"","cnVaultSecretIdFile":"/etc/certs/vault_secret_id","cnVaultVerify":false,"kcDbPassword":"Test1234#","kcDbSchema":"keycloak","kcDbUrlDatabase":"keycloak","kcDbUrlHost":"mysql.kc.svc.cluster.local","kcDbUrlPort"
:3306,"kcDbUrlProperties":"?useUnicode=true&characterEncoding=UTF-8&character_set_server=utf8mb4","kcDbUsername":"keycloak","kcDbVendor":"mysql","kcLogLevel":"INFO","lbAddr":"","quarkusTransactionEnableRecovery":true},"countryCode":"US","customScripts":[],"dnsConfig":{},"dnsPolicy":"","email":"support@jans.io","image":{"pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/configurator","tag":"1.1.6_dev"},"ldapPassword":"P@ssw0rds","ldapTruststorePassword":"changeit","lifecycle":{},"orgName":"Janssen","redisPassword":"P@assw0rd","resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"salt":"","state":"TX","usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Configuration parameters for setup and initial configuration secret and config layers used by Janssen services. | +| config | object | `{"additionalAnnotations":{},"additionalLabels":{},"adminPassword":"Test1234#","city":"Austin","configmap":{"cnAwsAccessKeyId":"","cnAwsDefaultRegion":"us-west-1","cnAwsProfile":"janssen","cnAwsSecretAccessKey":"","cnAwsSecretsEndpointUrl":"","cnAwsSecretsNamePrefix":"janssen","cnAwsSecretsReplicaRegions":[],"cnCacheType":"NATIVE_PERSISTENCE","cnConfigKubernetesConfigMap":"cn","cnCouchbaseBucketPrefix":"jans","cnCouchbaseCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnCouchbaseIndexNumReplica":0,"cnCouchbasePassword":"P@ssw0rd","cnCouchbaseSuperUser":"admin","cnCouchbaseSuperUserPassword":"Test1234#","cnCouchbaseUrl":"cbjanssen.default.svc.cluster.local","cnCouchbaseUser":"janssen","cnGoogleProjectId":"google-project-to-save-config-and-secrets-to","cnGoogleSecretManagerServiceAccount":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnGoogleSecretNamePrefix":"janssen","cnGoogleSecretVersionId":"latest","cnGoogleSpannerDatabaseId":"","cnGoogleSpannerInstanceId":"","cnJettyRequestHeaderSize":8192,"cnMaxRamPercent":"75.0","cnMessageType":"DISABLED","cnOpaUrl":"http://opa.opa.svc.cluster.cluster.local:8181/v1","cnPersistenceHybridMapping":"{}","cnRedisSentinelGroup":"","cnRedisSslTruststore":"","cnRedisType":"STANDALONE","cnRedisUrl":"redis.redis.svc.cluster.local:6379","cnRedisUseSsl":false,"cnScimProtectionMode":"OAUTH","cnSecretKubernetesSecret":"cn","cnSqlDbDialect":"mysql","cnSqlDbHost":"my-release-mysql.default.svc.cluster.local","cnSqlDbName":"jans","cnSqlDbPort":3306,"cnSqlDbSchema":"","cnSqlDbTimezone":"UTC","cnSqlDbUser":"jans","cnSqldbUserPassword":"Test1234#","cnVaultAddr":"http://localhost:8200","cnVaultAppRolePath":"approle","cnVaultKvPath":"secret","cnVaultNamespace":"","cnVaultPrefix":"jans","cnVaultRoleId":"","cnVaultRoleIdFile":"/etc/certs/vault_role_id","cnVaultSecretId":"","cnVaultSecretIdFile":"/etc/certs/vault_secret_id","cnVaultVerify":false,"kcDbPassword":"Test1234#","kcDbSchema":"keycloak","kcDbUrlDatabase":"keycloak","kcDbUrlHost":"mysql.kc.svc.cluster.local","kcDbUrlPort":3306,"kcDbUrlProperties":"?useUnicode=true&characterEncoding=UTF-8&character_set_server=utf8mb4","kcDbUsername":"keycloak","kcDbVendor":"mysql","kcLogLevel":"INFO","lbAddr":"","quarkusTransactionEnableRecovery":true},"countryCode":"US","customScripts":[],"dnsConfig":{},"dnsPolicy":"","email":"support@jans.io","image":{"pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/configurator","tag":"1.1.6_dev"},"lifecycle":{},"orgName":"Janssen","redisPassword":"P@assw0rd","resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"salt":"","state":"TX","usrEnvs":{"normal":{},"secret":{}},"volumeMounts
":[],"volumes":[]}` | Configuration parameters for setup and initial configuration secret and config layers used by Janssen services. | | config-api | object | `{"additionalAnnotations":{},"additionalLabels":{},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","hpa":{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50},"image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/config-api","tag":"1.1.6_dev"},"lifecycle":{},"livenessProbe":{"httpGet":{"path":"/jans-config-api/api/v1/health/live","port":8074},"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5},"pdb":{"enabled":true,"maxUnavailable":"90%"},"readinessProbe":{"httpGet":{"path":"jans-config-api/api/v1/health/ready","port":8074},"initialDelaySeconds":25,"periodSeconds":25,"timeoutSeconds":5},"replicas":1,"resources":{"limits":{"cpu":"1000m","memory":"1200Mi"},"requests":{"cpu":"1000m","memory":"1200Mi"}},"topologySpreadConstraints":{},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Config Api endpoints can be used to configure the auth-server, which is an open-source OpenID Connect Provider (OP) and UMA Authorization Server (AS). | | config-api.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | | config-api.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | @@ -175,13 +174,10 @@ Kubernetes: `>=v1.22.0-0` | config.configmap.cnGoogleSpannerDatabaseId | string | `""` | Google Spanner Database ID. Used only when global.cnPersistenceType is spanner. | | config.configmap.cnGoogleSpannerInstanceId | string | `""` | Google Spanner ID. Used only when global.cnPersistenceType is spanner. | | config.configmap.cnJettyRequestHeaderSize | int | `8192` | Jetty header size in bytes in the auth server | -| config.configmap.cnLdapCrt | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ certificate string. This must be encoded using base64. | -| config.configmap.cnLdapKey | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ key string. This must be encoded using base64. | -| config.configmap.cnLdapUrl | string | `"opendj:1636"` | OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`. | | config.configmap.cnMaxRamPercent | string | `"75.0"` | Value passed to Java option -XX:MaxRAMPercentage | | config.configmap.cnMessageType | string | `"DISABLED"` | Message type (one of POSTGRES, REDIS, or DISABLED) | | config.configmap.cnOpaUrl | string | `"http://opa.opa.svc.cluster.cluster.local:8181/v1"` | URL of OPA API | -| config.configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. { "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | +| config.configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. 
{ "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | | config.configmap.cnRedisSentinelGroup | string | `""` | Redis Sentinel Group. Often set when `config.configmap.cnRedisType` is set to `SENTINEL`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | config.configmap.cnRedisSslTruststore | string | `""` | Redis SSL truststore. Optional. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | config.configmap.cnRedisType | string | `"STANDALONE"` | Redis service type. `STANDALONE` or `CLUSTER`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | @@ -226,8 +222,6 @@ Kubernetes: `>=v1.22.0-0` | config.image.pullSecrets | list | `[]` | Image Pull Secrets | | config.image.repository | string | `"ghcr.io/janssenproject/jans/configurator"` | Image to use for deploying. | | config.image.tag | string | `"1.1.6_dev"` | Image tag to use for deploying. | -| config.ldapPassword | string | `"P@ssw0rds"` | LDAP admin password if OpenDJ is used for persistence. | -| config.ldapTruststorePassword | string | `"changeit"` | LDAP truststore password if OpenDJ is used for persistence | | config.orgName | string | `"Janssen"` | Organization name. Used for certificate creation. | | config.redisPassword | string | `"P@assw0rd"` | Redis admin password if `config.configmap.cnCacheType` is set to `REDIS`. | | config.resources | object | `{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}}` | Resource specs. | @@ -273,11 +267,11 @@ Kubernetes: `>=v1.22.0-0` | fido2.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | | fido2.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | | fido2.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | -| global | object | `{"alb":{"ingress":false},"auth-server":{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authServerServiceName":"auth-server","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 
PS512","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false},"auth-server-key-rotation":{"customAnnotations":{"cronjob":{},"secret":{},"service":{}},"enabled":true,"initKeysLife":48},"awsStorageType":"io1","azureStorageAccountType":"Standard_LRS","azureStorageKind":"Managed","casa":{"appLoggers":{"casaLogLevel":"INFO","casaLogTarget":"STDOUT","enableStdoutLogPrefix":"true","timerLogLevel":"INFO","timerLogTarget":"FILE"},"casaServiceName":"casa","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"casaAdditionalAnnotations":{},"casaEnabled":false,"casaLabels":{}}},"cloud":{"testEnviroment":false},"cnAwsConfigFile":"/etc/jans/conf/aws_config_file","cnAwsSecretsReplicaRegionsFile":"/etc/jans/conf/aws_secrets_replica_regions","cnAwsSharedCredentialsFile":"/etc/jans/conf/aws_shared_credential_file","cnConfiguratorConfigurationFile":"/etc/jans/conf/configuration.json","cnConfiguratorDumpFile":"/etc/jans/conf/configuration.out.json","cnCouchbasePasswordFile":"/etc/jans/conf/couchbase_password","cnCouchbaseSuperuserPasswordFile":"/etc/jans/conf/couchbase_superuser_password","cnDocumentStoreType":"DB","cnGoogleApplicationCredentials":"/etc/jans/conf/google-credentials.json","cnLdapCacertFile":"/etc/certs/opendj.pem","cnLdapCertFile":"/etc/certs/opendj.crt","cnLdapKeyFile":"/etc/certs/opendj.key","cnLdapPasswordFile":"/etc/jans/conf/ldap_password","cnLdapTruststoreFile":"/etc/certs/opendj.pkcs12","cnLdapTruststorePasswordFile":"/etc/jans/conf/ldap_truststore_password","cnPersistenceType":"sql","cnPrometheusPort":"","cnSqlPasswordFile":"/etc/jans/conf/sql_password","config":{"customAnnotations":{"clusterRoleBinding":{},"configMap":{},"job":{},"role":{},"roleBinding":{},"secret":{},"service":{},"serviceAccount":{}},"enabled":true},"config-api":{"appLoggers":{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","configApiServerServiceName":"config-api","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"configApiAdditionalAnnotations":{},"configAp
iEnabled":true,"configApiLabels":{}},"plugins":"fido2,scim,user-mgt"},"configAdapterName":"kubernetes","configSecretAdapter":"kubernetes","fido2":{"appLoggers":{"enableStdoutLogPrefix":"true","fido2LogLevel":"INFO","fido2LogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"fido2ServiceName":"fido2","ingress":{"fido2AdditionalAnnotations":{},"fido2ConfigAdditionalAnnotations":{},"fido2ConfigEnabled":false,"fido2ConfigLabels":{},"fido2Enabled":false,"fido2Labels":{}}},"fqdn":"demoexample.jans.io","gcePdStorageType":"pd-standard","isFqdnRegistered":false,"istio":{"additionalAnnotations":{},"additionalLabels":{},"enabled":false,"gateways":[],"ingress":false,"namespace":"istio-system"},"jobTtlSecondsAfterFinished":300,"kc-scheduler":{"enabled":false},"kcAdminCredentialsFile":"/etc/jans/conf/kc_admin_creds","kcDbPasswordFile":"/etc/jans/conf/kc_db_password","lbIp":"22.22.22.22","link":{"appLoggers":{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"linkEnabled":true},"linkServiceName":"link"},"nginx-ingress":{"enabled":true},"opendj":{"customAnnotations":{"cronjob":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"statefulset":{},"storageClass":{}},"enabled":false,"ldapServiceName":"opendj"},"persistence":{"customAnnotations":{"job":{},"secret":{},"service":{}},"enabled":true},"saml":{"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"samlAdditionalAnnotations":{},"samlEnabled":false,"samlLabels":{}},"samlServiceName":"saml"},"scim":{"appLoggers":{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"scimAdditionalAnnotations":{},"scimConfigAdditionalAnnotations":{},"scimConfigEnabled":false,"scimConfigLabels":{},"scimEnabled":false,"scimLabels":{}},"scimServiceName":"scim"},"serviceAccountName":"default","storageClass":{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"},"usrEnvs":{"normal":{},"secret":{}}}` | Parameters used globally across all services helm charts. 
| +| global | object | `{"alb":{"ingress":false},"auth-server":{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authServerServiceName":"auth-server","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false},"auth-server-key-rotation":{"customAnnotations":{"cronjob":{},"secret":{},"service":{}},"enabled":true,"initKeysLife":48},"awsStorageType":"io1","azureStorageAccountType":"Standard_LRS","azureStorageKind":"Managed","casa":{"appLoggers":{"casaLogLevel":"INFO","casaLogTarget":"STDOUT","enableStdoutLogPrefix":"true","timerLogLevel":"INFO","timerLogTarget":"FILE"},"casaServiceName":"casa","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"casaAdditionalAnnotations":{},"casaEnabled":false,"casaLabels":{}}},"cloud":{"testEnviroment":false},"cnAwsConfigFile":"/etc/jans/conf/aws_config_file","cnAwsSecretsReplicaRegionsFile":"/etc/jans/conf/aws_secrets_replica_regions","cnAwsSharedCredentialsFile":"/etc/jans/conf/aws_shared_credential_file","cnConfiguratorConfigurationFile":"/etc/jans/conf/configuration.json","cnConfiguratorDumpFile":"/etc/jans/conf/configuration.out.json","cnCouchbasePasswordFile":"/etc/jans/conf/couchbase_password","cnCouchbaseSuperuserPasswordFile":"/etc/jans/conf/couchbase_superuser_password","cnDocumentStoreType":"DB","cnGoogleApplicationCredentials":"/etc/jans/conf/google-credentials.json","cnPersistenceType":"sql","cnPrometheusPort":"","cnSqlPasswordFile":"/etc/jans/conf/sql_password","config":{"customAnnotations":{"clusterRoleBinding":{},"configMap":{},"job":{},"role":{},"roleBinding":{},"secret":{},"service":{},"serviceAccount":{}},"enabled":true},"config-api":{"appLoggers":{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","configApiServerServiceName":"config-a
pi","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"configApiAdditionalAnnotations":{},"configApiEnabled":true,"configApiLabels":{}},"plugins":"fido2,scim,user-mgt"},"configAdapterName":"kubernetes","configSecretAdapter":"kubernetes","fido2":{"appLoggers":{"enableStdoutLogPrefix":"true","fido2LogLevel":"INFO","fido2LogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"fido2ServiceName":"fido2","ingress":{"fido2AdditionalAnnotations":{},"fido2ConfigAdditionalAnnotations":{},"fido2ConfigEnabled":false,"fido2ConfigLabels":{},"fido2Enabled":false,"fido2Labels":{}}},"fqdn":"demoexample.jans.io","gcePdStorageType":"pd-standard","isFqdnRegistered":false,"istio":{"additionalAnnotations":{},"additionalLabels":{},"enabled":false,"gateways":[],"ingress":false,"namespace":"istio-system"},"jobTtlSecondsAfterFinished":300,"kc-scheduler":{"enabled":false},"kcAdminCredentialsFile":"/etc/jans/conf/kc_admin_creds","kcDbPasswordFile":"/etc/jans/conf/kc_db_password","lbIp":"22.22.22.22","link":{"appLoggers":{"enableStdoutLogPrefix":"true","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"linkEnabled":true},"linkServiceName":"link"},"nginx-ingress":{"enabled":true},"persistence":{"customAnnotations":{"job":{},"secret":{},"service":{}},"enabled":true},"saml":{"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"samlAdditionalAnnotations":{},"samlEnabled":false,"samlLabels":{}},"samlServiceName":"saml"},"scim":{"appLoggers":{"enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"scimAdditionalAnnotations":{},"scimConfigAdditionalAnnotations":{},"scimConfigEnabled":false,"scimConfigLabels":{},"scimEnabled":false,"scimLabels":{}},"scimServiceName":"scim"},"serviceAccountName":"default","storageClass":{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"},"usrEnvs":{"normal":{},"secret":{}}}` | Parameters used globally across all services helm charts. 
|
 | global.alb.ingress | bool | `false` | Activates ALB ingress |
 | global.auth-server-key-rotation.enabled | bool | `true` | Boolean flag to enable/disable the auth-server-key rotation cronjob chart. |
 | global.auth-server-key-rotation.initKeysLife | int | `48` | The initial auth server key rotation keys life in hours |
-| global.auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
+| global.auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
 | global.auth-server.appLoggers.auditStatsLogLevel | string | `"INFO"` | jans-auth_audit.log level |
 | global.auth-server.appLoggers.auditStatsLogTarget | string | `"FILE"` | jans-auth_script.log target |
 | global.auth-server.appLoggers.authLogLevel | string | `"INFO"` | jans-auth.log level |
@@ -285,8 +279,6 @@ Kubernetes: `>=v1.22.0-0`
 | global.auth-server.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e auth-server-script ===> 2022-12-20 17:49:55,744 INFO |
 | global.auth-server.appLoggers.httpLogLevel | string | `"INFO"` | http_request_response.log level |
 | global.auth-server.appLoggers.httpLogTarget | string | `"FILE"` | http_request_response.log target |
-| global.auth-server.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-auth_persistence_ldap_statistics.log level |
-| global.auth-server.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-auth_persistence_ldap_statistics.log target |
 | global.auth-server.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-auth_persistence_duration.log level |
 | global.auth-server.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-auth_persistence_duration.log target |
 | global.auth-server.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-auth_persistence.log level |
@@ -353,21 +345,13 @@ Kubernetes: `>=v1.22.0-0`
 | global.cnCouchbaseSuperuserPasswordFile | string | `"/etc/jans/conf/couchbase_superuser_password"` | Path to Couchbase superuser password file |
 | global.cnDocumentStoreType | string | `"DB"` | Document store type to use for shibboleth files DB. |
 | global.cnGoogleApplicationCredentials | string | `"/etc/jans/conf/google-credentials.json"` | Base64 encoded service account. The sa must have roles/secretmanager.admin to use Google secrets and roles/spanner.databaseUser to use Spanner. Leave as this is a sensible default. |
-| global.cnLdapCacertFile | string | `"/etc/certs/opendj.pem"` | Path to OpenDJ CA cert file |
-| global.cnLdapCertFile | string | `"/etc/certs/opendj.crt"` | Path to OpenDJ cert file |
-| global.cnLdapKeyFile | string | `"/etc/certs/opendj.key"` | Path to OpenDJ key file |
-| global.cnLdapPasswordFile | string | `"/etc/jans/conf/ldap_password"` | Path to LDAP password file |
-| global.cnLdapTruststoreFile | string | `"/etc/certs/opendj.pkcs12"` | Path to OpenDJ truststore file |
-| global.cnLdapTruststorePasswordFile | string | `"/etc/jans/conf/ldap_truststore_password"` | Path to LDAP truststore password file |
-| global.cnPersistenceType | string | `"sql"` | Persistence backend to run Janssen with ldap|couchbase|hybrid|sql|spanner. |
+| global.cnPersistenceType | string | `"sql"` | Persistence backend to run Janssen with couchbase|hybrid|sql|spanner. |
 | global.cnPrometheusPort | string | `""` | Port used by Prometheus JMX agent (default to empty string). To enable Prometheus JMX agent, set the value to a number. |
 | global.cnSqlPasswordFile | string | `"/etc/jans/conf/sql_password"` | Path to SQL password file |
-| global.config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
+| global.config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
 | global.config-api.appLoggers.configApiLogLevel | string | `"INFO"` | configapi.log level |
 | global.config-api.appLoggers.configApiLogTarget | string | `"STDOUT"` | configapi.log target |
 | global.config-api.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e config-api_persistence ===> 2022-12-20 17:49:55,744 INFO |
-| global.config-api.appLoggers.ldapStatsLogLevel | string | `"INFO"` | config-api_persistence_ldap_statistics.log level |
-| global.config-api.appLoggers.ldapStatsLogTarget | string | `"FILE"` | config-api_persistence_ldap_statistics.log target |
 | global.config-api.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | config-api_persistence_duration.log level |
 | global.config-api.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | config-api_persistence_duration.log target |
 | global.config-api.appLoggers.persistenceLogLevel | string | `"INFO"` | config-api_persistence.log level |
@@ -418,10 +402,8 @@ Kubernetes: `>=v1.22.0-0`
 | global.kcAdminCredentialsFile | string | `"/etc/jans/conf/kc_admin_creds"` | Path to file contains Keycloak admin credentials (username and password) |
 | global.kcDbPasswordFile | string | `"/etc/jans/conf/kc_db_password"` | Path to file contains password for database access |
 | global.lbIp | string | `"22.22.22.22"` | The Loadbalancer IP created by nginx or istio on clouds that provide static IPs. This is not needed if `global.fqdn` is globally resolvable. |
-| global.link.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
+| global.link.appLoggers | object | `{"enableStdoutLogPrefix":"true","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
 | global.link.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e link-persistence ===> 2022-12-20 17:49:55,744 INFO |
-| global.link.appLoggers.ldapStatsLogLevel | string | `"INFO"` | cacherefresh_persistence_ldap_statistics.log level |
-| global.link.appLoggers.ldapStatsLogTarget | string | `"FILE"` | cacherefresh_persistence_ldap_statistics.log target |
 | global.link.appLoggers.linkLogLevel | string | `"INFO"` | cacherefresh.log level |
 | global.link.appLoggers.linkLogTarget | string | `"STDOUT"` | cacherefresh.log target |
 | global.link.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | cacherefresh_persistence_duration.log level |
@@ -435,8 +417,6 @@ Kubernetes: `>=v1.22.0-0`
 | global.link.ingress | object | `{"linkEnabled":true}` | Enable endpoints in either istio or nginx ingress depending on users choice |
 | global.link.linkServiceName | string | `"link"` | Name of the link service. Please keep it as default. |
 | global.nginx-ingress.enabled | bool | `true` | Boolean flag to enable/disable the nginx-ingress definitions chart. |
-| global.opendj.enabled | bool | `false` | Boolean flag to enable/disable the OpenDJ chart. |
-| global.opendj.ldapServiceName | string | `"opendj"` | Name of the OpenDJ service. Please keep it as default. |
 | global.persistence.enabled | bool | `true` | Boolean flag to enable/disable the persistence chart. |
 | global.saml.cnCustomJavaOptions | string | `""` | passing custom java options to saml. DO NOT PASS JAVA_OPTIONS in envs. |
 | global.saml.enabled | bool | `false` | Boolean flag to enable/disable the saml chart. |
@@ -444,10 +424,8 @@ Kubernetes: `>=v1.22.0-0`
 | global.saml.ingress.samlAdditionalAnnotations | object | `{}` | SAML ingress resource additional annotations. |
 | global.saml.ingress.samlLabels | object | `{}` | SAML config ingress resource labels. key app is taken |
 | global.saml.samlServiceName | string | `"saml"` | Name of the saml service. Please keep it as default. |
-| global.scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
+| global.scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. |
 | global.scim.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e jans-scim ===> 2022-12-20 17:49:55,744 INFO |
-| global.scim.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-scim_persistence_ldap_statistics.log level |
-| global.scim.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-scim_persistence_ldap_statistics.log target |
 | global.scim.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-scim_persistence_duration.log level |
 | global.scim.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-scim_persistence_duration.log target |
 | global.scim.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-scim_persistence.log level |
@@ -467,7 +445,7 @@ Kubernetes: `>=v1.22.0-0`
 | global.scim.ingress.scimLabels | object | `{}` | SCIM config ingress resource labels. key app is taken |
 | global.scim.scimServiceName | string | `"scim"` | Name of the scim service. Please keep it as default. |
 | global.serviceAccountName | string | `"default"` | service account used by Kubernetes resources |
-| global.storageClass | object | `{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"}` | StorageClass section for OpenDJ charts. This is not currently used by the openbanking distribution. You may specify custom parameters as needed. |
+| global.storageClass | object | `{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"}` | StorageClass section. This is not currently used by the openbanking distribution. You may specify custom parameters as needed.
| | global.storageClass.parameters | object | `{}` | parameters: fsType: "" kind: "" pool: "" storageAccountType: "" type: "" | | global.usrEnvs | object | `{"normal":{},"secret":{}}` | Add custom normal and secret envs to the service. Envs defined in global.userEnvs will be globally available to all services | | global.usrEnvs.normal | object | `{}` | Add custom normal envs to the service. variable1: value1 | @@ -526,37 +504,6 @@ Kubernetes: `>=v1.22.0-0` | nginx-ingress.ingress.additionalAnnotations | object | `{}` | Additional annotations that will be added across all ingress definitions in the format of {cert-manager.io/issuer: "letsencrypt-prod"} Enable client certificate authentication nginx.ingress.kubernetes.io/auth-tls-verify-client: "optional" Create the secret containing the trusted ca certificates nginx.ingress.kubernetes.io/auth-tls-secret: "janssen/tls-certificate" Specify the verification depth in the client certificates chain nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1" Specify if certificates are passed to upstream server nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true" | | nginx-ingress.ingress.additionalLabels | object | `{}` | Additional labels that will be added across all ingress definitions in the format of {mylabel: "myapp"} | | nginx-ingress.ingress.tls | list | `[{"hosts":["demoexample.jans.io"],"secretName":"tls-certificate"}]` | Secrets holding HTTPS CA cert and key. | -| opendj | object | `{"additionalAnnotations":{},"additionalLabels":{},"backup":{"cronJobSchedule":"*/59 * * * *","enabled":true},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","hpa":{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50},"image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"gluufederation/opendj","tag":"5.0.0_dev"},"lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","python3 /app/scripts/deregister_peer.py 1>&/proc/1/fd/1"]}}},"livenessProbe":{"exec":{"command":["python3","/app/scripts/healthcheck.py"]},"failureThreshold":20,"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5},"pdb":{"enabled":true,"maxUnavailable":1},"persistence":{"size":"5Gi"},"ports":{"tcp-admin":{"nodePort":"","port":4444,"protocol":"TCP","targetPort":4444},"tcp-ldap":{"nodePort":"","port":1389,"protocol":"TCP","targetPort":1389},"tcp-ldaps":{"nodePort":"","port":1636,"protocol":"TCP","targetPort":1636},"tcp-repl":{"nodePort":"","port":8989,"protocol":"TCP","targetPort":8989},"tcp-serf":{"nodePort":"","port":7946,"protocol":"TCP","targetPort":7946},"udp-serf":{"nodePort":"","port":7946,"protocol":"UDP","targetPort":7946}},"readinessProbe":{"failureThreshold":20,"initialDelaySeconds":60,"periodSeconds":25,"tcpSocket":{"port":1636},"timeoutSeconds":5},"replicas":1,"resources":{"limits":{"cpu":"1500m","memory":"2000Mi"},"requests":{"cpu":"1500m","memory":"2000Mi"}},"topologySpreadConstraints":{},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. 
| -| opendj.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | -| opendj.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | -| opendj.backup | object | `{"cronJobSchedule":"*/59 * * * *","enabled":true}` | Configure ldap backup cronjob | -| opendj.customScripts | list | `[]` | Add custom scripts that have been mounted to run before the entrypoint. - /tmp/custom.sh - /tmp/custom2.sh | -| opendj.dnsConfig | object | `{}` | Add custom dns config | -| opendj.dnsPolicy | string | `""` | Add custom dns policy | -| opendj.hpa | object | `{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50}` | Configure the HorizontalPodAutoscaler | -| opendj.hpa.behavior | object | `{}` | Scaling Policies | -| opendj.hpa.metrics | list | `[]` | metrics if targetCPUUtilizationPercentage is not set | -| opendj.image.pullPolicy | string | `"IfNotPresent"` | Image pullPolicy to use for deploying. | -| opendj.image.pullSecrets | list | `[]` | Image Pull Secrets | -| opendj.image.repository | string | `"gluufederation/opendj"` | Image to use for deploying. | -| opendj.image.tag | string | `"5.0.0_dev"` | Image tag to use for deploying. | -| opendj.livenessProbe | object | `{"exec":{"command":["python3","/app/scripts/healthcheck.py"]},"failureThreshold":20,"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5}` | Configure the liveness healthcheck for OpenDJ if needed. https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py | -| opendj.livenessProbe.exec | object | `{"command":["python3","/app/scripts/healthcheck.py"]}` | Executes the python3 healthcheck. | -| opendj.pdb | object | `{"enabled":true,"maxUnavailable":1}` | Configure the PodDisruptionBudget | -| opendj.persistence.size | string | `"5Gi"` | OpenDJ volume size | -| opendj.readinessProbe | object | `{"failureThreshold":20,"initialDelaySeconds":60,"periodSeconds":25,"tcpSocket":{"port":1636},"timeoutSeconds":5}` | Configure the readiness healthcheck for OpenDJ if needed. https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py | -| opendj.replicas | int | `1` | Service replica number. | -| opendj.resources | object | `{"limits":{"cpu":"1500m","memory":"2000Mi"},"requests":{"cpu":"1500m","memory":"2000Mi"}}` | Resource specs. | -| opendj.resources.limits.cpu | string | `"1500m"` | CPU limit. | -| opendj.resources.limits.memory | string | `"2000Mi"` | Memory limit. | -| opendj.resources.requests.cpu | string | `"1500m"` | CPU request. | -| opendj.resources.requests.memory | string | `"2000Mi"` | Memory request. | -| opendj.topologySpreadConstraints | object | `{}` | Configure the topology spread constraints. 
Notice this is a map NOT a list as in the upstream API https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | -| opendj.usrEnvs | object | `{"normal":{},"secret":{}}` | Add custom normal and secret envs to the service | -| opendj.usrEnvs.normal | object | `{}` | Add custom normal envs to the service variable1: value1 | -| opendj.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | -| opendj.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | -| opendj.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | | persistence | object | `{"additionalAnnotations":{},"additionalLabels":{},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/persistence-loader","tag":"1.1.6_dev"},"lifecycle":{},"resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Job to generate data and initial config for Janssen Server persistence layer. | | persistence.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | | persistence.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | @@ -639,4 +586,4 @@ Kubernetes: `>=v1.22.0-0` | scim.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/janssen/charts/auth-server-key-rotation/templates/cronjobs.yaml b/charts/janssen/charts/auth-server-key-rotation/templates/cronjobs.yaml index 70eaf88a866..9eaa4eda017 100644 --- a/charts/janssen/charts/auth-server-key-rotation/templates/cronjobs.yaml +++ b/charts/janssen/charts/auth-server-key-rotation/templates/cronjobs.yaml @@ -76,9 +76,7 @@ spec: - configMapRef: name: {{ .Release.Name }}-global-user-custom-envs {{- end }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/auth-server/templates/deployment.yml b/charts/janssen/charts/auth-server/templates/deployment.yml index a43dcd976d0..ee24738cae8 100644 --- a/charts/janssen/charts/auth-server/templates/deployment.yml +++ b/charts/janssen/charts/auth-server/templates/deployment.yml @@ -107,9 +107,7 @@ spec: {{- toYaml .Values.livenessProbe | nindent 10 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 10 }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff 
--git a/charts/janssen/charts/casa/templates/deployment.yaml b/charts/janssen/charts/casa/templates/deployment.yaml index bb217964ebc..0c891bd60e8 100644 --- a/charts/janssen/charts/casa/templates/deployment.yaml +++ b/charts/janssen/charts/casa/templates/deployment.yaml @@ -112,9 +112,7 @@ spec: {{- toYaml .Values.livenessProbe | nindent 12 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 12 }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/config-api/templates/deployment.yaml b/charts/janssen/charts/config-api/templates/deployment.yaml index 5b598da5b08..fa97cf0f484 100644 --- a/charts/janssen/charts/config-api/templates/deployment.yaml +++ b/charts/janssen/charts/config-api/templates/deployment.yaml @@ -99,9 +99,7 @@ spec: - mountPath: {{ .Values.global.cnConfiguratorConfigurationFile }} name: {{ .Release.Name }}-configuration-file subPath: {{ .Values.global.cnConfiguratorConfigurationFile | base }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/config/README.md b/charts/janssen/charts/config/README.md index 298c162da12..434fb56b0e1 100644 --- a/charts/janssen/charts/config/README.md +++ b/charts/janssen/charts/config/README.md @@ -53,13 +53,10 @@ Kubernetes: `>=v1.22.0-0` | configmap.cnGoogleSpannerDatabaseId | string | `""` | Google Spanner Database ID. Used only when global.cnPersistenceType is spanner. | | configmap.cnGoogleSpannerInstanceId | string | `""` | Google Spanner ID. Used only when global.cnPersistenceType is spanner. | | configmap.cnJettyRequestHeaderSize | int | `8192` | Jetty header size in bytes in the auth server | -| configmap.cnLdapCrt | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ certificate string. This must be encoded using base64. | -| configmap.cnLdapKey | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ key string. This must be encoded using base64. | -| configmap.cnLdapUrl | string | `"opendj:1636"` | OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`. | | configmap.cnMaxRamPercent | string | `"75.0"` | Value passed to Java option -XX:MaxRAMPercentage | | configmap.cnMessageType | string | `"DISABLED"` | Message type (one of POSTGRES, REDIS, or DISABLED) | | configmap.cnOpaUrl | string | `"http://opa.opa.svc.cluster.cluster.local:8181/v1"` | URL of OPA API | -| configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in each persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. 
{ "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | +| configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in each persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. { "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | | configmap.cnRedisSentinelGroup | string | `""` | Redis Sentinel Group. Often set when `config.configmap.cnRedisType` is set to `SENTINEL`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | configmap.cnRedisSslTruststore | string | `""` | Redis SSL truststore. Optional. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | configmap.cnRedisType | string | `"STANDALONE"` | Redis service type. `STANDALONE` or `CLUSTER`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | @@ -104,8 +101,6 @@ Kubernetes: `>=v1.22.0-0` | image.pullSecrets | list | `[]` | Image Pull Secrets | | image.repository | string | `"janssenproject/configurator"` | Image to use for deploying. | | image.tag | string | `"1.1.6_dev"` | Image tag to use for deploying. | -| ldapPassword | string | `"P@ssw0rds"` | LDAP admin password if OpennDJ is used for persistence. | -| ldapTruststorePassword | string | `"changeit"` | LDAP truststore password if OpenDJ is used for persistence | | lifecycle | object | `{}` | | | migration | object | `{"enabled":false,"migrationDataFormat":"ldif","migrationDir":"/ce-migration"}` | CE to CN Migration section | | migration.enabled | bool | `false` | Boolean flag to enable migration from CE | @@ -128,4 +123,4 @@ Kubernetes: `>=v1.22.0-0` | volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/janssen/charts/config/templates/_helpers.tpl b/charts/janssen/charts/config/templates/_helpers.tpl index b0f79a4a23c..ab0a7abce96 100644 --- a/charts/janssen/charts/config/templates/_helpers.tpl +++ b/charts/janssen/charts/config/templates/_helpers.tpl @@ -81,9 +81,6 @@ Create optional scopes list {{ if eq .Values.global.cnPersistenceType "sql" }} {{ $newList = append $newList ("sql" | quote) }} {{- end }} -{{- if .Values.global.opendj.enabled}} -{{ $newList = append $newList ("ldap" | quote) }} -{{- end}} {{ toJson $newList }} {{- end }} diff --git a/charts/janssen/charts/config/templates/configmaps.yaml b/charts/janssen/charts/config/templates/configmaps.yaml index b46ca2d8e1b..ab9c754d827 100644 --- a/charts/janssen/charts/config/templates/configmaps.yaml +++ b/charts/janssen/charts/config/templates/configmaps.yaml @@ -96,8 +96,6 @@ data: | replace "persistenceLogLevel" "persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "auditStatsLogTarget" "audit_log_target" @@ -114,8 +112,6 @@ data: | replace "persistenceLogLevel" 
"persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "enableStdoutLogPrefix" "enable_stdout_log_prefix" @@ -126,14 +122,7 @@ data: LB_ADDR: {{ .Values.configmap.lbAddr }} {{- end }} CN_PERSISTENCE_TYPE: {{ .Values.global.cnPersistenceType }} - {{- if or (eq .Values.global.cnPersistenceType "ldap") (eq .Values.global.cnPersistenceType "hybrid") }} - # used only if CN_PERSISTENCE_TYPE is ldap or hybrid - {{- if .Values.configmap.cnLdapUrl }} - CN_LDAP_URL: {{ .Values.configmap.cnLdapUrl | quote }} - {{- else }} - CN_LDAP_URL: {{ cat ( .Values.global.opendj.ldapServiceName ) ":1636" | quote | nospace }} - {{- end }} - {{- else if or (eq .Values.global.cnPersistenceType "couchbase") (eq .Values.global.cnPersistenceType "hybrid") }} + {{- if or (eq .Values.global.cnPersistenceType "couchbase") (eq .Values.global.cnPersistenceType "hybrid") }} # used only if CN_PERSISTENCE_TYPE is couchbase or hybrid CN_COUCHBASE_URL: {{ .Values.configmap.cnCouchbaseUrl }} CN_COUCHBASE_BUCKET_PREFIX: {{ .Values.configmap.cnCouchbaseBucketPrefix }} @@ -151,9 +140,7 @@ data: {{- end }} CN_CONTAINER_MAIN_NAME: {{ .Release.Name }}-auth-server # options: default/user/site/cache/statistic used only if CN_PERSISTENCE_TYPE is hybrid or hybrid - {{- if or (eq .Values.global.cnPersistenceType "hybrid") (eq .Values.global.cnPersistenceType "ldap") }} - # must the same as the opendj service name - CN_CERT_ALT_NAME: {{ .Values.global.opendj.ldapServiceName }} #{{ template "cn.fullname" . 
}}-service + {{- if (eq .Values.global.cnPersistenceType "hybrid") }} CN_HYBRID_MAPPING: {{ .Values.configmap.cnPersistenceHybridMapping | quote }} {{- end }} # Auto enable installation of some services @@ -168,7 +155,6 @@ data: {{- end }} {{- if .Values.global.istio.enabled }} CN_COUCHBASE_TRUSTSTORE_ENABLE: "false" - CN_LDAP_USE_SSL: "false" {{- end }} {{- if .Values.global.scim.enabled }} CN_SCIM_ENABLED: {{ .Values.global.scim.enabled | quote }} @@ -181,8 +167,6 @@ data: | replace "persistenceLogLevel" "persistence_log_level" | replace "persistenceDurationLogTarget" "persistence_duration_log_target" | replace "persistenceDurationLogLevel" "persistence_duration_log_level" - | replace "ldapStatsLogTarget" "ldap_stats_log_target" - | replace "ldapStatsLogLevel" "ldap_stats_log_level" | replace "scriptLogTarget" "script_log_target" | replace "scriptLogLevel" "script_log_level" | replace "enableStdoutLogPrefix" "enable_stdout_log_prefix" @@ -218,12 +202,6 @@ data: CN_SQL_PASSWORD_FILE: {{ .Values.global.cnSqlPasswordFile }} CN_COUCHBASE_PASSWORD_FILE: {{ .Values.global.cnCouchbasePasswordFile }} CN_COUCHBASE_SUPERUSER_PASSWORD_FILE: {{ .Values.global.cnCouchbaseSuperuserPasswordFile }} - CN_LDAP_PASSWORD_FILE: {{ .Values.global.cnLdapPasswordFile }} - CN_LDAP_TRUSTSTORE_PASSWORD_FILE: {{ .Values.global.cnLdapTruststorePasswordFile }} - CN_LDAP_CERT_FILE: {{ .Values.global.cnLdapCertFile }} - CN_LDAP_KEY_FILE: {{ .Values.global.cnLdapKeyFile }} - CN_LDAP_CACERT_FILE: {{ .Values.global.cnLdapCacertFile }} - CN_LDAP_TRUSTSTORE_FILE: {{ .Values.global.cnLdapTruststoreFile }} CN_CONFIG_API_PLUGINS: {{ index .Values "global" "config-api" "plugins" | quote }} {{- if .Values.global.saml.enabled }} QUARKUS_TRANSACTION_MANAGER_ENABLE_RECOVERY: {{ .Values.configmap.quarkusTransactionEnableRecovery | quote }} diff --git a/charts/janssen/charts/config/templates/secrets.yaml b/charts/janssen/charts/config/templates/secrets.yaml index df01954f66d..6a7c25025a5 100644 --- a/charts/janssen/charts/config/templates/secrets.yaml +++ b/charts/janssen/charts/config/templates/secrets.yaml @@ -36,12 +36,6 @@ stringData: }, "_secret": { "admin_password": {{ .Values.adminPassword | quote }}, - {{ if or ( eq .Values.global.cnPersistenceType "ldap" ) ( eq .Values.global.cnPersistenceType "hybrid" ) }} - "ldap_password": {{ .Values.ldapPassword | quote }}, - "ldap_truststore_pass": {{ .Values.ldapTruststorePassword | quote }}, - "ldap_ssl_cert": {{ .Values.configmap.cnLdapCrt | quote }}, - "ldap_ssl_key": {{ .Values.configmap.cnLdapKey | quote }}, - {{- end }} "redis_password": {{ .Values.redisPassword | quote }}, {{ if or ( eq .Values.global.cnPersistenceType "sql" ) ( eq .Values.global.cnPersistenceType "hybrid" ) }} "sql_password": {{ .Values.configmap.cnSqldbUserPassword | quote }}, diff --git a/charts/janssen/charts/config/values.yaml b/charts/janssen/charts/config/values.yaml index e4cd56db847..4566195a34b 100644 --- a/charts/janssen/charts/config/values.yaml +++ b/charts/janssen/charts/config/values.yaml @@ -110,18 +110,16 @@ configmap: # -- Path to Vault AppRole. cnVaultAppRolePath: approle # [vault_envs] END - # -- OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`. - cnLdapUrl: "opendj:1636" # -- Value passed to Java option -XX:MaxRAMPercentage cnMaxRamPercent: "75.0" # -- Specify data that should be saved in each persistence (one of default, user, cache, site, token, or session; default to default). 
Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. #{ - # "default": "", - # "user": "", - # "site": "", - # "cache": "", - # "token": "", - # "session": "", + # "default": "", + # "user": "", + # "site": "", + # "cache": "", + # "token": "", + # "session": "", #} cnPersistenceHybridMapping: "{}" # -- Redis Sentinel Group. Often set when `config.configmap.cnRedisType` is set to `SENTINEL`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. @@ -138,10 +136,6 @@ configmap: cnSecretKubernetesSecret: cn # -- Loadbalancer address for AWS if the FQDN is not registered. lbAddr: "" - # -- OpenDJ certificate string. This must be encoded using base64. - cnLdapCrt: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= - # -- OpenDJ key string. This must be encoded using base64. - cnLdapKey: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= # -- Quarkus transaction recovery. When using MySQL, there could be issue regarding XA_RECOVER_ADMIN; refer to https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_xa-recover-admin for details. quarkusTransactionEnableRecovery: true # -- Keycloak logging level @@ -177,10 +171,6 @@ image: tag: 1.1.6_dev # -- Image Pull Secrets pullSecrets: [ ] -# -- LDAP admin password if OpennDJ is used for persistence. -ldapPassword: P@ssw0rds -# -- LDAP truststore password if OpenDJ is used for persistence -ldapTruststorePassword: changeit # -- Organization name. Used for certificate creation. orgName: Janssen # -- Redis admin password if `config.configmap.cnCacheType` is set to `REDIS`. diff --git a/charts/janssen/charts/fido2/templates/deployment.yml b/charts/janssen/charts/fido2/templates/deployment.yml index 588dcb12ccc..63f2b69072a 100644 --- a/charts/janssen/charts/fido2/templates/deployment.yml +++ b/charts/janssen/charts/fido2/templates/deployment.yml @@ -107,9 +107,7 @@ spec: {{- toYaml .Values.livenessProbe | nindent 10 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 10 }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/kc-scheduler/templates/cronjobs.yaml b/charts/janssen/charts/kc-scheduler/templates/cronjobs.yaml index df2cfe31f7b..866692cbc90 100644 --- a/charts/janssen/charts/kc-scheduler/templates/cronjobs.yaml +++ b/charts/janssen/charts/kc-scheduler/templates/cronjobs.yaml @@ -71,9 +71,7 @@ spec: - configMapRef: name: {{ .Release.Name }}-global-user-custom-envs {{- end }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/link/templates/deployment.yaml b/charts/janssen/charts/link/templates/deployment.yaml index 5a678566075..4c2fbf0998d 100644 --- a/charts/janssen/charts/link/templates/deployment.yaml +++ b/charts/janssen/charts/link/templates/deployment.yaml @@ -98,9 +98,7 @@ spec: - mountPath: {{ .Values.global.cnConfiguratorConfigurationFile }} name: {{ .Release.Name }}-configuration-file subPath: {{ 
.Values.global.cnConfiguratorConfigurationFile | base }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/opendj/.helmignore b/charts/janssen/charts/opendj/.helmignore deleted file mode 100644 index f0c13194444..00000000000 --- a/charts/janssen/charts/opendj/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/janssen/charts/opendj/Chart.yaml b/charts/janssen/charts/opendj/Chart.yaml deleted file mode 100644 index ec213d6ddd2..00000000000 --- a/charts/janssen/charts/opendj/Chart.yaml +++ /dev/null @@ -1,19 +0,0 @@ - -apiVersion: v2 -name: opendj -version: 1.1.6-dev -kubeVersion: ">=v1.22.0-0" -description: OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. -type: application -keywords: - - LDAP - - OpenDJ -home: https://jans.io -sources: - - https://github.com/GluuFederation/docker-opendj -maintainers: - - name: Mohammad Abudayyeh - email: support@jans.io - url: https://github.com/moabu -icon: https://github.com/JanssenProject/jans/raw/main/docs/assets/logo/janssen_project_favicon_transparent_50px_50px.png -appVersion: "1.1.6-dev" \ No newline at end of file diff --git a/charts/janssen/charts/opendj/README.md b/charts/janssen/charts/opendj/README.md deleted file mode 100644 index d4569f35579..00000000000 --- a/charts/janssen/charts/opendj/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# opendj - -![Version: 1.1.6-dev](https://img.shields.io/badge/Version-1.1.6--dev-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.1.6-dev](https://img.shields.io/badge/AppVersion-1.1.6--dev-informational?style=flat-square) - -OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. - -**Homepage:** - -## Maintainers - -| Name | Email | Url | -| ---- | ------ | --- | -| Mohammad Abudayyeh | | | - -## Source Code - -* - -## Requirements - -Kubernetes: `>=v1.22.0-0` - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| additionalAnnotations | object | `{}` | Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. 
key app is taken | -| additionalLabels | object | `{}` | Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"} | -| customScripts | list | `[]` | Add custom scripts that have been mounted to run before the entrypoint. - /tmp/custom.sh - /tmp/custom2.sh | -| dnsConfig | object | `{}` | Add custom dns config | -| dnsPolicy | string | `""` | Add custom dns policy | -| fullnameOverride | string | `""` | | -| hpa | object | `{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50}` | Configure the HorizontalPodAutoscaler | -| hpa.behavior | object | `{}` | Scaling Policies | -| hpa.metrics | list | `[]` | metrics if targetCPUUtilizationPercentage is not set | -| image.pullPolicy | string | `"IfNotPresent"` | Image pullPolicy to use for deploying. | -| image.pullSecrets | list | `[]` | Image Pull Secrets | -| image.repository | string | `"gluufederation/opendj"` | Image to use for deploying. | -| image.tag | string | `"5.0.0_dev"` | Image tag to use for deploying. | -| lifecycle.preStop.exec.command[0] | string | `"/bin/sh"` | | -| lifecycle.preStop.exec.command[1] | string | `"-c"` | | -| lifecycle.preStop.exec.command[2] | string | `"python3 /app/scripts/deregister_peer.py 1>&/proc/1/fd/1"` | | -| livenessProbe | object | `{"exec":{"command":["python3","/app/scripts/healthcheck.py"]},"failureThreshold":20,"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5}` | Configure the liveness healthcheck for OpenDJ if needed. | -| livenessProbe.exec | object | `{"command":["python3","/app/scripts/healthcheck.py"]}` | Executes the python3 healthcheck. | -| nameOverride | string | `""` | | -| openDjVolumeMounts.config.mountPath | string | `"/opt/opendj/config"` | | -| openDjVolumeMounts.config.name | string | `"opendj-volume"` | | -| openDjVolumeMounts.db.mountPath | string | `"/opt/opendj/db"` | | -| openDjVolumeMounts.db.name | string | `"opendj-volume"` | | -| openDjVolumeMounts.flag.mountPath | string | `"/flag"` | | -| openDjVolumeMounts.flag.name | string | `"opendj-volume"` | | -| openDjVolumeMounts.ldif.mountPath | string | `"/opt/opendj/ldif"` | | -| openDjVolumeMounts.ldif.name | string | `"opendj-volume"` | | -| openDjVolumeMounts.logs.mountPath | string | `"/opt/opendj/logs"` | | -| openDjVolumeMounts.logs.name | string | `"opendj-volume"` | | -| persistence.accessModes | string | `"ReadWriteOnce"` | | -| persistence.size | string | `"5Gi"` | OpenDJ volume size | -| persistence.type | string | `"DirectoryOrCreate"` | | -| ports | object | `{"tcp-admin":{"nodePort":"","port":4444,"protocol":"TCP","targetPort":4444},"tcp-ldap":{"nodePort":"","port":1389,"protocol":"TCP","targetPort":1389},"tcp-ldaps":{"nodePort":"","port":1636,"protocol":"TCP","targetPort":1636},"tcp-repl":{"nodePort":"","port":8989,"protocol":"TCP","targetPort":8989},"tcp-serf":{"nodePort":"","port":7946,"protocol":"TCP","targetPort":7946},"udp-serf":{"nodePort":"","port":7946,"protocol":"UDP","targetPort":7946}}` | servicePorts values used in StatefulSet container | -| readinessProbe | object | `{"failureThreshold":20,"initialDelaySeconds":60,"periodSeconds":25,"tcpSocket":{"port":1636},"timeoutSeconds":5}` | Configure the readiness healthcheck for OpenDJ if needed. | -| replicas | int | `1` | Service replica number. | -| resources | object | `{"limits":{"cpu":"1500m","memory":"2000Mi"},"requests":{"cpu":"1500m","memory":"2000Mi"}}` | Resource specs. | -| resources.limits.cpu | string | `"1500m"` | CPU limit. 
| -| resources.limits.memory | string | `"2000Mi"` | Memory limit. | -| resources.requests.cpu | string | `"1500m"` | CPU request. | -| resources.requests.memory | string | `"2000Mi"` | Memory request. | -| usrEnvs | object | `{"normal":{},"secret":{}}` | Add custom normal and secret envs to the service | -| usrEnvs.normal | object | `{}` | Add custom normal envs to the service variable1: value1 | -| usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | -| volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | -| volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/charts/janssen/charts/opendj/templates/_helpers.tpl b/charts/janssen/charts/opendj/templates/_helpers.tpl deleted file mode 100644 index ecab10b7530..00000000000 --- a/charts/janssen/charts/opendj/templates/_helpers.tpl +++ /dev/null @@ -1,98 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "opendj.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "opendj.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "opendj.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* - Common labels -*/}} -{{- define "opendj.labels" -}} -app: {{ .Release.Name }}-{{ include "opendj.name" . }} -helm.sh/chart: {{ include "opendj.chart" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create user custom defined envs -*/}} -{{- define "opendj.usr-envs"}} -{{- range $key, $val := .Values.usrEnvs.normal }} -- name: {{ $key }} - value: {{ $val | quote }} -{{- end }} -{{- end }} - -{{/* -Create user custom defined secret envs -*/}} -{{- define "opendj.usr-secret-envs"}} -{{- range $key, $val := .Values.usrEnvs.secret }} -- name: {{ $key }} - valueFrom: - secretKeyRef: - name: {{ $.Release.Name }}-{{ $.Chart.Name }}-user-custom-envs - key: {{ $key | quote }} -{{- end }} -{{- end }} - -{{/* -Create topologySpreadConstraints lists -*/}} -{{- define "opendj.topology-spread-constraints"}} -{{- range $key, $val := .Values.topologySpreadConstraints }} -- maxSkew: {{ $val.maxSkew }} - {{- if $val.minDomains }} - minDomains: {{ $val.minDomains }} # optional; beta since v1.25 - {{- end}} - {{- if $val.topologyKey }} - topologyKey: {{ $val.topologyKey }} - {{- end}} - {{- if $val.whenUnsatisfiable }} - whenUnsatisfiable: {{ $val.whenUnsatisfiable }} - {{- end}} - labelSelector: - matchLabels: - app: {{ $.Release.Name }}-{{ include "opendj.name" $ }} - {{- if $val.matchLabelKeys }} - matchLabelKeys: {{ $val.matchLabelKeys }} # optional; alpha since v1.25 - {{- end}} - {{- if $val.nodeAffinityPolicy }} - nodeAffinityPolicy: {{ $val.nodeAffinityPolicy }} # optional; alpha since v1.25 - {{- end}} - {{- if $val.nodeTaintsPolicy }} - nodeTaintsPolicy: {{ $val.nodeTaintsPolicy }} # optional; alpha since v1.25 - {{- end}} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/templates/cronjobs.yaml b/charts/janssen/charts/opendj/templates/cronjobs.yaml deleted file mode 100644 index 137334570ee..00000000000 --- a/charts/janssen/charts/opendj/templates/cronjobs.yaml +++ /dev/null @@ -1,114 +0,0 @@ -{{- if .Values.backup.enabled }} -kind: CronJob -apiVersion: batch/v1 -metadata: - name: {{ include "opendj.fullname" . }}-backup - namespace: {{ $.Release.Namespace }} - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if $.Values.additionalLabels }} -{{ toYaml $.Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.cronjob) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.cronjob }} -{{ toYaml .Values.global.opendj.customAnnotations.cronjob | indent 4 }} -{{- end }} -{{- end }} -spec: - schedule: {{ .Values.backup.cronJobSchedule | quote }} - concurrencyPolicy: Forbid - jobTemplate: - spec: - template: - spec: - dnsPolicy: {{ .Values.dnsPolicy | quote }} - {{- with .Values.dnsConfig }} - dnsConfig: - {{ toYaml . | indent 12 }} - {{- end }} - serviceAccountName: {{ .Values.global.serviceAccountName }} - containers: - - name: {{ include "opendj.fullname" . }}-backup - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - envFrom: - - configMapRef: - name: {{ .Release.Name }}-config-cm - ports: - {{- range $key, $value := .Values.ports }} - - containerPort: {{ $value.targetPort }} - name: {{ $key }} - {{- end }} - env: - - name: LDAP_HOST - valueFrom: - configMapKeyRef: - # ConfigMap generated by the Configuration chart when Janssen was installed. This is normally cn. 
- # Found in Janssen chart under config.configmap.cnConfigKubernetesConfigMap - name: cn - key: ldap_init_host - - name: LDAP_PORT - valueFrom: - configMapKeyRef: - # ConfigMap generated by the Configuration chart when Janssen was installed. This is normally cn. - # Found in Janssen chart under config.configmap.cnConfigKubernetesConfigMap - name: cn - key: ldap_init_port - - name: LDAP_BIND_DN - valueFrom: - configMapKeyRef: - # ConfigMap generated by the Configuration chart when Janssen was installed. This is normally cn. - # Found in Janssen chart under config.configmap.cnConfigKubernetesConfigMap - name: cn - key: ldap_site_binddn - - name: LDAP_PASSWORD - valueFrom: - secretKeyRef: - name: cn - key: ldap_password - # while true; do sleep 60; ldaplog=$(cat /opt/opendj/logs/server.out); startedstr="The Directory Server has started successfully"; if [ -z "${ldaplog##*$startedstr*}" ]; then break; fi; echo "Waiting for opendj server to start"; done - command: - - /bin/sh - - -c - - | - # ========= - # FUNCTIONS - # ========= - - set_java_args() { - # not sure if we can omit `-server` safely - local java_args="-server" - java_args="${java_args} -XX:+UseContainerSupport -XX:MaxRAMPercentage=${CN_MAX_RAM_PERCENTAGE} ${CN_JAVA_OPTIONS}" - # set the env var so it is loaded by `start-ds` script - export OPENDJ_JAVA_ARGS=${java_args} - } - - # ========== - # ENTRYPOINT - # ========== - - mkdir -p /opt/opendj/locks - - python3 /app/scripts/wait.py - python3 /app/scripts/bootstrap.py - - # run OpenDJ server - set_java_args - exec /opt/opendj/bin/start-ds -N & - sleep 300 - RANDOM_NUM=$(cat /dev/urandom | tr -cd '0-5' | head -c 1) - LDAP_BACKUP_FILE=backup-$RANDOM_NUM.ldif - /opt/opendj/bin/export-ldif --hostname "$LDAP_HOST" --port 4444 --bindDN "$LDAP_BIND_DN" --bindPassword "$LDAP_PASSWORD" --backendID userRoot --ldifFile /opt/opendj/ldif/$LDAP_BACKUP_FILE --trustAll - volumeMounts: - - mountPath: {{ .Values.global.cnConfiguratorConfigurationFile }} - name: {{ .Release.Name }}-configuration-file - subPath: {{ .Values.global.cnConfiguratorConfigurationFile | base }} - restartPolicy: Never - volumes: - - name: {{ .Release.Name }}-configuration-file - secret: - secretName: {{ .Release.Name }}-configuration-file -{{- end }} diff --git a/charts/janssen/charts/opendj/templates/hpa.yaml b/charts/janssen/charts/opendj/templates/hpa.yaml deleted file mode 100644 index 17764da7103..00000000000 --- a/charts/janssen/charts/opendj/templates/hpa.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{ if .Values.hpa.enabled -}} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "opendj.fullname" . }} - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.horizontalPodAutoscaler) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.horizontalPodAutoscaler }} -{{ toYaml .Values.global.opendj.customAnnotations.horizontalPodAutoscaler | indent 4 }} -{{- end }} -{{- end }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: StatefulSet - name: {{ include "opendj.fullname" . 
}} - minReplicas: {{ .Values.hpa.minReplicas }} - maxReplicas: {{ .Values.hpa.maxReplicas }} - {{- if .Values.hpa.targetCPUUtilizationPercentage }} - targetCPUUtilizationPercentage: {{ .Values.hpa.targetCPUUtilizationPercentage }} - {{- else if .Values.hpa.metrics }} - metrics: - {{- with .Values.hpa.metrics }} -{{- toYaml . | nindent 4 }} - {{- end }} - {{- end }} - {{- if .Values.hpa.behavior }} - behavior: - {{- with .Values.hpa.behavior }} -{{- toYaml . | nindent 4 }} - {{- end }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/templates/opendj-destination-rules.yaml b/charts/janssen/charts/opendj/templates/opendj-destination-rules.yaml deleted file mode 100644 index 9aafe4b7cc5..00000000000 --- a/charts/janssen/charts/opendj/templates/opendj-destination-rules.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if or (eq .Values.global.cnPersistenceType "ldap") (eq .Values.global.cnPersistenceType "hybrid") }} -{{- if .Values.global.istio.enabled }} -apiVersion: networking.istio.io/v1alpha3 -kind: DestinationRule -metadata: - name: {{ .Release.Name }}-ldap-mtls - namespace: {{.Release.Namespace}} - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.destinationRule) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.destinationRule }} -{{ toYaml .Values.global.opendj.customAnnotations.destinationRule | indent 4 }} -{{- end }} -{{- end }} -spec: - host: {{ .Values.global.opendj.ldapServiceName }}.{{ .Release.Namespace }}.svc.cluster.local - trafficPolicy: - tls: - mode: ISTIO_MUTUAL -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/templates/opendj-pdb.yaml b/charts/janssen/charts/opendj/templates/opendj-pdb.yaml deleted file mode 100644 index 50d6b577f2a..00000000000 --- a/charts/janssen/charts/opendj/templates/opendj-pdb.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{ if .Values.pdb.enabled -}} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: {{ include "opendj.fullname" . }} - labels: - APP_NAME: opendj -{{ include "opendj.labels" . 
| indent 4 }} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.podDisruptionBudget) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.podDisruptionBudget }} -{{ toYaml .Values.global.opendj.customAnnotations.podDisruptionBudget | indent 4 }} -{{- end }} -{{- end }} -spec: - maxUnavailable: {{ .Values.pdb.maxUnavailable }} - selector: - matchLabels: - app: {{ include "opendj.name" $ }} -{{- end }} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/templates/service.yaml b/charts/janssen/charts/opendj/templates/service.yaml deleted file mode 100644 index 764c6ce5387..00000000000 --- a/charts/janssen/charts/opendj/templates/service.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if or (eq .Values.global.cnPersistenceType "ldap") (eq .Values.global.cnPersistenceType "hybrid") }} -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Values.global.opendj.ldapServiceName }} - namespace: {{ $.Release.Namespace }} - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if $.Values.additionalLabels }} -{{ toYaml $.Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.service) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.service }} -{{ toYaml .Values.global.opendj.customAnnotations.service | indent 4 }} -{{- end }} -{{- end }} -spec: - ports: - {{- range $key, $value := $.Values.ports }} - - port: {{ $value.port }} - name: {{ $key }} - targetPort: {{ $value.targetPort }} - protocol: {{ $value.protocol}} - {{- if $value.nodePort }} - nodePort: {{ $value.nodePort }} - {{- end }} - {{- end }} - clusterIP: None - selector: - app: {{ include "opendj.name" $ }} -{{- end }} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/templates/statefulset.yaml b/charts/janssen/charts/opendj/templates/statefulset.yaml deleted file mode 100644 index 07bb4fcd311..00000000000 --- a/charts/janssen/charts/opendj/templates/statefulset.yaml +++ /dev/null @@ -1,119 +0,0 @@ -{{- if or (eq .Values.global.cnPersistenceType "ldap") (eq .Values.global.cnPersistenceType "hybrid") }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "opendj.fullname" $ }} - namespace: {{ $.Release.Namespace }} - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if $.Values.additionalLabels }} -{{ toYaml $.Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.statefulset) }} - annotations: -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.statefulset }} -{{ toYaml .Values.global.opendj.customAnnotations.statefulset | indent 4 }} -{{- end }} -{{- end }} -spec: - selector: - matchLabels: - app: {{ include "opendj.name" $ }} - serviceName: {{ include "opendj.name" $ }} - replicas: {{ $.Values.replicas }} - template: - metadata: - labels: - app: {{ include "opendj.name" $ }} - {{- if $.Values.global.istio.ingress }} - annotations: - sidecar.istio.io/rewriteAppHTTPProbers: "true" - {{- end }} - spec: - securityContext: - runAsNonRoot: true - runAsUser: 1000 
- runAsGroup: 1000 - fsGroup: 1000 - serviceAccountName: {{ .Values.global.serviceAccountName }} - containers: - - name: {{ include "opendj.name" $ }} - {{- if .Values.customScripts }} - command: - - /bin/sh - - -c - - | - {{- with .Values.customScripts }} - {{- toYaml . | replace "- " "" | nindent 14}} - {{- end }} - /app/scripts/entrypoint.sh - {{- end}} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" - env: - {{- include "opendj.usr-envs" $ | indent 12 }} - {{- include "opendj.usr-secret-envs" $ | indent 12 }} - lifecycle: -{{- toYaml .Values.lifecycle | nindent 10 }} - envFrom: - - configMapRef: - name: {{ $.Release.Name }}-config-cm - {{ if $.Values.global.usrEnvs.secret }} - - secretRef: - name: {{ $.Release.Name }}-global-user-custom-envs - {{- end }} - {{ if $.Values.global.usrEnvs.normal }} - - configMapRef: - name: {{ $.Release.Name }}-global-user-custom-envs - {{- end }} - ports: - {{- range $key, $value := $.Values.ports }} - - containerPort: {{ $value.targetPort }} - name: {{ $key }} - {{- end }} - volumeMounts: - {{- range $key, $values := $.Values.openDjVolumeMounts }} - - mountPath: {{$values.mountPath}} - name: {{$values.name}} - subPath: {{$key}} - {{- end }} - {{- with $.Values.volumeMounts }} -{{- toYaml . | nindent 10 }} - {{- end }} - - mountPath: {{ .Values.global.cnConfiguratorConfigurationFile }} - name: {{ .Release.Name }}-configuration-file - subPath: {{ .Values.global.cnConfiguratorConfigurationFile | base }} - livenessProbe: -{{- toYaml $.Values.livenessProbe | nindent 10 }} - readinessProbe: -{{- toYaml $.Values.readinessProbe | nindent 10 }} - {{- if or (eq $.Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq $.Values.global.storageClass.provisioner "k8s.io/minikube-hostpath") }} - resources: {} - {{- else if $.Values.global.cloud.testEnviroment }} - resources: {} - {{- else }} - resources: -{{- toYaml $.Values.resources | nindent 10 }} - {{- end }} - volumes: - - name: {{ .Release.Name }}-configuration-file - secret: - secretName: {{ .Release.Name }}-configuration-file - volumeClaimTemplates: - - metadata: - name: opendj-volume - spec: - accessModes: - - {{ $.Values.persistence.accessModes }} - resources: - requests: - storage: {{ $.Values.persistence.size }} - {{- if eq $.Values.global.storageClass.provisioner "k8s.io/minikube-hostpath" }} - storageClassName: standard - {{- else }} - storageClassName: {{ include "opendj.fullname" $ | quote }} - {{- end }} -{{- end }} diff --git a/charts/janssen/charts/opendj/templates/storageclass.yaml b/charts/janssen/charts/opendj/templates/storageclass.yaml deleted file mode 100644 index 6af1125ff01..00000000000 --- a/charts/janssen/charts/opendj/templates/storageclass.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{{- if or (eq .Values.global.cnPersistenceType "ldap") (eq .Values.global.cnPersistenceType "hybrid") }} -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ include "opendj.fullname" . 
}} - namespace: {{ .Release.Namespace }} - labels: - storage: opendj -{{ include "opendj.labels" $ | indent 4}} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 4 }} -{{- end }} - annotations: - "helm.sh/hook": pre-install - "helm.sh/hook-weight": "3" - "helm.sh/hook-delete-policy": before-hook-creation -{{- if .Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.storageClass }} -{{ toYaml .Values.global.opendj.customAnnotations.storageClass | indent 4 }} -{{- end }} - # Annotation below is to keep the storage class during upgrade. Otherwise, due to the flag at line 1 which is needed, this resource will be deleted. - helm.sh/resource-policy: keep - storageclass.beta.kubernetes.io/is-default-class: "false" - {{- if eq .Values.global.storageClass.provisioner "openebs.io/local" }} - openebs.io/cas-type: local - cas.openebs.io/config: | - - name: StorageType - value: hostpath - - name: BasePath - value: /var/local-hostpath - {{- end }} -provisioner: {{ .Values.global.storageClass.provisioner }} -{{- if and ( ne .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) ( ne .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath") ( ne .Values.global.storageClass.provisioner "kubernetes.io/aws-ebs") ( ne .Values.global.storageClass.provisioner "kubernetes.io/gce-pd") ( ne .Values.global.storageClass.provisioner "dobs.csi.digitalocean.com") ( ne .Values.global.storageClass.provisioner "openebs.io/local") ( ne .Values.global.storageClass.provisioner "kubernetes.io/azure-disk") }} -parameters: -{{ toYaml .Values.global.storageClass.parameters | indent 4 }} -{{- else }} -parameters: - {{- if eq .Values.global.storageClass.provisioner "kubernetes.io/aws-ebs" }} - type: {{ .Values.global.awsStorageType }} - fsType: ext4 - {{- else if eq .Values.global.storageClass.provisioner "kubernetes.io/gce-pd" }} - type: {{ .Values.global.gcePdStorageType }} - {{- else if eq .Values.global.storageClass.provisioner "kubernetes.io/azure-disk" }} - storageAccountType: {{ .Values.global.azureStorageAccountType }} - kind: {{ .Values.global.azureStorageKind }} - {{- else if eq .Values.global.storageClass.provisioner "dobs.csi.digitalocean.com" }} - {{- else if eq .Values.global.storageClass.provisioner "openebs.io/local" }} - {{- else }} - pool: default - fsType: ext4 - {{- end }} -{{- end }} -allowVolumeExpansion: {{ .Values.global.storageClass.allowVolumeExpansion }} -volumeBindingMode: {{ .Values.global.storageClass.volumeBindingMode }} -reclaimPolicy: {{ .Values.global.storageClass.reclaimPolicy }} -mountOptions: {{ .Values.global.storageClass.mountOptions | toJson }} -allowedTopologies: {{ .Values.global.storageClass.allowedTopologies | toJson }} -{{- end }} diff --git a/charts/janssen/charts/opendj/templates/user-custom-secret-envs.yaml b/charts/janssen/charts/opendj/templates/user-custom-secret-envs.yaml deleted file mode 100644 index edb84970f67..00000000000 --- a/charts/janssen/charts/opendj/templates/user-custom-secret-envs.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{ if .Values.usrEnvs.secret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Release.Name }}-{{ .Chart.Name }}-user-custom-envs - labels: -{{ include "opendj.labels" $ | indent 4}} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 4 }} -{{- end }} -{{- if or (.Values.additionalAnnotations) (.Values.global.opendj.customAnnotations.secret) }} - annotations: -{{- if 
.Values.additionalAnnotations }} -{{ toYaml .Values.additionalAnnotations | indent 4 }} -{{- end }} -{{- if .Values.global.opendj.customAnnotations.secret }} -{{ toYaml .Values.global.opendj.customAnnotations.secret | indent 4 }} -{{- end }} -{{- end }} -type: Opaque -data: - {{- range $key, $val := .Values.usrEnvs.secret }} - {{ $key }}: {{ $val | b64enc }} - {{- end}} -{{- end}} \ No newline at end of file diff --git a/charts/janssen/charts/opendj/values.yaml b/charts/janssen/charts/opendj/values.yaml deleted file mode 100644 index ec163f09272..00000000000 --- a/charts/janssen/charts/opendj/values.yaml +++ /dev/null @@ -1,140 +0,0 @@ - -# -- OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. -# -- Configure the HorizontalPodAutoscaler -hpa: - enabled: true - minReplicas: 1 - maxReplicas: 10 - targetCPUUtilizationPercentage: 50 - # -- metrics if targetCPUUtilizationPercentage is not set - metrics: [] - # -- Scaling Policies - behavior: {} -# -- Add custom normal and secret envs to the service -usrEnvs: - # -- Add custom normal envs to the service - # variable1: value1 - normal: {} - # -- Add custom secret envs to the service - # variable1: value1 - secret: {} -# -- Add custom dns policy -dnsPolicy: "" -# -- Add custom dns config -dnsConfig: {} -image: - # -- Image pullPolicy to use for deploying. - pullPolicy: IfNotPresent - # -- Image to use for deploying. - repository: gluufederation/opendj - # -- Image tag to use for deploying. - tag: 5.0.0_dev - # -- Image Pull Secrets - pullSecrets: [ ] -persistence: - # -- OpenDJ volume size - size: 5Gi - accessModes: ReadWriteOnce - type: DirectoryOrCreate -# -- servicePorts values used in StatefulSet container -ports: - tcp-admin: - nodePort: "" - port: 4444 - protocol: TCP - targetPort: 4444 - tcp-ldap: - nodePort: "" - port: 1389 - protocol: TCP - targetPort: 1389 - tcp-ldaps: - nodePort: "" - port: 1636 - protocol: TCP - targetPort: 1636 - tcp-repl: - nodePort: "" - port: 8989 - protocol: TCP - targetPort: 8989 - tcp-serf: - nodePort: "" - port: 7946 - protocol: TCP - targetPort: 7946 - udp-serf: - nodePort: "" - port: 7946 - protocol: UDP - targetPort: 7946 -# -- Service replica number. -replicas: 1 -# -- Resource specs. -resources: - limits: - # -- CPU limit. - cpu: 1500m - # -- Memory limit. - memory: 2000Mi - requests: - # -- CPU request. - cpu: 1500m - # -- Memory request. - memory: 2000Mi -# -- Configure the liveness healthcheck for OpenDJ if needed. -livenessProbe: - # -- Executes the python3 healthcheck. - exec: - command: - - python3 - - /app/scripts/healthcheck.py - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 20 -# -- Configure the readiness healthcheck for OpenDJ if needed. 
-readinessProbe: - tcpSocket: - port: 1636 - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 25 - failureThreshold: 20 -# -- Configure any additional volumes that need to be attached to the pod -volumes: [] -# -- Configure any additional volumesMounts that need to be attached to the containers -volumeMounts: [] -lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "python3 /app/scripts/deregister_peer.py 1>&/proc/1/fd/1"] -nameOverride: "" -fullnameOverride: "" -# VolumeMounts for StatefulSet -# opendj-init vm -openDjVolumeMounts: - config: - mountPath: /opt/opendj/config - name: opendj-volume - ldif: - mountPath: /opt/opendj/ldif - name: opendj-volume - logs: - mountPath: /opt/opendj/logs - name: opendj-volume - db: - mountPath: /opt/opendj/db - name: opendj-volume - flag: - mountPath: /flag - name: opendj-volume - -# -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"} -additionalLabels: { } -# -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken -additionalAnnotations: { } -# -- Add custom scripts that have been mounted to run before the entrypoint. -# - /tmp/custom.sh -# - /tmp/custom2.sh -customScripts: [ ] \ No newline at end of file diff --git a/charts/janssen/charts/saml/templates/deployment.yaml b/charts/janssen/charts/saml/templates/deployment.yaml index 482884b8b0e..24955318919 100644 --- a/charts/janssen/charts/saml/templates/deployment.yaml +++ b/charts/janssen/charts/saml/templates/deployment.yaml @@ -115,9 +115,7 @@ spec: {{- toYaml .Values.livenessProbe | nindent 12 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 12 }} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/charts/scim/templates/deployment.yml b/charts/janssen/charts/scim/templates/deployment.yml index 3abcd11638e..e6fb79bd6bf 100644 --- a/charts/janssen/charts/scim/templates/deployment.yml +++ b/charts/janssen/charts/scim/templates/deployment.yml @@ -72,9 +72,7 @@ spec: {{- end}} /app/scripts/entrypoint.sh {{- end}} - {{- if and ( .Values.global.opendj.enabled ) (or (eq .Values.global.storageClass.provisioner "microk8s.io/hostpath" ) (eq .Values.global.storageClass.provisioner "k8s.io/minikube-hostpath")) }} - resources: {} - {{- else if .Values.global.cloud.testEnviroment }} + {{- if .Values.global.cloud.testEnviroment }} resources: {} {{- else }} resources: diff --git a/charts/janssen/values.schema.json b/charts/janssen/values.schema.json index e28681e10b8..7b25bdb0c77 100644 --- a/charts/janssen/values.schema.json +++ b/charts/janssen/values.schema.json @@ -42,7 +42,7 @@ "cnSqlDbDialect": { "description": "SQL dialect", "type": "string", - "pattern": "^(mysql|pgsql|ldap)$" + "pattern": "^(mysql|pgsql)$" }, "cnSqlDbHost": { "description": "SQL server address or ip", @@ -163,11 +163,6 @@ "description": "Passphrase for Janssen secret in Google Secret Manager. This is used for encrypting and decrypting data from the Google Secret Manager. 
Used only when global.configAdapterName and global.configSecretAdapter is set to google.", "$ref": "#/definitions/password" }, - "cnLdapUrl": { - "description": "OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`.", - "type": "string", - "pattern": "^[a-z0-9-:]+$" - }, "cnMaxRamPercent": { "description": "Value passed to Java option -XX:MaxRAMPercentage", "type": "string", @@ -179,7 +174,7 @@ "pattern": "^(OAUTH|TEST|UMA)$" }, "cnPersistenceHybridMapping": { - "description": "Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`.", + "description": "Specify data that should be saved in persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`.", "type": "string" }, "cnRedisSentinelGroup": { @@ -211,16 +206,6 @@ "lbAddr": { "description": "Loadbalancer address for AWS if the FQDN is not registered.", "$ref": "#/definitions/url-pattern" - }, - "cnLdapCrt": { - "description": "OpenDJ certificate string. This must be encoded using base64.", - "type": "string", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" - }, - "cnLdapKey": { - "description": "OpenDJ key string. This must be encoded using base64.", - "type": "string", - "pattern": "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" } } }, @@ -247,10 +232,6 @@ } } }, - "ldapPassword": { - "description": "LDAP admin password if OpennDJ is used for persistence.", - "$ref": "#/definitions/password" - }, "orgName": { "description": "Organization name. Used for certificate creation.", "type": "string", @@ -381,16 +362,6 @@ "type": "string", "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" }, - "ldapStatsLogTarget": { - "description": "jans-auth_persistence_ldap_statistics.log target", - "type": "string", - "pattern": "^(STDOUT|FILE)$" - }, - "ldapStatsLogLevel": { - "description": "jans-auth_persistence_ldap_statistics.log level", - "type": "string", - "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" - }, "scriptLogTarget": { "description": "jans-auth_script.log target", "type": "string", @@ -449,9 +420,9 @@ } }, "cnPersistenceType": { - "description": "Persistence backend to run Janssen with ldap|couchbase|hybrid|sql|spanner.", + "description": "Persistence backend to run Janssen with couchbase|hybrid|sql|spanner.", "type": "string", - "pattern": "^(ldap|couchbase|hybrid|sql|spanner)$" + "pattern": "^(couchbase|hybrid|sql|spanner)$" }, "cnDocumentStoreType": { "description": "Document store type to use for shibboleth files DB.", @@ -614,20 +585,6 @@ } } }, - "opendj": { - "type": "object", - "properties": { - "enabled": { - "description": "Boolean flag to enable/disable the OpenDJ chart.", - "type": "boolean" - }, - "ldapServiceName": { - "description": "Name of the OpenDJ service. 
Please keep it as default.", - "type": "string", - "pattern": "^[a-z0-9-]+$" - } - } - }, "persistence": { "type": "object", "properties": { @@ -682,16 +639,6 @@ "type": "string", "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" }, - "ldapStatsLogTarget": { - "description": "jans-scim_persistence_ldap_statistics.log target", - "type": "string", - "pattern": "^(STDOUT|FILE)$" - }, - "ldapStatsLogLevel": { - "description": "jans-scim_persistence_ldap_statistics.log level", - "type": "string", - "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" - }, "scriptLogTarget": { "description": "jans-scim_script.log target", "type": "string", @@ -796,16 +743,6 @@ "type": "string", "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" }, - "ldapStatsLogTarget": { - "description": "cacherefresh_persistence_ldap_statistics.log target", - "type": "string", - "pattern": "^(STDOUT|FILE)$" - }, - "ldapStatsLogLevel": { - "description": "cacherefresh_persistence_ldap_statistics.log level", - "type": "string", - "pattern": "^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE)$" - }, "scriptLogTarget": { "description": "cacherefresh_script.log target", "type": "string", @@ -858,36 +795,6 @@ "description": "The location of the Couchbase restricted user config.configmap.cnCouchbaseSuperUser password. The file path must end with couchbase_superuser_password.", "type": "string", "pattern": ".*couchbase_superuser_password\\b.*" - }, - "cnLdapPasswordFile": { - "description": "The location of the OpenDJ user password. The file path must end with ldap_password.", - "type": "string", - "pattern": ".*ldap_password\\b.*" - }, - "cnLdapTruststorePasswordFile": { - "description": "The location of the OpenDJ truststore password file. The file path must end with ldap_truststore_password.", - "type": "string", - "pattern": ".*ldap_truststore_password\\b.*" - }, - "cnLdapCertFile": { - "description": "The location of the OpenDJ certificate file. The file path must end with opendj.crt.", - "type": "string", - "pattern": ".*opendj.crt\\b.*" - }, - "cnLdapKeyFile": { - "description": "The location of the OpenDJ certificate file. The file path must end with opendj.key.", - "type": "string", - "pattern": ".*opendj.key\\b.*" - }, - "cnLdapCacertFile": { - "description": "The location of the OpenDJ certificate file. The file path must end with opendj.pem.", - "type": "string", - "pattern": ".*opendj.pem\\b.*" - }, - "cnLdapTruststoreFile": { - "description": "The location of the OpenDJ truststore file. 
The file path must end with opendj.pkcs12.", - "type": "string", - "pattern": ".*opendj.pkcs12\\b.*" } } }, @@ -896,11 +803,6 @@ "type": "object", "properties": {} }, - "opendj": { - "description": "OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions.", - "type": "object", - "properties": {} - }, "persistence": { "description": "Job to generate data and intial config for Janssen Server persistence layer.", "type": "object", @@ -941,9 +843,6 @@ { "$ref": "#/definitions/nginx-ingress-enabled" }, - { - "$ref": "#/definitions/opendj-enabled" - }, { "$ref": "#/definitions/persistence-enabled" }, @@ -1796,264 +1695,6 @@ }, "else": true }, - "opendj-enabled": { - "if": { - "properties": { - "global": { - "properties": { - "opendj": { - "properties": { - "enabled": { - "const": "true" - } - } - } - } - } - } - }, - "then": { - "properties": { - "opendj": { - "required": [ - "image", - "replicas", - "resources", - "service" - ], - "type": "object", - "properties": { - "hpa": { - "description": "Configure the HorizontalPodAutoscaler", - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "minReplicas": { - "type": "integer" - }, - "maxReplicas": { - "type": "integer" - }, - "targetCPUUtilizationPercentage": { - "type": "integer" - }, - "metrics": { - "description": "metrics if targetCPUUtilizationPercentage is not set", - "type": "array" - }, - "behavior": { - "description": "Scaling Policies", - "type": "object" - } - } - }, - "usrEnvs": { - "description": "Add custom normal and secret envs to the service", - "type": "object", - "properties": { - "normal": { - "description": "Add custom normal envs to the service", - "type": "object" - }, - "secret": { - "description": "Add custom secret envs to the service", - "type": "object" - } - } - }, - "dnsPolicy": { - "description": "Add custom dns policy", - "type": "string", - "pattern": "^(Default|ClusterFirst|ClusterFirstWithHostNet|None|)$" - }, - "dnsConfig": { - "description": "Add custom dns config", - "type": "object" - }, - "image": { - "type": "object", - "properties": { - "pullPolicy": { - "description": "Image pullPolicy to use for deploying.", - "type": "string", - "pattern": "^(Always|Never|IfNotPresent)$" - }, - "repository": { - "description": "Image to use for deploying", - "type": "string" - }, - "tag": { - "description": "Image tag to use for deploying.", - "type": "string", - "pattern": "^[a-z0-9-_.]+$" - } - } - }, - "persistence": { - "type": "object", - "properties": { - "size": { - "description": "OpenDJ volume size", - "type": "string", - "pattern": "^[0-9]Gi+$" - } - } - }, - "ports": { - "type": "object", - "properties": { - "tcp-admin": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": { - "type": "integer" - } - } - }, - "tcp-ldap": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": { - "type": "integer" - } - } - }, - "tcp-ldaps": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": 
{ - "type": "integer" - } - } - }, - "tcp-repl": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": { - "type": "integer" - } - } - }, - "tcp-serf": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": { - "type": "integer" - } - } - }, - "udp-serf": { - "type": "object", - "properties": { - "nodePort": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "protocol": { - "type": "string" - }, - "targetPort": { - "type": "integer" - } - } - } - } - }, - "replicas": { - "description": "Service replica number.", - "type": "integer" - }, - "resources": { - "description": "Resource specs.", - "type": "object", - "properties": { - "limits": { - "type": "object", - "properties": { - "cpu": { - "description": "CPU limit.", - "type": "string", - "pattern": "^[0-9m]+$" - }, - "memory": { - "description": "Memory limit.", - "type": "string", - "pattern": "^[0-9Mi]+$" - } - } - }, - "requests": { - "type": "object", - "properties": { - "cpu": { - "description": "CPU request.", - "type": "string", - "pattern": "^[0-9m]+$" - }, - "memory": { - "description": "Memory request.", - "type": "string", - "pattern": "^[0-9Mi]+$" - } - } - } - } - } - } - } - } - }, - "else": true - }, "persistence-enabled": { "if": { "properties": { diff --git a/charts/janssen/values.yaml b/charts/janssen/values.yaml index 9b50afb175d..95682347e43 100644 --- a/charts/janssen/values.yaml +++ b/charts/janssen/values.yaml @@ -280,20 +280,18 @@ config: # -- Path to Vault AppRole. cnVaultAppRolePath: approle # [vault_envs] END - # -- OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`. - cnLdapUrl: "opendj:1636" # -- Value passed to Java option -XX:MaxRAMPercentage cnMaxRamPercent: "75.0" # -- SCIM protection mode OAUTH|TEST|UMA cnScimProtectionMode: "OAUTH" - # -- Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. + # -- Specify data that should be saved in persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. #{ - # "default": "", - # "user": "", - # "site": "", - # "cache": "", - # "token": "", - # "session": "", + # "default": "", + # "user": "", + # "site": "", + # "cache": "", + # "token": "", + # "session": "", #} cnPersistenceHybridMapping: "{}" # -- Redis Sentinel Group. Often set when `config.configmap.cnRedisType` is set to `SENTINEL`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. @@ -310,10 +308,6 @@ config: cnSecretKubernetesSecret: cn # -- Load balancer address for AWS if the FQDN is not registered. lbAddr: "" - # -- OpenDJ certificate string. This must be encoded using base64. - cnLdapCrt: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= - # -- OpenDJ key string. This must be encoded using base64. - cnLdapKey: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo= # -- Quarkus transaction recovery. When using MySQL, there could be issue regarding XA_RECOVER_ADMIN; refer to https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_xa-recover-admin for details. 
quarkusTransactionEnableRecovery: true # -- Keycloak logging level @@ -349,10 +343,6 @@ config: tag: 1.1.6_dev # -- Image Pull Secrets pullSecrets: [ ] - # -- LDAP admin password if OpenDJ is used for persistence. - ldapPassword: P@ssw0rds - # -- LDAP truststore password if OpenDJ is used for persistence - ldapTruststorePassword: changeit # -- Organization name. Used for certificate creation. orgName: Janssen # -- Redis admin password if `config.configmap.cnCacheType` is set to `REDIS`. @@ -759,10 +749,6 @@ global: persistenceDurationLogTarget: "FILE" # -- jans-auth_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- jans-auth_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- jans-auth_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- jans-auth_script.log target scriptLogTarget: "FILE" # -- jans-auth_script.log level @@ -901,7 +887,7 @@ global: cnPrometheusPort: "" # -- Document store type to use for shibboleth files DB. cnDocumentStoreType: DB - # -- Persistence backend to run Janssen with ldap|couchbase|hybrid|sql|spanner. + # -- Persistence backend to run Janssen with couchbase|hybrid|sql|spanner. cnPersistenceType: sql config: # — Add custom annotations for kubernetes resources for the service @@ -961,10 +947,6 @@ global: persistenceDurationLogTarget: "FILE" # -- config-api_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- config-api_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- config-api_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- config-api_script.log target scriptLogTarget: "FILE" # -- config-api_script.log level @@ -1053,21 +1035,6 @@ global: nginx-ingress: # -- Boolean flag to enable/disable the nginx-ingress definitions chart. enabled: true - opendj: - # -- Boolean flag to enable/disable the OpenDJ chart. - enabled: false - # -- Name of the OpenDJ service. Please keep it as default. - ldapServiceName: opendj - # — Add custom annotations for kubernetes resources for the service - customAnnotations: - cronjob: {} - horizontalPodAutoscaler: {} - destinationRule: {} - podDisruptionBudget: {} - service: {} - statefulset: {} - storageClass: {} - secret: {} persistence: # — Add custom annotations for kubernetes resources for the service customAnnotations: @@ -1110,10 +1077,6 @@ global: persistenceDurationLogTarget: "FILE" # -- jans-scim_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- jans-scim_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- jans-scim_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- jans-scim_script.log target scriptLogTarget: "FILE" # -- jans-scim_script.log level @@ -1132,7 +1095,7 @@ global: scimLabels: { } # -- SCIM ingress resource additional annotations. scimAdditionalAnnotations: { } - # -- StorageClass section for OpenDJ charts. This is not currently used by the openbanking distribution. You may specify custom parameters as needed. + # -- StorageClass section. This is not currently used by the openbanking distribution. You may specify custom parameters as needed. 
storageClass: allowVolumeExpansion: true allowedTopologies: [] @@ -1179,10 +1142,6 @@ global: persistenceDurationLogTarget: "FILE" # -- cacherefresh_persistence_duration.log level persistenceDurationLogLevel: "INFO" - # -- cacherefresh_persistence_ldap_statistics.log target - ldapStatsLogTarget: "FILE" - # -- cacherefresh_persistence_ldap_statistics.log level - ldapStatsLogLevel: "INFO" # -- cacherefresh_script.log target scriptLogTarget: "FILE" # -- cacherefresh_script.log level @@ -1221,18 +1180,6 @@ global: cnCouchbasePasswordFile: /etc/jans/conf/couchbase_password # -- Path to Couchbase superuser password file cnCouchbaseSuperuserPasswordFile: /etc/jans/conf/couchbase_superuser_password - # -- Path to LDAP password file - cnLdapPasswordFile: /etc/jans/conf/ldap_password - # -- Path to LDAP truststore password file - cnLdapTruststorePasswordFile: /etc/jans/conf/ldap_truststore_password - # -- Path to OpenDJ cert file - cnLdapCertFile: /etc/certs/opendj.crt - # -- Path to OpenDJ key file - cnLdapKeyFile: /etc/certs/opendj.key - # -- Path to OpenDJ CA cert file - cnLdapCacertFile: /etc/certs/opendj.pem - # -- Path to OpenDJ truststore file - cnLdapTruststoreFile: /etc/certs/opendj.pkcs12 # -- Path to file contains password for database access kcDbPasswordFile: /etc/jans/conf/kc_db_password # -- Path to file contains Keycloak admin credentials (username and password) @@ -1271,149 +1218,6 @@ nginx-ingress: hosts: - demoexample.jans.io -# -- OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. -opendj: - # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API - # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ - topologySpreadConstraints: {} - # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc. - # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart - #tsc1: - # maxSkew: 1 - # minDomains: 1 # optional; beta since v1.25 - # topologyKey: kubernetes.io/hostname - # whenUnsatisfiable: DoNotSchedule - # matchLabelKeys: [] # optional; alpha since v1.25 - # nodeAffinityPolicy: [] # optional; alpha since v1.25 - # nodeTaintsPolicy: [] # optional; alpha since v1.25 - #tsc2: - #maxSkew: 1 - # -- Configure the PodDisruptionBudget - pdb: - enabled: true - maxUnavailable: 1 - # -- Configure ldap backup cronjob - backup: - enabled: true - cronJobSchedule: "*/59 * * * *" - # -- Configure the HorizontalPodAutoscaler - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 10 - targetCPUUtilizationPercentage: 50 - # -- metrics if targetCPUUtilizationPercentage is not set - metrics: [] - # -- Scaling Policies - behavior: {} - # -- Add custom normal and secret envs to the service - usrEnvs: - # -- Add custom normal envs to the service - # variable1: value1 - normal: {} - # -- Add custom secret envs to the service - # variable1: value1 - secret: {} - # -- Add custom dns policy - dnsPolicy: "" - # -- Add custom dns config - dnsConfig: {} - image: - # -- Image pullPolicy to use for deploying. - pullPolicy: IfNotPresent - # -- Image to use for deploying. - repository: gluufederation/opendj - # -- Image tag to use for deploying. 
- tag: 5.0.0_dev - # -- Image Pull Secrets - pullSecrets: [ ] - - persistence: - # -- OpenDJ volume size - size: 5Gi - ports: - tcp-admin: - nodePort: "" - port: 4444 - protocol: TCP - targetPort: 4444 - tcp-ldap: - nodePort: "" - port: 1389 - protocol: TCP - targetPort: 1389 - tcp-ldaps: - nodePort: "" - port: 1636 - protocol: TCP - targetPort: 1636 - tcp-repl: - nodePort: "" - port: 8989 - protocol: TCP - targetPort: 8989 - tcp-serf: - nodePort: "" - port: 7946 - protocol: TCP - targetPort: 7946 - udp-serf: - nodePort: "" - port: 7946 - protocol: UDP - targetPort: 7946 - # -- Service replica number. - replicas: 1 - # -- Resource specs. - resources: - limits: - # -- CPU limit. - cpu: 1500m - # -- Memory limit. - memory: 2000Mi - requests: - # -- CPU request. - cpu: 1500m - # -- Memory request. - memory: 2000Mi - # -- Configure the liveness healthcheck for OpenDJ if needed. - # https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py - livenessProbe: - # -- Executes the python3 healthcheck. - exec: - command: - - python3 - - /app/scripts/healthcheck.py - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 20 - # -- Configure the readiness healthcheck for OpenDJ if needed. - # https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py - readinessProbe: - tcpSocket: - port: 1636 - initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 25 - failureThreshold: 20 - # -- Configure any additional volumes that need to be attached to the pod - volumes: [] - # -- Configure any additional volumesMounts that need to be attached to the containers - volumeMounts: [] - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "python3 /app/scripts/deregister_peer.py 1>&/proc/1/fd/1"] - - # -- Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} - additionalLabels: { } - # -- Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} - additionalAnnotations: { } - # -- Add custom scripts that have been mounted to run before the entrypoint. - # - /tmp/custom.sh - # - /tmp/custom2.sh - customScripts: [ ] # -- Job to generate data and initial config for Janssen Server persistence layer. 
persistence: # -- Add custom normal and secret envs to the service diff --git a/docker-jans-all-in-one/Dockerfile b/docker-jans-all-in-one/Dockerfile index abc67bf95ba..7e98465807d 100644 --- a/docker-jans-all-in-one/Dockerfile +++ b/docker-jans-all-in-one/Dockerfile @@ -58,7 +58,7 @@ RUN apk update \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 # note that as we're pulling from a monorepo (with multiple project in it) # we are using partial-clone and sparse-checkout to get the assets @@ -213,8 +213,6 @@ ENV JETTY_BASE=/opt/jans/jetty \ CN_SQL_PASSWORD_FILE=/etc/jans/conf/sql_password \ CN_COUCHBASE_PASSWORD_FILE=/etc/jans/conf/couchbase_password \ CN_COUCHBASE_SUPERUSER_PASSWORD_FILE=/etc/jans/conf/couchbase_superuser_password \ - CN_LDAP_PASSWORD_FILE=/etc/jans/conf/ldap_password \ - CN_LDAP_TRUSTSTORE_PASSWORD_FILE=/etc/jans/conf/ldap_truststore_password \ CN_KEYCLOAK_LINK_JETTY_HOST=127.0.0.1 \ CN_KEYCLOAK_LINK_JETTY_PORT=9092 \ CN_KEYCLOAK_LINK_JAVA_OPTIONS="" \ diff --git a/docker-jans-auth-server/Dockerfile b/docker-jans-auth-server/Dockerfile index 7c27a07959c..79beaea1a8a 100644 --- a/docker-jans-auth-server/Dockerfile +++ b/docker-jans-auth-server/Dockerfile @@ -103,7 +103,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-auth/agama/fl \ /app/static/rdbm \ /app/schema -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -205,10 +205,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-auth-server/README.md b/docker-jans-auth-server/README.md index edd819087b5..6eddc8cbc54 100644 --- a/docker-jans-auth-server/README.md +++ b/docker-jans-auth-server/README.md @@ -50,10 +50,8 @@ The following environment variables are supported by the container: - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. - `CN_DEBUG_PORT`: port of remote debugging (if omitted, remote debugging will be disabled). -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `couchbase`, `spanner`, `sql`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). 
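The `CN_PERSISTENCE_TYPE` and `CN_HYBRID_MAPPING` variables described in this README are resolved by the image entrypoints (see the `bootstrap.py` and `wait.py` changes later in this diff, where the default moves from `ldap` to `sql`). Below is a minimal, standalone sketch of that resolution logic using only the Python standard library; it deliberately does not use the real `jans.pycloudlib` helpers, so the function and constant names are illustrative only.

```python
import json
import os

# Backends accepted after this change; "ldap" is intentionally no longer one of them.
SUPPORTED_TYPES = {"couchbase", "spanner", "sql", "hybrid"}
# Data groups that a hybrid mapping may assign to a concrete backend.
HYBRID_KEYS = {"default", "user", "site", "cache", "token", "session"}


def resolve_persistence():
    # Default flipped from "ldap" to "sql", mirroring the Dockerfile and bootstrap.py changes.
    persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql")
    if persistence_type not in SUPPORTED_TYPES:
        raise ValueError(f"Unsupported CN_PERSISTENCE_TYPE: {persistence_type}")

    if persistence_type != "hybrid":
        return persistence_type, {}

    # Hybrid mode: CN_HYBRID_MAPPING holds a JSON object mapping data groups
    # (default, user, site, cache, token, session) to concrete backends.
    mapping = json.loads(os.environ.get("CN_HYBRID_MAPPING", "{}"))
    unknown_keys = set(mapping) - HYBRID_KEYS
    bad_backends = {v for v in mapping.values() if v not in SUPPORTED_TYPES - {"hybrid"}}
    if unknown_keys or bad_backends:
        raise ValueError(
            f"Invalid hybrid mapping: keys={unknown_keys or '{}'}, backends={bad_backends or '{}'}"
        )
    return persistence_type, mapping


if __name__ == "__main__":
    # Example:
    #   CN_PERSISTENCE_TYPE=hybrid \
    #   CN_HYBRID_MAPPING='{"default": "sql", "user": "spanner", "token": "couchbase"}' \
    #   python3 resolve_persistence.py
    print(resolve_persistence())
```

With the defaults above, running the sketch with no environment set simply yields `("sql", {})`, which matches the new out-of-the-box behavior once the LDAP code paths are removed.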
@@ -131,8 +129,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -189,12 +185,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -204,7 +200,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", @@ -220,4 +216,3 @@ i.e. `http://container:9093/metrics`. Note that Prometheus JMX exporter uses pre-defined config file (see `conf/prometheus-config.yaml`). To customize the config, mount custom config file to `/opt/prometheus/prometheus-config.yaml` inside the container. - diff --git a/docker-jans-auth-server/scripts/bootstrap.py b/docker-jans-auth-server/scripts/bootstrap.py index 853ffdd247b..4301072b8ef 100644 --- a/docker-jans-auth-server/scripts/bootstrap.py +++ b/docker-jans-auth-server/scripts/bootstrap.py @@ -12,9 +12,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import sync_google_credentials from jans.pycloudlib.persistence.sql import render_sql_properties @@ -38,7 +35,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -51,17 +48,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -143,8 +129,6 @@ def configure_logging(): "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -191,7 +175,6 @@ def configure_logging(): "http_log_target": "JANS_AUTH_HTTP_REQUEST_RESPONSE_FILE", "persistence_log_target": "JANS_AUTH_PERSISTENCE_FILE", "persistence_duration_log_target": "JANS_AUTH_PERSISTENCE_DURATION_FILE", - "ldap_stats_log_target": "JANS_AUTH_PERSISTENCE_LDAP_STATISTICS_FILE", "script_log_target": "JANS_AUTH_SCRIPT_LOG_FILE", "audit_log_target": 
"JANS_AUTH_AUDIT_LOG_FILE", } diff --git a/docker-jans-auth-server/scripts/lock.py b/docker-jans-auth-server/scripts/lock.py index 2f8d0e7d401..7dcfbbee390 100644 --- a/docker-jans-auth-server/scripts/lock.py +++ b/docker-jans-auth-server/scripts/lock.py @@ -8,7 +8,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.utils import PersistenceMapper @@ -100,7 +99,6 @@ def __init__(self, manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, diff --git a/docker-jans-auth-server/scripts/upgrade.py b/docker-jans-auth-server/scripts/upgrade.py index efc6fa86f52..3e2f52dcf88 100644 --- a/docker-jans-auth-server/scripts/upgrade.py +++ b/docker-jans-auth-server/scripts/upgrade.py @@ -8,7 +8,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.sql import doc_id_from_dn @@ -121,53 +120,6 @@ def _transform_lock_dynamic_config(conf, manager): return conf, should_update -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - def search_entries(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - entries = self.client.search(key, filter_, attrs) - - return [ - Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - for entry in entries - ] - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -292,7 +244,6 @@ def search_entries(self, key, filter_="", attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -345,16 +296,11 @@ def get_all_scopes(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "jansScope"} entries = self.backend.search_entries(None, **kwargs) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} entries = self.backend.search_entries( None, filter_="WHERE objectClass = 'jansScope'", **kwargs ) - else: - # likely ldap - entries = self.backend.search_entries( - "ou=scopes,o=jans", filter_="(objectClass=jansScope)" - ) 
return { entry.attrs["jansId"]: entry.attrs.get("dn") or entry.id @@ -369,7 +315,7 @@ def update_lock_client_scopes(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "jansClnt"} id_ = doc_id_from_dn(id_) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} id_ = id_from_dn(id_) diff --git a/docker-jans-auth-server/scripts/wait.py b/docker-jans-auth-server/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-auth-server/scripts/wait.py +++ b/docker-jans-auth-server/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-auth-server/templates/jans-auth/log4j2.xml b/docker-jans-auth-server/templates/jans-auth/log4j2.xml index 9f834f71d19..9c9c6ce0822 100644 --- a/docker-jans-auth-server/templates/jans-auth/log4j2.xml +++ b/docker-jans-auth-server/templates/jans-auth/log4j2.xml @@ -53,18 +53,6 @@ - - - - - - - - - - - @@ -112,20 +100,11 @@ - - -persistence - - -persistence - - -persistence-duration - - - -persistence-duration @@ -136,11 +115,6 @@ - - -ldap-stats - - - -script diff --git a/docker-jans-auth-server/templates/jans-ldap.properties b/docker-jans-auth-server/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-auth-server/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. 
Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-casa/Dockerfile b/docker-jans-casa/Dockerfile index 1fd46b1a720..8d66d5b238a 100644 --- a/docker-jans-casa/Dockerfile +++ b/docker-jans-casa/Dockerfile @@ -60,7 +60,7 @@ RUN mkdir -p /usr/share/java \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -163,10 +163,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-casa/README.md b/docker-jans-casa/README.md index a605ae0f548..89b28d16f43 100644 --- a/docker-jans-casa/README.md +++ b/docker-jans-casa/README.md @@ -41,10 +41,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`); required if `CN_PERSISTENCE_TYPE` is set to `ldap` or `hybrid`. -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. @@ -135,12 +133,12 @@ Hybrid persistence supports all available persistence types. To configure hybrid ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -150,7 +148,7 @@ Hybrid persistence supports all available persistence types. 
To configure hybrid { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-casa/scripts/bootstrap.py b/docker-jans-casa/scripts/bootstrap.py index 14fcbbab5ca..26e6c965c41 100644 --- a/docker-jans-casa/scripts/bootstrap.py +++ b/docker-jans-casa/scripts/bootstrap.py @@ -16,10 +16,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -122,7 +118,7 @@ def configure_logging(): def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -133,17 +129,6 @@ def main(): if persistence_type == "hybrid": render_hybrid_properties("/etc/jans/conf/jans-hybrid.properties") - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -213,7 +198,6 @@ def __init__(self, manager): self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, @@ -284,21 +268,17 @@ def _deprecated_script_exists(self): if self.persistence_type in ("sql", "spanner"): return bool(self.client.get("jansCustomScr", doc_id_from_dn(id_))) - # couchbase - if self.persistence_type == "couchbase": - bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") - key = id_from_dn(id_) - req = self.client.exec_query( - f"SELECT META().id, {bucket}.* FROM {bucket} USE KEYS '{key}'" - ) - try: - entry = req.json()["results"][0] - return bool(entry["id"]) - except IndexError: - return False - - # ldap - return bool(self.client.get(id_)) + # likely couchbase + bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") + key = id_from_dn(id_) + req = self.client.exec_query( + f"SELECT META().id, {bucket}.* FROM {bucket} USE KEYS '{key}'" + ) + try: + entry = req.json()["results"][0] + return bool(entry["id"]) + except IndexError: + return False def import_ldif_files(self): for file_ in self.ldif_files: diff --git a/docker-jans-casa/scripts/upgrade.py b/docker-jans-casa/scripts/upgrade.py index c423289cb88..4f11a498683 100644 --- a/docker-jans-casa/scripts/upgrade.py +++ b/docker-jans-casa/scripts/upgrade.py @@ -9,7 +9,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from 
jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.sql import doc_id_from_dn @@ -30,59 +29,6 @@ Entry = namedtuple("Entry", ["id", "attrs"]) -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, entry, raw_values=None): - raw_values = raw_values or [] - attrs = {} - - for attr in entry.entry_attributes: - if attr in raw_values: - values = entry[attr].raw_values - else: - values = entry[attr].values - - if len(values) < 2: - v = values[0] - else: - v = values - attrs[attr] = v - return attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - raw_values = kwargs.get("raw_values") - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry, raw_values)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_attrs = kwargs.get("delete_attrs") or [] - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - - if k in del_attrs: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - attrs[k] = [(mod, v)] - - modified, _ = self.client.modify(key, attrs) - return modified - - def delete_entry(self, key, **kwargs): - return self.client.delete(key) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -189,7 +135,6 @@ def delete_entry(self, key, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -365,12 +310,9 @@ def update_agama_deployment(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "adsPrjDeployment"} deploy_id = doc_id_from_dn(deploy_id) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} deploy_id = id_from_dn(deploy_id) - else: - # for ldap, get the raw value of the following attribute so we get a precise value - kwargs = {"raw_values": ["jansEndDate"]} entry = self.backend.get_entry(deploy_id, **kwargs) proj_archive = CASA_AGAMA_ARCHIVE @@ -388,14 +330,10 @@ def update_agama_deployment(self): if self.backend.type in ("sql", "spanner"): entry.attrs["jansStartDate"] = start_date entry.attrs["jansEndDate"] = None - elif self.backend.type == "couchbase": + else: # likely couchbase entry.attrs["jansStartDate"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ") entry.attrs["jansEndDate"] = "" entry.attrs["adsPrjDeplDetails"] = {"projectMetadata": {"projectName": "casa"}} - else: # ldap - # remove jansEndDate - kwargs["delete_attrs"] = ["jansEndDate"] - entry.attrs["jansStartDate"] = generalized_time_utc(start_date) if self.backend.modify_entry(entry.id, entry.attrs, **kwargs): self.manager.config.set("casa_agama_md5sum", assets_md5) diff --git a/docker-jans-casa/scripts/wait.py b/docker-jans-casa/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-casa/scripts/wait.py +++ b/docker-jans-casa/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-casa/templates/jans-ldap.properties b/docker-jans-casa/templates/jans-ldap.properties deleted file mode 100644 
index a7ec401fe75..00000000000 --- a/docker-jans-casa/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-certmanager/Dockerfile b/docker-jans-certmanager/Dockerfile index 5ebf55f7ab1..b7ba7da1e86 100644 --- a/docker-jans-certmanager/Dockerfile +++ b/docker-jans-certmanager/Dockerfile @@ -25,7 +25,7 @@ RUN wget -q ${CN_SOURCE_URL} -P /app/javalibs/ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 # note that as we're pulling from a monorepo (with multiple project in it) # we are using partial-clone and sparse-checkout to get the assets @@ -102,10 +102,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-certmanager/README.md b/docker-jans-certmanager/README.md index 284a350a897..8ccdcb9176b 100644 --- a/docker-jans-certmanager/README.md +++ b/docker-jans-certmanager/README.md @@ -50,10 +50,8 @@ The following environment variables are supported by the container: - `CN_SECRET_GOOGLE_SECRET_VERSION_ID`: Google Secret Manager version ID (default to `latest`). - `CN_SECRET_GOOGLE_SECRET_NAME_PREFIX`: Prefix for Google Secret Manager name (default to `jans`). - `CN_SECRET_GOOGLE_SECRET_MANAGER_PASSPHRASE`: Passphrase for Google Secret Manager (default to `secret`). -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). 
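
Editorial note: every image in this changeset flips the `CN_PERSISTENCE_TYPE` default from `ldap` to `sql` and drops `ldap` from the accepted values. The containers themselves call `validate_persistence_type` from `jans.pycloudlib` (see the `wait.py` hunks); the stand-alone sketch below only illustrates the new default and accepted set, and its error handling is an assumption, not the library's behavior.

```python
import os

# Accepted backends after this change; "ldap" is intentionally absent.
SUPPORTED = ("sql", "couchbase", "spanner", "hybrid")


def resolve_persistence_type() -> str:
    # The default flipped from "ldap" to "sql" in the Dockerfiles above.
    persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql")
    if persistence_type not in SUPPORTED:
        raise SystemExit(
            f"unsupported CN_PERSISTENCE_TYPE={persistence_type!r}; "
            f"expected one of {', '.join(SUPPORTED)}"
        )
    return persistence_type


if __name__ == "__main__":
    print(resolve_persistence_type())
```
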
@@ -226,12 +224,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -241,7 +239,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-certmanager/scripts/auth_handler.py b/docker-jans-certmanager/scripts/auth_handler.py index 2e4c1e9bae3..5bdd9d1e6f2 100644 --- a/docker-jans-certmanager/scripts/auth_handler.py +++ b/docker-jans-certmanager/scripts/auth_handler.py @@ -7,7 +7,6 @@ from collections import deque from jans.pycloudlib.persistence.couchbase import CouchbaseClient -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.utils import PersistenceMapper @@ -75,39 +74,6 @@ def modify_auth_config(self, id_, rev, conf_dynamic, conf_webkeys): raise NotImplementedError -class LdapPersistence(BasePersistence): - def __init__(self, manager): - self.client = LdapClient(manager) - - def get_auth_config(self): - entry = self.client.get( - "ou=jans-auth,ou=configuration,o=jans", - attributes=["jansRevision", "jansConfWebKeys", "jansConfDyn"], - ) - - if not entry: - return {} - - config = { - "id": entry.entry_dn, - "jansRevision": entry["jansRevision"][0], - "jansConfWebKeys": entry["jansConfWebKeys"][0], - "jansConfDyn": entry["jansConfDyn"][0], - } - return config - - def modify_auth_config(self, id_, rev, conf_dynamic, conf_webkeys): - modified, _ = self.client.modify( - id_, - { - 'jansRevision': [(self.client.MODIFY_REPLACE, [str(rev)])], - 'jansConfWebKeys': [(self.client.MODIFY_REPLACE, [json.dumps(conf_webkeys)])], - 'jansConfDyn': [(self.client.MODIFY_REPLACE, [json.dumps(conf_dynamic)])], - } - ) - return modified - - class CouchbasePersistence(BasePersistence): def __init__(self, manager): self.client = CouchbaseClient(manager) @@ -178,7 +144,6 @@ def __init__(self, manager): _backend_classes = { - "ldap": LdapPersistence, "couchbase": CouchbasePersistence, "sql": SqlPersistence, "spanner": SpannerPersistence, diff --git a/docker-jans-certmanager/scripts/bootstrap.py b/docker-jans-certmanager/scripts/bootstrap.py index bf8c4741401..eacef55d80c 100644 --- a/docker-jans-certmanager/scripts/bootstrap.py +++ b/docker-jans-certmanager/scripts/bootstrap.py @@ -6,16 +6,12 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import sync_couchbase_password -from jans.pycloudlib.persistence.ldap import sync_ldap_password from jans.pycloudlib.persistence.spanner import sync_google_credentials from jans.pycloudlib.persistence.sql import sync_sql_password from jans.pycloudlib.persistence.utils import PersistenceMapper from settings import LOGGING_CONFIG -# from ldap_handler import LdapHandler from auth_handler import AuthHandler -# from oxshibboleth_handler import OxshibbolethHandler -# from passport_handler import PassportHandler from web_handler import WebHandler logging.config.dictConfig(LOGGING_CONFIG) @@ -24,10 +20,7 @@ #: Map between service name and its handler class PATCH_SERVICE_MAP = { "web": WebHandler, - # "oxshibboleth": OxshibbolethHandler, "auth": AuthHandler, - # "ldap": LdapHandler, 
- # "passport": PassportHandler, } PRUNE_SERVICE_MAP = { @@ -78,8 +71,6 @@ def patch(service, dry_run, opts): backend_type = mapper.mapping["default"] match backend_type: - case "ldap": - sync_ldap_password(manager) case "sql": sync_sql_password(manager) case "couchbase": @@ -116,8 +107,6 @@ def prune(service, dry_run, opts): backend_type = mapper.mapping["default"] match backend_type: - case "ldap": - sync_ldap_password(manager) case "sql": sync_sql_password(manager) case "couchbase": diff --git a/docker-jans-certmanager/scripts/ldap_handler.py b/docker-jans-certmanager/scripts/ldap_handler.py deleted file mode 100644 index 48024771333..00000000000 --- a/docker-jans-certmanager/scripts/ldap_handler.py +++ /dev/null @@ -1,79 +0,0 @@ -import logging.config - -from jans.pycloudlib.utils import exec_cmd - -from base_handler import BaseHandler -from settings import LOGGING_CONFIG - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("certmanager") - - -class LdapHandler(BaseHandler): - def generate_x509(self): - alt_name = self.opts.get("subj-alt-name", "localhost") - suffix = "opendj" - - try: - valid_to = int(self.opts.get("valid-to", 365)) - except ValueError: - valid_to = 365 - finally: - if valid_to < 1: - valid_to = 365 - - self._patch_cert_key(suffix, extra_dns=[alt_name], valid_to=valid_to) - - with open("/etc/certs/{}.pem".format(suffix), "w") as fw: - with open("/etc/certs/{}.crt".format(suffix)) as fr: - ldap_ssl_cert = fr.read() - - with open("/etc/certs/{}.key".format(suffix)) as fr: - ldap_ssl_key = fr.read() - - ldap_ssl_cacert = "".join([ldap_ssl_cert, ldap_ssl_key]) - fw.write(ldap_ssl_cacert) - - if not self.dry_run: - self.manager.secret.from_file( - "ldap_ssl_cert", f"/etc/certs/{suffix}.crt", encode=True, - ) - self.manager.secret.from_file( - "ldap_ssl_key", f"/etc/certs/{suffix}.key", encode=True, - ) - self.manager.secret.from_file( - "ldap_ssl_cacert", f"/etc/certs/{suffix}.pem", encode=True, - ) - - def generate_keystore(self): - suffix = "opendj" - passwd = self.manager.secret.get("ldap_truststore_pass") - hostname = self.manager.config.get("hostname") - - logger.info(f"Generating /etc/certs/{suffix}.pkcs12 file") - - # Convert key to pkcs12 - cmd = " ".join([ - "openssl", - "pkcs12", - "-export", - "-inkey /etc/certs/{}.key".format(suffix), - "-in /etc/certs/{}.crt".format(suffix), - "-out /etc/certs/{}.pkcs12".format(suffix), - "-name {}".format(hostname), - "-passout pass:{}".format(passwd), - ]) - _, err, retcode = exec_cmd(cmd) - assert retcode == 0, "Failed to generate PKCS12 file; reason={}".format(err.decode()) - - if not self.dry_run: - self.manager.secret.from_file( - "ldap_pkcs12_base64", - f"/etc/certs/{suffix}.pkcs12", - encode=True, - binary_mode=True, - ) - - def patch(self): - self.generate_x509() - self.generate_keystore() diff --git a/docker-jans-certmanager/scripts/oxshibboleth_handler.py b/docker-jans-certmanager/scripts/oxshibboleth_handler.py deleted file mode 100644 index 4a24a4205e0..00000000000 --- a/docker-jans-certmanager/scripts/oxshibboleth_handler.py +++ /dev/null @@ -1,91 +0,0 @@ -import logging.config - -from jans.pycloudlib.utils import exec_cmd - -from base_handler import BaseHandler -from settings import LOGGING_CONFIG - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("certmanager") - - -class OxshibbolethHandler(BaseHandler): - @classmethod - def gen_idp3_key(cls, storepass): - cmd = ( - "java -classpath '/app/javalibs/*' " - 
"net.shibboleth.utilities.java.support.security.BasicKeystoreKeyStrategyTool " - "--storefile /etc/certs/sealer.jks " - "--versionfile /etc/certs/sealer.kver " - "--alias secret " - f"--storepass {storepass}" - ) - return exec_cmd(cmd) - - def _patch_shib_sealer(self, passwd): - sealer_jks = "/etc/certs/sealer.jks" - sealer_kver = "/etc/certs/sealer.kver" - logger.info(f"Generating new {sealer_jks} and {sealer_kver} files") - self.gen_idp3_key(passwd) - return sealer_jks, sealer_kver - - def patch(self): - passwd = self.manager.secret.get("shibJksPass") - - # shibIDP - cert_fn, key_fn = self._patch_cert_key("shibIDP", passwd) - if not self.dry_run: - if cert_fn: - self.manager.secret.from_file( - "shibIDP_cert", cert_fn, encode=True, - ) - if key_fn: - self.manager.secret.from_file( - "shibIDP_cert", key_fn, encode=True, - ) - - keystore_fn = self._patch_keystore( - "shibIDP", self.manager.config.get("hostname"), passwd, - ) - if not self.dry_run: - if keystore_fn: - self.manager.secret.from_file( - "shibIDP_jks_base64", - keystore_fn, - encode=True, - binary_mode=True, - ) - - sealer_jks_fn, sealer_kver_fn = self._patch_shib_sealer(passwd) - if not self.dry_run: - if sealer_jks_fn: - self.manager.secret.from_file( - "sealer_jks_base64", - sealer_jks_fn, - encode=True, - binary_mode=True, - ) - if sealer_kver_fn: - self.manager.secret.from_file( - "sealer_kver_base64", sealer_kver_fn, encode=True, - ) - - # IDP signing - cert_fn, key_fn = self._patch_cert_key("idp-signing", passwd) - if not self.dry_run: - if cert_fn: - self.manager.secret.from_file( - "idp3SigningCertificateText", cert_fn, - ) - if key_fn: - self.manager.secret.from_file("idp3SigningKeyText", key_fn) - - # IDP encryption - cert_fn, key_fn = self._patch_cert_key("idp-encryption", passwd) - if not self.dry_run: - if cert_fn: - self.manager.secret.from_file( - "idp3EncryptionCertificateText", cert_fn, - ) - if key_fn: - self.manager.secret.from_file("idp3EncryptionKeyText", key_fn) diff --git a/docker-jans-certmanager/scripts/passport_handler.py b/docker-jans-certmanager/scripts/passport_handler.py deleted file mode 100644 index bd5aeb27217..00000000000 --- a/docker-jans-certmanager/scripts/passport_handler.py +++ /dev/null @@ -1,96 +0,0 @@ -import base64 -import json -import logging.config -import sys - -from base_handler import BaseHandler -from settings import LOGGING_CONFIG -from utils import generate_openid_keys -from utils import export_openid_keys - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("certmanager") - - -class PassportHandler(BaseHandler): - def patch_passport_rs(self): - jks_fn = self.manager.config.get("passport_rs_client_jks_fn") - jwks_fn = self.manager.config.get("passport_rs_client_jwks_fn") - - logger.info(f"Generating new {jks_fn} and {jwks_fn}") - - out, err, retcode = generate_openid_keys( - self.manager.secret.get("passport_rs_client_jks_pass"), - jks_fn, - jwks_fn, - self.manager.config.get("default_openid_jks_dn_name"), - ) - if retcode != 0: - logger.error(f"Unable to generate Passport RS keys; reason={err.decode()}") - sys.exit(1) - - cert_alg = self.manager.config.get("passport_rs_client_cert_alg") - cert_alias = "" - for key in json.loads(out)["keys"]: - if key["alg"] == cert_alg: - cert_alias = key["kid"] - break - - if not self.dry_run: - self.manager.secret.set("passport_rs_client_base64_jwks", base64.b64encode(out)) - self.manager.secret.from_file( - "passport_rs_jks_base64", jks_fn, encode=True, binary_mode=True, - ) - 
self.manager.config.set("passport_rs_client_cert_alias", cert_alias) - - def patch_passport_rp(self): - jks_pass = self.manager.secret.get("passport_rp_client_jks_pass") - jks_fn = self.manager.config.get("passport_rp_client_jks_fn") - jwks_fn = self.manager.config.get("passport_rp_client_jwks_fn") - client_cert_fn = self.manager.config.get("passport_rp_client_cert_fn") - - logger.info(f"Generating new {jks_fn} and {jwks_fn}") - - out, err, code = generate_openid_keys( - jks_pass, - jks_fn, - jwks_fn, - self.manager.config.get("default_openid_jks_dn_name"), - ) - if code != 0: - logger.error(f"Unable to generate Passport RP keys; reason={err.decode()}") - sys.exit(1) - - cert_alg = self.manager.config.get("passport_rp_client_cert_alg") - cert_alias = "" - for key in json.loads(out)["keys"]: - if key["alg"] == cert_alg: - cert_alias = key["kid"] - break - - _, err, retcode = export_openid_keys( - jks_fn, jks_pass, cert_alias, client_cert_fn, - ) - if retcode != 0: - logger.error(f"Unable to generate Passport RP client cert; reason={err.decode()}") - sys.exit(1) - - if not self.dry_run: - self.manager.secret.set("passport_rp_client_base64_jwks", base64.b64encode(out)) - self.manager.secret.from_file( - "passport_rp_jks_base64", jks_fn, encode=True, binary_mode=True, - ) - self.manager.config.set("passport_rp_client_cert_alias", cert_alias) - self.manager.secret.from_file("passport_rp_client_cert_base64", client_cert_fn, encode=True) - - def patch_passport_sp(self): - cert_fn, key_fn = self._patch_cert_key("passport-sp", self.manager.secret.get("passportSpJksPass")) - - if not self.dry_run: - self.manager.secret.from_file("passport_sp_cert_base64", cert_fn, encode=True) - self.manager.secret.from_file("passport_sp_key_base64", key_fn, encode=True) - - def patch(self): - self.patch_passport_rs() - self.patch_passport_rp() - self.patch_passport_sp() diff --git a/docker-jans-config-api/Dockerfile b/docker-jans-config-api/Dockerfile index 1c8a8d23238..e8768f1ede1 100644 --- a/docker-jans-config-api/Dockerfile +++ b/docker-jans-config-api/Dockerfile @@ -78,7 +78,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-config-api/_plugins \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup ARG JANS_CONFIG_API_RESOURCES=jans-config-api/server/src/main/resources @@ -190,10 +190,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-config-api/README.md b/docker-jans-config-api/README.md index 576682b65db..898521dd692 100644 --- a/docker-jans-config-api/README.md +++ b/docker-jans-config-api/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, `sql`, `spanner`, or `hybrid`; default to `ldap`). 
+- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `couchbase`, `sql`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -124,8 +122,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -184,12 +180,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -199,7 +195,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-config-api/scripts/bootstrap.py b/docker-jans-config-api/scripts/bootstrap.py index 1703d9b08e4..d6e93442127 100644 --- a/docker-jans-config-api/scripts/bootstrap.py +++ b/docker-jans-config-api/scripts/bootstrap.py @@ -18,10 +18,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -50,7 +46,7 @@ def main(): manager = get_manager() - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -63,17 +59,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -155,8 +140,6 @@ def configure_logging(): "persistence_log_level": "INFO", "persistence_duration_log_target": 
"FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -202,7 +185,6 @@ def configure_logging(): "config_api_log_target": "FILE", "persistence_log_target": "JANS_CONFIGAPI_PERSISTENCE_FILE", "persistence_duration_log_target": "JANS_CONFIGAPI_PERSISTENCE_DURATION_FILE", - "ldap_stats_log_target": "JANS_CONFIGAPI_PERSISTENCE_LDAP_STATISTICS_FILE", "script_log_target": "JANS_CONFIGAPI_SCRIPT_LOG_FILE", "audit_log_target": "AUDIT_FILE", } @@ -308,7 +290,6 @@ def __init__(self, manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, @@ -330,19 +311,14 @@ def get_auth_config(self): entry = self.client.get("jansAppConf", doc_id_from_dn(dn)) return json.loads(entry["jansConfDyn"]) - # couchbase - if self.persistence_type == "couchbase": - key = id_from_dn(dn) - bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") - req = self.client.exec_query( - f"SELECT META().id, {bucket}.* FROM {bucket} USE KEYS '{key}'" # nosec: 608 - ) - attrs = req.json()["results"][0] - return attrs["jansConfDyn"] - - # ldap - entry = self.client.get(dn, attributes=["jansConfDyn"]) - return json.loads(entry.entry_attributes_as_dict["jansConfDyn"][0]) + # likely couchbase + key = id_from_dn(dn) + bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") + req = self.client.exec_query( + f"SELECT META().id, {bucket}.* FROM {bucket} USE KEYS '{key}'" # nosec: 608 + ) + attrs = req.json()["results"][0] + return attrs["jansConfDyn"] def transform_url(self, url): auth_server_url = os.environ.get("CN_AUTH_SERVER_URL", "") diff --git a/docker-jans-config-api/scripts/upgrade.py b/docker-jans-config-api/scripts/upgrade.py index 9c1d6c11ae4..0355e1e195b 100644 --- a/docker-jans-config-api/scripts/upgrade.py +++ b/docker-jans-config-api/scripts/upgrade.py @@ -6,7 +6,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.utils import PersistenceMapper @@ -189,53 +188,6 @@ def _transform_api_dynamic_config(conf): return conf, should_update -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - def search_entries(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - entries = self.client.search(key, filter_, attrs) - - return [ - 
Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - for entry in entries - ] - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -360,7 +312,6 @@ def search_entries(self, key, filter_="", attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -409,7 +360,7 @@ def update_client_redirect_uri(self): if f"https://{hostname}/admin" not in entry.attrs["jansRedirectURI"]["v"]: entry.attrs["jansRedirectURI"]["v"].append(f"https://{hostname}/admin") should_update = True - else: # ldap, couchbase, and spanner + else: # likely couchbase or spanner if f"https://{hostname}/admin" not in entry.attrs["jansRedirectURI"]: entry.attrs["jansRedirectURI"].append(f"https://{hostname}/admin") should_update = True diff --git a/docker-jans-config-api/scripts/wait.py b/docker-jans-config-api/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-config-api/scripts/wait.py +++ b/docker-jans-config-api/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-config-api/templates/jans-config-api/log4j2.xml b/docker-jans-config-api/templates/jans-config-api/log4j2.xml index 379933a0e6a..144c904a6bc 100644 --- a/docker-jans-config-api/templates/jans-config-api/log4j2.xml +++ b/docker-jans-config-api/templates/jans-config-api/log4j2.xml @@ -46,16 +46,6 @@ - - - - - - - - - - @@ -82,21 +72,11 @@ - - -persistence - - - -persistence - - -persistence-duration - - - -persistence-duration @@ -107,11 +87,6 @@ - - -ldap-stats - - - -script diff --git a/docker-jans-config-api/templates/jans-ldap.properties b/docker-jans-config-api/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-config-api/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. 
Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-configurator/Dockerfile b/docker-jans-configurator/Dockerfile index 0239a54e429..08e6cc0a4b6 100644 --- a/docker-jans-configurator/Dockerfile +++ b/docker-jans-configurator/Dockerfile @@ -27,7 +27,7 @@ RUN mkdir -p /opt/jans/configurator/javalibs \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 RUN git clone --depth 500 --filter blob:none --no-checkout https://github.com/janssenproject/jans /tmp/jans \ && cd /tmp/jans \ diff --git a/docker-jans-configurator/README.md b/docker-jans-configurator/README.md index c4b1833318e..e9e9a2d7aab 100644 --- a/docker-jans-configurator/README.md +++ b/docker-jans-configurator/README.md @@ -87,8 +87,7 @@ For fresh installation, generate the initial configuration by creating `/path/to "orgName": "Gluu Inc." }, "_secret": { - "admin_password": "S3cr3t+pass", - "ldap_password": "S3cr3t+pass" + "admin_password": "S3cr3t+pass" } } ``` @@ -99,12 +98,11 @@ For fresh installation, generate the initial configuration by creating `/path/to - `auth_sig_keys`: space-separated key algorithm for signing (default to `RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512`) - `auth_enc_keys`: space-separated key algorithm for encryption (default to `RSA1_5 RSA-OAEP`) - - `optional_scopes`: list of optional scopes (as JSON string) that will be used (supported scopes are `ldap`, `couchbase`, `redis`, `sql`; default to empty list) + - `optional_scopes`: list of optional scopes (as JSON string) that will be used (supported scopes are `couchbase`, `redis`, `sql`; default to empty list) - `init_keys_exp`: the initial keys expiration time in hours (default to `48`; extra 1 hour will be added for hard limit) 2. 
`_secret`: - - `ldap_password`: user's password to access LDAP database (only used if `optional_scopes` list contains `ldap` scope) - `sql_password`: user's password to access SQL database (only used if `optional_scopes` list contains `sql` scope) - `couchbase_password`: user's password to access Couchbase database (only used if `optional_scopes` list contains `couchbase` scope) - `couchbase_superuser_password`: superusers password to access Couchbase database (only used if `optional_scopes` list contains `couchbase` scope) @@ -147,7 +145,7 @@ To generate initial configmaps and secrets: name: config-generate-params containers: - name: configurator-load - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION volumeMounts: - mountPath: /app/db/configuration.json name: config-generate-params @@ -185,7 +183,7 @@ To restore configuration from `configuration.out.json` file: name: config-dump-params containers: - name: configurator-load - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION volumeMounts: - mountPath: /app/db/configuration.out.json name: config-dump-params @@ -211,7 +209,7 @@ spec: restartPolicy: Never containers: - name: configurator-dump-job - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION command: - /bin/sh - -c diff --git a/docker-jans-configurator/scripts/bootstrap.py b/docker-jans-configurator/scripts/bootstrap.py index 8193253bfd6..462502dc26e 100644 --- a/docker-jans-configurator/scripts/bootstrap.py +++ b/docker-jans-configurator/scripts/bootstrap.py @@ -17,7 +17,6 @@ from jans.pycloudlib import wait_for from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_superuser_password -from jans.pycloudlib.persistence.ldap import sync_ldap_password from jans.pycloudlib.persistence.spanner import sync_google_credentials from jans.pycloudlib.persistence.sql import sync_sql_password from jans.pycloudlib.persistence.utils import PersistenceMapper @@ -196,17 +195,6 @@ def transform_base_ctx(self): opt_scopes = self.configmap_params["optional_scopes"] self.set_config("optional_scopes", opt_scopes, False) - def transform_ldap_ctx(self): - encoded_salt = self.get_secret("encoded_salt") - - self.set_secret( - "encoded_ox_ldap_pw", - partial(encode_text, self.secret_params.get("ldap_password", ""), encoded_salt), - ) - - self.set_config("ldap_binddn", "cn=Directory Manager") - self.set_config("ldap_site_binddn", "cn=Directory Manager") - def transform_redis_ctx(self): self.set_secret("redis_password", self.secret_params.get("redis_password", "")) @@ -373,9 +361,6 @@ def transform(self): self.transform_auth_ctx() self.transform_web_ctx() - if "ldap" in opt_scopes: - self.transform_ldap_ctx() - if "redis" in opt_scopes: self.transform_redis_ctx() @@ -541,8 +526,6 @@ def load(configuration_file, dump_file): backend_type = mapper.mapping["default"] match backend_type: - case "ldap": - sync_ldap_password(manager) case "sql": sync_sql_password(manager) case "couchbase": @@ -592,8 +575,6 @@ def dump(dump_file): backend_type = mapper.mapping["default"] match backend_type: - case "ldap": - sync_ldap_password(manager) case "sql": sync_sql_password(manager) case "couchbase": diff --git a/docker-jans-fido2/Dockerfile b/docker-jans-fido2/Dockerfile index 924aa017197..b878d52a146 100644 --- a/docker-jans-fido2/Dockerfile +++ 
b/docker-jans-fido2/Dockerfile @@ -61,7 +61,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-fido2/webapps \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -178,10 +178,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-fido2/README.md b/docker-jans-fido2/README.md index e460b718f1b..d8339fc980d 100644 --- a/docker-jans-fido2/README.md +++ b/docker-jans-fido2/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -137,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -152,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-fido2/scripts/bootstrap.py b/docker-jans-fido2/scripts/bootstrap.py index 53947c9ca25..2760f3c2d14 100644 --- a/docker-jans-fido2/scripts/bootstrap.py +++ b/docker-jans-fido2/scripts/bootstrap.py @@ -13,10 +13,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -40,7 +36,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -53,17 +49,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -195,7 +180,6 @@ def __init__(self, manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, diff --git a/docker-jans-fido2/scripts/upgrade.py b/docker-jans-fido2/scripts/upgrade.py index 8de510c537b..f336cb1a779 100644 --- a/docker-jans-fido2/scripts/upgrade.py +++ b/docker-jans-fido2/scripts/upgrade.py @@ -8,7 +8,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import doc_id_from_dn from jans.pycloudlib.persistence.sql import SqlClient @@ -79,44 +78,6 @@ def _transform_fido2_error_config(conf): return conf, should_update -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = 
self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -211,7 +172,6 @@ def modify_entry(self, key, attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } diff --git a/docker-jans-fido2/scripts/wait.py b/docker-jans-fido2/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-fido2/scripts/wait.py +++ b/docker-jans-fido2/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-fido2/templates/jans-fido2/log4j2.xml b/docker-jans-fido2/templates/jans-fido2/log4j2.xml index 2d3a24fa1ba..8f6a08abac1 100644 --- a/docker-jans-fido2/templates/jans-fido2/log4j2.xml +++ b/docker-jans-fido2/templates/jans-fido2/log4j2.xml @@ -64,21 +64,11 @@ - - -persistence - - - -persistence - - -persistence-duration - - - -persistence-duration diff --git a/docker-jans-fido2/templates/jans-ldap.properties b/docker-jans-fido2/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-fido2/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. 
Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-kc-scheduler/Dockerfile b/docker-jans-kc-scheduler/Dockerfile index 0514452b9e4..f4791f2c46d 100644 --- a/docker-jans-kc-scheduler/Dockerfile +++ b/docker-jans-kc-scheduler/Dockerfile @@ -38,7 +38,7 @@ RUN wget -q https://repo1.maven.org/maven2/org/codehaus/janino/janino/3.1.9/jani # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 # note that as we're pulling from a monorepo (with multiple project in it) # we are using partial-clone and sparse-checkout to get the assets diff --git a/docker-jans-keycloak-link/Dockerfile b/docker-jans-keycloak-link/Dockerfile index d634a7370c0..e878dcdd1f0 100644 --- a/docker-jans-keycloak-link/Dockerfile +++ b/docker-jans-keycloak-link/Dockerfile @@ -61,7 +61,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-keycloak-link/webapps \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -167,10 +167,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-keycloak-link/README.md b/docker-jans-keycloak-link/README.md index eb97309933f..28135d5f1d1 100644 --- a/docker-jans-keycloak-link/README.md +++ b/docker-jans-keycloak-link/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -116,8 +114,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO" } @@ -139,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -154,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-keycloak-link/scripts/bootstrap.py b/docker-jans-keycloak-link/scripts/bootstrap.py index 47c522ef899..c0fe359a54b 100644 --- a/docker-jans-keycloak-link/scripts/bootstrap.py +++ b/docker-jans-keycloak-link/scripts/bootstrap.py @@ -15,10 +15,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -48,7 +44,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -61,17 +57,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -134,8 +119,6 @@ def configure_logging(): "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "log_prefix": "", @@ -179,7 +162,6 @@ def configure_logging(): "keycloak_link_log_target": "FILE", "persistence_log_target": "JANS_KEYCLOAK_PERSISTENCE_FILE", "persistence_duration_log_target": "JANS_KEYCLOAK_PERSISTENCE_DURATION_FILE", - "ldap_stats_log_target": "JANS_KEYCLOAK_PERSISTENCE_LDAP_STATISTICS_FILE", "script_log_target": "JANS_KEYCLOAK_SCRIPT_LOG_FILE", } for key, value in file_aliases.items(): @@ -206,7 +188,6 @@ def __init__(self, manager: Manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, @@ -223,6 +204,7 @@ def __init__(self, manager: Manager) -> None: @cached_property def ctx(self) -> dict[str, _t.Any]: ctx = { + # @TODO: double check if we can safely remove these contexts "ldap_binddn": self.manager.config.get("ldap_binddn"), "ldap_hostname": self.manager.config.get("ldap_init_host"), "ldaps_port": self.manager.config.get("ldap_init_port"), diff --git a/docker-jans-keycloak-link/scripts/upgrade.py 
b/docker-jans-keycloak-link/scripts/upgrade.py index d19a4f8499b..8dc9f44b92f 100644 --- a/docker-jans-keycloak-link/scripts/upgrade.py +++ b/docker-jans-keycloak-link/scripts/upgrade.py @@ -6,7 +6,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import doc_id_from_dn from jans.pycloudlib.persistence.sql import SqlClient @@ -20,44 +19,6 @@ Entry = namedtuple("Entry", ["id", "attrs"]) -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -152,7 +113,6 @@ def modify_entry(self, key, attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -170,7 +130,6 @@ def invoke(self): self.enable_ext_script() def enable_ext_script(self): - # default to ldap persistence kwargs = {} script_id = "inum=13D3-E7AD,ou=scripts,o=jans" @@ -178,7 +137,7 @@ def enable_ext_script(self): kwargs = {"table_name": "jansCustomScr"} script_id = doc_id_from_dn(script_id) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} script_id = id_from_dn(script_id) diff --git a/docker-jans-keycloak-link/scripts/wait.py b/docker-jans-keycloak-link/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-keycloak-link/scripts/wait.py +++ b/docker-jans-keycloak-link/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-keycloak-link/templates/jans-keycloak-link/log4j2.xml b/docker-jans-keycloak-link/templates/jans-keycloak-link/log4j2.xml index dfe6b7acd86..f8290d3f13f 100644 --- a/docker-jans-keycloak-link/templates/jans-keycloak-link/log4j2.xml +++ b/docker-jans-keycloak-link/templates/jans-keycloak-link/log4j2.xml @@ -37,16 +37,6 @@ - - - - - - - - - - @@ -67,22 +57,13 @@ -persistence - + - - -persistence - - - + -persistence - - -persistence-duration - - - -persistence-duration @@ -93,11 +74,6 @@ - - -ldap-stats - - - -script diff --git a/docker-jans-keycloak-link/templates/jans-ldap.properties b/docker-jans-keycloak-link/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- 
a/docker-jans-keycloak-link/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-link/Dockerfile b/docker-jans-link/Dockerfile index 3168cc24135..1052c2d5745 100644 --- a/docker-jans-link/Dockerfile +++ b/docker-jans-link/Dockerfile @@ -61,7 +61,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-link/webapps \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -167,10 +167,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-link/README.md b/docker-jans-link/README.md index 8e4dfc278f5..607737f0f7d 100644 --- a/docker-jans-link/README.md +++ b/docker-jans-link/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). 
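
Editorial note: the `bootstrap.py` hunks in this changeset all follow the same pattern: derive the set of persistence "groups" from `CN_PERSISTENCE_TYPE` (expanding `hybrid` via `CN_HYBRID_MAPPING`), then render only the properties files those groups need, with the LDAP branch now removed. The sketch below is a simplified, assumption-labeled rendering of that flow; the group-derivation helper and the rendered targets are illustrative, not the actual `jans.pycloudlib` API.

```python
import json
import os


def persistence_groups(persistence_type: str) -> set[str]:
    """Illustrative only: expand 'hybrid' into the backends named in CN_HYBRID_MAPPING."""
    if persistence_type != "hybrid":
        return {persistence_type}
    mapping = json.loads(os.environ.get("CN_HYBRID_MAPPING", "{}"))
    return set(mapping.values())


def main() -> None:
    groups = persistence_groups(os.environ.get("CN_PERSISTENCE_TYPE", "sql"))
    # With the LDAP branch removed, only these backends can be rendered.
    if "sql" in groups:
        print("render SQL properties")
    if "couchbase" in groups:
        print("render Couchbase properties and sync its truststore")
    if "spanner" in groups:
        print("render Spanner properties and sync Google credentials")


if __name__ == "__main__":
    main()
```
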
@@ -116,8 +114,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO" } @@ -139,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -154,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-link/scripts/bootstrap.py b/docker-jans-link/scripts/bootstrap.py index 040ab75a966..bc2b30f9d74 100644 --- a/docker-jans-link/scripts/bootstrap.py +++ b/docker-jans-link/scripts/bootstrap.py @@ -15,10 +15,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -49,7 +45,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -62,17 +58,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -135,8 +120,6 @@ def configure_logging(): "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "log_prefix": "", @@ -180,7 +163,6 @@ def configure_logging(): "link_log_target": "FILE", "persistence_log_target": "JANS_LINK_PERSISTENCE_FILE", "persistence_duration_log_target": "JANS_LINK_PERSISTENCE_DURATION_FILE", - "ldap_stats_log_target": "JANS_LINK_PERSISTENCE_LDAP_STATISTICS_FILE", "script_log_target": "JANS_LINK_SCRIPT_LOG_FILE", } for key, value in file_aliases.items(): @@ -207,7 +189,6 @@ def __init__(self, manager: Manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, @@ -223,6 +204,7 @@ def __init__(self, manager: Manager) -> None: @cached_property def 
ctx(self) -> dict[str, _t.Any]: + # @TODO: remove ldap-related contexts after https://github.com/JanssenProject/jans/issues/9698 implemented host, port = os.environ.get("CN_LDAP_URL", "localhost:1636").split(":") password = self.manager.secret.get("encoded_ox_ldap_pw") diff --git a/docker-jans-link/scripts/upgrade.py b/docker-jans-link/scripts/upgrade.py index 0cb17cacdc7..4078d711873 100644 --- a/docker-jans-link/scripts/upgrade.py +++ b/docker-jans-link/scripts/upgrade.py @@ -6,7 +6,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import doc_id_from_dn from jans.pycloudlib.persistence.sql import SqlClient @@ -20,44 +19,6 @@ Entry = namedtuple("Entry", ["id", "attrs"]) -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -152,7 +113,6 @@ def modify_entry(self, key, attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -170,7 +130,6 @@ def invoke(self): self.enable_ext_script() def enable_ext_script(self): - # default to ldap persistence kwargs = {} script_id = "inum=13D3-E7AD,ou=scripts,o=jans" @@ -178,7 +137,7 @@ def enable_ext_script(self): kwargs = {"table_name": "jansCustomScr"} script_id = doc_id_from_dn(script_id) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} script_id = id_from_dn(script_id) diff --git a/docker-jans-link/scripts/wait.py b/docker-jans-link/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-link/scripts/wait.py +++ b/docker-jans-link/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-link/templates/jans-ldap.properties b/docker-jans-link/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-link/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - 
-maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-link/templates/jans-link/log4j2.xml b/docker-jans-link/templates/jans-link/log4j2.xml index 78812925724..739d4d525f9 100644 --- a/docker-jans-link/templates/jans-link/log4j2.xml +++ b/docker-jans-link/templates/jans-link/log4j2.xml @@ -37,16 +37,6 @@ - - - - - - - - - - @@ -68,20 +58,11 @@ - - -persistence - - -persistence - - -persistence-duration - - - -persistence-duration @@ -92,11 +73,6 @@ - - -ldap-stats - - - -script diff --git a/docker-jans-monolith/Dockerfile b/docker-jans-monolith/Dockerfile index 5974ec08de5..c92c12bcd03 100644 --- a/docker-jans-monolith/Dockerfile +++ b/docker-jans-monolith/Dockerfile @@ -15,6 +15,7 @@ RUN echo 'APT::Install-Suggests "0";' >> /etc/apt/apt.conf.d/00-docker \ && echo 'APT::Install-Recommends "0";' >> /etc/apt/apt.conf.d/00-docker # Prevent prompt errors during package installation +# @TODO: remove python-ldap3 after ldap support removed RUN DEBIAN_FRONTEND=noninteractive \ apt-get update \ && apt-get install -y python3 tini curl ca-certificates dbus systemd iproute2 gpg python3-pip python3-dev libpq-dev gcc python3-psycopg2 python3-ldap3 git maven \ @@ -72,7 +73,6 @@ ENV CN_HOSTNAME="demoexample.jans.io" \ TEST_CLIENT_TRUSTED="true" \ CN_INSTALL_COUCHBASE="false" \ CN_INSTALL_SPANNER="false" \ - CN_INSTALL_LDAP="false" \ CN_INSTALL_MYSQL="false" \ CN_INSTALL_PGSQL="false" \ CN_INSTALL_CONFIG_API="true" \ diff --git a/docker-jans-monolith/README.md b/docker-jans-monolith/README.md index 8bf7800a2a4..88b87cf419c 100644 --- a/docker-jans-monolith/README.md +++ b/docker-jans-monolith/README.md @@ -33,7 +33,6 @@ Installation depends on the set of environment variables shown below. These envi | `CN_CITY` | City. Used for ssl cert generation. | `Austin` | | `CN_STATE` | State. Used for ssl cert generation | `TX` | | `CN_COUNTRY` | Country. Used for ssl cert generation. | `US` | -| `CN_INSTALL_LDAP` | **NOT SUPPORRTED YET** | `false` | | `CN_INSTALL_MYSQL` | Install jans with mysql as the backend | `false` | | `CN_INSTALL_PGSQL` | Install jans with Postgres as the backend | `false` | | `CN_INSTALL_CONFIG_API` | Installs the Config API service. | `true` | @@ -51,14 +50,15 @@ Installation depends on the set of environment variables shown below. These envi ## How to run -Download the compose file +Download the compose file of your chosen persistence from mysql or postgres ```bash wget https://raw.githubusercontent.com/JanssenProject/jans/main/docker-jans-monolith/jans-mysql-compose.yml +wget https://raw.githubusercontent.com/JanssenProject/jans/main/docker-jans-monolith/jans-postgres-compose.yml ``` -Download the the script files +Download the script files ```bash @@ -74,7 +74,7 @@ This docker compose file runs two containers, the janssen monolith container and To start the containers. 
```bash -./up.sh +./up.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. ``` To view the containers running @@ -87,19 +87,22 @@ docker compose -f jans-mysql-compose.yml ps To stop the containers. ```bash -./down.sh +./down.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. ``` ## Configure Janssen Server -```bash - -docker compose -f jans-mysql-compose.yml exec jans sh #This opens a bash terminal in the running container +1. Access the Docker container shell using: + ```bash -/opt/jans/jans-cli/config-cli.py #configure using the config-cli + docker compose -f jans-mysql-compose.yml exec jans /bin/bash #This opens a bash terminal in the running container + ``` +2. You can grab `client_id` and `client_pw` (secret) pairs and other values from `setup.properties` or `/opt/jans/jans-setup/setup.properties.last`. -/opt/jans/jans-cli/scim-cli.py #configure using the scim-cli -``` +3. Use the CLI tools located under `/opt/jans/jans-cli/` to configure the Janssen Server as needed. For example, you can run the [TUI](https://docs.jans.io/head/admin/config-guide/config-tools/jans-tui/): + ```bash + python3 /opt/jans/jans-cli/config-cli-tui.py + ``` ## Access endpoints externally @@ -117,5 +120,5 @@ After adding the record you can hit endpoints such as https://demoexample.jans.i Remove setup and volumes ```bash -./clean.sh +./clean.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. ``` diff --git a/docker-jans-monolith/clean.sh b/docker-jans-monolith/clean.sh index 9b87ee81eef..9e63349714c 100644 --- a/docker-jans-monolith/clean.sh +++ b/docker-jans-monolith/clean.sh @@ -5,7 +5,7 @@ if [ -z "$1" ]; then yaml="jans-mysql-compose.yml" else case "$1" in - mysql|ldap|postgres|couchbase|spanner) + mysql|postgres|couchbase|spanner) yaml="jans-${1}-compose.yml" ;; *) @@ -59,4 +59,4 @@ if docker image inspect ${JANSSEN_IMAGE} &> /dev/null; then docker image rm ${JANSSEN_IMAGE} fi -docker image rm "ghcr.io/janssenproject/jans/monolith:${JANSSEN_VERSION}"; \ No newline at end of file +docker image rm "ghcr.io/janssenproject/jans/monolith:${JANSSEN_VERSION}"; diff --git a/docker-jans-monolith/down.sh b/docker-jans-monolith/down.sh index 800b6870ee7..2e44d4c0b4f 100644 --- a/docker-jans-monolith/down.sh +++ b/docker-jans-monolith/down.sh @@ -5,7 +5,7 @@ if [ -z "$1" ]; then yaml="jans-mysql-compose.yml" else case "$1" in - mysql|ldap|postgres|couchbase|spanner) + mysql|postgres|couchbase|spanner) yaml="jans-${1}-compose.yml" ;; *) @@ -46,10 +46,3 @@ if !
docker image inspect ${JANSSEN_IMAGE} &> /dev/null; then fi export JANSSEN_IMAGE docker compose -f ${yaml} down - - - - - - - diff --git a/docker-jans-monolith/jans-ldap-compose.yml b/docker-jans-monolith/jans-ldap-compose.yml deleted file mode 100644 index a6296b8a541..00000000000 --- a/docker-jans-monolith/jans-ldap-compose.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: "3.7" -services: - jans: - image: ${JANSSEN_IMAGE:-ghcr.io/janssenproject/jans/monolith:1.1.6_dev} - restart: always - ports: - - "443:443" - - "80:80" - networks: - - cloud_bridge - environment: - #- CN_HOSTNAME=demoexample.jans.io - - CN_ADMIN_PASS=1t5Fin3#security - - CN_ORG_NAME=Janssen - - CN_EMAIL=support@jans.io - - CN_CITY=Austin - - CN_STATE=TX - - CN_COUNTRY=US - - CN_INSTALL_LDAP=true - - CN_INSTALL_CONFIG_API=true - - CN_INSTALL_SCIM=true - - CN_INSTALL_FIDO2=true - - CN_INSTALL_CASA=true - - CN_INSTALL_KC_LINK=true - - CN_INSTALL_LOCK=true - - CN_INSTALL_SAML=false - - CN_INSTALL_OPA=true - - TEST_CLIENT_ID=9876baac-de39-4c23-8a78-674b59df8c09 - - TEST_CLIENT_TRUSTED=true - - TEST_CLIENT_SECRET=1t5Fin3#security - volumes: - - ./jans-auth-custom:/opt/jans/jetty/jans-auth/custom - - ./jans-config-api-custom:/opt/jans/jetty/jans-config-api/custom - - ./jans-fido2-custom:/opt/jans/jetty/jans-fido2/custom - - ./jans-scim-custom:/opt/jans/jetty/jans-scim/custom - - ./jans-auth-log:/opt/jans/jetty/jans-auth/logs - - ./jans-config-api-log:/opt/jans/jetty/jans-config-api/logs - - ./jans-scim-log:/opt/jans/jetty/jans-scim/logs - - ./jans-fido2-log:/opt/jans/jetty/jans-fido2/log -volumes: - db-data: -networks: - cloud_bridge: - driver: bridge diff --git a/docker-jans-monolith/scripts/entrypoint.sh b/docker-jans-monolith/scripts/entrypoint.sh index 93bec5c516d..b885b7a832c 100644 --- a/docker-jans-monolith/scripts/entrypoint.sh +++ b/docker-jans-monolith/scripts/entrypoint.sh @@ -11,8 +11,7 @@ set -e # CN_CITY: i.e Austin # CN_STATE: i.e TX # CN_COUNTRY: i.e US -# CN_ADMIN_PASS: LDAP or MYSQL and ADMIN user password -# CN_INSTALL_LDAP +# CN_ADMIN_PASS: MYSQL and ADMIN user password # CN_INSTALL_CONFIG_API # CN_INSTALL_SCIM # RDBMS_DATABASE @@ -33,8 +32,8 @@ install_jans() { echo "state=${CN_STATE}" | tee -a setup.properties > /dev/null echo "countryCode=${CN_COUNTRY}" | tee -a setup.properties > /dev/null # shellcheck disable=SC2016 - echo "ldapPass=${CN_ADMIN_PASS}" | tee -a setup.properties > /dev/null - echo "installLdap=""$([[ ${CN_INSTALL_LDAP} == true ]] && echo True || echo False)" | tee -a setup.properties > /dev/null + echo "ldapPass=${CN_ADMIN_PASS}" | tee -a setup.properties > /dev/null # @TODO: remove ldapPass after ldap support removed + echo "installLdap=False" | tee -a setup.properties > /dev/null # @TODO: remove installLdap after ldap support removed echo "install_config_api=""$([[ ${CN_INSTALL_CONFIG_API} == true ]] && echo True || echo False)" | tee -a setup.properties > /dev/null echo "install_scim_server=""$([[ ${CN_INSTALL_SCIM} == true ]] && echo True || echo False)" | tee -a setup.properties > /dev/null echo "installFido2=""$([[ ${CN_INSTALL_FIDO2} == true ]] && echo True || echo False)" | tee -a setup.properties > /dev/null diff --git a/docker-jans-monolith/up.sh b/docker-jans-monolith/up.sh index c8f8201c88d..5b3ab248919 100644 --- a/docker-jans-monolith/up.sh +++ b/docker-jans-monolith/up.sh @@ -5,7 +5,7 @@ if [ -z "$1" ]; then yaml="jans-mysql-compose.yml" else case "$1" in - mysql|ldap|postgres|couchbase|spanner) + mysql|postgres|couchbase|spanner) yaml="jans-${1}-compose.yml" ;; *) @@ -45,8 
+45,3 @@ fi export JANSSEN_IMAGE docker compose -f ${yaml} up -d - - - - - diff --git a/docker-jans-persistence-loader/Dockerfile b/docker-jans-persistence-loader/Dockerfile index 2252511756f..3643ef9631d 100644 --- a/docker-jans-persistence-loader/Dockerfile +++ b/docker-jans-persistence-loader/Dockerfile @@ -16,7 +16,7 @@ RUN apk update \ # =========== # janssenproject/jans SHA commit -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup ARG JANS_SCRIPT_CATALOG_DIR=docs/script-catalog ARG JANS_CONFIG_API_RESOURCES=jans-config-api/server/src/main/resources @@ -146,6 +146,7 @@ ENV CN_CACHE_TYPE=NATIVE_PERSISTENCE \ CN_WAIT_SLEEP_DURATION=10 \ CN_SCIM_ENABLED=false \ CN_PERSISTENCE_SKIP_INITIALIZED=false \ + CN_PERSISTENCE_IMPORT_BUILTIN_LDIF=true \ CN_DOCUMENT_STORE_TYPE=DB \ CN_JACKRABBIT_RMI_URL="" \ CN_JACKRABBIT_URL=http://localhost:8080 \ diff --git a/docker-jans-persistence-loader/README.md b/docker-jans-persistence-loader/README.md index c8568dab968..e04d9e89ab2 100644 --- a/docker-jans-persistence-loader/README.md +++ b/docker-jans-persistence-loader/README.md @@ -8,7 +8,7 @@ tags: ## Overview -Persistence is a special container to load initial data for LDAP or Couchbase. +Persistence is a special container to load initial data for supported persistence. ## Versions @@ -54,12 +54,10 @@ The following environment variables are supported by the container: - `CN_REDIS_URL`: URL of Redis server, format is host:port (optional; default to `localhost:6379`). - `CN_REDIS_TYPE`: Redis service type, either `STANDALONE` or `CLUSTER` (optional; default to `STANDALONE`). - `CN_MEMCACHED_URL`: URL of Memcache server, format is host:port (optional; default to `localhost:11211`). -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (`couchbase`, `sql`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. - `CN_PERSISTENCE_SKIP_INITIALIZED`: skip initialization if backend already initialized (default to `false`). - `CN_PERSISTENCE_UPDATE_AUTH_DYNAMIC_CONFIG`: Whether to allow automatic updates of `jans-auth` configuration (default to `true`). -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_SUPERUSER`: Superuser of Couchbase server (default to empty-string). @@ -103,12 +101,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -118,7 +116,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-persistence-loader/scripts/bootstrap.py b/docker-jans-persistence-loader/scripts/bootstrap.py index 7e0ddc21f7e..6b922e94f38 100644 --- a/docker-jans-persistence-loader/scripts/bootstrap.py +++ b/docker-jans-persistence-loader/scripts/bootstrap.py @@ -4,13 +4,11 @@ from jans.pycloudlib import wait_for_persistence_conn from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_superuser_password -from jans.pycloudlib.persistence.ldap import sync_ldap_password from jans.pycloudlib.persistence.spanner import sync_google_credentials from jans.pycloudlib.persistence.sql import sync_sql_password from jans.pycloudlib.persistence.utils import PersistenceMapper from hybrid_setup import HybridBackend -from ldap_setup import LDAPBackend from couchbase_setup import CouchbaseBackend from sql_setup import SQLBackend from spanner_setup import SpannerBackend @@ -21,7 +19,6 @@ def main(): manager = get_manager() backend_classes = { - "ldap": LDAPBackend, "couchbase": CouchbaseBackend, "hybrid": HybridBackend, "sql": SQLBackend, @@ -29,16 +26,13 @@ def main(): } # initialize the backend - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") backend_cls = backend_classes.get(persistence_type) if not backend_cls: raise ValueError("Unsupported persistence backend") persistence_groups = PersistenceMapper().groups().keys() - if "ldap" in persistence_groups: - sync_ldap_password(manager) - if "sql" in persistence_groups: sync_sql_password(manager) diff --git a/docker-jans-persistence-loader/scripts/hybrid_setup.py b/docker-jans-persistence-loader/scripts/hybrid_setup.py index e99cd645f3e..3ddd284bfc9 100644 --- a/docker-jans-persistence-loader/scripts/hybrid_setup.py +++ b/docker-jans-persistence-loader/scripts/hybrid_setup.py @@ -1,13 +1,11 @@ from jans.pycloudlib.persistence.utils import PersistenceMapper -from ldap_setup import LDAPBackend from couchbase_setup import CouchbaseBackend from sql_setup import SQLBackend from spanner_setup import SpannerBackend _backend_classes = { - "ldap": LDAPBackend, "couchbase": CouchbaseBackend, "sql": SQLBackend, "spanner": SpannerBackend, diff --git a/docker-jans-persistence-loader/scripts/ldap_setup.py b/docker-jans-persistence-loader/scripts/ldap_setup.py deleted file mode 100644 index 0054fc8a583..00000000000 --- a/docker-jans-persistence-loader/scripts/ldap_setup.py +++ /dev/null @@ -1,88 +0,0 @@ -import json -import logging.config -import time -from pathlib import Path - -from ldap3.core.exceptions import LDAPSessionTerminatedByServerError -from ldap3.core.exceptions import LDAPSocketOpenError - -from jans.pycloudlib.persistence.ldap import LdapClient - -from settings import LOGGING_CONFIG -from utils import prepare_template_ctx -from hooks import get_ldif_mappings_hook - -logging.config.dictConfig(LOGGING_CONFIG) -logger = logging.getLogger("persistence-loader") - - -class LDAPBackend: - def __init__(self, manager): - self.client = LdapClient(manager) - self.manager = manager - - def check_indexes(self, mapping): - if mapping == "site": - index_name = "jansScrTyp" - backend = "site" - # elif mapping == "statistic": - # index_name = "jansMetricTyp" - # backend = "metric" - else: - index_name = "del" - backend = "userRoot" - - dn = 
"ds-cfg-attribute={},cn=Index,ds-cfg-backend-id={}," \ - "cn=Backends,cn=config".format(index_name, backend) - - max_wait_time = 300 - sleep_duration = 10 - - for _ in range(0, max_wait_time, sleep_duration): - try: - if self.client.get(dn, attributes=["1.1"]): - return - reason = f"Index {dn} is not ready" - except (LDAPSessionTerminatedByServerError, LDAPSocketOpenError) as exc: - reason = exc - - logger.warning("Waiting for index to be ready; reason={}; " - "retrying in {} seconds".format(reason, sleep_duration)) - time.sleep(sleep_duration) - - def import_builtin_ldif(self, ctx): - optional_scopes = json.loads( - self.manager.config.get("optional_scopes", "[]") - ) - ldif_mappings = get_ldif_mappings_hook("ldap", optional_scopes) - - # ensure base.ldif (contains base RDNs) is in list of ldif files - if ldif_mappings and "default" not in ldif_mappings: - # insert base.ldif into the first mapping found - mapping = next(iter(ldif_mappings)) - ldif_mappings[mapping].insert(0, "base.ldif") - - for mapping, files in ldif_mappings.items(): - self.check_indexes(mapping) - - for file_ in files: - self._import_ldif(f"/app/templates/{file_}", ctx) - - def initialize(self): - ctx = prepare_template_ctx(self.manager) - - logger.info("Importing builtin LDIF files") - self.import_builtin_ldif(ctx) - - logger.info("Importing custom LDIF files (if any)") - self.import_custom_ldif(ctx) - - def import_custom_ldif(self, ctx): - custom_dir = Path("/app/custom_ldif") - - for file_ in custom_dir.rglob("*.ldif"): - self._import_ldif(file_, ctx) - - def _import_ldif(self, path, ctx): - logger.info(f"Importing {path} file") - self.client.create_from_ldif(path, ctx) diff --git a/docker-jans-persistence-loader/scripts/sql_setup.py b/docker-jans-persistence-loader/scripts/sql_setup.py index 17d8d8cf359..d3846fb1fbc 100644 --- a/docker-jans-persistence-loader/scripts/sql_setup.py +++ b/docker-jans-persistence-loader/scripts/sql_setup.py @@ -1,5 +1,6 @@ import json import logging.config +import os import re from collections import defaultdict from string import Template @@ -9,6 +10,7 @@ from sqlalchemy.exc import OperationalError from jans.pycloudlib.persistence.sql import SqlClient +from jans.pycloudlib.utils import as_boolean from settings import LOGGING_CONFIG from utils import prepare_template_ctx @@ -229,8 +231,14 @@ def initialize(self): ctx = prepare_template_ctx(self.manager) - logger.info("Importing builtin LDIF files") - self.import_builtin_ldif(ctx) + if as_boolean(os.environ.get("CN_PERSISTENCE_IMPORT_BUILTIN_LDIF", "true")): + logger.info("Importing builtin LDIF files") + self.import_builtin_ldif(ctx) + else: + logger.warning( + "The builtin LDIF files will not be imported as the feature is disabled. 
" + "To enable the feature, set the environment variable CN_PERSISTENCE_IMPORT_BUILTIN_LDIF=true" + ) logger.info("Importing custom LDIF files (if any)") self.import_custom_ldif(ctx) @@ -411,11 +419,11 @@ def import_custom_ldif(self, ctx): custom_dir = Path("/app/custom_ldif") for file_ in custom_dir.rglob("*.ldif"): - self._import_ldif(file_, ctx) + self._import_ldif(file_, ctx, self.safe_column_mapping) - def _import_ldif(self, path, ctx): + def _import_ldif(self, path, ctx, transform_column_mapping=None): logger.info(f"Importing {path} file") - self.client.create_from_ldif(path, ctx) + self.client.create_from_ldif(path, ctx, transform_column_mapping) def table_mapping_from_schema(self): schemas = {} @@ -456,3 +464,8 @@ def table_mapping_from_schema(self): data_type = self.get_data_type(attr, table) table_mapping[table].update({attr: data_type}) return table_mapping + + def safe_column_mapping(self, table_name, column_mapping): + if table_name == "jansToken" and "jansUsrId" in column_mapping: + column_mapping["usrId"] = column_mapping.pop("jansUsrId", "") + return column_mapping diff --git a/docker-jans-persistence-loader/scripts/upgrade.py b/docker-jans-persistence-loader/scripts/upgrade.py index ccad36c209a..6d95455c8ca 100644 --- a/docker-jans-persistence-loader/scripts/upgrade.py +++ b/docker-jans-persistence-loader/scripts/upgrade.py @@ -8,7 +8,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import SqlClient from jans.pycloudlib.persistence.sql import doc_id_from_dn @@ -106,47 +105,6 @@ def collect_claim_names(ldif_file="/app/templates/attributes.ldif"): return rows -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - def format_attrs(attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - def delete_entry(self, key, **kwargs): - return self.client.delete(key) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -274,7 +232,6 @@ def delete_entry(self, key, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -313,7 +270,6 @@ def invoke(self): self.update_config() def update_scripts_entries(self): - # default to ldap persistence kwargs = {} scim_id = JANS_SCIM_SCRIPT_DN basic_id = JANS_BASIC_SCRIPT_DN @@ -385,7 +341,6 @@ def update_scripts_entries(self): self.backend.modify_entry(agama_entry.id, agama_entry.attrs, **kwargs) def update_auth_dynamic_config(self): - # default to ldap persistence kwargs = {} id_ = JANS_AUTH_CONFIG_DN @@ -415,7 +370,6 @@ def update_auth_dynamic_config(self): def update_attributes_entries(self): 
def _update_claim_names(): - # default to ldap persistence kwargs = {} rows = collect_claim_names() @@ -463,7 +417,6 @@ def _update_mobile_attr(): _update_mobile_attr() def update_scim_scopes_entries(self): - # default to ldap persistence kwargs = {} # add jansAttrs to SCIM users.read and users.write scopes @@ -485,7 +438,6 @@ def update_scim_scopes_entries(self): self.backend.modify_entry(entry.id, entry.attrs, **kwargs) def update_scopes_entries(self): - # default to ldap persistence kwargs = {} id_ = JANS_PROFILE_SCOPE_DN @@ -506,7 +458,6 @@ def update_scopes_entries(self): self.backend.modify_entry(entry.id, attrs, **kwargs) def update_people_entries(self): - # default to ldap persistence admin_inum = self.manager.config.get("admin_inum") id_ = f"inum={admin_inum},ou=people,o=jans" @@ -541,7 +492,7 @@ def update_people_entries(self): elif self.user_backend.type == "spanner" and not entry.attrs[attr_name]: entry.attrs[attr_name] = [role_name] should_update = True - else: # ldap and couchbase + else: # couchbase if attr_name not in entry.attrs: entry.attrs[attr_name] = [role_name] should_update = True @@ -625,7 +576,6 @@ def update_admin_ui_config(self): self.backend.modify_entry(entry.id, entry.attrs, **kwargs) def update_auth_errors_config(self): - # default to ldap persistence kwargs = {} id_ = JANS_AUTH_CONFIG_DN @@ -662,7 +612,6 @@ def update_auth_errors_config(self): self.backend.modify_entry(entry.id, entry.attrs, **kwargs) def update_auth_static_config(self): - # default to ldap persistence kwargs = {} id_ = JANS_AUTH_CONFIG_DN @@ -830,45 +779,11 @@ def update_config(self): entry.attrs["jansDocStoreConf"]["documentStoreType"] = doc_store_type should_update = True - # set jansDbAuth if persistence is ldap - if self.backend.type == "ldap" and not entry.attrs.get("jansDbAuth"): - should_update = True - if should_update: if self.backend.type != "couchbase": entry.attrs["jansMessageConf"] = json.dumps(entry.attrs["jansMessageConf"]) entry.attrs["jansDocStoreConf"] = json.dumps(entry.attrs["jansDocStoreConf"]) - # set jansDbAuth if persistence is ldap - if self.backend.type == "ldap": - ldaps_port = self.manager.config.get("ldap_init_port") - ldap_hostname = self.manager.config.get("ldap_init_host") - ldap_binddn = self.manager.config.get("ldap_binddn") - ldap_use_ssl = str(as_boolean(os.environ.get("CN_LDAP_USE_SSL", True))).lower() - encoded_ox_ldap_pw = self.manager.secret.get("encoded_ox_ldap_pw") - - entry.attrs["jansDbAuth"] = json.dumps({ - "type": "auth", - "name": None, - "level": 0, - "priority": 1, - "enabled": False, - "version": 0, - "config": { - "configId": "auth_ldap_server", - "servers": [f"{ldap_hostname}:{ldaps_port}"], - "maxConnections": 1000, - "bindDN": ldap_binddn, - "bindPassword": encoded_ox_ldap_pw, - "useSSL": ldap_use_ssl, - "baseDNs": ["ou=people,o=jans"], - "primaryKey": "uid", - "localPrimaryKey": "uid", - "useAnonymousBind": False, - "enabled": False, - }, - }) - revision = entry.attrs.get("jansRevision") or 1 entry.attrs["jansRevision"] = revision + 1 self.backend.modify_entry(entry.id, entry.attrs, **kwargs) @@ -894,7 +809,7 @@ def _transform_message_config(conf): should_update = False provider_type = os.environ.get("CN_MESSAGE_TYPE", "DISABLED") - if os.environ.get("CN_PERSISTENCE_TYPE", "ldap") == "sql" and os.environ.get("CN_SQL_DB_DIALECT", "mysql") in ("pgsql", "postgresql"): + if os.environ.get("CN_PERSISTENCE_TYPE", "sql") == "sql" and os.environ.get("CN_SQL_DB_DIALECT", "mysql") in ("pgsql", "postgresql"): pg_pw_encoded = encode_text( 
get_sql_password(manager), manager.secret.get("encoded_salt") diff --git a/docker-jans-persistence-loader/scripts/utils.py b/docker-jans-persistence-loader/scripts/utils.py index a69c217208f..53c00bc1cbf 100644 --- a/docker-jans-persistence-loader/scripts/utils.py +++ b/docker-jans-persistence-loader/scripts/utils.py @@ -94,11 +94,6 @@ def get_base_ctx(manager): "jca_pw": jca_pw, "jca_pw_encoded": jca_pw_encoded, - 'ldap_hostname': manager.config.get('ldap_init_host'), - 'ldaps_port': manager.config.get('ldap_init_port'), - 'ldap_binddn': manager.config.get('ldap_binddn'), - "ldap_use_ssl": str(as_boolean(os.environ.get("CN_LDAP_USE_SSL", True))).lower(), - 'encoded_ox_ldap_pw': manager.secret.get('encoded_ox_ldap_pw'), 'jetty_base': manager.config.get('jetty_base'), 'orgName': manager.config.get('orgName'), 'hostname': manager.config.get('hostname'), @@ -113,7 +108,6 @@ def get_base_ctx(manager): 'encoded_shib_jks_pw': manager.secret.get('encoded_shib_jks_pw'), 'shibboleth_version': manager.config.get('shibboleth_version'), 'idp3Folder': manager.config.get('idp3Folder'), - 'ldap_site_binddn': manager.config.get('ldap_site_binddn'), "jansScimEnabled": str(as_boolean(scim_enabled)).lower(), diff --git a/docker-jans-persistence-loader/scripts/wait.py b/docker-jans-persistence-loader/scripts/wait.py index 64014331922..2bd5c3c2c6d 100644 --- a/docker-jans-persistence-loader/scripts/wait.py +++ b/docker-jans-persistence-loader/scripts/wait.py @@ -20,7 +20,7 @@ def validate_doc_store_type(value): def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "sql": diff --git a/docker-jans-saml/Dockerfile b/docker-jans-saml/Dockerfile index 836d391c1e8..4811de4c819 100644 --- a/docker-jans-saml/Dockerfile +++ b/docker-jans-saml/Dockerfile @@ -35,7 +35,7 @@ RUN wget -q https://jenkins.jans.io/maven/io/jans/kc-jans-spi/${CN_VERSION}/kc-j # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup # note that as we're pulling from a monorepo (with multiple project in it) @@ -129,10 +129,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-saml/README.md b/docker-jans-saml/README.md index e19d30dea5f..ea347bc5954 100644 --- a/docker-jans-saml/README.md +++ b/docker-jans-saml/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). 
Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -100,12 +98,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -115,7 +113,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-saml/scripts/bootstrap.py b/docker-jans-saml/scripts/bootstrap.py index 5f1ce9993de..8dc7d9e0b0f 100644 --- a/docker-jans-saml/scripts/bootstrap.py +++ b/docker-jans-saml/scripts/bootstrap.py @@ -21,10 +21,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_password -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -77,7 +73,7 @@ def render_keycloak_conf(): def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -90,17 +86,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -151,7 +136,6 @@ def __init__(self, manager: Manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, diff --git a/docker-jans-saml/scripts/upgrade.py b/docker-jans-saml/scripts/upgrade.py index c0bc9a713fc..2b8e59e8e2a 100644 --- a/docker-jans-saml/scripts/upgrade.py +++ b/docker-jans-saml/scripts/upgrade.py @@ -8,7 +8,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from 
jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import doc_id_from_dn from jans.pycloudlib.persistence.sql import SqlClient @@ -75,44 +74,6 @@ def _transform_saml_dynamic_config(conf): return conf, should_update -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -207,7 +168,6 @@ def modify_entry(self, key, attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -231,7 +191,7 @@ def update_saml_dynamic_config(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "jansAppConf"} id_ = doc_id_from_dn(id_) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} id_ = id_from_dn(id_) diff --git a/docker-jans-saml/scripts/wait.py b/docker-jans-saml/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-saml/scripts/wait.py +++ b/docker-jans-saml/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-saml/templates/jans-ldap.properties b/docker-jans-saml/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-saml/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check.
Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-scim/Dockerfile b/docker-jans-scim/Dockerfile index 9618a1ba394..51eb0b117e7 100644 --- a/docker-jans-scim/Dockerfile +++ b/docker-jans-scim/Dockerfile @@ -60,7 +60,7 @@ RUN mkdir -p ${JETTY_BASE}/jans-scim/webapps \ # Assets sync # =========== -ENV JANS_SOURCE_VERSION=5e7b579f097714d685a6c1e0cedd004df0ce85f8 +ENV JANS_SOURCE_VERSION=595fff63b350f8da248bb0fcf3bd3966fa6f1953 ARG JANS_SETUP_DIR=jans-linux-setup/jans_setup ARG JANS_SCIM_RESOURCE_DIR=jans-scim/server/src/main/resources @@ -169,10 +169,8 @@ ENV CN_SECRET_ADAPTER=vault \ # Persistence ENV # =============== -ENV CN_PERSISTENCE_TYPE=ldap \ +ENV CN_PERSISTENCE_TYPE=sql \ CN_HYBRID_MAPPING="{}" \ - CN_LDAP_URL=localhost:1636 \ - CN_LDAP_USE_SSL=true \ CN_COUCHBASE_URL=localhost \ CN_COUCHBASE_USER=admin \ CN_COUCHBASE_CERT_FILE=/etc/certs/couchbase.crt \ diff --git a/docker-jans-scim/README.md b/docker-jans-scim/README.md index ec38f47d5d8..b4a9a98ca2d 100644 --- a/docker-jans-scim/README.md +++ b/docker-jans-scim/README.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -116,8 +114,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO" } @@ -139,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -154,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docker-jans-scim/scripts/bootstrap.py b/docker-jans-scim/scripts/bootstrap.py index 3748311620b..592a3cf5fae 100644 --- a/docker-jans-scim/scripts/bootstrap.py +++ b/docker-jans-scim/scripts/bootstrap.py @@ -18,10 +18,6 @@ from jans.pycloudlib.persistence.couchbase import sync_couchbase_password from jans.pycloudlib.persistence.couchbase import sync_couchbase_truststore from jans.pycloudlib.persistence.hybrid import render_hybrid_properties -from jans.pycloudlib.persistence.ldap import LdapClient -from jans.pycloudlib.persistence.ldap import render_ldap_properties -from jans.pycloudlib.persistence.ldap import sync_ldap_truststore -from jans.pycloudlib.persistence.ldap import sync_ldap_password from jans.pycloudlib.persistence.spanner import render_spanner_properties from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.spanner import sync_google_credentials @@ -54,7 +50,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql") render_salt(manager, "/app/templates/salt", "/etc/jans/conf/salt") render_base_properties("/app/templates/jans.properties", "/etc/jans/conf/jans.properties") @@ -67,17 +63,6 @@ def main(): if not os.path.exists(hybrid_prop): render_hybrid_properties(hybrid_prop) - if "ldap" in persistence_groups: - render_ldap_properties( - manager, - "/app/templates/jans-ldap.properties", - "/etc/jans/conf/jans-ldap.properties", - ) - - if as_boolean(os.environ.get("CN_LDAP_USE_SSL", "true")): - sync_ldap_truststore(manager) - sync_ldap_password(manager) - if "couchbase" in persistence_groups: sync_couchbase_password(manager) render_couchbase_properties( @@ -140,8 +125,6 @@ def configure_logging(): "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "log_prefix": "", @@ -185,7 +168,6 @@ def configure_logging(): "scim_log_target": "FILE", "persistence_log_target": "JANS_SCIM_PERSISTENCE_FILE", "persistence_duration_log_target": "JANS_SCIM_PERSISTENCE_DURATION_FILE", - "ldap_stats_log_target": "JANS_SCIM_PERSISTENCE_LDAP_STATISTICS_FILE", "script_log_target": "JANS_SCIM_SCRIPT_LOG_FILE", } for key, value in file_aliases.items(): @@ -212,7 +194,6 @@ def __init__(self, manager: Manager) -> None: self.manager = manager client_classes = { - "ldap": LdapClient, "couchbase": CouchbaseClient, "spanner": SpannerClient, "sql": SqlClient, @@ -284,17 +265,13 @@ def get_scope_jans_ids(self): entries = self.client.search("jansScope", ["jansId"]) return [entry["jansId"] for entry in entries] - if self.persistence_type == "couchbase": - bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") - req = self.client.exec_query( - f"SELECT {bucket}.jansId FROM {bucket} WHERE objectClass = 'jansScope'", - ) - results = req.json()["results"] - return [item["jansId"] for item in results] - - # likely ldap - entries = self.client.search("ou=scopes,o=jans", "(objectClass=jansScope)", ["jansId"]) - return [entry.entry_attributes_as_dict["jansId"][0] for entry in entries] + # likely couchbase + bucket = os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans") + req = self.client.exec_query( + f"SELECT {bucket}.jansId FROM {bucket} WHERE 
objectClass = 'jansScope'", + ) + results = req.json()["results"] + return [item["jansId"] for item in results] def generate_scopes_ldif(self): # jansId to compare to diff --git a/docker-jans-scim/scripts/upgrade.py b/docker-jans-scim/scripts/upgrade.py index 436ed28482d..ab9dc124e8e 100644 --- a/docker-jans-scim/scripts/upgrade.py +++ b/docker-jans-scim/scripts/upgrade.py @@ -7,7 +7,6 @@ from jans.pycloudlib import get_manager from jans.pycloudlib.persistence.couchbase import CouchbaseClient from jans.pycloudlib.persistence.couchbase import id_from_dn -from jans.pycloudlib.persistence.ldap import LdapClient from jans.pycloudlib.persistence.spanner import SpannerClient from jans.pycloudlib.persistence.sql import doc_id_from_dn from jans.pycloudlib.persistence.sql import SqlClient @@ -22,53 +21,6 @@ Entry = namedtuple("Entry", ["id", "attrs"]) -class LDAPBackend: - def __init__(self, manager): - self.manager = manager - self.client = LdapClient(manager) - self.type = "ldap" - - def format_attrs(self, attrs): - _attrs = {} - for k, v in attrs.items(): - if len(v) < 2: - v = v[0] - _attrs[k] = v - return _attrs - - def get_entry(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - - entry = self.client.get(key, filter_=filter_, attributes=attrs) - if not entry: - return None - return Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - - def modify_entry(self, key, attrs=None, **kwargs): - attrs = attrs or {} - del_flag = kwargs.get("delete_attr", False) - - if del_flag: - mod = self.client.MODIFY_DELETE - else: - mod = self.client.MODIFY_REPLACE - - for k, v in attrs.items(): - if not isinstance(v, list): - v = [v] - attrs[k] = [(mod, v)] - return self.client.modify(key, attrs) - - def search_entries(self, key, filter_="", attrs=None, **kwargs): - filter_ = filter_ or "(objectClass=*)" - entries = self.client.search(key, filter_, attrs) - - return [ - Entry(entry.entry_dn, self.format_attrs(entry.entry_attributes_as_dict)) - for entry in entries - ] - - class SQLBackend: def __init__(self, manager): self.manager = manager @@ -193,7 +145,6 @@ def search_entries(self, key, filter_="", attrs=None, **kwargs): "sql": SQLBackend, "couchbase": CouchbaseBackend, "spanner": SpannerBackend, - "ldap": LDAPBackend, } @@ -215,16 +166,11 @@ def get_all_scopes(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "jansScope"} entries = self.backend.search_entries(None, **kwargs) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} entries = self.backend.search_entries( None, filter_="WHERE objectClass = 'jansScope'", **kwargs ) - else: - # likely ldap - entries = self.backend.search_entries( - "ou=scopes,o=jans", filter_="(objectClass=jansScope)" - ) return { entry.attrs["jansId"]: entry.attrs.get("dn") or entry.id @@ -239,7 +185,7 @@ def update_client_scopes(self): if self.backend.type in ("sql", "spanner"): kwargs = {"table_name": "jansClnt"} id_ = doc_id_from_dn(id_) - elif self.backend.type == "couchbase": + else: # likely couchbase kwargs = {"bucket": os.environ.get("CN_COUCHBASE_BUCKET_PREFIX", "jans")} id_ = id_from_dn(id_) diff --git a/docker-jans-scim/scripts/wait.py b/docker-jans-scim/scripts/wait.py index 53f3c51c4e8..0817b4e0d1f 100644 --- a/docker-jans-scim/scripts/wait.py +++ b/docker-jans-scim/scripts/wait.py @@ -13,7 +13,7 @@ def main(): - persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "ldap") + persistence_type = 
os.environ.get("CN_PERSISTENCE_TYPE", "sql") validate_persistence_type(persistence_type) if persistence_type == "hybrid": diff --git a/docker-jans-scim/templates/jans-ldap.properties b/docker-jans-scim/templates/jans-ldap.properties deleted file mode 100644 index a7ec401fe75..00000000000 --- a/docker-jans-scim/templates/jans-ldap.properties +++ /dev/null @@ -1,28 +0,0 @@ -bindDN: %(ldap_binddn)s -bindPassword: %(encoded_ox_ldap_pw)s -servers: %(ldap_hostname)s:%(ldaps_port)s - -useSSL: %(ssl_enabled)s -ssl.trustStoreFile: %(ldapTrustStoreFn)s -ssl.trustStorePin: %(encoded_ldapTrustStorePass)s -ssl.trustStoreFormat: pkcs12 - -maxconnections: 40 - -# Max wait 20 seconds -connection.max-wait-time-millis=20000 - -# Force to recreate polled connections after 30 minutes -connection.max-age-time-millis=1800000 - -# Invoke connection health check after checkout it from pool -connection-pool.health-check.on-checkout.enabled=false - -# Interval to check connections in pool. Value is 3 minutes. Not used when onnection-pool.health-check.on-checkout.enabled=true -connection-pool.health-check.interval-millis=180000 - -# How long to wait during connection health check. Max wait 20 seconds -connection-pool.health-check.max-response-time-millis=20000 - -binaryAttributes=objectGUID -certificateAttributes=userCertificate diff --git a/docker-jans-scim/templates/jans-scim/log4j2.xml b/docker-jans-scim/templates/jans-scim/log4j2.xml index cdab179b199..7e2b1cc2b08 100644 --- a/docker-jans-scim/templates/jans-scim/log4j2.xml +++ b/docker-jans-scim/templates/jans-scim/log4j2.xml @@ -37,16 +37,6 @@ - - - - - - - - - - @@ -69,20 +59,11 @@ - - -persistence - - -persistence - - -persistence-duration - - - -persistence-duration @@ -93,11 +74,6 @@ - - -ldap-stats - - - -script diff --git a/docs/contribute/testing.md b/docs/contribute/testing.md index b9d15f7f3d9..22061e19424 100644 --- a/docs/contribute/testing.md +++ b/docs/contribute/testing.md @@ -96,16 +96,12 @@ As part of pre-release QA check, we run a set of [manual sanity checks](#sanity- | \# | OS Platform | Persistance Type | Deployment Type (VM/CN) | Test | |----|--------------|------------------|-------------------------|---------------------------------| -| 1 | SUSE 15 | Opendj | VM | installation and sanity testing | | 2 | SUSE 15 | Mysql | VM | installation and sanity testing | | 3 | SUSE 15 | Pgsql | VM | installation and sanity testing | -| 4 | RHEL 8 | Opendj | VM | installation and sanity testing | | 5 | RHEL 8 | Mysql | VM | installation and sanity testing | | 6 | RHEL 8 | Pgsql | VM | installation and sanity testing | -| 7 | Ubuntu20 | Opendj | VM | installation and sanity testing | | 8 | Ubuntu20 | Mysql | VM | installation and sanity testing | | 9 | Ubuntu20 | Pgsql | VM | installation and sanity testing | -| 10 | Ubuntu22 | Opendj | VM | installation and sanity testing | | 11 | Ubuntu22 | Mysql | VM | installation and sanity testing | | 12 | Ubuntu22 | Pgsql | VM | installation and sanity testing | diff --git a/docs/includes/cn-system-requirements.md b/docs/includes/cn-system-requirements.md index 8774fe3a6f1..af06b356bef 100644 --- a/docs/includes/cn-system-requirements.md +++ b/docs/includes/cn-system-requirements.md @@ -9,7 +9,6 @@ Use the listing below for a detailed estimation of minimum required resources. 
T | Service | CPU Unit | RAM | Disk Space | Processor Type | Required | |-------------------|----------|-------|------------|----------------|------------------------------------| | Auth server | 2.5 | 2.5GB | N/A | 64 Bit | Yes | -| LDAP (OpenDJ) | 1.5 | 2GB | 10GB | 64 Bit | Only if Couchbase/SQL not installed| | fido2 | 0.5 | 0.5GB | N/A | 64 Bit | No | | scim | 1 | 1GB | N/A | 64 Bit | No | | config - job | 0.3 | 0.3GB | N/A | 64 Bit | Yes on fresh installs | diff --git a/docs/janssen-server/install/docker-install/compose.md b/docs/janssen-server/install/docker-install/compose.md index b411da4cc5d..88b87cf419c 100644 --- a/docs/janssen-server/install/docker-install/compose.md +++ b/docs/janssen-server/install/docker-install/compose.md @@ -33,7 +33,6 @@ Installation depends on the set of environment variables shown below. These envi | `CN_CITY` | City. Used for ssl cert generation. | `Austin` | | `CN_STATE` | State. Used for ssl cert generation | `TX` | | `CN_COUNTRY` | Country. Used for ssl cert generation. | `US` | -| `CN_INSTALL_LDAP` | **NOT SUPPORRTED YET** | `false` | | `CN_INSTALL_MYSQL` | Install jans with mysql as the backend | `false` | | `CN_INSTALL_PGSQL` | Install jans with Postgres as the backend | `false` | | `CN_INSTALL_CONFIG_API` | Installs the Config API service. | `true` | @@ -51,13 +50,12 @@ Installation depends on the set of environment variables shown below. These envi ## How to run -Download the compose file of your chosen persistence from mysql, postgres or ldap +Download the compose file of your chosen persistence from mysql or postgres ```bash + wget https://raw.githubusercontent.com/JanssenProject/jans/main/docker-jans-monolith/jans-mysql-compose.yml wget https://raw.githubusercontent.com/JanssenProject/jans/main/docker-jans-monolith/jans-postgres-compose.yml -wget https://raw.githubusercontent.com/JanssenProject/jans/main/docker-jans-monolith/jans-ldap-compose.yml - ``` Download the script files @@ -76,7 +74,7 @@ This docker compose file runs two containers, the janssen monolith container and To start the containers. ```bash -./up.sh #You can pass mysql|postgres|ldap as an argument to the script. If you don't pass any, it will default to mysql. +./up.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. ``` To view the containers running @@ -89,7 +87,7 @@ docker compose -f jans-mysql-compose.yml ps To stop the containers. ```bash -./down.sh #You can pass mysql|postgres|ldap as an argument to the script. If you don't pass any, it will default to mysql. +./down.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. ``` ## Configure Janssen Server @@ -122,5 +120,5 @@ After adding the record you can hit endpoints such as https://demoexample.jans.i Remove setup and volumes ```bash -./clean.sh #You can pass mysql|postgres|ldap as an argument to the script. If you don't pass any, it will default to mysql. +./clean.sh #You can pass mysql|postgres as an argument to the script. If you don't pass any, it will default to mysql. 
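# Illustrative usage based on the note above (not part of the original compose
# instructions): pass the persistence backend explicitly instead of relying on
# the mysql default.
./clean.sh postgres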
``` diff --git a/docs/janssen-server/install/helm-install/amazon-eks.md b/docs/janssen-server/install/helm-install/amazon-eks.md index a630eafe524..e1f4833b404 100644 --- a/docs/janssen-server/install/helm-install/amazon-eks.md +++ b/docs/janssen-server/install/helm-install/amazon-eks.md @@ -96,72 +96,6 @@ tags: - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans ``` - - - - - - - LDAP/Opendj for persistence storage - - Prepare cert and key for OpenDJ, for example: - - ``` - openssl req -x509 -newkey rsa:2048 -sha256 -days 365 -nodes -keyout opendj.key -out opendj.crt -subj '/CN=demoexample.jans.io' -addext 'subjectAltName=DNS:ldap,DNS:opendj' - ``` - - Extract the contents of OpenDJ cert and key files as base64 string: - - ``` - OPENDJ_CERT_B64=$(base64 opendj.crt -w0) - OPENDJ_KEY_B64=$(base64 opendj.key -w0) - ``` - - Add the following yaml snippet to your `override.yaml` file: - ```yaml - global: - cnPersistenceType: ldap - storageClass: - provisioner: kubernetes.io/aws-ebs - opendj: - enabled: true - config: - configmap: - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - ``` - - So if your desired configuration has no-FQDN and LDAP, the final `override.yaml` file will look something like that: - - ```yaml - global: - cnPersistenceType: ldap - isFqdnRegistered: false - storageClass: - provisioner: kubernetes.io/aws-ebs - opendj: - enabled: true - config: - configmap: - lbAddr: http:// #Add LB address from previous command - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - nginx-ingress: - ingress: - path: / - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - tls: - - secretName: tls-certificate - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - ``` - - - - Couchbase for pesistence storage Add the following yaml snippet to your `override.yaml` file: diff --git a/docs/janssen-server/install/helm-install/google-gke.md b/docs/janssen-server/install/helm-install/google-gke.md index f42c7d42339..cab932a6a73 100644 --- a/docs/janssen-server/install/helm-install/google-gke.md +++ b/docs/janssen-server/install/helm-install/google-gke.md @@ -88,71 +88,6 @@ tags: - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans ``` - - - - - - - LDAP/Opendj for persistence storage - - Prepare cert and key for OpenDJ, for example: - - ``` - openssl req -x509 -newkey rsa:2048 -sha256 -days 365 -nodes -keyout opendj.key -out opendj.crt -subj '/CN=demoexample.jans.io' -addext 'subjectAltName=DNS:ldap,DNS:opendj' - ``` - - Extract the contents of OpenDJ cert and key files as base64 string: - - ``` - OPENDJ_CERT_B64=$(base64 opendj.crt -w0) - OPENDJ_KEY_B64=$(base64 opendj.key -w0) - ``` - - Add the following yaml snippet to your `override.yaml` file: - ```yaml - global: - cnPersistenceType: ldap - storageClass: - provisioner: kubernetes.io/gce-pd - opendj: - enabled: true - config: - configmap: - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - ``` - - So if your desired configuration has no-FQDN and LDAP, the final `override.yaml` file will look something like that: - - ```yaml - global: - cnPersistenceType: ldap - lbIp: #Add the Loadbalancer IP from the previous command - isFqdnRegistered: false - storageClass: - provisioner: kubernetes.io/gce-pd - opendj: - enabled: true - config: - configmap: - # -- contents 
of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - nginx-ingress: - ingress: - path: / - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - tls: - - secretName: tls-certificate - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - ``` - - - Couchbase for pesistence storage Add the following yaml snippet to your `override.yaml` file: diff --git a/docs/janssen-server/install/helm-install/local.md b/docs/janssen-server/install/helm-install/local.md index bae9507d12e..8cb3378cb5e 100644 --- a/docs/janssen-server/install/helm-install/local.md +++ b/docs/janssen-server/install/helm-install/local.md @@ -20,7 +20,6 @@ Use the listing below for a detailed estimation of minimum required resources. T | Service | CPU Unit | RAM | Disk Space | Processor Type | Required | |-------------------|----------|-------|------------|----------------|------------------------------------| | Auth server | 2.5 | 2.5GB | N/A | 64 Bit | Yes | -| LDAP (OpenDJ) | 1.5 | 2GB | 10GB | 64 Bit | Only if Couchbase/SQL not installed| | fido2 | 0.5 | 0.5GB | N/A | 64 Bit | No | | scim | 1 | 1GB | N/A | 64 Bit | No | | config - job | 0.3 | 0.3GB | N/A | 64 Bit | Yes on fresh installs | diff --git a/docs/janssen-server/install/helm-install/microsoft-azure.md b/docs/janssen-server/install/helm-install/microsoft-azure.md index ff2e43440d9..a6d632839fb 100644 --- a/docs/janssen-server/install/helm-install/microsoft-azure.md +++ b/docs/janssen-server/install/helm-install/microsoft-azure.md @@ -93,72 +93,6 @@ tags: - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans ``` - - - - - - - LDAP/Opendj for persistence storage - - Prepare cert and key for OpenDJ, for example: - - ``` - openssl req -x509 -newkey rsa:2048 -sha256 -days 365 -nodes -keyout opendj.key -out opendj.crt -subj '/CN=demoexample.jans.io' -addext 'subjectAltName=DNS:ldap,DNS:opendj' - ``` - - Extract the contents of OpenDJ cert and key files as base64 string: - - ``` - OPENDJ_CERT_B64=$(base64 opendj.crt -w0) - OPENDJ_KEY_B64=$(base64 opendj.key -w0) - ``` - - Add the following yaml snippet to your `override.yaml` file: - ```yaml - global: - cnPersistenceType: ldap - storageClass: - provisioner: disk.csi.azure.com - opendj: - enabled: true - config: - configmap: - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - ``` - - So if your desired configuration has no-FQDN and LDAP, the final `override.yaml` file will look something like that: - - ```yaml - global: - cnPersistenceType: ldap - lbIp: #Add the Loadbalancer IP from the previous command - isFqdnRegistered: false - storageClass: - provisioner: disk.csi.azure.com - opendj: - enabled: true - config: - configmap: - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - nginx-ingress: - ingress: - path: / - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - tls: - - secretName: tls-certificate - hosts: - - demoexample.jans.io #CHANGE-THIS to the FQDN used for Jans - ``` - - - - Couchbase for pesistence storage Add the following yaml snippet to your `override.yaml` file: diff --git a/docs/janssen-server/install/setup.md b/docs/janssen-server/install/setup.md index 9ec6de73f65..37df0b67b91 100644 --- a/docs/janssen-server/install/setup.md +++ b/docs/janssen-server/install/setup.md @@ -35,7 +35,7 @@ tags: Enter Password for Admin User: 
``` -3. Next, pick a persistence mechanism. Choose from openDJ, MySQL ,PGSql, an LDAP that can be installed locally or remotely, or Couchbase, an enterprise NoSQL cloud database. +3. Next, pick a persistence mechanism. Choose from MySQL ,PGSql, an LDAP that can be installed locally or remotely, or Couchbase, an enterprise NoSQL cloud database. 4. Next, pick which services should be installed for this deployment: diff --git a/docs/janssen-server/kubernetes-ops/cert-management.md b/docs/janssen-server/kubernetes-ops/cert-management.md index bf789450dcd..bf2334fa397 100644 --- a/docs/janssen-server/kubernetes-ops/cert-management.md +++ b/docs/janssen-server/kubernetes-ops/cert-management.md @@ -169,51 +169,3 @@ kubectl apply -f load-web-key-rotation.yaml -n kubectl apply -f auth-key-rotation.yaml -n ``` -## LDAP - -!!! Warning - Starting from `1.0.20`, the `ghcr.io/janssenproject/jans/certmanager` image is no longer handle LDAP certificate and key rotation in favor of re-generating them manually. - -| Associated certificates and keys | -| ----------------------------------- | -| /etc/certs/opendj.crt | -| /etc/certs/opendj.key | - -1. Prepare cert and key for OpenDJ, for example: - - ``` - openssl req -x509 -newkey rsa:2048 -sha256 -days 365 -nodes -keyout opendj.key -out opendj.crt -subj '/CN=demoexample.jans.io' -addext 'subjectAltName=DNS:ldap,DNS:opendj' - ``` - -1. Extract the contents of OpenDJ cert and key files as base64 string: - - ``` - OPENDJ_CERT_B64=$(base64 opendj.crt -w0) - OPENDJ_KEY_B64=$(base64 opendj.key -w0) - ``` - -1. Add the following yaml snippet to your `override.yaml` file, for example: - - ```yaml - global: - cnPersistenceType: ldap - storageClass: - provisioner: kubernetes.io/aws-ebs - opendj: - enabled: true - config: - configmap: - # -- contents of OpenDJ cert file in base64-string - cnLdapCrt: - # -- contents of OpenDJ key file in base64-string - cnLdapKey: - ``` - -1. Rollout restart OpenDJ statefulset: - - ``` - kubectl -n rollout restart sts - ``` - - Wait until pods are re-deployed successfully (this will update OpenDJ certificate and key in Kubernetes secrets). - Afterwards, do rollout restart for other deployments/statefulsets to ensure latest OpenDJ certificate and key are pulled into pods. diff --git a/docs/janssen-server/kubernetes-ops/health-check.md b/docs/janssen-server/kubernetes-ops/health-check.md index da00f65433c..923df41fe54 100644 --- a/docs/janssen-server/kubernetes-ops/health-check.md +++ b/docs/janssen-server/kubernetes-ops/health-check.md @@ -21,32 +21,6 @@ Jans deployed components uses two types of probes: Here is a list of the liveness and readiness probes of the deployed jans components -### Opendj - -Opendj uses [healthcheck.py](https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py) in liveness probe. -This python script connects to opendj to test its liveness. - -```yaml - livenessProbe: - # Executes the python3 healthcheck. - exec: - command: - - python3 - - /app/scripts/healthcheck.py - # Configure the liveness healthcheck for the OpenDJ if needed. - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - failureThreshold: 20 - readinessProbe: - tcpSocket: - port: 1636 - # Configure the readiness healthcheck for the OpenDJ if needed. 
- initialDelaySeconds: 60 - timeoutSeconds: 5 - periodSeconds: 25 - failureThreshold: 20 -``` ### auth-server Auth-sever executes the python3 [healthcheck.py](https://github.com/JanssenProject/jans/blob/main/docker-jans-auth-server/scripts/healthcheck.py) in liveness and readiness probes. diff --git a/docs/janssen-server/planning/components.md b/docs/janssen-server/planning/components.md index 383666ff203..071c9b3dfd3 100644 --- a/docs/janssen-server/planning/components.md +++ b/docs/janssen-server/planning/components.md @@ -19,7 +19,7 @@ assertions. This service must be Internet-facing. persistence service to store configuration and other entity data (client, person, scope, attribute, FIDO device, etc.) As different databases are good for different deployments, Janssen supports a number of options: -OpenDJ, MySQL, Postgres, Couchbase, Google Spanner, and Amazon Aurora. Other +MySQL, Postgres, Couchbase, Google Spanner, and Amazon Aurora. Other databases may be added in the future. 1. **Cache**: Getting data from a disk is still the slowest part of any diff --git a/docs/janssen-server/planning/persistence.md b/docs/janssen-server/planning/persistence.md index 0237d33f4d2..5187a6df722 100644 --- a/docs/janssen-server/planning/persistence.md +++ b/docs/janssen-server/planning/persistence.md @@ -28,23 +28,6 @@ Janssen's strategy is to provide optionality for persistence. There is no one size fits all solution for databases. The following section will detail some of the pros and cons of the various databases we currently support. -1. **OpenDJ (LDAP)** Janssen supports Gluu's distribution of OpenDJ, and probably any -other similar distributions like ForgeRock OpenDJ, Ping Directory Server, or -Oracle Unified Directory. LDAP in general and OpenDJ in particular have been -successfully backing authentication service for more than 20 years. People tend -to think of the LDAP tree structure as fast for reads, and slow for writes. -That's just not true anymore--OpenDJ is able to perform quite well for write -operations as well. OpenDJ has mature replication support, excellent command -line tools for administration, and excellent stability. The main disadvantage -of OpenDJ is scaling large datasets for high concurrency. While you can get -around this shortcoming with a global LDAP proxy, such a topology gets -complicated and costly to operate, as you have to break up the data and -configure multiple replicated topologies. As a rule of thumb, if concurrency of -more than 120 OpenID code flow authentications per second are needed, you should -consider another database. But for concurrency less than this, OpenDJ is an -excellent choice. Owing to these limitations, **LDAP is not -supported in production deployments using Kubernetes, 1.0.23 and forward**. - 1. **MySQL** You know it... you love it. That's the biggest advantage. Performance is great out of the box. But if you have high concurrency, you'll have to figure out a plan for replication, and horizontal scaling. diff --git a/docs/janssen-server/planning/vm-cluster.md b/docs/janssen-server/planning/vm-cluster.md index fdde6e07044..42bacc104a8 100644 --- a/docs/janssen-server/planning/vm-cluster.md +++ b/docs/janssen-server/planning/vm-cluster.md @@ -25,7 +25,7 @@ components) are stateless, you can use any load balancer routing algorithm, even round robin. You can also use the load balancer to terminate SSL. 1. **Database**: The web services Janssen Components share the database, so -you'll have to use database replication (e.g. OpenDJ replication). 
You could +you'll have to use database replication. You could also use a cloud database that takes care of replication for you. 1. **Cache**: You can't use `IN-MEMORY` cache which would have no way to diff --git a/docs/janssen-server/recipes/benchmark.md b/docs/janssen-server/recipes/benchmark.md index ac9794b3d4e..9284c0ae73b 100644 --- a/docs/janssen-server/recipes/benchmark.md +++ b/docs/janssen-server/recipes/benchmark.md @@ -199,9 +199,6 @@ Loading users requires a hefty but temporary amount of resources. By default, th | `GOOGLE_PROJECT_ID` | Google Project ID. **Used with Spanner** | `` | | `GOOGLE_SPANNER_INSTANCE_ID` | Google Spanner Instance ID. **Used with Spanner** | `` | | `GOOGLE_SPANNER_DATABASE_ID` | Google Spanner Database ID. **Used with Spanner** | `` | - | `LDAP_URL` | LDAP URL if LDAP is the persistence to load users in. | `opendj:1636` | - | `LDAP_PW` | LDAP PW if LDAP is the persistence to load users in. | `` | - | `LDAP_DN` | LDAP DN if LDAP is the persistence to load users in. | `cn=directory manager` | | `RDBMS_TYPE` | RDBMS type if `mysql` or `pgsql` is the persistence to load users in. | `mysql` | | `RDBMS_DB` | RDBMS Database name if `mysql` or `pgsql` is the persistence to load users in. | `jans` | | `RDBMS_USER` | RDBMS user if `mysql` or `pgsql` is the persistence to load users in. | `jans` | diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-auth-server.md b/docs/janssen-server/reference/kubernetes/docker-jans-auth-server.md index edd819087b5..6eddc8cbc54 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-auth-server.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-auth-server.md @@ -50,10 +50,8 @@ The following environment variables are supported by the container: - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. - `CN_DEBUG_PORT`: port of remote debugging (if omitted, remote debugging will be disabled). -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `couchbase`, `spanner`, `sql`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -131,8 +129,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -189,12 +185,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -204,7 +200,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", @@ -220,4 +216,3 @@ i.e. `http://container:9093/metrics`. Note that Prometheus JMX exporter uses pre-defined config file (see `conf/prometheus-config.yaml`). To customize the config, mount custom config file to `/opt/prometheus/prometheus-config.yaml` inside the container. - diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-casa.md b/docs/janssen-server/reference/kubernetes/docker-jans-casa.md index a605ae0f548..89b28d16f43 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-casa.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-casa.md @@ -41,10 +41,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`); required if `CN_PERSISTENCE_TYPE` is set to `ldap` or `hybrid`. -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`); required if `CN_PERSISTENCE_TYPE` is set to `couchbase` or `hybrid`. @@ -135,12 +133,12 @@ Hybrid persistence supports all available persistence types. To configure hybrid ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -150,7 +148,7 @@ Hybrid persistence supports all available persistence types. 
To configure hybrid { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-certmanager.md b/docs/janssen-server/reference/kubernetes/docker-jans-certmanager.md index 284a350a897..8ccdcb9176b 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-certmanager.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-certmanager.md @@ -50,10 +50,8 @@ The following environment variables are supported by the container: - `CN_SECRET_GOOGLE_SECRET_VERSION_ID`: Google Secret Manager version ID (default to `latest`). - `CN_SECRET_GOOGLE_SECRET_NAME_PREFIX`: Prefix for Google Secret Manager name (default to `jans`). - `CN_SECRET_GOOGLE_SECRET_MANAGER_PASSPHRASE`: Passphrase for Google Secret Manager (default to `secret`). -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -226,12 +224,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -241,7 +239,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-config-api.md b/docs/janssen-server/reference/kubernetes/docker-jans-config-api.md index 576682b65db..898521dd692 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-config-api.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-config-api.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, `sql`, `spanner`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `couchbase`, `sql`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. 
-- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -124,8 +122,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO", "audit_log_target": "FILE", @@ -184,12 +180,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -199,7 +195,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-configurator.md b/docs/janssen-server/reference/kubernetes/docker-jans-configurator.md index c4b1833318e..e9e9a2d7aab 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-configurator.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-configurator.md @@ -87,8 +87,7 @@ For fresh installation, generate the initial configuration by creating `/path/to "orgName": "Gluu Inc." }, "_secret": { - "admin_password": "S3cr3t+pass", - "ldap_password": "S3cr3t+pass" + "admin_password": "S3cr3t+pass" } } ``` @@ -99,12 +98,11 @@ For fresh installation, generate the initial configuration by creating `/path/to - `auth_sig_keys`: space-separated key algorithm for signing (default to `RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512`) - `auth_enc_keys`: space-separated key algorithm for encryption (default to `RSA1_5 RSA-OAEP`) - - `optional_scopes`: list of optional scopes (as JSON string) that will be used (supported scopes are `ldap`, `couchbase`, `redis`, `sql`; default to empty list) + - `optional_scopes`: list of optional scopes (as JSON string) that will be used (supported scopes are `couchbase`, `redis`, `sql`; default to empty list) - `init_keys_exp`: the initial keys expiration time in hours (default to `48`; extra 1 hour will be added for hard limit) 2. 
`_secret`: - - `ldap_password`: user's password to access LDAP database (only used if `optional_scopes` list contains `ldap` scope) - `sql_password`: user's password to access SQL database (only used if `optional_scopes` list contains `sql` scope) - `couchbase_password`: user's password to access Couchbase database (only used if `optional_scopes` list contains `couchbase` scope) - `couchbase_superuser_password`: superusers password to access Couchbase database (only used if `optional_scopes` list contains `couchbase` scope) @@ -147,7 +145,7 @@ To generate initial configmaps and secrets: name: config-generate-params containers: - name: configurator-load - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION volumeMounts: - mountPath: /app/db/configuration.json name: config-generate-params @@ -185,7 +183,7 @@ To restore configuration from `configuration.out.json` file: name: config-dump-params containers: - name: configurator-load - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION volumeMounts: - mountPath: /app/db/configuration.out.json name: config-dump-params @@ -211,7 +209,7 @@ spec: restartPolicy: Never containers: - name: configurator-dump-job - image: ghcr.io/janssenproject/jans/configurator:1.1.6_dev + image: ghcr.io/janssenproject/jans/configurator:$VERSION command: - /bin/sh - -c diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-fido2.md b/docs/janssen-server/reference/kubernetes/docker-jans-fido2.md index e460b718f1b..d8339fc980d 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-fido2.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-fido2.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -137,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -152,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-link.md b/docs/janssen-server/reference/kubernetes/docker-jans-link.md index 8e4dfc278f5..607737f0f7d 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-link.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-link.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -116,8 +114,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO" } @@ -139,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -154,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-persistence-loader.md b/docs/janssen-server/reference/kubernetes/docker-jans-persistence-loader.md index c8568dab968..e04d9e89ab2 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-persistence-loader.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-persistence-loader.md @@ -8,7 +8,7 @@ tags: ## Overview -Persistence is a special container to load initial data for LDAP or Couchbase. +Persistence is a special container to load initial data for supported persistence. ## Versions @@ -54,12 +54,10 @@ The following environment variables are supported by the container: - `CN_REDIS_URL`: URL of Redis server, format is host:port (optional; default to `localhost:6379`). - `CN_REDIS_TYPE`: Redis service type, either `STANDALONE` or `CLUSTER` (optional; default to `STANDALONE`). - `CN_MEMCACHED_URL`: URL of Memcache server, format is host:port (optional; default to `localhost:11211`). 
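As a point of reference, the upgrade scripts touched earlier in this changeset pick a backend class from the configured persistence type. The sketch below imitates that registry pattern with placeholder classes; the real scripts implement `SQLBackend`, `CouchbaseBackend`, and `SpannerBackend` on top of the `jans.pycloudlib` persistence clients.

```python
import os


# Placeholder classes for illustration only; the actual scripts wrap the
# jans.pycloudlib persistence clients.
class SQLBackend: ...
class CouchbaseBackend: ...
class SpannerBackend: ...


# Registry of supported backends -- note that "ldap" is intentionally absent
# after its removal in this changeset.
BACKEND_CLASSES = {
    "sql": SQLBackend,
    "couchbase": CouchbaseBackend,
    "spanner": SpannerBackend,
}


def get_backend_class():
    persistence_type = os.environ.get("CN_PERSISTENCE_TYPE", "sql")
    try:
        return BACKEND_CLASSES[persistence_type]
    except KeyError:
        raise ValueError(f"Unsupported persistence type: {persistence_type}")
```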
-- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (`couchbase`, `sql`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. - `CN_PERSISTENCE_SKIP_INITIALIZED`: skip initialization if backend already initialized (default to `false`). - `CN_PERSISTENCE_UPDATE_AUTH_DYNAMIC_CONFIG`: Whether to allow automatic updates of `jans-auth` configuration (default to `true`). -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_SUPERUSER`: Superuser of Couchbase server (default to empty-string). @@ -103,12 +101,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -118,7 +116,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-saml.md b/docs/janssen-server/reference/kubernetes/docker-jans-saml.md index e19d30dea5f..ea347bc5954 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-saml.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-saml.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -100,12 +98,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -115,7 +113,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-jans-scim.md b/docs/janssen-server/reference/kubernetes/docker-jans-scim.md index ec38f47d5d8..b4a9a98ca2d 100644 --- a/docs/janssen-server/reference/kubernetes/docker-jans-scim.md +++ b/docs/janssen-server/reference/kubernetes/docker-jans-scim.md @@ -49,10 +49,8 @@ The following environment variables are supported by the container: - `CN_WAIT_MAX_TIME`: How long the startup "health checks" should run (default to `300` seconds). - `CN_WAIT_SLEEP_DURATION`: Delay between startup "health checks" (default to `10` seconds). - `CN_MAX_RAM_PERCENTAGE`: Value passed to Java option `-XX:MaxRAMPercentage`. -- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `ldap`, `couchbase`, or `hybrid`; default to `ldap`). +- `CN_PERSISTENCE_TYPE`: Persistence backend being used (one of `sql`, `couchbase`, `spanner`, or `hybrid`; default to `sql`). - `CN_HYBRID_MAPPING`: Specify data mapping for each persistence (default to `"{}"`). Note this environment only takes effect when `CN_PERSISTENCE_TYPE` is set to `hybrid`. See [hybrid mapping](#hybrid-mapping) section for details. -- `CN_LDAP_URL`: Address and port of LDAP server (default to `localhost:1636`). -- `CN_LDAP_USE_SSL`: Whether to use SSL connection to LDAP server (default to `true`). - `CN_COUCHBASE_URL`: Address of Couchbase server (default to `localhost`). - `CN_COUCHBASE_USER`: Username of Couchbase server (default to `admin`). - `CN_COUCHBASE_CERT_FILE`: Couchbase root certificate location (default to `/etc/certs/couchbase.crt`). @@ -116,8 +114,6 @@ The following key-value pairs are the defaults: "persistence_log_level": "INFO", "persistence_duration_log_target": "FILE", "persistence_duration_log_level": "INFO", - "ldap_stats_log_target": "FILE", - "ldap_stats_log_level": "INFO", "script_log_target": "FILE", "script_log_level": "INFO" } @@ -139,12 +135,12 @@ As per v1.0.1, hybrid persistence supports all available persistence types. To c ``` { - "default": "", - "user": "", - "site": "", - "cache": "", - "token": "", - "session": "", + "default": "", + "user": "", + "site": "", + "cache": "", + "token": "", + "session": "", } ``` @@ -154,7 +150,7 @@ As per v1.0.1, hybrid persistence supports all available persistence types. 
To c { "default": "sql", "user": "spanner", - "site": "ldap", + "site": "sql", "cache": "sql", "token": "couchbase", "session": "spanner", diff --git a/docs/janssen-server/reference/kubernetes/docker-opendj.md b/docs/janssen-server/reference/kubernetes/docker-opendj.md deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/docs/janssen-server/reference/kubernetes/helm-chart.md b/docs/janssen-server/reference/kubernetes/helm-chart.md index 35b349d28fc..ff4086d2ab2 100644 --- a/docs/janssen-server/reference/kubernetes/helm-chart.md +++ b/docs/janssen-server/reference/kubernetes/helm-chart.md @@ -1,6 +1,6 @@ # janssen -![version: 1.1.6-dev](https://img.shields.io/badge/Version-1.1.6--dev-informational?style=flat-square) ![Appversion: 1.1.6-dev](https://img.shields.io/badge/AppVersion-1.1.6-informational?style=flat-square) +![Version: 1.1.6-dev](https://img.shields.io/badge/Version-1.1.6--dev-informational?style=flat-square) ![AppVersion: 1.1.6-dev](https://img.shields.io/badge/AppVersion-1.1.6--dev-informational?style=flat-square) Janssen Access and Identity Management Microservices Chart. This chart deploys each janssen microservice as a separate deployment. @@ -23,20 +23,19 @@ Kubernetes: `>=v1.22.0-0` | Repository | Name | Version | |------------|------|---------| -| | auth-server | 1.1.6 | -| | auth-server-key-rotation | 1.1.6 | -| | casa | 1.1.6 | -| | cn-istio-ingress | 1.1.6 | -| | config | 1.1.6 | -| | config-api | 1.1.6 | -| | fido2 | 1.1.6 | -| | kc-scheduler | 1.1.6 | -| | link | 1.1.6 | -| | nginx-ingress | 1.1.6 | -| | opendj | 1.1.6 | -| | persistence | 1.1.6 | -| | saml | 1.1.6 | -| | scim | 1.1.6 | +| | auth-server | 1.1.6-dev | +| | auth-server-key-rotation | 1.1.6-dev | +| | casa | 1.1.6-dev | +| | cn-istio-ingress | 1.1.6-dev | +| | config | 1.1.6-dev | +| | config-api | 1.1.6-dev | +| | fido2 | 1.1.6-dev | +| | kc-scheduler | 1.1.6-dev | +| | link | 1.1.6-dev | +| | nginx-ingress | 1.1.6-dev | +| | persistence | 1.1.6-dev | +| | saml | 1.1.6-dev | +| | scim | 1.1.6-dev | ## Values @@ -125,7 +124,7 @@ Kubernetes: `>=v1.22.0-0` | casa.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | | casa.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | | casa.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | -| config | object | 
`{"additionalAnnotations":{},"additionalLabels":{},"adminPassword":"Test1234#","city":"Austin","configmap":{"cnAwsAccessKeyId":"","cnAwsDefaultRegion":"us-west-1","cnAwsProfile":"janssen","cnAwsSecretAccessKey":"","cnAwsSecretsEndpointUrl":"","cnAwsSecretsNamePrefix":"janssen","cnAwsSecretsReplicaRegions":[],"cnCacheType":"NATIVE_PERSISTENCE","cnConfigKubernetesConfigMap":"cn","cnCouchbaseBucketPrefix":"jans","cnCouchbaseCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnCouchbaseIndexNumReplica":0,"cnCouchbasePassword":"P@ssw0rd","cnCouchbaseSuperUser":"admin","cnCouchbaseSuperUserPassword":"Test1234#","cnCouchbaseUrl":"cbjanssen.default.svc.cluster.local","cnCouchbaseUser":"janssen","cnGoogleProjectId":"google-project-to-save-config-and-secrets-to","cnGoogleSecretManagerServiceAccount":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnGoogleSecretNamePrefix":"janssen","cnGoogleSecretVersionId":"latest","cnGoogleSpannerDatabaseId":"","cnGoogleSpannerInstanceId":"","cnJettyRequestHeaderSize":8192,"cnLdapCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnLdapKey":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnLdapUrl":"opendj:1636","cnMaxRamPercent":"75.0","cnMessageType":"DISABLED","cnOpaUrl":"http://opa.opa.svc.cluster.cluster.local:8181/v1","cnPersistenceHybridMapping":"{}","cnRedisSentinelGroup":"","cnRedisSslTruststore":"","cnRedisType":"STANDALONE","cnRedisUrl":"redis.redis.svc.cluster.local:6379","cnRedisUseSsl":false,"cnScimProtectionMode":"OAUTH","cnSecretKubernetesSecret":"cn","cnSqlDbDialect":"mysql","cnSqlDbHost":"my-release-mysql.default.svc.cluster.local","cnSqlDbName":"jans","cnSqlDbPort":3306,"cnSqlDbSchema":"","cnSqlDbTimezone":"UTC","cnSqlDbUser":"jans","cnSqldbUserPassword":"Test1234#","cnVaultAddr":"http://localhost:8200","cnVaultAppRolePath":"approle","cnVaultKvPath":"secret","cnVaultNamespace":"","cnVaultPrefix":"jans","cnVaultRoleId":"","cnVaultRoleIdFile":"/etc/certs/vault_role_id","cnVaultSecretId":"","cnVaultSecretIdFile":"/etc/certs/vault_secret_id","cnVaultVerify":false,"kcDbPassword":"Test1234#","kcDbSchema":"keycloak","kcDbUrlDatabase":"keycloak","kcDbUrlHost":"mysql.kc.svc.cluster.local","kcDbUrlPort":3306,"kcDbUrlProperties":"?useUnicode=true&characterEncoding=UTF-8&character_set_server=utf8mb4","kcDbUsername":"keycloak","kcDbVendor":"mysql","kcLogLevel":"INFO","lbAddr":"","quarkusTransactionEnableRecovery":true},"countryCode":"US","customScripts":[],"dnsConfig":{},"dnsPolicy":"","email":"support@jans.io","image":{"pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/configurator","tag":"1.1.6_dev"},"ldapPassword":"P@ssw0rds","ldapTruststorePassword":"changeit","lifecycle":{},"orgName":"Janssen","redisPassword":"P@assw0rd","resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"salt":"","state":"TX","usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Configuration parameters for setup and initial configuration secret and config layers used by Janssen services. 
| +| config | object | `{"additionalAnnotations":{},"additionalLabels":{},"adminPassword":"Test1234#","city":"Austin","configmap":{"cnAwsAccessKeyId":"","cnAwsDefaultRegion":"us-west-1","cnAwsProfile":"janssen","cnAwsSecretAccessKey":"","cnAwsSecretsEndpointUrl":"","cnAwsSecretsNamePrefix":"janssen","cnAwsSecretsReplicaRegions":[],"cnCacheType":"NATIVE_PERSISTENCE","cnConfigKubernetesConfigMap":"cn","cnCouchbaseBucketPrefix":"jans","cnCouchbaseCrt":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnCouchbaseIndexNumReplica":0,"cnCouchbasePassword":"P@ssw0rd","cnCouchbaseSuperUser":"admin","cnCouchbaseSuperUserPassword":"Test1234#","cnCouchbaseUrl":"cbjanssen.default.svc.cluster.local","cnCouchbaseUser":"janssen","cnGoogleProjectId":"google-project-to-save-config-and-secrets-to","cnGoogleSecretManagerServiceAccount":"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=","cnGoogleSecretNamePrefix":"janssen","cnGoogleSecretVersionId":"latest","cnGoogleSpannerDatabaseId":"","cnGoogleSpannerInstanceId":"","cnJettyRequestHeaderSize":8192,"cnMaxRamPercent":"75.0","cnMessageType":"DISABLED","cnOpaUrl":"http://opa.opa.svc.cluster.cluster.local:8181/v1","cnPersistenceHybridMapping":"{}","cnRedisSentinelGroup":"","cnRedisSslTruststore":"","cnRedisType":"STANDALONE","cnRedisUrl":"redis.redis.svc.cluster.local:6379","cnRedisUseSsl":false,"cnScimProtectionMode":"OAUTH","cnSecretKubernetesSecret":"cn","cnSqlDbDialect":"mysql","cnSqlDbHost":"my-release-mysql.default.svc.cluster.local","cnSqlDbName":"jans","cnSqlDbPort":3306,"cnSqlDbSchema":"","cnSqlDbTimezone":"UTC","cnSqlDbUser":"jans","cnSqldbUserPassword":"Test1234#","cnVaultAddr":"http://localhost:8200","cnVaultAppRolePath":"approle","cnVaultKvPath":"secret","cnVaultNamespace":"","cnVaultPrefix":"jans","cnVaultRoleId":"","cnVaultRoleIdFile":"/etc/certs/vault_role_id","cnVaultSecretId":"","cnVaultSecretIdFile":"/etc/certs/vault_secret_id","cnVaultVerify":false,"kcDbPassword":"Test1234#","kcDbSchema":"keycloak","kcDbUrlDatabase":"keycloak","kcDbUrlHost":"mysql.kc.svc.cluster.local","kcDbUrlPort":3306,"kcDbUrlProperties":"?useUnicode=true&characterEncoding=UTF-8&character_set_server=utf8mb4","kcDbUsername":"keycloak","kcDbVendor":"mysql","kcLogLevel":"INFO","lbAddr":"","quarkusTransactionEnableRecovery":true},"countryCode":"US","customScripts":[],"dnsConfig":{},"dnsPolicy":"","email":"support@jans.io","image":{"pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/configurator","tag":"1.1.6_dev"},"lifecycle":{},"orgName":"Janssen","redisPassword":"P@assw0rd","resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"salt":"","state":"TX","usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Configuration parameters for setup and initial configuration secret and config layers used by Janssen services. 
| | config-api | object | `{"additionalAnnotations":{},"additionalLabels":{},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","hpa":{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50},"image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/config-api","tag":"1.1.6_dev"},"lifecycle":{},"livenessProbe":{"httpGet":{"path":"/jans-config-api/api/v1/health/live","port":8074},"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5},"pdb":{"enabled":true,"maxUnavailable":"90%"},"readinessProbe":{"httpGet":{"path":"jans-config-api/api/v1/health/ready","port":8074},"initialDelaySeconds":25,"periodSeconds":25,"timeoutSeconds":5},"replicas":1,"resources":{"limits":{"cpu":"1000m","memory":"1200Mi"},"requests":{"cpu":"1000m","memory":"1200Mi"}},"topologySpreadConstraints":{},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Config Api endpoints can be used to configure the auth-server, which is an open-source OpenID Connect Provider (OP) and UMA Authorization Server (AS). | | config-api.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | | config-api.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | @@ -175,13 +174,10 @@ Kubernetes: `>=v1.22.0-0` | config.configmap.cnGoogleSpannerDatabaseId | string | `""` | Google Spanner Database ID. Used only when global.cnPersistenceType is spanner. | | config.configmap.cnGoogleSpannerInstanceId | string | `""` | Google Spanner ID. Used only when global.cnPersistenceType is spanner. | | config.configmap.cnJettyRequestHeaderSize | int | `8192` | Jetty header size in bytes in the auth server | -| config.configmap.cnLdapCrt | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ certificate string. This must be encoded using base64. | -| config.configmap.cnLdapKey | string | `"SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo="` | OpenDJ key string. This must be encoded using base64. | -| config.configmap.cnLdapUrl | string | `"opendj:1636"` | OpenDJ internal address. Leave as default. Used when `global.cnPersistenceType` is set to `ldap`. | | config.configmap.cnMaxRamPercent | string | `"75.0"` | Value passed to Java option -XX:MaxRAMPercentage | | config.configmap.cnMessageType | string | `"DISABLED"` | Message type (one of POSTGRES, REDIS, or DISABLED) | | config.configmap.cnOpaUrl | string | `"http://opa.opa.svc.cluster.cluster.local:8181/v1"` | URL of OPA API | -| config.configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. { "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | +| config.configmap.cnPersistenceHybridMapping | string | `"{}"` | Specify data that should be saved in persistence (one of default, user, cache, site, token, or session; default to default). Note this environment only takes effect when `global.cnPersistenceType` is set to `hybrid`. { "default": "", "user": "", "site": "", "cache": "", "token": "", "session": "", } | | config.configmap.cnRedisSentinelGroup | string | `""` | Redis Sentinel Group. Often set when `config.configmap.cnRedisType` is set to `SENTINEL`. 
Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | config.configmap.cnRedisSslTruststore | string | `""` | Redis SSL truststore. Optional. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | | config.configmap.cnRedisType | string | `"STANDALONE"` | Redis service type. `STANDALONE` or `CLUSTER`. Can be used when `config.configmap.cnCacheType` is set to `REDIS`. | @@ -226,8 +222,6 @@ Kubernetes: `>=v1.22.0-0` | config.image.pullSecrets | list | `[]` | Image Pull Secrets | | config.image.repository | string | `"ghcr.io/janssenproject/jans/configurator"` | Image to use for deploying. | | config.image.tag | string | `"1.1.6_dev"` | Image tag to use for deploying. | -| config.ldapPassword | string | `"P@ssw0rds"` | LDAP admin password if OpenDJ is used for persistence. | -| config.ldapTruststorePassword | string | `"changeit"` | LDAP truststore password if OpenDJ is used for persistence | | config.orgName | string | `"Janssen"` | Organization name. Used for certificate creation. | | config.redisPassword | string | `"P@assw0rd"` | Redis admin password if `config.configmap.cnCacheType` is set to `REDIS`. | | config.resources | object | `{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}}` | Resource specs. | @@ -273,11 +267,11 @@ Kubernetes: `>=v1.22.0-0` | fido2.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | | fido2.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | | fido2.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | -| global | object | `{"alb":{"ingress":false},"auth-server":{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authServerServiceName":"auth-server","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 
PS512","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false},"auth-server-key-rotation":{"customAnnotations":{"cronjob":{},"secret":{},"service":{}},"enabled":true,"initKeysLife":48},"awsStorageType":"io1","azureStorageAccountType":"Standard_LRS","azureStorageKind":"Managed","casa":{"appLoggers":{"casaLogLevel":"INFO","casaLogTarget":"STDOUT","enableStdoutLogPrefix":"true","timerLogLevel":"INFO","timerLogTarget":"FILE"},"casaServiceName":"casa","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"casaAdditionalAnnotations":{},"casaEnabled":false,"casaLabels":{}}},"cloud":{"testEnviroment":false},"cnAwsConfigFile":"/etc/jans/conf/aws_config_file","cnAwsSecretsReplicaRegionsFile":"/etc/jans/conf/aws_secrets_replica_regions","cnAwsSharedCredentialsFile":"/etc/jans/conf/aws_shared_credential_file","cnConfiguratorConfigurationFile":"/etc/jans/conf/configuration.json","cnConfiguratorDumpFile":"/etc/jans/conf/configuration.out.json","cnCouchbasePasswordFile":"/etc/jans/conf/couchbase_password","cnCouchbaseSuperuserPasswordFile":"/etc/jans/conf/couchbase_superuser_password","cnDocumentStoreType":"DB","cnGoogleApplicationCredentials":"/etc/jans/conf/google-credentials.json","cnLdapCacertFile":"/etc/certs/opendj.pem","cnLdapCertFile":"/etc/certs/opendj.crt","cnLdapKeyFile":"/etc/certs/opendj.key","cnLdapPasswordFile":"/etc/jans/conf/ldap_password","cnLdapTruststoreFile":"/etc/certs/opendj.pkcs12","cnLdapTruststorePasswordFile":"/etc/jans/conf/ldap_truststore_password","cnPersistenceType":"sql","cnPrometheusPort":"","cnSqlPasswordFile":"/etc/jans/conf/sql_password","config":{"customAnnotations":{"clusterRoleBinding":{},"configMap":{},"job":{},"role":{},"roleBinding":{},"secret":{},"service":{},"serviceAccount":{}},"enabled":true},"config-api":{"appLoggers":{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","configApiServerServiceName":"config-api","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"configApiAdditionalAnnotations":{},"configAp
iEnabled":true,"configApiLabels":{}},"plugins":"fido2,scim,user-mgt"},"configAdapterName":"kubernetes","configSecretAdapter":"kubernetes","fido2":{"appLoggers":{"enableStdoutLogPrefix":"true","fido2LogLevel":"INFO","fido2LogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"fido2ServiceName":"fido2","ingress":{"fido2AdditionalAnnotations":{},"fido2ConfigAdditionalAnnotations":{},"fido2ConfigEnabled":false,"fido2ConfigLabels":{},"fido2Enabled":false,"fido2Labels":{}}},"fqdn":"demoexample.jans.io","gcePdStorageType":"pd-standard","isFqdnRegistered":false,"istio":{"additionalAnnotations":{},"additionalLabels":{},"enabled":false,"gateways":[],"ingress":false,"namespace":"istio-system"},"jobTtlSecondsAfterFinished":300,"kc-scheduler":{"enabled":false},"kcAdminCredentialsFile":"/etc/jans/conf/kc_admin_creds","kcDbPasswordFile":"/etc/jans/conf/kc_db_password","lbIp":"22.22.22.22","link":{"appLoggers":{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"linkEnabled":true},"linkServiceName":"link"},"nginx-ingress":{"enabled":true},"opendj":{"customAnnotations":{"cronjob":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"statefulset":{},"storageClass":{}},"enabled":false,"ldapServiceName":"opendj"},"persistence":{"customAnnotations":{"job":{},"secret":{},"service":{}},"enabled":true},"saml":{"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"samlAdditionalAnnotations":{},"samlEnabled":false,"samlLabels":{}},"samlServiceName":"saml"},"scim":{"appLoggers":{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"scimAdditionalAnnotations":{},"scimConfigAdditionalAnnotations":{},"scimConfigEnabled":false,"scimConfigLabels":{},"scimEnabled":false,"scimLabels":{}},"scimServiceName":"scim"},"serviceAccountName":"default","storageClass":{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"},"usrEnvs":{"normal":{},"secret":{}}}` | Parameters used globally across all services helm charts. 
| +| global | object | `{"alb":{"ingress":false},"auth-server":{"appLoggers":{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"authEncKeys":"RSA1_5 RSA-OAEP","authServerServiceName":"auth-server","authSigKeys":"RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"authServerAdditionalAnnotations":{},"authServerEnabled":true,"authServerLabels":{},"deviceCodeAdditionalAnnotations":{},"deviceCodeEnabled":true,"deviceCodeLabels":{},"firebaseMessagingAdditionalAnnotations":{},"firebaseMessagingEnabled":true,"firebaseMessagingLabels":{},"lockAdditionalAnnotations":{},"lockConfigAdditionalAnnotations":{},"lockConfigEnabled":false,"lockConfigLabels":{},"lockEnabled":false,"lockLabels":{},"openidAdditionalAnnotations":{},"openidConfigEnabled":true,"openidConfigLabels":{},"u2fAdditionalAnnotations":{},"u2fConfigEnabled":true,"u2fConfigLabels":{},"uma2AdditionalAnnotations":{},"uma2ConfigEnabled":true,"uma2ConfigLabels":{},"webdiscoveryAdditionalAnnotations":{},"webdiscoveryEnabled":true,"webdiscoveryLabels":{},"webfingerAdditionalAnnotations":{},"webfingerEnabled":true,"webfingerLabels":{}},"lockEnabled":false},"auth-server-key-rotation":{"customAnnotations":{"cronjob":{},"secret":{},"service":{}},"enabled":true,"initKeysLife":48},"awsStorageType":"io1","azureStorageAccountType":"Standard_LRS","azureStorageKind":"Managed","casa":{"appLoggers":{"casaLogLevel":"INFO","casaLogTarget":"STDOUT","enableStdoutLogPrefix":"true","timerLogLevel":"INFO","timerLogTarget":"FILE"},"casaServiceName":"casa","cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"casaAdditionalAnnotations":{},"casaEnabled":false,"casaLabels":{}}},"cloud":{"testEnviroment":false},"cnAwsConfigFile":"/etc/jans/conf/aws_config_file","cnAwsSecretsReplicaRegionsFile":"/etc/jans/conf/aws_secrets_replica_regions","cnAwsSharedCredentialsFile":"/etc/jans/conf/aws_shared_credential_file","cnConfiguratorConfigurationFile":"/etc/jans/conf/configuration.json","cnConfiguratorDumpFile":"/etc/jans/conf/configuration.out.json","cnCouchbasePasswordFile":"/etc/jans/conf/couchbase_password","cnCouchbaseSuperuserPasswordFile":"/etc/jans/conf/couchbase_superuser_password","cnDocumentStoreType":"DB","cnGoogleApplicationCredentials":"/etc/jans/conf/google-credentials.json","cnPersistenceType":"sql","cnPrometheusPort":"","cnSqlPasswordFile":"/etc/jans/conf/sql_password","config":{"customAnnotations":{"clusterRoleBinding":{},"configMap":{},"job":{},"role":{},"roleBinding":{},"secret":{},"service":{},"serviceAccount":{}},"enabled":true},"config-api":{"appLoggers":{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","configApiServerServiceName":"config-a
pi","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"configApiAdditionalAnnotations":{},"configApiEnabled":true,"configApiLabels":{}},"plugins":"fido2,scim,user-mgt"},"configAdapterName":"kubernetes","configSecretAdapter":"kubernetes","fido2":{"appLoggers":{"enableStdoutLogPrefix":"true","fido2LogLevel":"INFO","fido2LogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"fido2ServiceName":"fido2","ingress":{"fido2AdditionalAnnotations":{},"fido2ConfigAdditionalAnnotations":{},"fido2ConfigEnabled":false,"fido2ConfigLabels":{},"fido2Enabled":false,"fido2Labels":{}}},"fqdn":"demoexample.jans.io","gcePdStorageType":"pd-standard","isFqdnRegistered":false,"istio":{"additionalAnnotations":{},"additionalLabels":{},"enabled":false,"gateways":[],"ingress":false,"namespace":"istio-system"},"jobTtlSecondsAfterFinished":300,"kc-scheduler":{"enabled":false},"kcAdminCredentialsFile":"/etc/jans/conf/kc_admin_creds","kcDbPasswordFile":"/etc/jans/conf/kc_db_password","lbIp":"22.22.22.22","link":{"appLoggers":{"enableStdoutLogPrefix":"true","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"linkEnabled":true},"linkServiceName":"link"},"nginx-ingress":{"enabled":true},"persistence":{"customAnnotations":{"job":{},"secret":{},"service":{}},"enabled":true},"saml":{"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":false,"ingress":{"samlAdditionalAnnotations":{},"samlEnabled":false,"samlLabels":{}},"samlServiceName":"saml"},"scim":{"appLoggers":{"enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"},"cnCustomJavaOptions":"","customAnnotations":{"deployment":{},"destinationRule":{},"horizontalPodAutoscaler":{},"podDisruptionBudget":{},"secret":{},"service":{},"virtualService":{}},"enabled":true,"ingress":{"scimAdditionalAnnotations":{},"scimConfigAdditionalAnnotations":{},"scimConfigEnabled":false,"scimConfigLabels":{},"scimEnabled":false,"scimLabels":{}},"scimServiceName":"scim"},"serviceAccountName":"default","usrEnvs":{"normal":{},"secret":{}}}` | Parameters used globally across all services helm charts. | | global.alb.ingress | bool | `false` | Activates ALB ingress | | global.auth-server-key-rotation.enabled | bool | `true` | Boolean flag to enable/disable the auth-server-key rotation cronjob chart. 
| | global.auth-server-key-rotation.initKeysLife | int | `48` | The initial auth server key rotation keys life in hours | -| global.auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| global.auth-server.appLoggers | object | `{"auditStatsLogLevel":"INFO","auditStatsLogTarget":"FILE","authLogLevel":"INFO","authLogTarget":"STDOUT","enableStdoutLogPrefix":"true","httpLogLevel":"INFO","httpLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | global.auth-server.appLoggers.auditStatsLogLevel | string | `"INFO"` | jans-auth_audit.log level | | global.auth-server.appLoggers.auditStatsLogTarget | string | `"FILE"` | jans-auth_script.log target | | global.auth-server.appLoggers.authLogLevel | string | `"INFO"` | jans-auth.log level | @@ -285,8 +279,6 @@ Kubernetes: `>=v1.22.0-0` | global.auth-server.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e auth-server-script ===> 2022-12-20 17:49:55,744 INFO | | global.auth-server.appLoggers.httpLogLevel | string | `"INFO"` | http_request_response.log level | | global.auth-server.appLoggers.httpLogTarget | string | `"FILE"` | http_request_response.log target | -| global.auth-server.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-auth_persistence_ldap_statistics.log level | -| global.auth-server.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-auth_persistence_ldap_statistics.log target | | global.auth-server.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-auth_persistence_duration.log level | | global.auth-server.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-auth_persistence_duration.log target | | global.auth-server.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-auth_persistence.log level | @@ -353,21 +345,13 @@ Kubernetes: `>=v1.22.0-0` | global.cnCouchbaseSuperuserPasswordFile | string | `"/etc/jans/conf/couchbase_superuser_password"` | Path to Couchbase superuser password file | | global.cnDocumentStoreType | string | `"DB"` | Document store type to use for shibboleth files DB. | | global.cnGoogleApplicationCredentials | string | `"/etc/jans/conf/google-credentials.json"` | Base64 encoded service account. The sa must have roles/secretmanager.admin to use Google secrets and roles/spanner.databaseUser to use Spanner. Leave as this is a sensible default. 
| -| global.cnLdapCacertFile | string | `"/etc/certs/opendj.pem"` | Path to OpenDJ CA cert file | -| global.cnLdapCertFile | string | `"/etc/certs/opendj.crt"` | Path to OpenDJ cert file | -| global.cnLdapKeyFile | string | `"/etc/certs/opendj.key"` | Path to OpenDJ key file | -| global.cnLdapPasswordFile | string | `"/etc/jans/conf/ldap_password"` | Path to LDAP password file | -| global.cnLdapTruststoreFile | string | `"/etc/certs/opendj.pkcs12"` | Path to OpenDJ truststore file | -| global.cnLdapTruststorePasswordFile | string | `"/etc/jans/conf/ldap_truststore_password"` | Path to LDAP truststore password file | -| global.cnPersistenceType | string | `"sql"` | Persistence backend to run Janssen with ldap|couchbase|hybrid|sql|spanner. | +| global.cnPersistenceType | string | `"sql"` | Persistence backend to run Janssen with couchbase|hybrid|sql|spanner. | | global.cnPrometheusPort | string | `""` | Port used by Prometheus JMX agent (default to empty string). To enable Prometheus JMX agent, set the value to a number. | | global.cnSqlPasswordFile | string | `"/etc/jans/conf/sql_password"` | Path to SQL password file | -| global.config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| global.config-api.appLoggers | object | `{"configApiLogLevel":"INFO","configApiLogTarget":"STDOUT","enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | global.config-api.appLoggers.configApiLogLevel | string | `"INFO"` | configapi.log level | | global.config-api.appLoggers.configApiLogTarget | string | `"STDOUT"` | configapi.log target | | global.config-api.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. 
i.e config-api_persistence ===> 2022-12-20 17:49:55,744 INFO | -| global.config-api.appLoggers.ldapStatsLogLevel | string | `"INFO"` | config-api_persistence_ldap_statistics.log level | -| global.config-api.appLoggers.ldapStatsLogTarget | string | `"FILE"` | config-api_persistence_ldap_statistics.log target | | global.config-api.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | config-api_persistence_duration.log level | | global.config-api.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | config-api_persistence_duration.log target | | global.config-api.appLoggers.persistenceLogLevel | string | `"INFO"` | config-api_persistence.log level | @@ -418,10 +402,8 @@ Kubernetes: `>=v1.22.0-0` | global.kcAdminCredentialsFile | string | `"/etc/jans/conf/kc_admin_creds"` | Path to file contains Keycloak admin credentials (username and password) | | global.kcDbPasswordFile | string | `"/etc/jans/conf/kc_db_password"` | Path to file contains password for database access | | global.lbIp | string | `"22.22.22.22"` | The Loadbalancer IP created by nginx or istio on clouds that provide static IPs. This is not needed if `global.fqdn` is globally resolvable. | -| global.link.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| global.link.appLoggers | object | `{"enableStdoutLogPrefix":"true","linkLogLevel":"INFO","linkLogTarget":"STDOUT","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | global.link.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e link-persistence ===> 2022-12-20 17:49:55,744 INFO | -| global.link.appLoggers.ldapStatsLogLevel | string | `"INFO"` | cacherefresh_persistence_ldap_statistics.log level | -| global.link.appLoggers.ldapStatsLogTarget | string | `"FILE"` | cacherefresh_persistence_ldap_statistics.log target | | global.link.appLoggers.linkLogLevel | string | `"INFO"` | cacherefresh.log level | | global.link.appLoggers.linkLogTarget | string | `"STDOUT"` | cacherefresh.log target | | global.link.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | cacherefresh_persistence_duration.log level | @@ -435,8 +417,6 @@ Kubernetes: `>=v1.22.0-0` | global.link.ingress | object | `{"linkEnabled":true}` | Enable endpoints in either istio or nginx ingress depending on users choice | | global.link.linkServiceName | string | `"link"` | Name of the link service. Please keep it as default. | | global.nginx-ingress.enabled | bool | `true` | Boolean flag to enable/disable the nginx-ingress definitions chart. | -| global.opendj.enabled | bool | `false` | Boolean flag to enable/disable the OpenDJ chart. | -| global.opendj.ldapServiceName | string | `"opendj"` | Name of the OpenDJ service. Please keep it as default. 
| | global.persistence.enabled | bool | `true` | Boolean flag to enable/disable the persistence chart. | | global.saml.cnCustomJavaOptions | string | `""` | passing custom java options to saml. DO NOT PASS JAVA_OPTIONS in envs. | | global.saml.enabled | bool | `false` | Boolean flag to enable/disable the saml chart. | @@ -444,10 +424,8 @@ Kubernetes: `>=v1.22.0-0` | global.saml.ingress.samlAdditionalAnnotations | object | `{}` | SAML ingress resource additional annotations. | | global.saml.ingress.samlLabels | object | `{}` | SAML config ingress resource labels. key app is taken | | global.saml.samlServiceName | string | `"saml"` | Name of the saml service. Please keep it as default. | -| global.scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","ldapStatsLogLevel":"INFO","ldapStatsLogTarget":"FILE","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | +| global.scim.appLoggers | object | `{"enableStdoutLogPrefix":"true","persistenceDurationLogLevel":"INFO","persistenceDurationLogTarget":"FILE","persistenceLogLevel":"INFO","persistenceLogTarget":"FILE","scimLogLevel":"INFO","scimLogTarget":"STDOUT","scriptLogLevel":"INFO","scriptLogTarget":"FILE"}` | App loggers can be configured to define where the logs will be redirected to and the level of each in which it should be displayed. | | global.scim.appLoggers.enableStdoutLogPrefix | string | `"true"` | Enable log prefixing which enables prepending the STDOUT logs with the file name. i.e jans-scim ===> 2022-12-20 17:49:55,744 INFO | -| global.scim.appLoggers.ldapStatsLogLevel | string | `"INFO"` | jans-scim_persistence_ldap_statistics.log level | -| global.scim.appLoggers.ldapStatsLogTarget | string | `"FILE"` | jans-scim_persistence_ldap_statistics.log target | | global.scim.appLoggers.persistenceDurationLogLevel | string | `"INFO"` | jans-scim_persistence_duration.log level | | global.scim.appLoggers.persistenceDurationLogTarget | string | `"FILE"` | jans-scim_persistence_duration.log target | | global.scim.appLoggers.persistenceLogLevel | string | `"INFO"` | jans-scim_persistence.log level | @@ -467,8 +445,6 @@ Kubernetes: `>=v1.22.0-0` | global.scim.ingress.scimLabels | object | `{}` | SCIM config ingress resource labels. key app is taken | | global.scim.scimServiceName | string | `"scim"` | Name of the scim service. Please keep it as default. | | global.serviceAccountName | string | `"default"` | service account used by Kubernetes resources | -| global.storageClass | object | `{"allowVolumeExpansion":true,"allowedTopologies":[],"mountOptions":["debug"],"parameters":{},"provisioner":"microk8s.io/hostpath","reclaimPolicy":"Retain","volumeBindingMode":"WaitForFirstConsumer"}` | StorageClass section for OpenDJ charts. This is not currently used by the openbanking distribution. You may specify custom parameters as needed. | -| global.storageClass.parameters | object | `{}` | parameters: fsType: "" kind: "" pool: "" storageAccountType: "" type: "" | | global.usrEnvs | object | `{"normal":{},"secret":{}}` | Add custom normal and secret envs to the service. Envs defined in global.userEnvs will be globally available to all services | | global.usrEnvs.normal | object | `{}` | Add custom normal envs to the service. 
variable1: value1 | | global.usrEnvs.secret | object | `{}` | Add custom secret envs to the service. variable1: value1 | @@ -526,37 +502,6 @@ Kubernetes: `>=v1.22.0-0` | nginx-ingress.ingress.additionalAnnotations | object | `{}` | Additional annotations that will be added across all ingress definitions in the format of {cert-manager.io/issuer: "letsencrypt-prod"} Enable client certificate authentication nginx.ingress.kubernetes.io/auth-tls-verify-client: "optional" Create the secret containing the trusted ca certificates nginx.ingress.kubernetes.io/auth-tls-secret: "janssen/tls-certificate" Specify the verification depth in the client certificates chain nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1" Specify if certificates are passed to upstream server nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true" | | nginx-ingress.ingress.additionalLabels | object | `{}` | Additional labels that will be added across all ingress definitions in the format of {mylabel: "myapp"} | | nginx-ingress.ingress.tls | list | `[{"hosts":["demoexample.jans.io"],"secretName":"tls-certificate"}]` | Secrets holding HTTPS CA cert and key. | -| opendj | object | `{"additionalAnnotations":{},"additionalLabels":{},"backup":{"cronJobSchedule":"*/59 * * * *","enabled":true},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","hpa":{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50},"image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"gluufederation/opendj","tag":"5.0.0_dev"},"lifecycle":{"preStop":{"exec":{"command":["/bin/sh","-c","python3 /app/scripts/deregister_peer.py 1>&/proc/1/fd/1"]}}},"livenessProbe":{"exec":{"command":["python3","/app/scripts/healthcheck.py"]},"failureThreshold":20,"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5},"pdb":{"enabled":true,"maxUnavailable":1},"persistence":{"size":"5Gi"},"ports":{"tcp-admin":{"nodePort":"","port":4444,"protocol":"TCP","targetPort":4444},"tcp-ldap":{"nodePort":"","port":1389,"protocol":"TCP","targetPort":1389},"tcp-ldaps":{"nodePort":"","port":1636,"protocol":"TCP","targetPort":1636},"tcp-repl":{"nodePort":"","port":8989,"protocol":"TCP","targetPort":8989},"tcp-serf":{"nodePort":"","port":7946,"protocol":"TCP","targetPort":7946},"udp-serf":{"nodePort":"","port":7946,"protocol":"UDP","targetPort":7946}},"readinessProbe":{"failureThreshold":20,"initialDelaySeconds":60,"periodSeconds":25,"tcpSocket":{"port":1636},"timeoutSeconds":5},"replicas":1,"resources":{"limits":{"cpu":"1500m","memory":"2000Mi"},"requests":{"cpu":"1500m","memory":"2000Mi"}},"topologySpreadConstraints":{},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2).Written in Java, OpenDJ offers multi-master replication, access control, and many extensions. 
| -| opendj.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | -| opendj.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | -| opendj.backup | object | `{"cronJobSchedule":"*/59 * * * *","enabled":true}` | Configure ldap backup cronjob | -| opendj.customScripts | list | `[]` | Add custom scripts that have been mounted to run before the entrypoint. - /tmp/custom.sh - /tmp/custom2.sh | -| opendj.dnsConfig | object | `{}` | Add custom dns config | -| opendj.dnsPolicy | string | `""` | Add custom dns policy | -| opendj.hpa | object | `{"behavior":{},"enabled":true,"maxReplicas":10,"metrics":[],"minReplicas":1,"targetCPUUtilizationPercentage":50}` | Configure the HorizontalPodAutoscaler | -| opendj.hpa.behavior | object | `{}` | Scaling Policies | -| opendj.hpa.metrics | list | `[]` | metrics if targetCPUUtilizationPercentage is not set | -| opendj.image.pullPolicy | string | `"IfNotPresent"` | Image pullPolicy to use for deploying. | -| opendj.image.pullSecrets | list | `[]` | Image Pull Secrets | -| opendj.image.repository | string | `"gluufederation/opendj"` | Image to use for deploying. | -| opendj.image.tag | string | `"5.0.0_dev"` | Image tag to use for deploying. | -| opendj.livenessProbe | object | `{"exec":{"command":["python3","/app/scripts/healthcheck.py"]},"failureThreshold":20,"initialDelaySeconds":30,"periodSeconds":30,"timeoutSeconds":5}` | Configure the liveness healthcheck for OpenDJ if needed. https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py | -| opendj.livenessProbe.exec | object | `{"command":["python3","/app/scripts/healthcheck.py"]}` | Executes the python3 healthcheck. | -| opendj.pdb | object | `{"enabled":true,"maxUnavailable":1}` | Configure the PodDisruptionBudget | -| opendj.persistence.size | string | `"5Gi"` | OpenDJ volume size | -| opendj.readinessProbe | object | `{"failureThreshold":20,"initialDelaySeconds":60,"periodSeconds":25,"tcpSocket":{"port":1636},"timeoutSeconds":5}` | Configure the readiness healthcheck for OpenDJ if needed. https://github.com/GluuFederation/docker-opendj/blob/master/scripts/healthcheck.py | -| opendj.replicas | int | `1` | Service replica number. | -| opendj.resources | object | `{"limits":{"cpu":"1500m","memory":"2000Mi"},"requests":{"cpu":"1500m","memory":"2000Mi"}}` | Resource specs. | -| opendj.resources.limits.cpu | string | `"1500m"` | CPU limit. | -| opendj.resources.limits.memory | string | `"2000Mi"` | Memory limit. | -| opendj.resources.requests.cpu | string | `"1500m"` | CPU request. | -| opendj.resources.requests.memory | string | `"2000Mi"` | Memory request. | -| opendj.topologySpreadConstraints | object | `{}` | Configure the topology spread constraints. 
Notice this is a map NOT a list as in the upstream API https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | -| opendj.usrEnvs | object | `{"normal":{},"secret":{}}` | Add custom normal and secret envs to the service | -| opendj.usrEnvs.normal | object | `{}` | Add custom normal envs to the service variable1: value1 | -| opendj.usrEnvs.secret | object | `{}` | Add custom secret envs to the service variable1: value1 | -| opendj.volumeMounts | list | `[]` | Configure any additional volumesMounts that need to be attached to the containers | -| opendj.volumes | list | `[]` | Configure any additional volumes that need to be attached to the pod | | persistence | object | `{"additionalAnnotations":{},"additionalLabels":{},"customScripts":[],"dnsConfig":{},"dnsPolicy":"","image":{"pullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/janssenproject/jans/persistence-loader","tag":"1.1.6_dev"},"lifecycle":{},"resources":{"limits":{"cpu":"300m","memory":"300Mi"},"requests":{"cpu":"300m","memory":"300Mi"}},"usrEnvs":{"normal":{},"secret":{}},"volumeMounts":[],"volumes":[]}` | Job to generate data and initial config for Janssen Server persistence layer. | | persistence.additionalAnnotations | object | `{}` | Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"} | | persistence.additionalLabels | object | `{}` | Additional labels that will be added across the gateway in the format of {mylabel: "myapp"} | diff --git a/docs/janssen-server/usermgmt/usermgmt-cli-tui.md b/docs/janssen-server/usermgmt/usermgmt-cli-tui.md index d511e1aeb08..befa7a82f3e 100644 --- a/docs/janssen-server/usermgmt/usermgmt-cli-tui.md +++ b/docs/janssen-server/usermgmt/usermgmt-cli-tui.md @@ -8,47 +8,9 @@ tags: ## Local User Management -In this document we will cover managing people in the Jans Server's LDAP Directory, Jans CLI and Jans TUI. +In this document we will cover managing people in the Jans Server's Jans CLI and Jans TUI. -## Manage data in Jans LDAP - -All the data generated by the Jans Server is stored in the local LDAP server included in every deployment. This includes OpenID Connect client data, session data, tokens, user data, and more. - -Use an LDAP browser like [JXplorer](http://jxplorer.org/) or [Apache Directory Studio](https://directory.apache.org/studio/) and can find the configuration in `/etc/jans/conf/jans-ldap.properties`, e.g.: - -For Jans OpenDJ, it will look like this: -``` -bindDN: cn=directory manager -bindPassword: rmQQI/sax0U= -servers: localhost:1636 - -``` - -Establish a tunnel from your computer to the target Jans Server's LDAP. Tunneling is required because Jans Server's LDAP port, 1636, is not exposed to the internet. - -In the below example we are showing how to connect and use Jans Server's internal LDAP server with any LDAP browser. - -* Sign in with `sudo su -` -* Create tunnel: -* `ssh -fNL 5902:localhost:1636 [username]@[ip_of_Jans_server]` -* Open LDAP browser -* Create new connection - -![ldap-connection](https://github.com/JanssenProject/jans/assets/43112579/901483e4-d903-4b5f-af45-0a0c9957c29b) - - -* Perform authentication. 'Password' is the the password of 'admin' user. - - -![ldap-creds](https://github.com/JanssenProject/jans/assets/43112579/c9751ddf-8a0f-4fad-9b49-12ebd425018d) - - -* Browse ldap and go to `ou=people`. 
- - - ![ldap_people](https://github.com/JanssenProject/jans/assets/43112579/8da57305-0227-4bdb-82f8-0044f8b05afe) - ## Manage data in Jans TUI We can easily add a user using Jans TUI. To do that, run the TUI using `/opt/jans/jans-cli/config-cli-tui.py` for example, and click on the `Users` tab. diff --git a/docs/janssen-server/vm-ops/backup.md b/docs/janssen-server/vm-ops/backup.md index f2a8d83d4bc..a4fa6904dc3 100644 --- a/docs/janssen-server/vm-ops/backup.md +++ b/docs/janssen-server/vm-ops/backup.md @@ -30,102 +30,3 @@ All Jans Server files live in a single folder: /opt. The entire Jans Server fold * Use tar to take a backup: `tar cvf jans-backup.tar /opt/jans/` * Start the server again: `systemctl start list-units --all "jans*"` - - -## LDIF Data Backup - -From time to time (daily or weekly), the LDAP database should be exported in a standard LDIF format. Having the data in plain text offers some options for recovery that are not possible with a binary backup. - -Instructions are provided below for exporting OpenDJ data. The below instructions address situations where unused and expired cache and session related entries are piling and causing issues with functionality. - - -### OpenDj - -If your Jans Server is backed by OpenDJ, follow these steps to backup your data: - -1. First check your cache entries by running the following command: - - ``` - /opt/opendj/bin/ldapsearch -h localhost -p 1636 -Z -X -D "cn=directory manager" -w -b 'o=jans' 'grtId=*' dn | grep 'dn:' | wc -l - ``` - -2. Dump the data as LDIF : - - * Stop the services using `systemctl stop opendj` - - * Now export the LDIF and save it in appropriate place safe - - ``` - /opt/opendj/bin/export-ldif -n userRoot --offline -l databackup_date.ldif - ``` - * Now exclude jansGrant(grntId) so the command becomes: - - ``` - /opt/opendj/bin/export-ldif -n userRoot --offline -l yourdata_withoutoxAuthGrantId.ldif --includeFilter '(!(grtId=*))' - ``` - - * You may also wish to exclude jansMetric so the command becomes: - - ``` - /opt/opendj/bin/export-ldif -n userRoot --offline -l yourdata_withoutGrantIdMetic.ldif --includeFilter '(&(!(grtId=*))(!(objectClass=jansMetric)))' - ``` - -3. Now, only if needed, rebuild indexes: - - * Check status of indexes: - ``` - /opt/opendj/bin/backendstat show-index-status --backendID userRoot --baseDN o=jans - ``` - - Take note of all indexes that need to be rebuilt. If no indexing is needed, move on to step 4. - - * Start the opendj service `systemctl start opendj` - - * Build backend index for all indexes that need it accoring to previous status command, change passoword -w and index name accordingly. This command has to be run for every index separately: - - ``` - /opt/opendj/bin/dsconfig create-backend-index --port 4444 --hostname localhost --bindDN "cn=directory manager" -w password --backend-name userRoot --index-name iname --set index-type:equality --set index-entry-limit:4000 --trustAll --no-prompt - ``` - - * Stop the opendj service `systemctl stop opendj` - - * Rebuild the indexes as needed, here are examples : - - ``` - /opt/opendj/bin/rebuild-index --baseDN o=jans --index iname - /opt/opendj/bin/rebuild-index --baseDN o=jans --index uid - /opt/opendj/bin/rebuild-index --baseDN o=jans --index mail - ``` - - * Check status again : - - ``` - /opt/opendj/bin/backendstat show-index-status --backendID userRoot --baseDN o=jans - ``` - - * Verify indexes: - - ``` - /opt/opendj/bin/verify-index --baseDN o=jans --countErrors - ``` - -4. Next import your previously exported LDIF. 
- - ``` - /opt/opendj/bin/import-ldif -n userRoot --offline -l your-backup.ldif - ``` - -If you moved to a new LDAP, copy back your schema files to this directory: - -``` -/opt/opendj/config/schema/ -``` - - * Start the `opendj` and other services - * Finally, verify the cache entries have been removed: - - ``` - /opt/opendj/bin/ldapsearch -h localhost -p 1636 -Z -X -D "cn=directory manager" -w -b 'o=jans' 'grtId=*' dn | grep 'dn:' | wc -l - ``` - -You should be done and everything should be working perfectly. You may notice your Jans Server responding slower than before. That is expected -- your LDAP is adjusting to the new data, and indexing might be in process. Give it some time and it should be back to normal. diff --git a/docs/janssen-server/vm-ops/certificates.md b/docs/janssen-server/vm-ops/certificates.md index 05e5014a961..f1c2c383f3a 100644 --- a/docs/janssen-server/vm-ops/certificates.md +++ b/docs/janssen-server/vm-ops/certificates.md @@ -10,12 +10,12 @@ tags: Janssen components have cryptographic keys and X.509 certificates that are stored on the filesystem at the time of installation. Details for certificates associated with each component are provided below. The following certificates are available in the `/etc/certs` folder. -|APACHE |OPENDJ |Jans Auth CA Certificates| -|:---------------|:--------------|:------------------------| -|httpd.crt |opendj.crt |jans-auth-keys.p12 | -|https.csr |opendj.pksc12 | -|httpd.key | | -|httpd.key.orig | | +| APACHE | Jans Auth CA Certificates | +|:---------------|---------------------------| +| httpd.crt | jans-auth-keys.p12 | +| https.csr | +| httpd.key | +| httpd.key.orig | ## Custom Script JSON Files Additionally the following json files are available which are used in different custom scripts for multi-factor authentication.