From a92c342b99c0e5f7cad23eb872e7e7c19984ff90 Mon Sep 17 00:00:00 2001 From: Zack Hodgson Brady Date: Sat, 2 Dec 2023 01:00:14 -0500 Subject: [PATCH] updated chart functionality and values --- charts/cluster-templates/Chart.yaml | 4 +- charts/cluster-templates/README.md | 2 +- charts/cluster-templates/app-readme.md | 2 +- .../cluster-templates/templates/cluster.yaml | 16 +++--- charts/cluster-templates/values-aws.yaml | 22 +++++---- charts/cluster-templates/values-custom.yaml | 28 ++--------- charts/cluster-templates/values.yaml | 24 ++++----- examples/aws/fleet.yaml | 2 +- examples/aws/values-aws-sts.yaml | 43 ++++++++++------ examples/aws/values-aws.yaml | 49 ++++++++++++------- examples/custom/fleet.yaml | 2 +- examples/custom/values-custom.yaml | 6 +-- 12 files changed, 104 insertions(+), 96 deletions(-) diff --git a/charts/cluster-templates/Chart.yaml b/charts/cluster-templates/Chart.yaml index d418313..5697196 100644 --- a/charts/cluster-templates/Chart.yaml +++ b/charts/cluster-templates/Chart.yaml @@ -3,8 +3,8 @@ name: rancher-cluster-templates description: RGS - Hardened Rancher Cluster Templates icon: https://raw.githubusercontent.com/rancherfederal/carbide-docs/main/static/img/carbide-logo.svg type: application -version: 0.3.0 -appVersion: 0.3.0 +version: 0.3.1 +appVersion: 0.3.1 annotations: catalog.cattle.io/type: cluster-template catalog.cattle.io/namespace: fleet-default diff --git a/charts/cluster-templates/README.md b/charts/cluster-templates/README.md index 477d130..1c16f5a 100644 --- a/charts/cluster-templates/README.md +++ b/charts/cluster-templates/README.md @@ -2,7 +2,7 @@ | Type | Chart Version | App Version | | :---------: | :-----------: | :---------: | -| application | `v0.3.0` | `v0.3.0` | +| application | `v0.3.1` | `v0.3.1` | ⚠️ This project is still in active development. As we continued to develop it, there will be breaking changes. 
⚠️ diff --git a/charts/cluster-templates/app-readme.md b/charts/cluster-templates/app-readme.md index 477d130..1c16f5a 100644 --- a/charts/cluster-templates/app-readme.md +++ b/charts/cluster-templates/app-readme.md @@ -2,7 +2,7 @@ | Type | Chart Version | App Version | | :---------: | :-----------: | :---------: | -| application | `v0.3.0` | `v0.3.0` | +| application | `v0.3.1` | `v0.3.1` | ⚠️ This project is still in active development. As we continued to develop it, there will be breaking changes. ⚠️ diff --git a/charts/cluster-templates/templates/cluster.yaml b/charts/cluster-templates/templates/cluster.yaml index 4a9a851..05ce2b3 100644 --- a/charts/cluster-templates/templates/cluster.yaml +++ b/charts/cluster-templates/templates/cluster.yaml @@ -183,8 +183,8 @@ spec: {{- if eq $.Values.cloudprovider "harvester" }} machineSelectorConfig: - config: - cloud-provider-config: {{ .Values.cloudProviderConfigSecretName }} - cloud-provider-name: harvester + cloud-provider-config: {{ .Values.cluster.config.cloud_provider_config | default "secret://harvester" }} + cloud-provider-name: {{ .Values.cluster.config.cloud_provider_name | default "harvester" }} {{- if .Values.cluster.config.systemDefaultRegistry }} system-default-registry: {{ .Values.cluster.config.systemDefaultRegistry }} {{- end }} @@ -193,7 +193,10 @@ spec: {{- else if eq $.Values.cloudprovider "vsphere" }} machineSelectorConfig: - config: - cloud-provider-name: "rancher-vsphere" + {{- if .Values.cluster.config.cloud_provider_config }} + cloud-provider-config: {{ .Values.cluster.config.cloud_provider_config }} + {{- end }} + cloud-provider-name: {{ .Values.cluster.config.cloud_provider_name | default "vsphere" }} {{- if .Values.cluster.config.systemDefaultRegistry }} system-default-registry: {{ .Values.cluster.config.systemDefaultRegistry }} {{- end }} @@ -203,11 +206,10 @@ spec: {{- else }} machineSelectorConfig: - config: - {{- if .Values.cluster.config.cloud_provider_name }} - cloud-provider-name: {{ 
.Values.cluster.config.cloud_provider_name | quote }} - {{- else }} - cloud-provider-name: "" + {{- if .Values.cluster.config.cloud_provider_config }} + cloud-provider-config: {{ .Values.cluster.config.cloud_provider_config }} {{- end }} + cloud-provider-name: {{ .Values.cluster.config.cloud_provider_name }} {{- if .Values.cluster.config.systemDefaultRegistry }} system-default-registry: {{ .Values.cluster.config.systemDefaultRegistry }} {{- end }} diff --git a/charts/cluster-templates/values-aws.yaml b/charts/cluster-templates/values-aws.yaml index f70d510..44a1407 100644 --- a/charts/cluster-templates/values-aws.yaml +++ b/charts/cluster-templates/values-aws.yaml @@ -17,8 +17,8 @@ cluster: # key: value name: rke2-cluster config: - systemDefaultRegistry: docker.io - kubernetesVersion: v1.25.15+rke2r2 + systemDefaultRegistry: docker.io # default registry + kubernetesVersion: v1.26.10+rke2r2 # https://github.com/rancher/rke2/releases localClusterAuthEndpoint: enabled: false # agentEnvVars: @@ -34,6 +34,8 @@ cluster: write_kubeconfig_mode: 0600 use_service_account_credentials: false protect_kernel_defaults: false + cloud_provider_name: 'aws' # aws, azure, harvester, vsphere + # cloud_provider_config: '' # cloud provider config here (cloud.conf) kube_controller_manager_arg: # - kube controller manager arguments here (https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager) kube_scheduler_arg: @@ -77,16 +79,16 @@ cluster: # skipWaitForDeleteTimeoutSeconds: 0 # timeout: 120 -# node and nodepools values +# node and nodepool(s) values nodepools: - - etcd: true + - name: control-plane-nodes + quantity: 3 + etcd: true controlplane: true worker: false labels: {} taints: {} - quantity: 3 paused: false - name: control-plane-nodes # accessKey: # only required if not using cloud provider credentials # secretKey: # only required if not using cloud provider credentials # sessionToken: # only required if not using cloud provider credentials @@ 
-103,7 +105,7 @@ nodepools: instanceType: # instance type region: # region createSecurityGroup: true - securityGroups: [''] + securityGroups: [''] # https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/installation-requirements/port-requirements # openPort: # - "80" # - "443" @@ -127,14 +129,14 @@ nodepools: userdata: | #cloud-config - - etcd: false + - name: worker-nodes + quantity: 3 + etcd: false controlplane: false worker: true labels: {} taints: {} - quantity: 3 paused: false - name: worker-nodes # accessKey: # only required if not using cloud provider credentials # secretKey: # only required if not using cloud provider credentials # sessionToken: # only required if not using cloud provider credentials diff --git a/charts/cluster-templates/values-custom.yaml b/charts/cluster-templates/values-custom.yaml index c2a20df..b81d181 100644 --- a/charts/cluster-templates/values-custom.yaml +++ b/charts/cluster-templates/values-custom.yaml @@ -14,8 +14,8 @@ cluster: # key: value name: rke2-cluster config: - systemDefaultRegistry: docker.io - kubernetesVersion: v1.25.15+rke2r2 + systemDefaultRegistry: docker.io # default registry + kubernetesVersion: v1.26.10+rke2r2 # https://github.com/rancher/rke2/releases localClusterAuthEndpoint: enabled: false # agentEnvVars: @@ -31,6 +31,8 @@ cluster: write_kubeconfig_mode: 0600 use_service_account_credentials: false protect_kernel_defaults: false + cloud_provider_name: '' # aws, azure, harvester, vsphere + # cloud_provider_config: '' # cloud provider config here (cloud.conf) kube_controller_manager_arg: # - kube controller manager arguments here (https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager) kube_scheduler_arg: @@ -74,28 +76,6 @@ cluster: # skipWaitForDeleteTimeoutSeconds: 0 # timeout: 120 -# node and nodepools values -nodepools: - - etcd: true - controlplane: true - worker: false - labels: {} - taints: {} - quantity: 3 - paused: false - name: 
control-plane-nodes - # cloudprovider values here - - - etcd: false - controlplane: false - worker: true - labels: {} - taints: {} - quantity: 3 - paused: false - name: worker-nodes - # cloudprovider values here - # addons values addons: monitoring: diff --git a/charts/cluster-templates/values.yaml b/charts/cluster-templates/values.yaml index da2053c..8ce08af 100644 --- a/charts/cluster-templates/values.yaml +++ b/charts/cluster-templates/values.yaml @@ -17,8 +17,8 @@ cluster: # key: value name: rke2-cluster config: - systemDefaultRegistry: docker.io - kubernetesVersion: v1.25.15+rke2r2 + systemDefaultRegistry: docker.io # default registry + kubernetesVersion: v1.26.10+rke2r2 # https://github.com/rancher/rke2/releases localClusterAuthEndpoint: enabled: false # agentEnvVars: @@ -34,6 +34,8 @@ cluster: write_kubeconfig_mode: 0600 use_service_account_credentials: false protect_kernel_defaults: false + cloud_provider_name: '' # aws, azure, harvester, vsphere + # cloud_provider_config: '' # cloud provider config here (cloud.conf) kube_controller_manager_arg: # - kube controller manager arguments here (https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager) kube_scheduler_arg: @@ -77,27 +79,27 @@ cluster: # skipWaitForDeleteTimeoutSeconds: 0 # timeout: 120 -# node and nodepools values +# node and nodepool(s) values nodepools: - - etcd: true + - name: control-plane-nodes + quantity: 3 + etcd: true controlplane: true worker: false labels: {} taints: {} - quantity: 3 paused: false - name: control-plane-nodes - # cloudprovider values here + # cloud provider values here - - etcd: false + - name: worker-nodes + quantity: 3 + etcd: false controlplane: false worker: true labels: {} taints: {} - quantity: 3 paused: false - name: worker-nodes - # cloudprovider values here + # cloud provider values here # addons values addons: diff --git a/examples/aws/fleet.yaml b/examples/aws/fleet.yaml index a0cbfb0..35bfee5 100644 --- a/examples/aws/fleet.yaml 
+++ b/examples/aws/fleet.yaml @@ -2,7 +2,7 @@ defaultNamespace: fleet-default helm: repo: https://rancherfederal.github.io/rancher-cluster-templates chart: rancher-cluster-templates - version: 0.3.0 + version: 0.3.1 releaseName: aws-cluster valuesFiles: - values-aws.yaml \ No newline at end of file diff --git a/examples/aws/values-aws-sts.yaml b/examples/aws/values-aws-sts.yaml index efef546..dc576ca 100644 --- a/examples/aws/values-aws-sts.yaml +++ b/examples/aws/values-aws-sts.yaml @@ -7,10 +7,10 @@ rancher: cluster: annotations: {} labels: {} - name: aws-rke2-cluster + name: rke2-cluster-aws-sts config: systemDefaultRegistry: rgcrprod.azurecr.us - kubernetesVersion: v1.25.15+rke2r2 + kubernetesVersion: v1.26.10+rke2r2 localClusterAuthEndpoint: enabled: false cni: canal @@ -20,9 +20,11 @@ cluster: profile: cis-1.23 selinux: true secrets_encryption: true - write_kubeconfig_mode: 0640 + write_kubeconfig_mode: 0600 use_service_account_credentials: true protect_kernel_defaults: true + cloud_provider_name: 'aws' # aws, azure, harvester, vsphere + # cloud_provider_config: '' # cloud provider config here (cloud.conf) kube_controller_manager_arg: - bind-address=127.0.0.1 - use-service-account-credentials=true @@ -36,10 +38,10 @@ cluster: - tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - authorization-mode=RBAC,Node - anonymous-auth=false + - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml - audit-policy-file=/etc/rancher/rke2/audit-policy.yaml - audit-log-mode=blocking-strict - audit-log-maxage=30 - - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml kubelet_arg: - protect-kernel-defaults=true - read-only-port=0 @@ -66,14 +68,14 @@ cluster: enabled: false nodepools: - - etcd: true + - name: control-plane-nodes + quantity: 3 + etcd: true 
controlplane: true worker: false labels: {} taints: {} - quantity: 3 paused: false - name: control-plane-nodes ami: ami-05a5f6298acdb05b6 accessKey: # access key secretKey: # secret key @@ -90,7 +92,7 @@ nodepools: keypairName: '' securityGroupReadonly: false sshKeyContents: '' - subnetId: subnet-0212fa8bf49d2f821 # required: replace with your subnet id + subnetId: subnet-0b2225cfef59473d4 # required: replace with your subnet id zone: a monitoring: false privateAddressOnly: true @@ -100,7 +102,7 @@ nodepools: rootSize: 64 sshUser: ec2-user volumeType: gp3 - vpcId: vpc-0e8fe916279b4bf8a # required: replace with your vpc id + vpcId: vpc-07ccf27031e43ed3b # required: replace with your vpc id useEbsOptimizedInstance: false usePrivateAddress: true userdata: | @@ -144,8 +146,17 @@ nodepools: content: | apiVersion: audit.k8s.io/v1 kind: Policy + metadata: + name: rke2-audit-policy rules: - - level: RequestResponse + - level: Metadata + resources: + - group: "" + resources: ["secrets"] + - level: RequestResponse + resources: + - group: "" + resources: ["*"] - path: /etc/rancher/rke2/rancher-pss.yaml owner: root content: | @@ -216,14 +227,14 @@ nodepools: - sudo echo -e "[keyfile]\nunmanaged-devices=interface-name:cali*;interface-name:flannel*" > /etc/NetworkManager/conf.d/rke2-canal.conf - sudo mkdir -p /opt/rke2-artifacts/ /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/ - sudo useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U - - etcd: false + - name: worker-nodes + quantity: 3 + etcd: false controlplane: false worker: true labels: {} taints: {} - quantity: 3 paused: false - name: worker-nodes ami: ami-05a5f6298acdb05b6 accessKey: # access key secretKey: # secret key @@ -240,7 +251,7 @@ nodepools: keypairName: '' securityGroupReadonly: false sshKeyContents: '' - subnetId: subnet-0212fa8bf49d2f821 # required: replace with your subnet id + subnetId: subnet-0b2225cfef59473d4 # required: replace with your subnet id zone: a monitoring: false privateAddressOnly: 
true @@ -250,7 +261,7 @@ nodepools: rootSize: 128 sshUser: ec2-user volumeType: gp3 - vpcId: vpc-0e8fe916279b4bf8a # required: replace with your vpc id + vpcId: vpc-07ccf27031e43ed3b # required: replace with your vpc id useEbsOptimizedInstance: false usePrivateAddress: true userdata: | @@ -326,4 +337,4 @@ addons: manager: svc: type: ClusterIP - rbac: true + rbac: true \ No newline at end of file diff --git a/examples/aws/values-aws.yaml b/examples/aws/values-aws.yaml index 640f71b..fd03fc2 100644 --- a/examples/aws/values-aws.yaml +++ b/examples/aws/values-aws.yaml @@ -9,10 +9,10 @@ rancher: cluster: annotations: {} labels: {} - name: aws-rke2-cluster + name: rke2-cluster-aws config: systemDefaultRegistry: rgcrprod.azurecr.us - kubernetesVersion: v1.25.15+rke2r2 + kubernetesVersion: v1.26.10+rke2r2 localClusterAuthEndpoint: enabled: false cni: canal @@ -22,9 +22,11 @@ cluster: profile: cis-1.23 selinux: true secrets_encryption: true - write_kubeconfig_mode: 0640 + write_kubeconfig_mode: 0600 use_service_account_credentials: true protect_kernel_defaults: true + cloud_provider_name: 'aws' # aws, azure, harvester, vsphere + # cloud_provider_config: '' # cloud provider config here (cloud.conf) kube_controller_manager_arg: - bind-address=127.0.0.1 - use-service-account-credentials=true @@ -38,10 +40,10 @@ cluster: - tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - authorization-mode=RBAC,Node - anonymous-auth=false + - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml - audit-policy-file=/etc/rancher/rke2/audit-policy.yaml - audit-log-mode=blocking-strict - audit-log-maxage=30 - - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml kubelet_arg: - protect-kernel-defaults=true - read-only-port=0 @@ -68,14 +70,14 @@ cluster: enabled: false 
nodepools: - - etcd: true + - name: control-plane-nodes + quantity: 3 + etcd: true controlplane: true worker: false labels: {} taints: {} - quantity: 3 paused: false - name: control-plane-nodes ami: ami-05a5f6298acdb05b6 deviceName: /dev/sda1 encryptEbsVolume: false @@ -84,12 +86,12 @@ nodepools: insecureTransport: false instanceType: m5.large region: us-east-1 - createSecurityGroup: true - securityGroups: [''] + createSecurityGroup: false + securityGroups: ['aws-rgs-rke2-sg'] keypairName: '' securityGroupReadonly: false sshKeyContents: '' - subnetId: subnet-0212fa8bf49d2f821 # required: replace with your subnet id + subnetId: subnet-0b2225cfef59473d4 # required: replace with your subnet id zone: a monitoring: false privateAddressOnly: true @@ -99,7 +101,7 @@ nodepools: rootSize: 64 sshUser: ec2-user volumeType: gp3 - vpcId: vpc-0e8fe916279b4bf8a # required: replace with your vpc id + vpcId: vpc-07ccf27031e43ed3b # required: replace with your vpc id useEbsOptimizedInstance: false usePrivateAddress: true userdata: | @@ -143,8 +145,17 @@ nodepools: content: | apiVersion: audit.k8s.io/v1 kind: Policy + metadata: + name: rke2-audit-policy rules: - - level: RequestResponse + - level: Metadata + resources: + - group: "" + resources: ["secrets"] + - level: RequestResponse + resources: + - group: "" + resources: ["*"] - path: /etc/rancher/rke2/rancher-pss.yaml owner: root content: | @@ -215,14 +226,14 @@ nodepools: - sudo echo -e "[keyfile]\nunmanaged-devices=interface-name:cali*;interface-name:flannel*" > /etc/NetworkManager/conf.d/rke2-canal.conf - sudo mkdir -p /opt/rke2-artifacts/ /etc/rancher/rke2/ /var/lib/rancher/rke2/server/manifests/ - sudo useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U - - etcd: false + - name: worker-nodes + quantity: 3 + etcd: false controlplane: false worker: true labels: {} taints: {} - quantity: 3 paused: false - name: worker-nodes ami: ami-05a5f6298acdb05b6 deviceName: /dev/sda1 encryptEbsVolume: false @@ -231,12 +242,12 @@ 
nodepools: insecureTransport: false instanceType: m5.xlarge region: us-east-1 - createSecurityGroup: true - securityGroups: [''] + createSecurityGroup: false + securityGroups: ['aws-rgs-rke2-sg'] keypairName: '' securityGroupReadonly: false sshKeyContents: '' - subnetId: subnet-0212fa8bf49d2f821 # required: replace with your subnet id + subnetId: subnet-0b2225cfef59473d4 # required: replace with your subnet id zone: a monitoring: false privateAddressOnly: true @@ -246,7 +257,7 @@ nodepools: rootSize: 128 sshUser: ec2-user volumeType: gp3 - vpcId: vpc-0e8fe916279b4bf8a # required: replace with your vpc id + vpcId: vpc-07ccf27031e43ed3b # required: replace with your vpc id useEbsOptimizedInstance: false usePrivateAddress: true userdata: | diff --git a/examples/custom/fleet.yaml b/examples/custom/fleet.yaml index 2cf1a33..c4b5f22 100644 --- a/examples/custom/fleet.yaml +++ b/examples/custom/fleet.yaml @@ -2,7 +2,7 @@ defaultNamespace: fleet-default helm: repo: https://rancherfederal.github.io/rancher-cluster-templates chart: rancher-cluster-templates - version: 0.3.0 + version: 0.3.1 releaseName: custom-cluster valuesFiles: - values-custom.yaml \ No newline at end of file diff --git a/examples/custom/values-custom.yaml b/examples/custom/values-custom.yaml index 9de1197..e4e7bc0 100644 --- a/examples/custom/values-custom.yaml +++ b/examples/custom/values-custom.yaml @@ -10,7 +10,7 @@ cluster: name: rke2-cluster-custom config: systemDefaultRegistry: docker.io - kubernetesVersion: v1.25.15+rke2r2 + kubernetesVersion: v1.26.10+rke2r2 localClusterAuthEndpoint: enabled: false cni: canal @@ -20,7 +20,7 @@ cluster: profile: cis-1.23 selinux: true secrets_encryption: true - write_kubeconfig_mode: 0640 + write_kubeconfig_mode: 0600 use_service_account_credentials: true protect_kernel_defaults: true kube_controller_manager_arg: @@ -36,10 +36,10 @@ cluster: - 
tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - authorization-mode=RBAC,Node - anonymous-auth=false + - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml - audit-policy-file=/etc/rancher/rke2/audit-policy.yaml - audit-log-mode=blocking-strict - audit-log-maxage=30 - - admission-control-config-file=/etc/rancher/rke2/rancher-pss.yaml kubelet_arg: - protect-kernel-defaults=true - read-only-port=0