From f0009db150d209898ab11f0c43f7eafdf4ace967 Mon Sep 17 00:00:00 2001 From: Heber Romero Date: Sat, 9 Dec 2023 00:04:29 -0500 Subject: [PATCH] operators --- README.md | 84 ++++++++--- lab-deployment.yml | 52 ++++++- oadp/policy-oadp.yaml | 129 ++++++++++++++++ thanos/thanos-deployment.yaml | 269 ++++++++++++++++++++++++++++++++++ 4 files changed, 507 insertions(+), 27 deletions(-) create mode 100644 oadp/policy-oadp.yaml create mode 100644 thanos/thanos-deployment.yaml diff --git a/README.md b/README.md index 0119dee..be3bcaf 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ ansible-playbook submariner/submariner-install.yml **_NOTE:_** This part of the laboratory has already been provisioned, to focus on the deployment of the ecosystem's own services.
- Deploy ACM + Deploy Advanced Cluster Management for Kubernetes You can use either the OpenShift 4 web console's built-in OperatorHub or the OpenShift CLI to install ACM. The installation breaks down to six steps: @@ -114,49 +114,61 @@ ansible-playbook lab-deployment.yml --tags acm ### Configure Submariner -### Deploy Skupper Operator +## Security and Data Protection -If you want to try a cluster-wide installation, you don't need to create the `OperatorGroup` as it is already defined at the destination namespaces, so you just need to create the subscription at the correct namespaces, see below. +**_NOTE:_** This part of the laboratory has already been provisioned, to focus on the deployment of the ecosystem's own services. -```sh -# Create a Project -oc new-project "{{ username }}" +
+ Deploy Advanced Cluster Security for Kubernetes -# Creating a CatalogSource in the `openshift-marketplace` namespace -oc apply -f ocp/00-CatalogSource.yaml +When RHACM is available, you can create RHACM policies to deploy RHACS to your cluster fleet. This approach ensures that all fleet clusters are protected by RHACS. -# Wait for the skupper-operator catalog pod to be running -oc -n openshift-marketplace get pods | grep skupper-operator +To implement RHACS, you must create two policies in RHACM, one for centralized services and one for protected cluster services. The policy to install centralized services must be applied to the hub cluster. The policy for installing protected cluster services must be applied to the clusters that you want RHACS to protect. You can achieve this separation by using a clusterSelector parameter of the PlacementRule object. -# Create an OperatorGroup in the `my-namespace` namespace -oc apply -f ocp/10-OperatorGroup.yaml +```vars.yml +ansible-playbook lab-deployment.yml --tags acs +``` +
+
+ Deploy Openshift Data Protection -oc apply -f ocp /20-Subscription-cluster.yaml +When RHACM is available, you can create RHACM policies to deploy RHACS to your cluster fleet. This approach ensures that all fleet clusters are protected by RHACS. -# Create a Subscription in the `my-namespace` namespace -oc apply -f ocp/20-Subscription.yaml -``` +To implement RHACS, you must create two policies in RHACM, one for centralized services and one for protected cluster services. The policy to install centralized services must be applied to the hub cluster. The policy for installing protected cluster services must be applied to the clusters that you want RHACS to protect. You can achieve this separation by using a clusterSelector parameter of the PlacementRule object. -### Configure Skupper +```vars.yml +ansible-playbook lab-deployment.yml --tags oadp +``` +
-## Security and Data Protection +## Monitoring and Follow-up **_NOTE:_** This part of the laboratory has already been provisioned, to focus on the deployment of the ecosystem's own services.
- Deploy ACS + Deploy Openshift Monitoring -When RHACM is available, you can create RHACM policies to deploy RHACS to your cluster fleet. This approach ensures that all fleet clusters are protected by RHACS. - -To implement RHACS, you must create two policies in RHACM, one for centralized services and one for protected cluster services. The policy to install centralized services must be applied to the hub cluster. The policy for installing protected cluster services must be applied to the clusters that you want RHACS to protect. You can achieve this separation by using a clusterSelector parameter of the PlacementRule object. +```vars.yml +ansible-playbook lab-deployment.yml --tags acs +``` +
+
+ Deploy Openshift Logging ```vars.yml ansible-playbook lab-deployment.yml --tags acs ```
+
+ Deploy Thanos + +```vars.yml +ansible-playbook lab-deployment.yml --tags thanos +``` +
+
 
-## Monitoring and Follow-up
 ## Testing and Continuous Deployment
 ## Cultural and Organizational Change
 ## Network Overload and Latency
@@ -168,6 +180,32 @@ ansible-playbook lab-deployment.yml --tags acs
 
 ### Hybrid Cloud Balancing
 
+### Deploy Skupper Operator
+
+If you want to try a cluster-wide installation, you don't need to create the `OperatorGroup` as it is already defined at the destination namespaces, so you just need to create the subscription at the correct namespaces, see below.
+
+```sh
+# Create a Project
+oc new-project "{{ username }}"
+
+# Creating a CatalogSource in the `openshift-marketplace` namespace
+oc apply -f ocp/00-CatalogSource.yaml
+
+# Wait for the skupper-operator catalog pod to be running
+oc -n openshift-marketplace get pods | grep skupper-operator
+
+# Create an OperatorGroup in the `my-namespace` namespace
+oc apply -f ocp/10-OperatorGroup.yaml
+
+# Create a cluster-wide Subscription
+oc apply -f ocp/20-Subscription-cluster.yaml
+
+# Create a Subscription in the `my-namespace` namespace
+oc apply -f ocp/20-Subscription.yaml
+```
+
+### Configure Skupper
+
 ### DRP
 
 ### Backup
diff --git a/lab-deployment.yml b/lab-deployment.yml
index 13be6d0..ae4223b 100644
--- a/lab-deployment.yml
+++ b/lab-deployment.yml
@@ -54,7 +54,7 @@
       - acm
       - full
 
-  - name: Create a project open-cluster-management
+  - name: Create a project managed-cluster
     community.kubernetes.k8s:
       state: present
       resource_definition:
@@ -92,7 +92,7 @@
 ####################################################################
 # Deploy ACS on Openshift
 ####################################################################
-  - name: Create a project open-cluster-management
+  - name: Create a project rhacs-operator
     community.kubernetes.k8s:
       state: present
       resource_definition:
@@ -104,7 +104,7 @@
       - acs
       - full
 
-  - name: Deploy KlusterletAddonConfig
+  - name: Deploy ACS Policy
     community.kubernetes.k8s:
       state: present
       src: "acs/policy-acs-operator-secured-clusters.yml"
@@ -112,6 +112,50 @@
       - acs
       - full
 
+####################################################################
+# Deploy OADP on Openshift
+####################################################################
+
+  - name: Create a project openshift-adp
+    community.kubernetes.k8s:
+      state: present
+      resource_definition:
+        apiVersion: project.openshift.io/v1
+        kind: Project
+        metadata:
+          name: openshift-adp
+    tags:
+      - oadp
+      - full
+
+  - name: Deploy OADP Policy
+    community.kubernetes.k8s:
+      state: present
+      src: "oadp/policy-oadp.yaml"
+    tags:
+      - oadp
+      - full
+####################################################################
+# Deploy Thanos on Openshift
+####################################################################
+  - name: Create a project openshift-monitoring
+    community.kubernetes.k8s:
+      state: present
+      resource_definition:
+        apiVersion: project.openshift.io/v1
+        kind: Project
+        metadata:
+          name: openshift-monitoring
+    tags:
+      - thanos
+      - full
+
+  - name: Deploy Thanos Policy
+    community.kubernetes.k8s:
+      state: present
+      src: "thanos/thanos-deployment.yaml"
+    tags:
+      - thanos
+      - full
diff --git a/oadp/policy-oadp.yaml b/oadp/policy-oadp.yaml
new file mode 100644
index 0000000..abd45b6
--- /dev/null
+++ b/oadp/policy-oadp.yaml
@@ -0,0 +1,129 @@
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+  name: "policy-oadp-operator"
+  namespace: openshift-adp
+spec:
+  disabled: false
+  policy-templates:
+    - objectDefinition:
+        apiVersion: policy.open-cluster-management.io/v1
+        kind: ConfigurationPolicy
+        metadata:
+          name: policy-oadp-namespace
+          namespace: openshift-adp
+        spec:
+          object-templates:
+            - complianceType: musthave
+              objectDefinition:
+                apiVersion: v1
+                kind: Namespace
+                metadata:
+                  name: openshift-adp
+                  labels:
+                    openshift.io/cluster-monitoring: "true"
+          remediationAction: inform
+          severity: high
+    - objectDefinition:
+        apiVersion: policy.open-cluster-management.io/v1
+        kind: ConfigurationPolicy
+        metadata:
+          name: policy-oadp-operator-operatorgroup
+ namespace: openshift-adp + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1alpha2 + kind: OperatorGroup + metadata: + name: openshift-adp-operatorgroup + namespace: openshift-adp + spec: + targetNamespaces: + - openshift-adp + remediationAction: inform + severity: high + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: policy-oadp-operator-subscription + namespace: openshift-adp + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: redhat-oadp-operator + namespace: openshift-adp + spec: + channel: stable + installPlanApproval: Automatic + name: redhat-oadp-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + remediationAction: inform + severity: high + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: policy-dataprotectionapplication + namespace: openshift-adp + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: oadp.openshift.io/v1alpha1 + kind: DataProtectionApplication + metadata: + name: cluster-dpa + namespace: openshift-adp + spec: + backupImages: false + configuration: + restic: + enable: true + velero: + defaultPlugins: + - openshift + - aws + - kubevirt + - csi + noDefaultBackupLocation: true + remediationAction: inform + severity: low + remediationAction: enforce +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-policy-oadp-operator + namespace: openshift-adp +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-policy-oadp-operator +subjects: + - apiGroup: policy.open-cluster-management.io + kind: Policy + name: policy-oadp-operator +--- +apiVersion: apps.open-cluster-management.io/v1 
+kind: PlacementRule +metadata: + name: placement-policy-oadp-operator + namespace: openshift-adp +spec: + clusterConditions: + - status: 'True' + type: ManagedClusterConditionAvailable + clusterSelector: + matchExpressions: + - key: environment + operator: In + values: + - dev \ No newline at end of file diff --git a/thanos/thanos-deployment.yaml b/thanos/thanos-deployment.yaml new file mode 100644 index 0000000..be3b04d --- /dev/null +++ b/thanos/thanos-deployment.yaml @@ -0,0 +1,269 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: query-layer + app.kubernetes.io/instance: thanos-querier + app.kubernetes.io/managed-by: cluster-monitoring-operator + app.kubernetes.io/name: thanos-query + app.kubernetes.io/part-of: openshift-monitoring + app.kubernetes.io/version: 0.32.5 + name: thanos-querier + namespace: openshift-monitoring +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/component: query-layer + app.kubernetes.io/instance: thanos-querier + app.kubernetes.io/name: thanos-query + app.kubernetes.io/part-of: openshift-monitoring + strategy: + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + app.kubernetes.io/component: query-layer + app.kubernetes.io/instance: thanos-querier + app.kubernetes.io/managed-by: cluster-monitoring-operator + app.kubernetes.io/name: thanos-query + app.kubernetes.io/part-of: openshift-monitoring + app.kubernetes.io/version: 0.32.5 + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/component: query-layer + app.kubernetes.io/instance: thanos-querier + app.kubernetes.io/name: thanos-query + app.kubernetes.io/part-of: openshift-monitoring + topologyKey: kubernetes.io/hostname + containers: + - args: + - query + - --grpc-address=127.0.0.1:10901 + - 
--http-address=127.0.0.1:9090 + - --log.format=logfmt + - --query.replica-label=prometheus_replica + - --query.replica-label=thanos_ruler_replica + - --endpoint=dnssrv+_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local + - --query.auto-downsampling + - --store.sd-dns-resolver=miekgdns + - --grpc-client-tls-secure + - --grpc-client-tls-cert=/etc/tls/grpc/client.crt + - --grpc-client-tls-key=/etc/tls/grpc/client.key + - --grpc-client-tls-ca=/etc/tls/grpc/ca.crt + - --grpc-client-server-name=prometheus-grpc + - --rule=dnssrv+_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local + - --target=dnssrv+_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local + env: + - name: HOST_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP + image: quay.io/thanos/thanos:v0.32.5 + imagePullPolicy: IfNotPresent + name: thanos-query + ports: + - containerPort: 9090 + name: http + resources: + requests: + cpu: 10m + memory: 12Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/tls/grpc + name: secret-grpc-tls + - args: + - --secure-listen-address=0.0.0.0:9091 + - --upstream=http://127.0.0.1:9090 + - --config-file=/etc/kube-rbac-proxy/config.yaml + - --tls-cert-file=/etc/tls/private/tls.crt + - --tls-private-key-file=/etc/tls/private/tls.key + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --ignore-paths=/-/healthy,/-/ready + image: quay.io/brancz/kube-rbac-proxy:v0.15.0 + livenessProbe: + failureThreshold: 4 + httpGet: + path: /-/healthy + port: 9091 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 
30 + name: kube-rbac-proxy-web + ports: + - containerPort: 9091 + name: web + readinessProbe: + failureThreshold: 20 + httpGet: + path: /-/ready + port: 9091 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 5 + resources: + requests: + cpu: 1m + memory: 15Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/tls/private + name: secret-thanos-querier-tls + - mountPath: /etc/kube-rbac-proxy + name: secret-thanos-querier-kube-rbac-proxy-web + - args: + - --secure-listen-address=0.0.0.0:9092 + - --upstream=http://127.0.0.1:9095 + - --config-file=/etc/kube-rbac-proxy/config.yaml + - --tls-cert-file=/etc/tls/private/tls.crt + - --tls-private-key-file=/etc/tls/private/tls.key + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --allow-paths=/api/v1/query,/api/v1/query_range,/api/v1/labels,/api/v1/label/*/values,/api/v1/series + image: quay.io/brancz/kube-rbac-proxy:v0.15.0 + name: kube-rbac-proxy + ports: + - containerPort: 9092 + name: tenancy + resources: + requests: + cpu: 1m + memory: 15Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/tls/private + name: secret-thanos-querier-tls + - mountPath: /etc/kube-rbac-proxy + name: secret-thanos-querier-kube-rbac-proxy + - args: + - --insecure-listen-address=127.0.0.1:9095 + - --upstream=http://127.0.0.1:9090 + - --label=namespace + - --enable-label-apis + - --error-on-replace + image: quay.io/prometheuscommunity/prom-label-proxy:v0.7.0 + name: prom-label-proxy + resources: + requests: + cpu: 1m + memory: 15Mi + securityContext: + allowPrivilegeEscalation: false + 
capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + - args: + - --secure-listen-address=0.0.0.0:9093 + - --upstream=http://127.0.0.1:9095 + - --config-file=/etc/kube-rbac-proxy/config.yaml + - --tls-cert-file=/etc/tls/private/tls.crt + - --tls-private-key-file=/etc/tls/private/tls.key + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --allow-paths=/api/v1/rules,/api/v1/alerts + image: quay.io/brancz/kube-rbac-proxy:v0.15.0 + name: kube-rbac-proxy-rules + ports: + - containerPort: 9093 + name: tenancy-rules + resources: + requests: + cpu: 1m + memory: 15Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/tls/private + name: secret-thanos-querier-tls + - mountPath: /etc/kube-rbac-proxy + name: secret-thanos-querier-kube-rbac-proxy-rules + - args: + - --secure-listen-address=0.0.0.0:9094 + - --upstream=http://127.0.0.1:9090 + - --config-file=/etc/kube-rbac-proxy/config.yaml + - --tls-cert-file=/etc/tls/private/tls.crt + - --tls-private-key-file=/etc/tls/private/tls.key + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --client-ca-file=/etc/tls/client/client-ca.crt + - --allow-paths=/metrics + image: quay.io/brancz/kube-rbac-proxy:v0.15.0 + name: kube-rbac-proxy-metrics + ports: + - containerPort: 9094 + name: metrics + resources: + requests: + cpu: 1m + memory: 15Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + 
volumeMounts: + - mountPath: /etc/tls/private + name: secret-thanos-querier-tls + - mountPath: /etc/kube-rbac-proxy + name: secret-thanos-querier-kube-rbac-proxy-metrics + - mountPath: /etc/tls/client + name: metrics-client-ca + readOnly: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: thanos-querier + terminationGracePeriodSeconds: 120 + volumes: + - name: secret-thanos-querier-tls + secret: + secretName: thanos-querier-tls + - name: secret-thanos-querier-kube-rbac-proxy + secret: + secretName: thanos-querier-kube-rbac-proxy + - name: secret-thanos-querier-kube-rbac-proxy-web + secret: + secretName: thanos-querier-kube-rbac-proxy-web + - name: secret-thanos-querier-kube-rbac-proxy-rules + secret: + secretName: thanos-querier-kube-rbac-proxy-rules + - name: secret-thanos-querier-kube-rbac-proxy-metrics + secret: + secretName: thanos-querier-kube-rbac-proxy-metrics + - configMap: + name: metrics-client-ca + name: metrics-client-ca \ No newline at end of file