diff --git a/bundle.Dockerfile b/bundle.Dockerfile
index 737fabddf..777ac257a 100644
--- a/bundle.Dockerfile
+++ b/bundle.Dockerfile
@@ -8,7 +8,7 @@ LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/
 LABEL operators.operatorframework.io.bundle.package.v1=dell-csm-operator
 LABEL operators.operatorframework.io.bundle.channels.v1=stable
 LABEL operators.operatorframework.io.bundle.channel.default.v1=stable
-LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.14.0+git
+LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.32.0
 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1
 LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3
diff --git a/bundle/manifests/dell-csm-operator.clusterserviceversion.yaml b/bundle/manifests/dell-csm-operator.clusterserviceversion.yaml
index e547ce2a1..d448959e4 100644
--- a/bundle/manifests/dell-csm-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/dell-csm-operator.clusterserviceversion.yaml
@@ -3520,7 +3520,7 @@ spec:
     name: Dell Technologies
     url: https://github.com/dell/csm-operator
   relatedImages:
-    - image: docker.io/dellemc/dell-csm-operator:v1.4.1
+    - image: docker.io/dellemc/dell-csm-operator:v1.4.2
       name: dell-csm-operator
     - image: docker.io/dellemc/csi-isilon:v2.9.1
       name: csi-isilon
@@ -3575,5 +3575,5 @@ spec:
     - image: docker.io/dellemc/connectivity-cert-persister-k8s:0.7.0
       name: cert-persister
   skips:
-  - dell-csm-operator.v1.4.0
-  version: 1.4.1
+  - dell-csm-operator.v1.4.0
+  version: 1.4.2
diff --git a/controllers/csm_controller.go b/controllers/csm_controller.go
index 62ee0778d..3cc656d4d 100644
--- a/controllers/csm_controller.go
+++ b/controllers/csm_controller.go
@@ -92,7 +92,7 @@ const (
     CSMFinalizerName = "finalizer.dell.emc.com"
     // CSMVersion -
-    CSMVersion = "v1.8.0"
+    CSMVersion = "v1.10.0"
 )
 var (
@@ -369,7 +369,7 @@ func (r *ContainerStorageModuleReconciler) handleDeploymentUpdate(oldObj interfa
         return
     }
-    log.Debugw("deployment modified generation", d.Generation, old.Generation)
+    log.Debugw("deployment modified generation", d.Name, d.Generation, old.Generation)
     desired := d.Status.Replicas
     available := d.Status.AvailableReplicas
@@ -378,13 +378,16 @@ func (r *ContainerStorageModuleReconciler) handleDeploymentUpdate(oldObj interfa
     // Replicas: 2 desired | 2 updated | 2 total | 2 available | 0 unavailable
-    log.Infow("deployment", "desired", desired)
-    log.Infow("deployment", "numberReady", ready)
-    log.Infow("deployment", "available", available)
-    log.Infow("deployment", "numberUnavailable", numberUnavailable)
+    log.Infow("deployment", "deployment name", d.Name, "desired", desired)
+    log.Infow("deployment", "deployment name", d.Name, "numberReady", ready)
+    log.Infow("deployment", "deployment name", d.Name, "available", available)
+    log.Infow("deployment", "deployment name", d.Name, "numberUnavailable", numberUnavailable)
-    ns := d.Namespace
-    log.Debugw("deployment", "namespace", ns, "name", name)
+    ns := d.Spec.Template.Labels[constants.CsmNamespaceLabel]
+    if ns == "" {
+        ns = d.Namespace
+    }
+    log.Debugw("csm being modified in handledeployment", "namespace", ns, "name", name)
     namespacedName := t1.NamespacedName{
         Name: name,
         Namespace: ns,
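The update handlers in this file all resolve the owning CSM object's namespace the same way: prefer the `csmNamespace` label stamped on the workload, fall back to the object's own namespace. A minimal sketch of that lookup, assuming only the label keys introduced in this diff (helper name and sample values are illustrative, not operator code):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Label keys from pkg/constants in this diff.
const (
	csmLabel          = "csm"
	csmNamespaceLabel = "csmNamespace"
)

// csmNamespaceFor returns the namespace of the CSM object that owns a pod.
// Observability pods run in a different namespace than the CSM custom
// resource, so the csmNamespace label wins; otherwise the pod's own
// namespace is used, as before.
func csmNamespaceFor(p *corev1.Pod) string {
	if ns := p.GetLabels()[csmNamespaceLabel]; ns != "" {
		return ns
	}
	return p.Namespace
}

func main() {
	p := &corev1.Pod{}
	p.Namespace = "karavi"
	p.Labels = map[string]string{csmLabel: "isilon", csmNamespaceLabel: "dell-csm"}
	fmt.Println(csmNamespaceFor(p)) // prints "dell-csm"
}
```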
@@ -418,7 +421,11 @@ func (r *ContainerStorageModuleReconciler) handlePodsUpdate(_ interface{}, obj i
     p, _ := obj.(*corev1.Pod)
     name := p.GetLabels()[constants.CsmLabel]
-    ns := p.Namespace
+    // if this pod is an obs. pod, namespace might not match csm namespace
+    ns := p.GetLabels()[constants.CsmNamespaceLabel]
+    if ns == "" {
+        ns = p.Namespace
+    }
     if name == "" {
         return
     }
@@ -481,7 +488,10 @@ func (r *ContainerStorageModuleReconciler) handleDaemonsetUpdate(oldObj interfac
     log.Infow("daemonset ", "available", available)
     log.Infow("daemonset ", "numberUnavailable", numberUnavailable)
-    ns := d.Namespace
+    ns := d.Spec.Template.Labels[constants.CsmNamespaceLabel]
+    if ns == "" {
+        ns = d.Namespace
+    }
     r.Log.Debugw("daemonset ", "ns", ns, "name", name)
     namespacedName := t1.NamespacedName{
         Name: name,
         Namespace: ns,
     }
@@ -671,7 +681,8 @@ func (r *ContainerStorageModuleReconciler) SyncCSM(ctx context.Context, cr csmv1
     log := logger.GetLogger(ctx)
     // Create/Update Authorization Proxy Server
-    if authorizationEnabled, _ := utils.IsModuleEnabled(ctx, cr, csmv1.AuthorizationServer); authorizationEnabled {
+    authorizationEnabled, _ := utils.IsModuleEnabled(ctx, cr, csmv1.AuthorizationServer)
+    if authorizationEnabled {
         log.Infow("Create/Update authorization")
         if err := r.reconcileAuthorization(ctx, false, operatorConfig, cr, ctrlClient); err != nil {
             return fmt.Errorf("failed to deploy authorization proxy server: %v", err)
         }
     }
@@ -834,9 +845,11 @@ func (r *ContainerStorageModuleReconciler) SyncCSM(ctx context.Context, cr csmv1
             return err
         }
-        // Create/Update DeamonSet
-        if err = daemonset.SyncDaemonset(ctx, node.DaemonSetApplyConfig, cluster.ClusterK8sClient, cr.Name); err != nil {
-            return err
+        // Create/Update DeamonSet, except for auth proxy
+        if !authorizationEnabled {
+            if err = daemonset.SyncDaemonset(ctx, node.DaemonSetApplyConfig, cluster.ClusterK8sClient, cr.Name); err != nil {
+                return err
+            }
         }
         if replicationEnabled {
@@ -1187,7 +1200,7 @@ func (r *ContainerStorageModuleReconciler) removeDriver(ctx context.Context, ins
     return nil
 }
-// removeModule - remove authorization proxy server
+// removeModule - remove standalone modules
 func (r *ContainerStorageModuleReconciler) removeModule(ctx context.Context, instance csmv1.ContainerStorageModule, operatorConfig utils.OperatorConfig, ctrlClient client.Client) error {
     log := logger.GetLogger(ctx)
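A rough sketch of the new SyncCSM control flow: the module-enabled check is hoisted into a variable so the node DaemonSet sync can be skipped for the Authorization proxy server, which deploys no node plugin. The stub functions stand in for utils.IsModuleEnabled and daemonset.SyncDaemonset, whose real signatures take a context, the CR and cluster clients:

```go
package main

import "fmt"

// Stubs standing in for utils.IsModuleEnabled and daemonset.SyncDaemonset.
func isAuthorizationServerEnabled() bool { return true }
func syncDaemonSet() error {
	fmt.Println("syncing node daemonset")
	return nil
}

func syncCSM() error {
	authorizationEnabled := isAuthorizationServerEnabled()

	// ... controller Deployment, sidecars, etc. are reconciled here ...

	// The auth proxy server has no node component, so its CR never gets a
	// DaemonSet; the sync only runs for driver instances.
	if !authorizationEnabled {
		if err := syncDaemonSet(); err != nil {
			return err
		}
	}
	return nil
}

func main() { _ = syncCSM() }
```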
diff --git a/operatorconfig/driverconfig/powerflex/v2.8.0/node.yaml b/operatorconfig/driverconfig/powerflex/v2.8.0/node.yaml
index af0d3148a..5600f5d71 100644
--- a/operatorconfig/driverconfig/powerflex/v2.8.0/node.yaml
+++ b/operatorconfig/driverconfig/powerflex/v2.8.0/node.yaml
@@ -109,11 +109,6 @@ spec:
             value:
           - name: X_CSI_MAX_VOLUMES_PER_NODE
             value:
-          - name: X_CSI_POWERFLEX_KUBE_NODE_NAME
-            valueFrom:
-              fieldRef:
-                apiVersion: v1
-                fieldPath: spec.nodeName
           volumeMounts:
             - name: driver-path
               mountPath: /plugins/vxflexos.emc.dell.com
diff --git a/operatorconfig/driverconfig/powerflex/v2.9.0/node.yaml b/operatorconfig/driverconfig/powerflex/v2.9.0/node.yaml
index 2aebacf0b..c2cf8e2ea 100644
--- a/operatorconfig/driverconfig/powerflex/v2.9.0/node.yaml
+++ b/operatorconfig/driverconfig/powerflex/v2.9.0/node.yaml
@@ -109,11 +109,6 @@ spec:
             value:
           - name: X_CSI_MAX_VOLUMES_PER_NODE
             value:
-          - name: X_CSI_POWERFLEX_KUBE_NODE_NAME
-            valueFrom:
-              fieldRef:
-                apiVersion: v1
-                fieldPath: spec.nodeName
           volumeMounts:
             - name: driver-path
               mountPath: /plugins/vxflexos.emc.dell.com
diff --git a/operatorconfig/moduleconfig/authorization/v1.8.0/deployment.yaml b/operatorconfig/moduleconfig/authorization/v1.8.0/deployment.yaml
index 935fdbc80..25051dd6d 100644
--- a/operatorconfig/moduleconfig/authorization/v1.8.0/deployment.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.8.0/deployment.yaml
@@ -14,6 +14,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: proxy-server
     spec:
       containers:
@@ -92,6 +93,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: tenant-service
     spec:
       containers:
@@ -176,6 +178,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: role-service
     spec:
       serviceAccountName: role-service
@@ -254,6 +257,7 @@ spec:
   template:
     metadata:
      labels:
+        csm:
         app: storage-service
     spec:
       serviceAccountName: storage-service
@@ -316,6 +320,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: redis
         role: primary
         tier: backend
@@ -367,6 +372,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: redis-commander
         tier: backend
     spec:
@@ -496,4 +502,4 @@ roleRef:
 subjects:
   - kind: Group
     name: system:serviceaccounts:authorization
-    apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
+    apiGroup: rbac.authorization.k8s.io
diff --git a/operatorconfig/moduleconfig/authorization/v1.8.0/nginx-ingress-controller.yaml b/operatorconfig/moduleconfig/authorization/v1.8.0/nginx-ingress-controller.yaml
index 3bafbb56f..bd6feeab0 100644
--- a/operatorconfig/moduleconfig/authorization/v1.8.0/nginx-ingress-controller.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.8.0/nginx-ingress-controller.yaml
@@ -426,6 +426,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app.kubernetes.io/component: controller
         app.kubernetes.io/instance:
         app.kubernetes.io/name: ingress-nginx
@@ -660,4 +661,4 @@ webhooks:
     resources:
       - ingresses
   sideEffects: None
-
\ No newline at end of file
+
diff --git a/operatorconfig/moduleconfig/authorization/v1.9.0/deployment.yaml b/operatorconfig/moduleconfig/authorization/v1.9.0/deployment.yaml
index 935fdbc80..c171dd8e6 100644
--- a/operatorconfig/moduleconfig/authorization/v1.9.0/deployment.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.9.0/deployment.yaml
@@ -15,6 +15,7 @@ spec:
     metadata:
       labels:
         app: proxy-server
+        csm:
     spec:
       containers:
         - name: proxy-server
@@ -93,6 +94,7 @@ spec:
     metadata:
       labels:
         app: tenant-service
+        csm:
     spec:
       containers:
         - name: tenant-service
@@ -176,6 +178,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: role-service
     spec:
       serviceAccountName: role-service
@@ -254,6 +257,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: storage-service
     spec:
       serviceAccountName: storage-service
@@ -316,6 +320,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: redis
         role: primary
         tier: backend
@@ -367,6 +372,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: redis-commander
         tier: backend
     spec:
@@ -496,4 +502,4 @@ roleRef:
 subjects:
   - kind: Group
     name: system:serviceaccounts:authorization
-    apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
+    apiGroup: rbac.authorization.k8s.io
diff --git a/operatorconfig/moduleconfig/authorization/v1.9.0/nginx-ingress-controller.yaml b/operatorconfig/moduleconfig/authorization/v1.9.0/nginx-ingress-controller.yaml
index 3bafbb56f..bd6feeab0 100644
--- a/operatorconfig/moduleconfig/authorization/v1.9.0/nginx-ingress-controller.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.9.0/nginx-ingress-controller.yaml
@@ -426,6 +426,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app.kubernetes.io/component: controller
         app.kubernetes.io/instance:
         app.kubernetes.io/name: ingress-nginx
@@ -660,4 +661,4 @@ webhooks:
     resources:
       - ingresses
   sideEffects: None
-
\ No newline at end of file
+
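The empty `csm:` label values added above are placeholder tokens that the module code fills in at render time; the strings.ReplaceAll calls added to getAuthorizationServerDeployment, getAuthorizationIngressRules and getNginxIngressController substitute the CR name. A sketch of that substitution using a made-up placeholder token (the real token is whatever the CSMName constant holds):

```go
package main

import (
	"fmt"
	"strings"
)

// Hypothetical placeholder token for illustration only; the operator's real
// token is defined by the CSMName constant in pkg/modules.
const namePlaceholder = "<CSM_NAME_PLACEHOLDER>"

// renderAuthDeployment mimics the added strings.ReplaceAll calls: every
// occurrence of the name placeholder in the embedded YAML is replaced with
// the CR name, so the new csm: label carries the owning CSM object's name.
func renderAuthDeployment(yaml, crName string) string {
	return strings.ReplaceAll(yaml, namePlaceholder, crName)
}

func main() {
	yaml := "metadata:\n  labels:\n    csm: <CSM_NAME_PLACEHOLDER>\n    app: proxy-server\n"
	fmt.Print(renderAuthDeployment(yaml, "authorization"))
}
```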
diff --git a/operatorconfig/moduleconfig/authorization/v1.9.1/deployment.yaml b/operatorconfig/moduleconfig/authorization/v1.9.1/deployment.yaml
index 935fdbc80..22ce47e52 100644
--- a/operatorconfig/moduleconfig/authorization/v1.9.1/deployment.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.9.1/deployment.yaml
@@ -15,6 +15,7 @@ spec:
     metadata:
       labels:
         app: proxy-server
+        csm:
     spec:
       containers:
         - name: proxy-server
@@ -93,6 +94,7 @@ spec:
     metadata:
       labels:
         app: tenant-service
+        csm:
     spec:
       containers:
         - name: tenant-service
@@ -176,6 +178,7 @@ spec:
   template:
     metadata:
       labels:
+        csm:
         app: role-service
     spec:
       serviceAccountName: role-service
@@ -255,6 +258,7 @@ spec:
     metadata:
       labels:
         app: storage-service
+        csm:
     spec:
       serviceAccountName: storage-service
       containers:
@@ -319,6 +323,7 @@ spec:
         app: redis
         role: primary
         tier: backend
+        csm:
     spec:
       containers:
         - name: primary
@@ -369,6 +374,7 @@ spec:
       labels:
         app: redis-commander
         tier: backend
+        csm:
     spec:
       containers:
         - name: redis-commander
@@ -496,4 +502,4 @@ roleRef:
 subjects:
   - kind: Group
     name: system:serviceaccounts:authorization
-    apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
+    apiGroup: rbac.authorization.k8s.io
diff --git a/operatorconfig/moduleconfig/authorization/v1.9.1/nginx-ingress-controller.yaml b/operatorconfig/moduleconfig/authorization/v1.9.1/nginx-ingress-controller.yaml
index 3bafbb56f..135f8afa5 100644
--- a/operatorconfig/moduleconfig/authorization/v1.9.1/nginx-ingress-controller.yaml
+++ b/operatorconfig/moduleconfig/authorization/v1.9.1/nginx-ingress-controller.yaml
@@ -429,6 +429,7 @@ spec:
         app.kubernetes.io/component: controller
         app.kubernetes.io/instance:
         app.kubernetes.io/name: ingress-nginx
+        csm:
     spec:
       containers:
         - args:
@@ -660,4 +661,4 @@ webhooks:
     resources:
       - ingresses
   sideEffects: None
-
\ No newline at end of file
+
diff --git a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerflex.yaml b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerflex.yaml
index 8fa8668be..1586047b4 100644
--- a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerflex.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerflex.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerflex
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerflex-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powermax.yaml b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powermax.yaml
index 174777097..c691412b3 100644
--- a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powermax.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powermax.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powermax
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccountName: karavi-metrics-powermax-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerscale.yaml b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerscale.yaml
index d1ba91cca..408cd3d32 100644
--- a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerscale.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-metrics-powerscale.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerscale
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerscale-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-otel-collector.yaml b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-otel-collector.yaml
index e531f4afc..57a79e6a3 100644
--- a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-otel-collector.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-otel-collector.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: otel-collector
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: tls-secret
diff --git a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-topology.yaml b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-topology.yaml
index 730c4f69f..375ba4c4c 100644
--- a/operatorconfig/moduleconfig/observability/v1.6.0/karavi-topology.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.6.0/karavi-topology.yaml
@@ -89,6 +89,7 @@ spec:
         app.kubernetes.io/name: karavi-topology
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: karavi-topology-secret-volume
diff --git a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerflex.yaml b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerflex.yaml
index 8fa8668be..1586047b4 100644
--- a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerflex.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerflex.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerflex
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerflex-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powermax.yaml b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powermax.yaml
index 174777097..c691412b3 100644
--- a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powermax.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powermax.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powermax
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccountName: karavi-metrics-powermax-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerscale.yaml b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerscale.yaml
index d1ba91cca..408cd3d32 100644
--- a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerscale.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-metrics-powerscale.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerscale
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerscale-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-otel-collector.yaml b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-otel-collector.yaml
index e531f4afc..57a79e6a3 100644
--- a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-otel-collector.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-otel-collector.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: otel-collector
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: tls-secret
diff --git a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-topology.yaml b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-topology.yaml
index 730c4f69f..375ba4c4c 100644
--- a/operatorconfig/moduleconfig/observability/v1.7.0/karavi-topology.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.7.0/karavi-topology.yaml
@@ -89,6 +89,7 @@ spec:
         app.kubernetes.io/name: karavi-topology
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: karavi-topology-secret-volume
diff --git a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerflex.yaml b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerflex.yaml
index 8fa8668be..1586047b4 100644
--- a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerflex.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerflex.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerflex
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerflex-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powermax.yaml b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powermax.yaml
index 174777097..c691412b3 100644
--- a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powermax.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powermax.yaml
@@ -111,6 +111,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powermax
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccountName: karavi-metrics-powermax-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerscale.yaml b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerscale.yaml
index d1ba91cca..408cd3d32 100644
--- a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerscale.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-metrics-powerscale.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: karavi-metrics-powerscale
         app.kubernetes.io/instance: karavi
         csm:
+        csmNamespace:
     spec:
       serviceAccount: karavi-metrics-powerscale-controller
       containers:
diff --git a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-otel-collector.yaml b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-otel-collector.yaml
index e531f4afc..57a79e6a3 100644
--- a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-otel-collector.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-otel-collector.yaml
@@ -113,6 +113,7 @@ spec:
         app.kubernetes.io/name: otel-collector
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: tls-secret
diff --git a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-topology.yaml b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-topology.yaml
index 730c4f69f..375ba4c4c 100644
--- a/operatorconfig/moduleconfig/observability/v1.8.0/karavi-topology.yaml
+++ b/operatorconfig/moduleconfig/observability/v1.8.0/karavi-topology.yaml
@@ -89,6 +89,7 @@ spec:
         app.kubernetes.io/name: karavi-topology
         app.kubernetes.io/instance: karavi-observability
         csm:
+        csmNamespace:
     spec:
       volumes:
         - name: karavi-topology-secret-volume
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index 67db330f4..48a7383a1 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -73,6 +73,9 @@ var PodStatusRemoveString = "rpc error: code = Unknown desc = Error response fro
 // CsmLabel - label driver resources
 var CsmLabel = "csm"
+// CsmNamespaceLabel - label to track namespace for csm
+var CsmNamespaceLabel = "csmNamespace"
+
 // AccLabel - label client resources
 var AccLabel = "acc"
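With the CsmNamespaceLabel constant in place, module pod templates can carry both the CSM name and its namespace. A sketch (not the operator's actual helper) of the label set such a template needs so the watch handlers earlier in this diff can map a pod back to its owning ContainerStorageModule:

```go
package main

import "fmt"

// Values from pkg/constants in this diff.
const (
	CsmLabel          = "csm"
	CsmNamespaceLabel = "csmNamespace"
)

// moduleLabels builds the labels a module pod template would carry; the
// observability manifests above get these values injected via the csm: and
// csmNamespace: placeholders.
func moduleLabels(crName, crNamespace string) map[string]string {
	return map[string]string{
		CsmLabel:          crName,
		CsmNamespaceLabel: crNamespace,
	}
}

func main() {
	// Example only: an observability pod in "karavi" labeled for a CSM
	// object named "isilon" living in "dell-csm".
	fmt.Println(moduleLabels("isilon", "dell-csm"))
}
```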
diff --git a/pkg/modules/application_mobility.go b/pkg/modules/application_mobility.go
index fe93f0bc2..8ba38d571 100644
--- a/pkg/modules/application_mobility.go
+++ b/pkg/modules/application_mobility.go
@@ -105,8 +105,6 @@ const (
     AppMobCertManagerComponent = "cert-manager"
     // AppMobVeleroComponent - velero component
     AppMobVeleroComponent = "velero"
-    // CSMName - name
-    CSMName = ""
 )
 // getAppMobilityModule - get instance of app mobility module
diff --git a/pkg/modules/authorization.go b/pkg/modules/authorization.go
index 7b0d0c78b..6be04d62f 100644
--- a/pkg/modules/authorization.go
+++ b/pkg/modules/authorization.go
@@ -277,7 +277,6 @@ func getAuthApplyCR(cr csmv1.ContainerStorageModule, op utils.OperatorConfig) (*
             }
         }
     }
-
     for i, c := range container.VolumeMounts {
         if *c.Name == DefaultDriverConfigParamsVolumeMount {
             newName := AuthorizationSupportedDrivers[string(cr.Spec.Driver.CSIDriverType)].DriverConfigParamsVolumeMount
@@ -490,6 +489,7 @@ func getAuthorizationServerDeployment(op utils.OperatorConfig, cr csmv1.Containe
         YamlString = strings.ReplaceAll(YamlString, AuthStorageServiceImage, component.StorageService)
         YamlString = strings.ReplaceAll(YamlString, AuthRedisImage, component.Redis)
         YamlString = strings.ReplaceAll(YamlString, AuthRedisCommanderImage, component.Commander)
+        YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
         for _, env := range component.Envs {
             if env.Name == "REDIS_STORAGE_CLASS" {
@@ -505,6 +505,7 @@ func getAuthorizationServerDeployment(op utils.OperatorConfig, cr csmv1.Containe
     YamlString = strings.ReplaceAll(YamlString, AuthNamespace, authNamespace)
     YamlString = strings.ReplaceAll(YamlString, AuthRedisStorageClass, redisStorageClass)
+    YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
     return YamlString, nil
 }
@@ -620,6 +621,7 @@ func getAuthorizationIngressRules(op utils.OperatorConfig, cr csmv1.ContainerSto
     YamlString = strings.ReplaceAll(YamlString, AuthProxyHost, authHostname)
     YamlString = strings.ReplaceAll(YamlString, AuthProxyIngressHost, proxyIngressHost)
     YamlString = strings.ReplaceAll(YamlString, AuthProxyIngressClassName, proxyIngressClassName)
+    YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
     return YamlString, nil
 }
@@ -675,6 +677,7 @@ func getNginxIngressController(op utils.OperatorConfig, cr csmv1.ContainerStorag
     YamlString = string(buf)
     authNamespace := cr.Namespace
     YamlString = strings.ReplaceAll(YamlString, AuthNamespace, authNamespace)
+    YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
     return YamlString, nil
 }
diff --git a/pkg/modules/commonconfig.go b/pkg/modules/commonconfig.go
index 7deb10719..000a325fa 100644
--- a/pkg/modules/commonconfig.go
+++ b/pkg/modules/commonconfig.go
@@ -33,6 +33,8 @@ const (
     CertManagerManifest = "cert-manager.yaml"
     // CommonNamespace -
     CommonNamespace = ""
+    // CSMName - name
+    CSMName = ""
 )
 // SupportedDriverParam -
diff --git a/pkg/modules/observability.go b/pkg/modules/observability.go
index 8ff632a54..e3aed1755 100644
--- a/pkg/modules/observability.go
+++ b/pkg/modules/observability.go
@@ -171,6 +171,9 @@ const (
     // PMaxObsYamlFile - powermax metrics yaml file
     PMaxObsYamlFile string = "karavi-metrics-powermax.yaml"
+
+    // CSMNameSpace - namespace CSM is found in. Needed for cases where pod namespace is not namespace of CSM
+    CSMNameSpace = ""
 )
 // ObservabilitySupportedDrivers is a map containing the CSI Drivers supported by CSM Replication. The key is driver name and the value is the driver plugin identifier
@@ -292,6 +295,7 @@ func getTopology(op utils.OperatorConfig, cr csmv1.ContainerStorageModule) (stri
     }
     YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
+    YamlString = strings.ReplaceAll(YamlString, CSMNameSpace, cr.Namespace)
     YamlString = strings.ReplaceAll(YamlString, TopologyLogLevel, logLevel)
     YamlString = strings.ReplaceAll(YamlString, TopologyImage, topologyImage)
     return YamlString, nil
@@ -356,6 +360,7 @@ func getOtelCollector(op utils.OperatorConfig, cr csmv1.ContainerStorageModule)
     }
     YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
+    YamlString = strings.ReplaceAll(YamlString, CSMNameSpace, cr.Namespace)
     YamlString = strings.ReplaceAll(YamlString, OtelCollectorImage, otelCollectorImage)
     YamlString = strings.ReplaceAll(YamlString, NginxProxyImage, nginxProxyImage)
     return YamlString, nil
@@ -502,6 +507,7 @@ func getPowerScaleMetricsObjects(op utils.OperatorConfig, cr csmv1.ContainerStor
     }
     YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
+    YamlString = strings.ReplaceAll(YamlString, CSMNameSpace, cr.Namespace)
     YamlString = strings.ReplaceAll(YamlString, PowerScaleImage, pscaleImage)
     YamlString = strings.ReplaceAll(YamlString, PowerscaleLogLevel, logLevel)
     YamlString = strings.ReplaceAll(YamlString, PowerScaleMaxConcurrentQueries, maxConcurrentQueries)
@@ -703,6 +709,7 @@ func getPowerFlexMetricsObject(op utils.OperatorConfig, cr csmv1.ContainerStorag
     }
     YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
+    YamlString = strings.ReplaceAll(YamlString, CSMNameSpace, cr.Namespace)
     YamlString = strings.ReplaceAll(YamlString, PowerflexImage, pflexImage)
     YamlString = strings.ReplaceAll(YamlString, PowerflexLogLevel, logLevel)
     YamlString = strings.ReplaceAll(YamlString, PowerflexMaxConcurrentQueries, maxConcurrentQueries)
@@ -923,6 +930,7 @@ func getPowerMaxMetricsObject(op utils.OperatorConfig, cr csmv1.ContainerStorage
     }
     YamlString = strings.ReplaceAll(YamlString, CSMName, cr.Name)
+    YamlString = strings.ReplaceAll(YamlString, CSMNameSpace, cr.Namespace)
     YamlString = strings.ReplaceAll(YamlString, PmaxObsImage, pmaxImage)
     YamlString = strings.ReplaceAll(YamlString, PmaxLogLevel, logLevel)
     YamlString = strings.ReplaceAll(YamlString, PmaxLogFormat, logFormat)
diff --git a/pkg/utils/status.go b/pkg/utils/status.go
index 2f0ba431f..b8e4be001 100644
--- a/pkg/utils/status.go
+++ b/pkg/utils/status.go
@@ -41,6 +41,7 @@ var dMutex sync.RWMutex
 var checkModuleStatus = map[csmv1.ModuleType]func(context.Context, *csmv1.ContainerStorageModule, ReconcileCSM, *csmv1.ContainerStorageModuleStatus) (bool, error){
     csmv1.Observability: observabilityStatusCheck,
     csmv1.ApplicationMobility: appMobStatusCheck,
+    csmv1.AuthorizationServer: authProxyStatusCheck,
 }
 func getInt32(pointer *int32) int32 {
@@ -50,6 +51,58 @@
     return *pointer
 }
+// calculates deployment state of drivers only; module deployment status will be checked in checkModuleStatus
+func getDeploymentStatus(ctx context.Context, instance *csmv1.ContainerStorageModule, r ReconcileCSM) (int32, csmv1.PodStatus, error) {
+    log := logger.GetLogger(ctx)
+    var msg string
+    deployment := &appsv1.Deployment{}
+    var err error
+    desired := int32(0)
+    available := int32(0)
+    ready := int32(0)
+    numberUnavailable := int32(0)
+    totalReplicas := int32(0)
+
+    _, clusterClients, err := GetDefaultClusters(ctx, *instance, r)
+    if err != nil {
+        return int32(totalReplicas), csmv1.PodStatus{}, err
+    }
+
+    for _, cluster := range clusterClients {
+        log.Infof("deployment status for cluster: %s", cluster.ClusterID)
+        msg += fmt.Sprintf("error message for %s \n", cluster.ClusterID)
+
+        if instance.GetName() == "" || instance.GetName() == string(csmv1.Authorization) || instance.GetName() == string(csmv1.ApplicationMobility) {
+            log.Infof("Not a driver instance, will not check deploymentstatus")
+            return 0, csmv1.PodStatus{Available: "0"}, nil
+        }
+
+        err = cluster.ClusterCTRLClient.Get(ctx, t1.NamespacedName{
+            Name: instance.GetControllerName(),
+            Namespace: instance.GetNamespace(),
+        }, deployment)
+        if err != nil {
+            return 0, csmv1.PodStatus{Available: "0"}, err
+        }
+        log.Infof("Calculating status for deployment: %s", deployment.Name)
+        desired = deployment.Status.Replicas
+        available = deployment.Status.AvailableReplicas
+        ready = deployment.Status.ReadyReplicas
+        numberUnavailable = deployment.Status.UnavailableReplicas
+
+        log.Infow("deployment", "desired", desired)
+        log.Infow("deployment", "numberReady", ready)
+        log.Infow("deployment", "available", available)
+        log.Infow("deployment", "numberUnavailable", numberUnavailable)
+    }
+
+    return ready, csmv1.PodStatus{
+        Available: fmt.Sprintf("%d", available),
+        Desired: fmt.Sprintf("%d", desired),
+        Failed: fmt.Sprintf("%d", numberUnavailable),
+    }, err
+}
+
 // TODO: Currently commented this block of code as the API used to get the latest deployment status is not working as expected
 // TODO: Can be uncommented once this issues gets sorted out
 /* func getDeploymentStatus(ctx context.Context, instance *csmv1.ContainerStorageModule, r ReconcileCSM) (int32, csmv1.PodStatus, error) {
@@ -236,6 +289,7 @@ func getDaemonSetStatus(ctx context.Context, instance *csmv1.ContainerStorageMod
     totalAvialable := int32(0)
     totalDesired := int32(0)
     totalFailedCount := 0
+    totalRunning := int32(0)
     _, clusterClients, err := GetDefaultClusters(ctx, *instance, r)
     if err != nil {
@@ -250,6 +304,12 @@ func getDaemonSetStatus(ctx context.Context, instance *csmv1.ContainerStorageMod
         nodeName := instance.GetNodeName()
+        // Application-mobility has a different node name than the drivers
+        if instance.GetName() == "application-mobility" {
+            log.Infof("Changing nodeName for application-mobility")
+            nodeName = "application-mobility-node-agent"
+        }
+
         log.Infof("nodeName is %s", nodeName)
         err := cluster.ClusterCTRLClient.Get(ctx, t1.NamespacedName{
             Name: nodeName,
@@ -292,16 +352,19 @@ func getDaemonSetStatus(ctx context.Context, instance *csmv1.ContainerStorageMod
                     }
                 }
             }
+            if pod.Status.Phase == corev1.PodRunning {
+                totalRunning++
+            }
         }
         for k, v := range errMap {
             msg += k + "=" + v
         }
-        log.Infof("daemonset status available pods %d", ds.Status.NumberAvailable)
+        log.Infof("daemonset status available pods %d", totalRunning)
         log.Infof("daemonset status failedCount pods %d", failedCount)
         log.Infof("daemonset status desired pods %d", ds.Status.DesiredNumberScheduled)
-        totalAvialable += ds.Status.NumberAvailable
+        totalAvialable += totalRunning
         totalDesired += ds.Status.DesiredNumberScheduled
         totalFailedCount += failedCount
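A standalone sketch of the totalRunning accounting added to getDaemonSetStatus above: rather than trusting ds.Status.NumberAvailable, availability is derived from the pods that are actually in the Running phase (the pod list and values here are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// countRunning mirrors the new loop: count only pods whose phase is Running.
func countRunning(pods []corev1.Pod) int32 {
	var running int32
	for _, pod := range pods {
		if pod.Status.Phase == corev1.PodRunning {
			running++
		}
	}
	return running
}

func main() {
	pods := []corev1.Pod{
		{Status: corev1.PodStatus{Phase: corev1.PodRunning}},
		{Status: corev1.PodStatus{Phase: corev1.PodPending}},
	}
	fmt.Println(countRunning(pods)) // prints 1
}
```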
@@ -321,26 +384,37 @@ func calculateState(ctx context.Context, instance *csmv1.ContainerStorageModule,
     log := logger.GetLogger(ctx)
     running := true
     var err error
+    nodeStatusGood := true
+    newStatus.State = constants.Succeeded
     // TODO: Currently commented this block of code as the API used to get the latest deployment status is not working as expected
     // TODO: Can be uncommented once this issues gets sorted out
-    /* controllerReplicas, controllerStatus, controllerErr := getDeploymentStatus(ctx, instance, r)
-    expected, nodeStatus, daemonSetErr := getDaemonSetStatus(ctx, instance, r)
-    newStatus.ControllerStatus = controllerStatus
-    newStatus.NodeStatus = nodeStatus */
+    controllerReplicas, controllerStatus, controllerErr := getDeploymentStatus(ctx, instance, r)
+    if controllerErr != nil {
+        log.Infof("error from getDeploymentStatus: %s", controllerErr.Error())
+    }
-    expected, nodeStatus, daemonSetErr := getDaemonSetStatus(ctx, instance, r)
-    newStatus.NodeStatus = nodeStatus
-    controllerReplicas := newStatus.ControllerStatus.Desired
-    controllerStatus := newStatus.ControllerStatus
+    // Auth proxy has no daemonset. Putting this if/else in here and setting nodeStatusGood to true by
+    // default is a little hacky but will be fixed when we refactor the status code in CSM 1.10 or 1.11
+    log.Infof("instance.GetName() is %s", instance.GetName())
+    if instance.GetName() != "" && instance.GetName() != string(csmv1.Authorization) {
+        expected, nodeStatus, daemonSetErr := getDaemonSetStatus(ctx, instance, r)
+        newStatus.NodeStatus = nodeStatus
+        if daemonSetErr != nil {
+            err = daemonSetErr
+            log.Infof("calculate Daemonseterror msg [%s]", daemonSetErr.Error())
+        }
+
+        log.Infof("daemonset expected [%d]", expected)
+        log.Infof("daemonset nodeStatus.Available [%s]", nodeStatus.Available)
+        nodeStatusGood = (fmt.Sprintf("%d", expected) == nodeStatus.Available)
+    }
+
+    newStatus.ControllerStatus = controllerStatus
-    newStatus.State = constants.Succeeded
     log.Infof("deployment controllerReplicas [%s]", controllerReplicas)
     log.Infof("deployment controllerStatus.Available [%s]", controllerStatus.Available)
-    log.Infof("daemonset expected [%d]", expected)
-    log.Infof("daemonset nodeStatus.Available [%s]", nodeStatus.Available)
-
-    if (controllerReplicas == controllerStatus.Available) && (fmt.Sprintf("%d", expected) == nodeStatus.Available) {
+    if (fmt.Sprintf("%d", controllerReplicas) == controllerStatus.Available) && nodeStatusGood {
         for _, module := range instance.Spec.Modules {
             moduleStatus, exists := checkModuleStatus[module.Name]
             if exists && module.Enabled {
@@ -352,22 +426,22 @@ func calculateState(ctx context.Context, instance *csmv1.ContainerStorageModule,
                 if !moduleRunning {
                     running = false
                     newStatus.State = constants.Failed
-                    log.Infof("%s module not running", module)
+                    log.Infof("%s module not running", module.Name)
                     break
                 }
+                log.Infof("%s module running", module.Name)
             }
         }
     } else {
+        log.Infof("either controllerReplicas != controllerStatus.Available or nodeStatus is bad")
+        log.Infof("controllerReplicas", controllerReplicas)
+        log.Infof("controllerStatus.Available", controllerStatus.Available)
+        log.Infof("nodeStatusGood", nodeStatusGood)
         running = false
         newStatus.State = constants.Failed
     }
-    log.Infof("calculate overall state [%s]", newStatus.State)
-    if daemonSetErr != nil {
-        err = daemonSetErr
-        log.Infof("calculate Daemonseterror msg [%s]", daemonSetErr.Error())
-    }
-
+    log.Infof("setting status to ", "newStatus", newStatus)
     SetStatus(ctx, r, instance, newStatus)
     return running, err
 }
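calculateState consults the checkModuleStatus registry for every enabled module; with this diff the Authorization proxy server gets its own entry. A simplified sketch of that aggregation, with string keys and no-argument check functions standing in for the real csmv1.ModuleType keys and (ctx, instance, reconciler, status) signatures:

```go
package main

import "fmt"

// moduleCheck stands in for the registered status-check functions
// (observabilityStatusCheck, appMobStatusCheck, authProxyStatusCheck).
type moduleCheck func() (bool, error)

// Keys are illustrative stand-ins for csmv1.ModuleType values.
var checkModuleStatus = map[string]moduleCheck{
	"observability":        func() (bool, error) { return true, nil },
	"application-mobility": func() (bool, error) { return true, nil },
	"authorization-server": func() (bool, error) { return true, nil },
}

// allEnabledModulesRunning sketches the loop in calculateState: every enabled
// module with a registered check must report running, otherwise the overall
// CSM state is marked Failed.
func allEnabledModulesRunning(enabled []string) bool {
	for _, name := range enabled {
		check, ok := checkModuleStatus[name]
		if !ok {
			continue
		}
		running, err := check()
		if err != nil || !running {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(allEnabledModulesRunning([]string{"authorization-server"}))
}
```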
log.Infow("HandleSuccess Driver state ", "newStatus.State", newStatus.State) - if newStatus.State == constants.Running { + if newStatus.State == constants.Succeeded { // If previously we were in running state - if oldStatus.State == constants.Running { - log.Info("HandleSuccess Driver state didn't change from Running") + if oldStatus.State == constants.Succeeded { + log.Info("HandleSuccess Driver state didn't change from Succeeded") + } else { + log.Info("HandleSuccess Driver state changed to Succeeded") } return reconcile.Result{}, nil } @@ -767,6 +844,14 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor metricsRunning := false topologyRunning := false + driverName := instance.Spec.Driver.CSIDriverType + + // TODO: PowerScale DriverType should be changed from "isilon" to "powerscale" + // this is a temporary fix until we can do that + if driverName == "isilon" { + driverName = "powerscale" + } + for _, m := range instance.Spec.Modules { if m.Name == csmv1.Observability { for _, c := range m.Components { @@ -785,7 +870,7 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor certEnabled = true } } - if c.Name == fmt.Sprintf("metrics-%s", instance.Spec.Driver.CSIDriverType) { + if c.Name == fmt.Sprintf("metrics-%s", driverName) { if *c.Enabled { metricsEnabled = true } @@ -794,9 +879,8 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor } } - namespace := "karavi" opts := []client.ListOption{ - client.InNamespace(namespace), + client.InNamespace(ObservabilityNamespace), } deploymentList := &appsv1.DeploymentList{} err := r.GetClient().List(ctx, deploymentList, opts...) @@ -815,11 +899,11 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor if otelEnabled { otelRunning = checkFn(&deployment) } - case fmt.Sprintf("%s-metrics-%s", namespace, instance.Spec.Driver.CSIDriverType): + case fmt.Sprintf("%s-metrics-%s", ObservabilityNamespace, driverName): if metricsEnabled { metricsRunning = checkFn(&deployment) } - case fmt.Sprintf("%s-topology", namespace): + case fmt.Sprintf("%s-topology", ObservabilityNamespace): if topologyEnabled { topologyRunning = checkFn(&deployment) } @@ -881,3 +965,104 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor return false, nil } + +// authProxyStatusCheck - calculate success state for auth proxy +func authProxyStatusCheck(ctx context.Context, instance *csmv1.ContainerStorageModule, r ReconcileCSM, _ *csmv1.ContainerStorageModuleStatus) (bool, error) { + log := logger.GetLogger(ctx) + certEnabled := false + nginxEnabled := false + + for _, m := range instance.Spec.Modules { + if m.Name == csmv1.AuthorizationServer { + for _, c := range m.Components { + if c.Name == "ingress-nginx" && *c.Enabled { + nginxEnabled = true + } + if c.Name == "cert-manager" && *c.Enabled { + certEnabled = true + } + } + } + } + + opts := []client.ListOption{ + client.InNamespace(instance.GetNamespace()), + } + deploymentList := &appsv1.DeploymentList{} + err := r.GetClient().List(ctx, deploymentList, opts...) 
@@ -881,3 +965,104 @@ func observabilityStatusCheck(ctx context.Context, instance *csmv1.ContainerStor
     return false, nil
 }
+
+// authProxyStatusCheck - calculate success state for auth proxy
+func authProxyStatusCheck(ctx context.Context, instance *csmv1.ContainerStorageModule, r ReconcileCSM, _ *csmv1.ContainerStorageModuleStatus) (bool, error) {
+    log := logger.GetLogger(ctx)
+    certEnabled := false
+    nginxEnabled := false
+
+    for _, m := range instance.Spec.Modules {
+        if m.Name == csmv1.AuthorizationServer {
+            for _, c := range m.Components {
+                if c.Name == "ingress-nginx" && *c.Enabled {
+                    nginxEnabled = true
+                }
+                if c.Name == "cert-manager" && *c.Enabled {
+                    certEnabled = true
+                }
+            }
+        }
+    }
+
+    opts := []client.ListOption{
+        client.InNamespace(instance.GetNamespace()),
+    }
+    deploymentList := &appsv1.DeploymentList{}
+    err := r.GetClient().List(ctx, deploymentList, opts...)
+    if err != nil {
+        return false, err
+    }
+
+    checkFn := func(deployment *appsv1.Deployment) bool {
+        return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas
+    }
+
+    for _, deployment := range deploymentList.Items {
+        deployment := deployment
+        switch deployment.Name {
+        case "authorization-ingress-nginx-controller":
+            if nginxEnabled {
+                if !checkFn(&deployment) {
+                    log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                    return false, nil
+                }
+            }
+        case "cert-manager":
+            if certEnabled {
+                if !checkFn(&deployment) {
+                    log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                    return false, nil
+                }
+            }
+        case "cert-manager-cainjector":
+            if certEnabled {
+                if !checkFn(&deployment) {
+                    log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                    return false, nil
+                }
+            }
+        case "cert-manager-webhook":
+            if certEnabled {
+                if !checkFn(&deployment) {
+                    log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                    return false, nil
+                }
+            }
+        case "proxy-server":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        case "redis-commander":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        case "redis-primary":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        case "role-service":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        case "storage-service":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        case "tenant-service":
+            if !checkFn(&deployment) {
+                log.Info("%s component not running in auth proxy deployment", deployment.Name)
+                return false, nil
+            }
+        }
+    }
+
+    log.Info("auth proxy deployment successful")
+
+    return true, nil
+}
diff --git a/tests/config/driverconfig/powerflex/v2.9.1/node.yaml b/tests/config/driverconfig/powerflex/v2.9.1/node.yaml
index b1acd48c2..ee89dedf9 100644
--- a/tests/config/driverconfig/powerflex/v2.9.1/node.yaml
+++ b/tests/config/driverconfig/powerflex/v2.9.1/node.yaml
@@ -100,7 +100,7 @@ spec:
           - name: SSL_CERT_DIR
             value: /certs
           - name: X_CSI_HEALTH_MONITOR_ENABLED
-            value: false
+            value: ""
           - name: X_CSI_APPROVE_SDC_ENABLED
             value:
           - name: X_CSI_RENAME_SDC_ENABLED
@@ -109,11 +109,6 @@ spec:
             value:
           - name: X_CSI_MAX_VOLUMES_PER_NODE
             value:
-          - name: X_CSI_POWERFLEX_KUBE_NODE_NAME
-            valueFrom:
-              fieldRef:
-                apiVersion: v1
-                fieldPath: spec.nodeName
           volumeMounts:
             - name: driver-path
               mountPath: /plugins/vxflexos.emc.dell.com
@@ -123,6 +118,8 @@ spec:
             - name: pods-path
               mountPath: /pods
               mountPropagation: "Bidirectional"
+            - name: noderoot
+              mountPath: /noderoot
             - name: dev
               mountPath: /dev
             - name: vxflexos-config
@@ -225,6 +222,10 @@ spec:
         - name: pods-path
          hostPath:
            path: /pods
            type: Directory
+        - name: noderoot
+          hostPath:
+            path: /
+            type: Directory
         - name: dev
           hostPath:
             path: /dev
diff --git a/tests/e2e/steps/steps_def.go b/tests/e2e/steps/steps_def.go
index e958828cf..57fa6966a 100644
--- a/tests/e2e/steps/steps_def.go
+++ b/tests/e2e/steps/steps_def.go
@@ -69,7 +69,6 @@ var correctlyAuthInjected = func(cr csmv1.ContainerStorageModule, annotations ma
     if err != nil {
         return err
     }
-
     err = modules.CheckApplyContainersAuth(cnt, string(cr.Spec.Driver.CSIDriverType), true)
     if err != nil {
         return err