From 538154770284090574996d67f59eec15a7bae1d5 Mon Sep 17 00:00:00 2001 From: Steven Boland Date: Mon, 23 Oct 2023 15:05:39 +0100 Subject: [PATCH] BPF Upgrade without disruption --- pkg/controller/installation/bpf.go | 111 ++++++------- pkg/controller/installation/bpf_test.go | 2 +- .../installation/core_controller.go | 19 ++- .../installation/core_controller_test.go | 42 +++++ .../logcollector/logcollector_controller.go | 21 +-- .../elastic/elastic_controller_test.go | 42 ++--- pkg/controller/logstorage/elastic/mock.go | 74 +++++++++ .../logstorage/users/user_controller.go | 150 ++++++++++++++++-- .../users/users_controller_suite_test.go | 34 ++++ .../logstorage/users/users_controller_test.go | 106 +++++++++++++ pkg/controller/manager/manager_controller.go | 2 +- pkg/controller/utils/elasticsearch.go | 98 +++++++++--- pkg/controller/utils/utils_test.go | 36 +++++ .../common/networkpolicy/networkpolicy.go | 2 +- pkg/render/fluentd.go | 30 +--- pkg/render/fluentd_test.go | 10 +- pkg/render/logstorage/linseed/linseed.go | 1 + pkg/render/logstorage/linseed/linseed_test.go | 5 +- pkg/render/manager.go | 2 + pkg/render/manager_test.go | 27 ++++ .../expected_policies/compliance-server.json | 2 +- .../compliance-server_ocp.json | 2 +- .../testutils/expected_policies/dex.json | 8 +- .../testutils/expected_policies/dex_ocp.json | 8 +- .../expected_policies/elasticsearch.json | 8 +- .../expected_policies/elasticsearch_ocp.json | 8 +- .../expected_policies/es-gateway.json | 20 +-- .../expected_policies/es-gateway_ocp.json | 20 +-- .../testutils/expected_policies/guardian.json | 8 +- .../expected_policies/guardian_ocp.json | 8 +- .../testutils/expected_policies/kibana.json | 4 +- .../expected_policies/kibana_ocp.json | 4 +- .../testutils/expected_policies/linseed.json | 22 +-- .../linseed_dpi_enabled.json | 22 +-- .../expected_policies/linseed_ocp.json | 22 +-- .../linseed_ocp_dpi_enabled.json | 22 +-- .../expected_policies/packetcapture.json | 2 +- .../packetcapture_managed.json | 2 +- .../packetcapture_managed_ocp.json | 2 +- .../expected_policies/packetcapture_ocp.json | 2 +- test/mainline_test.go | 30 ++++ 41 files changed, 783 insertions(+), 257 deletions(-) create mode 100644 pkg/controller/logstorage/elastic/mock.go create mode 100644 pkg/controller/logstorage/users/users_controller_suite_test.go create mode 100644 pkg/controller/logstorage/users/users_controller_test.go diff --git a/pkg/controller/installation/bpf.go b/pkg/controller/installation/bpf.go index ddc8799ac5..bea5a53d95 100644 --- a/pkg/controller/installation/bpf.go +++ b/pkg/controller/installation/bpf.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func bpfUpgradeWithoutDisruption(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) error { +func bpfUpgradeDaemonsetEnvVar(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) error { // Query calico-node DS: if FELIX_BPFENABLED env var set and FC bpfEnabled unset then patch FC and quit. patchFelixConfig, err := processDaemonsetEnvVar(r, ctx, ds, fc, reqLogger) @@ -38,64 +38,50 @@ func bpfUpgradeWithoutDisruption(r *ReconcileInstallation, ctx context.Context, return err } - // Otherwise check dataplane and patch Felix Config according to logic: - if !patchFelixConfig { + // Attempt to patch Felix Config now. 
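+	// (processDaemonsetEnvVar now only reports whether a patch is needed; the
+	// patch itself and the success logging happen in patchFelixConfigurationImpl.)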
+	return patchFelixConfigurationImpl(r, ctx, install, fc, reqLogger, patchFelixConfig)
+}

-		// Check the install dataplane mode is either Iptables or BPF.
-		installBpfEnabled := common.BpfDataplaneEnabled(&install.Spec)
+func bpfUpgradeWithoutDisruption(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) error {

-		// Check edge case where User has externally patched FelixConfig bpfEnabled which causes conflict to prevent Operator from upgrading dataplane.
-		if fc.Spec.BPFEnabled != nil {
+	var patchFelixConfig bool

-			fcBPFEnabled := *fc.Spec.BPFEnabled
-			if installBpfEnabled != fcBPFEnabled {
-
-				// Ensure Felix Config annotations are either empty or equal previous FC bpfEnabled value.
-				if fc.Annotations[render.BpfOperatorAnnotation] == strconv.FormatBool(installBpfEnabled) {
-					text := fmt.Sprintf("An error occurred while attempting patch Felix Config bpfEnabled: '%s' as Felix Config has been modified externally to '%s'",
-						strconv.FormatBool(installBpfEnabled),
-						strconv.FormatBool(fcBPFEnabled))
-					err = errors.New(text)
-					return err
-				}
-			}
-		}
+	// Determine whether the install dataplane mode is iptables or BPF.
+	installBpfEnabled := common.BpfDataplaneEnabled(&install.Spec)

-		if !installBpfEnabled {
-			// IP Tables dataplane:
-			// Only patch Felix Config once to prevent log spamming.
-			if fc.Spec.BPFEnabled == nil || *fc.Spec.BPFEnabled {
-				patchFelixConfig = true
-			}
-		} else {
-			// BPF dataplane:
-			// Check daemonset rollout complete before patching.
-			if fc.Spec.BPFEnabled == nil || !(*fc.Spec.BPFEnabled) {
-				patchFelixConfig = checkDaemonsetRolloutComplete(ds)
-			}
-		}
+	// Handle the edge case where the user has externally patched FelixConfig bpfEnabled, which conflicts with the Operator upgrading the dataplane.
+	if fc.Spec.BPFEnabled != nil {

-		// Attempt to patch Felix Config now.
-		if patchFelixConfig {
+		fcBPFEnabled := *fc.Spec.BPFEnabled
+		if installBpfEnabled != fcBPFEnabled {

-			err = patchFelixConfiguration(r, ctx, fc, reqLogger, installBpfEnabled)
-			if err != nil {
+			// Ensure the Felix Config annotation is either empty or equal to the previous FC bpfEnabled value.
+			if fc.Annotations[render.BpfOperatorAnnotation] == strconv.FormatBool(installBpfEnabled) {
+				text := fmt.Sprintf("An error occurred while attempting to patch Felix Config bpfEnabled: '%s' as Felix Config has been modified externally to '%s'",
+					strconv.FormatBool(installBpfEnabled),
+					strconv.FormatBool(fcBPFEnabled))
+				err := errors.New(text)
 				return err
 			}
-
-			// Ensure if no errors occurred while attempting to patch Falix Config then successfully patched.
-			patchFelixConfig = err == nil
 		}
 	}
 
-	if patchFelixConfig {
-		if fc.Spec.BPFEnabled != nil {
-			msg := fmt.Sprintf("Successfully patched Felix Config OK bpfEnabled='%s'", strconv.FormatBool(*fc.Spec.BPFEnabled))
-			reqLogger.Info(msg)
+	if !installBpfEnabled {
+		// iptables dataplane:
+		// Only patch Felix Config once to prevent log spamming.
+		if fc.Spec.BPFEnabled == nil || *fc.Spec.BPFEnabled {
+			patchFelixConfig = true
+		}
+	} else {
+		// BPF dataplane:
+		// Ensure the daemonset rollout is complete before patching.
+		if fc.Spec.BPFEnabled == nil || !(*fc.Spec.BPFEnabled) {
+			patchFelixConfig = checkDaemonsetRolloutComplete(ds)
 		}
 	}
 
-	return nil
+	// Attempt to patch Felix Config now.
+	return patchFelixConfigurationImpl(r, ctx, install, fc, reqLogger, patchFelixConfig)
 }
 
 func processDaemonsetEnvVar(r *ReconcileInstallation, ctx context.Context, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) (bool, error) {
@@ -115,16 +101,7 @@ func processDaemonsetEnvVar(r *ReconcileInstallation, ctx context.Context, ds *a
 		}
 	}
 
-	if dsBpfEnabledStatus && fc.Spec.BPFEnabled == nil {
-		err = patchFelixConfiguration(r, ctx, fc, reqLogger, dsBpfEnabledStatus)
-		if err != nil {
-			return false, err
-		} else {
-			return true, nil
-		}
-	}
-
-	return false, nil
+	return dsBpfEnabledStatus && fc.Spec.BPFEnabled == nil, nil
 }
 
 func checkDaemonsetRolloutComplete(ds *appsv1.DaemonSet) bool {
@@ -142,6 +119,30 @@ func checkDaemonsetRolloutComplete(ds *appsv1.DaemonSet) bool {
 	return false
 }
 
+func patchFelixConfigurationImpl(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, fc *crdv1.FelixConfiguration, reqLogger logr.Logger, patchFelixConfig bool) error {
+
+	if patchFelixConfig {
+
+		installBpfEnabled := common.BpfDataplaneEnabled(&install.Spec)
+		err := patchFelixConfiguration(r, ctx, fc, reqLogger, installBpfEnabled)
+		if err != nil {
+			return err
+		}
+
+		// If no error occurred, the Felix Config patch succeeded; record that so the result is logged below.
+		patchFelixConfig = err == nil
+	}
+
+	if patchFelixConfig {
+		if fc.Spec.BPFEnabled != nil {
+			msg := fmt.Sprintf("Successfully patched Felix Config bpfEnabled='%s'", strconv.FormatBool(*fc.Spec.BPFEnabled))
+			reqLogger.Info(msg)
+		}
+	}
+
+	return nil
+}
+
 func patchFelixConfiguration(r *ReconcileInstallation, ctx context.Context, fc *crdv1.FelixConfiguration, reqLogger logr.Logger, patchBpfEnabled bool) error {
 
 	// Obtain the original FelixConfig to patch.
diff --git a/pkg/controller/installation/bpf_test.go b/pkg/controller/installation/bpf_test.go
index 85149cd868..eca021585c 100644
--- a/pkg/controller/installation/bpf_test.go
+++ b/pkg/controller/installation/bpf_test.go
@@ -181,7 +181,7 @@ var _ = Describe("Testing BPF Upgrade without disruption during core-controller
 		Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
 
 		// Act.
-		err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+		err := bpfUpgradeDaemonsetEnvVar(&r, ctx, cr, ds, fc, reqLogger)
 		Expect(err).ShouldNot(HaveOccurred())
 
 		// Assert.
diff --git a/pkg/controller/installation/core_controller.go b/pkg/controller/installation/core_controller.go
index 143315a53f..e089ca7c37 100644
--- a/pkg/controller/installation/core_controller.go
+++ b/pkg/controller/installation/core_controller.go
@@ -1418,6 +1418,23 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile
 		return reconcile.Result{}, err
 	}
 
+	// BPF Upgrade env var initial check:
+	calicoNodeDaemonset := appsv1.DaemonSet{}
+
+	err = r.client.Get(ctx, types.NamespacedName{Namespace: common.CalicoNamespace, Name: common.NodeDaemonSetName}, &calicoNodeDaemonset)
+	if err != nil {
+		reqLogger.Error(err, "An error occurred when querying the calico-node daemonset")
+		return reconcile.Result{}, err
+	}
+
+	// Next, delegate to the BPF upgrade env var logic using the state of the installation and the calico-node daemonset.
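+	// (This early pass only mirrors an explicit FELIX_BPFENABLED env var from the
+	// daemonset into Felix Config; the rollout-aware dataplane switch still runs
+	// later in Reconcile via bpfUpgradeWithoutDisruption.)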
+ err = bpfUpgradeDaemonsetEnvVar(r, ctx, instance, &calicoNodeDaemonset, felixConfiguration, reqLogger) + if err != nil { + reqLogger.Error(err, "An error occurred when attempting to process BPF Upgrade Calico-Node DS env var") + return reconcile.Result{}, err + } + // Create a component handler to create or update the rendered components. handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) for _, component := range components { @@ -1501,7 +1518,7 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile // BPF Upgrade without disruption: // First get the calico-node daemonset. - calicoNodeDaemonset := appsv1.DaemonSet{} + calicoNodeDaemonset = appsv1.DaemonSet{} err = r.client.Get(ctx, types.NamespacedName{Namespace: common.CalicoNamespace, Name: common.NodeDaemonSetName}, &calicoNodeDaemonset) if err != nil { reqLogger.Error(err, "An error occurred when querying the calico-node daemonset") diff --git a/pkg/controller/installation/core_controller_test.go b/pkg/controller/installation/core_controller_test.go index 5760d96450..e8b50cc6b8 100644 --- a/pkg/controller/installation/core_controller_test.go +++ b/pkg/controller/installation/core_controller_test.go @@ -426,6 +426,20 @@ var _ = Describe("Testing core-controller installation", func() { }, }, })).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() @@ -810,6 +824,20 @@ var _ = Describe("Testing core-controller installation", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() @@ -984,6 +1012,20 @@ var _ = Describe("Testing core-controller installation", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go index 501abe2c04..93761f7c7e 100644 --- a/pkg/controller/logcollector/logcollector_controller.go +++ b/pkg/controller/logcollector/logcollector_controller.go @@ -351,16 +351,6 @@ func 
(r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, err } - esClusterConfig, err := utils.GetElasticsearchClusterConfig(ctx, r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) - return reconcile.Result{}, nil - } - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) - return reconcile.Result{}, err - } - pullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving pull secrets", err, reqLogger) @@ -541,10 +531,21 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } var eksConfig *render.EksCloudwatchLogConfig + var esClusterConfig *relasticsearch.ClusterConfig if installation.KubernetesProvider == operatorv1.ProviderEKS { log.Info("Managed kubernetes EKS found, getting necessary credentials and config") if instance.Spec.AdditionalSources != nil { if instance.Spec.AdditionalSources.EksCloudwatchLog != nil { + esClusterConfig, err = utils.GetElasticsearchClusterConfig(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) + return reconcile.Result{}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) + return reconcile.Result{}, err + } + eksConfig, err = getEksCloudwatchLogConfig(r.client, instance.Spec.AdditionalSources.EksCloudwatchLog.FetchInterval, instance.Spec.AdditionalSources.EksCloudwatchLog.Region, diff --git a/pkg/controller/logstorage/elastic/elastic_controller_test.go b/pkg/controller/logstorage/elastic/elastic_controller_test.go index ba057e5e3f..3e15abeec2 100644 --- a/pkg/controller/logstorage/elastic/elastic_controller_test.go +++ b/pkg/controller/logstorage/elastic/elastic_controller_test.go @@ -198,7 +198,7 @@ var _ = Describe("LogStorage controller", func() { Context("LogStorage is nil", func() { // Run the reconciler, expect no error. 
- r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) _, err = r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -215,7 +215,7 @@ var _ = Describe("LogStorage controller", func() { }) It("returns an error if the LogStorage resource exists and is not marked for deletion", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceValidationError, "LogStorage validation failed - cluster type is managed but LogStorage CR still exists", mock.Anything, mock.Anything).Return() result, err := r.Reconcile(ctx, reconcile.Request{}) @@ -232,7 +232,7 @@ var _ = Describe("LogStorage controller", func() { mockStatus.On("ReadyToMonitor") // mockStatus.On("SetMetaData", mock.Anything).Return() - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) ls := &operatorv1.LogStorage{} @@ -348,7 +348,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -463,7 +463,7 @@ var _ = Describe("LogStorage controller", func() { ObjectMeta: metav1.ObjectMeta{Namespace: render.ElasticsearchNamespace, Name: render.OIDCUsersESSecretName}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -591,7 +591,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) // Elasticsearch and kibana secrets are 
good. @@ -635,7 +635,7 @@ var _ = Describe("LogStorage controller", func() { Expect(err).ShouldNot(HaveOccurred()) Expect(cli.Update(ctx, kbSecret)).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -719,7 +719,7 @@ var _ = Describe("LogStorage controller", func() { Expect(cli.Create(ctx, rec)).ShouldNot(HaveOccurred()) } - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for curator secrets to become available", mock.Anything, mock.Anything).Return() @@ -759,7 +759,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -830,7 +830,7 @@ var _ = Describe("LogStorage controller", func() { }) It("should use default images", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -933,7 +933,7 @@ var _ = Describe("LogStorage controller", func() { }, }, })).ToNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -1056,7 +1056,7 @@ var _ = Describe("LogStorage controller", func() { // mockStatus.On("SetMetaData", mock.Anything).Return() var err error - r, err = NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err = NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) }) @@ -1065,7 +1065,7 @@ var _ = Describe("LogStorage controller", func() { }) It("should wait if tier watch is not ready", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, 
dns.DefaultClusterDomain, &utils.ReadyFlag{}) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, &utils.ReadyFlag{}) Expect(err).ShouldNot(HaveOccurred()) utils.ExpectWaitForTierWatch(ctx, r, mockStatus) }) @@ -1122,7 +1122,7 @@ var _ = Describe("LogStorage controller", func() { }) It("deletes Elasticsearch and Kibana then removes the finalizers on the LogStorage CR", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -1313,17 +1313,3 @@ func CreateLogStorage(client client.Client, ls *operatorv1.LogStorage) { // Create the LogStorage object. ExpectWithOffset(1, client.Create(context.Background(), ls)).ShouldNot(HaveOccurred()) } - -type mockESClient struct{} - -func mockESCLICreator(client client.Client, ctx context.Context, elasticHTTPSEndpoint string) (utils.ElasticClient, error) { - return &mockESClient{}, nil -} - -func (m *mockESClient) CreateUser(ctx context.Context, user *utils.User) error { - return fmt.Errorf("CreateUser not implemented in mock client") -} - -func (*mockESClient) SetILMPolicies(ctx context.Context, ls *operatorv1.LogStorage) error { - return nil -} diff --git a/pkg/controller/logstorage/elastic/mock.go b/pkg/controller/logstorage/elastic/mock.go new file mode 100644 index 0000000000..0c2829f1a4 --- /dev/null +++ b/pkg/controller/logstorage/elastic/mock.go @@ -0,0 +1,74 @@ +// Copyright (c) 2020-2023 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
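+
+// This file provides a testify-based mock of utils.ElasticClient. Tests can
+// inject a prepared MockESClient through MockESCLICreator by stashing it in
+// the context under MockESClientKey("mockESClient").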
+ +package elastic + +import ( + "context" + "fmt" + + "github.com/stretchr/testify/mock" + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/controller/utils" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type MockESClientKey string + +type MockESClient struct { + mock.Mock +} + +func MockESCLICreator(_ client.Client, ctx context.Context, _ string) (utils.ElasticClient, error) { + if esCli := ctx.Value(MockESClientKey("mockESClient")); esCli != nil { + return esCli.(*MockESClient), nil + } + return &MockESClient{}, nil +} + +func (m *MockESClient) CreateUser(_ context.Context, _ *utils.User) error { + return fmt.Errorf("CreateUser not implemented in mock client") +} + +func (m *MockESClient) SetILMPolicies(_ context.Context, _ *operatorv1.LogStorage) error { + return nil +} + +func (m *MockESClient) DeleteRoles(ctx context.Context, roles []utils.Role) error { + var ret mock.Arguments + for _, role := range roles { + ret = m.MethodCalled("deleteRole", ctx, role) + if ret.Error(0) != nil { + return ret.Error(0) + } + } + + ret = m.Called(ctx, roles) + return ret.Error(0) +} + +func (m *MockESClient) DeleteUser(ctx context.Context, u *utils.User) error { + ret := m.MethodCalled("DeleteRoles", ctx, u.Roles) + if ret.Error(0) != nil { + return ret.Error(0) + } + + ret = m.Called(ctx, u) + return ret.Error(0) +} + +func (m *MockESClient) GetUsers(ctx context.Context) ([]utils.User, error) { + ret := m.Called(ctx) + return ret.Get(0).([]utils.User), ret.Error(1) +} diff --git a/pkg/controller/logstorage/users/user_controller.go b/pkg/controller/logstorage/users/user_controller.go index 10405bdafa..3949a23c19 100644 --- a/pkg/controller/logstorage/users/user_controller.go +++ b/pkg/controller/logstorage/users/user_controller.go @@ -17,13 +17,13 @@ package users import ( "context" "fmt" + "strings" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + "github.com/go-logr/logr" operatorv1 "github.com/tigera/operator/api/v1" corev1 "k8s.io/api/core/v1" - "github.com/go-logr/logr" - "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" @@ -49,9 +49,16 @@ type UserController struct { client client.Client scheme *runtime.Scheme status status.StatusManager + esClientFn utils.ElasticsearchClientCreator multiTenant bool } +type UsersCleanupController struct { + client client.Client + scheme *runtime.Scheme + esClientFn utils.ElasticsearchClientCreator +} + func Add(mgr manager.Manager, opts options.AddOptions) error { if !opts.EnterpriseCRDExists { return nil @@ -68,6 +75,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { scheme: mgr.GetScheme(), multiTenant: opts.MultiTenant, status: status.New(mgr.GetClient(), "log-storage-users", opts.KubernetesVersion), + esClientFn: utils.NewElasticClient, } r.status.Run(opts.ShutdownContext) @@ -115,6 +123,28 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("log-storage-user-controller failed to create periodic reconcile watch: %w", err) } + // Now that the users controller is set up, we can also set up the controller that cleans up stale users + usersCleanupReconciler := &UsersCleanupController{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + esClientFn: utils.NewElasticClient, + } + + // Create a controller using the reconciler and register it with the manager to receive reconcile calls. 
+ usersCleanupController, err := controller.New("log-storage-cleanup-controller", mgr, controller.Options{Reconciler: usersCleanupReconciler}) + if err != nil { + return err + } + + if err = usersCleanupController.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil { + return fmt.Errorf("log-storage-cleanup-controller failed to watch Tenant resource: %w", err) + } + + err = utils.AddPeriodicReconcile(usersCleanupController, utils.PeriodicReconcileTime, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("log-storage-cleanup-controller failed to create periodic reconcile watch: %w", err) + } + return nil } @@ -172,13 +202,37 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, nil } + clusterIDConfigMap := corev1.ConfigMap{} + clusterIDConfigMapKey := client.ObjectKey{Name: "cluster-info", Namespace: "tigera-operator"} + err = r.client.Get(ctx, clusterIDConfigMapKey, &clusterIDConfigMap) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Waiting for ConfigMap %s/%s to be available", clusterIDConfigMapKey.Namespace, clusterIDConfigMapKey.Name), + nil, reqLogger) + return reconcile.Result{}, err + } + + clusterID, ok := clusterIDConfigMap.Data["cluster-id"] + if !ok { + err = fmt.Errorf("%s/%s ConfigMap does not contain expected 'cluster-id' key", + clusterIDConfigMap.Namespace, clusterIDConfigMap.Name) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("%v", err), err, reqLogger) + return reconcile.Result{}, err + } + + if clusterID == "" { + err = fmt.Errorf("%s/%s ConfigMap value for key 'cluster-id' must be non-empty", + clusterIDConfigMap.Namespace, clusterIDConfigMap.Name) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("%v", err), err, reqLogger) + return reconcile.Result{}, err + } + // Query any existing username and password for this Linseed instance. If one already exists, we'll simply // use that. Otherwise, generate a new one. - linseedUser := utils.LinseedUser(tenantID) + linseedUser := utils.LinseedUser(clusterID, tenantID) basicCreds := corev1.Secret{} var credentialSecrets []client.Object key := types.NamespacedName{Name: render.ElasticsearchLinseedUserSecret, Namespace: helper.TruthNamespace()} - if err := r.client.Get(ctx, key, &basicCreds); err != nil && !errors.IsNotFound(err) { + if err = r.client.Get(ctx, key, &basicCreds); err != nil && !errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Error getting Secret %s", key), err, reqLogger) return reconcile.Result{}, err } else if errors.IsNotFound(err) { @@ -203,13 +257,13 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques } else { hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) } - if err := hdler.CreateOrUpdateOrDelete(ctx, credentialComponent, r.status); err != nil { + if err = hdler.CreateOrUpdateOrDelete(ctx, credentialComponent, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating Linseed user secret", err, reqLogger) return reconcile.Result{}, err } // Now that the secret has been created, also provision the user in ES. 
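	// The user is keyed by both the cluster ID and the tenant ID (see
	// utils.LinseedUser), so Linseed credentials from different clusters that
	// share one Elasticsearch cannot collide.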
- if err := r.createLinseedLogin(ctx, tenantID, &basicCreds, reqLogger); err != nil { + if err = r.createLinseedLogin(ctx, clusterID, tenantID, &basicCreds, reqLogger); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create Linseed user in ES", err, reqLogger) return reconcile.Result{}, err } @@ -219,8 +273,8 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, nil } -func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string, secret *corev1.Secret, reqLogger logr.Logger) error { - esClient, err := utils.NewElasticClient(r.client, ctx, relasticsearch.ElasticEndpoint()) +func (r *UserController) createLinseedLogin(ctx context.Context, clusterID, tenantID string, secret *corev1.Secret, reqLogger logr.Logger) error { + esClient, err := r.esClientFn(r.client, ctx, relasticsearch.ElasticEndpoint()) if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to connect to Elasticsearch - failed to create the Elasticsearch client", err, reqLogger) return err @@ -236,7 +290,7 @@ func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string } // Create the user in ES. - user := utils.LinseedUser(tenantID) + user := utils.LinseedUser(clusterID, tenantID) user.Password = password if err = esClient.CreateUser(ctx, user); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create or update Elasticsearch user", err, reqLogger) @@ -245,3 +299,81 @@ func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string return nil } + +func (r *UsersCleanupController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + helper := utils.NewNamespaceHelper(true, render.ElasticsearchNamespace, request.Namespace) + reqLogger := logf.Log.WithName("controller_logstorage_users_cleanup").WithValues("Request.Namespace", + request.Namespace, "Request.Name", request.Name, "installNS", helper.InstallNamespace(), "truthNS", helper.TruthNamespace()) + reqLogger.Info("Reconciling LogStorage - Cleanup") + + // Wait for Elasticsearch to be installed and available. 
+	elasticsearch, err := utils.GetElasticsearch(ctx, r.client)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+	if elasticsearch == nil || elasticsearch.Status.Phase != esv1.ElasticsearchReadyPhase {
+		return reconcile.Result{}, nil
+	}
+
+	// Clean up any stale users that may have been left behind by a previous tenant.
+	if err := r.cleanupStaleUsers(ctx, reqLogger); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (r *UsersCleanupController) cleanupStaleUsers(ctx context.Context, logger logr.Logger) error {
+	esClient, err := r.esClientFn(r.client, ctx, relasticsearch.ElasticEndpoint())
+	if err != nil {
+		return fmt.Errorf("failed to connect to Elasticsearch - failed to create the Elasticsearch client: %w", err)
+	}
+
+	allESUsers, err := esClient.GetUsers(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to fetch users from Elasticsearch: %w", err)
+	}
+
+	tenants := operatorv1.TenantList{}
+	err = r.client.List(ctx, &tenants)
+	if err != nil {
+		return fmt.Errorf("failed to fetch TenantList: %w", err)
+	}
+
+	clusterIDConfigMap := corev1.ConfigMap{}
+	err = r.client.Get(ctx, client.ObjectKey{Name: "cluster-info", Namespace: "tigera-operator"}, &clusterIDConfigMap)
+	if err != nil {
+		return fmt.Errorf("failed to fetch cluster-info configmap: %w", err)
+	}
+
+	clusterID, ok := clusterIDConfigMap.Data["cluster-id"]
+	if !ok {
+		return fmt.Errorf("%s/%s ConfigMap does not contain expected 'cluster-id' key",
+			clusterIDConfigMap.Namespace, clusterIDConfigMap.Name)
+	}
+
+	if clusterID == "" {
+		return fmt.Errorf("%s/%s ConfigMap value for key 'cluster-id' must be non-empty",
+			clusterIDConfigMap.Namespace, clusterIDConfigMap.Name)
+	}
+
+	for _, user := range allESUsers {
+		if strings.HasPrefix(user.Username, fmt.Sprintf("%s_%s_", utils.ElasticsearchUserNameLinseed, clusterID)) {
+			active := false
+			for _, t := range tenants.Items {
+				if strings.Contains(user.Username, t.Spec.ID) {
+					active = true
+					break
+				}
+			}
+			if !active {
+				err = esClient.DeleteUser(ctx, &user)
+				if err != nil {
+					logger.Error(err, "Failed to delete stale Elasticsearch user")
+				}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/controller/logstorage/users/users_controller_suite_test.go b/pkg/controller/logstorage/users/users_controller_suite_test.go
new file mode 100644
index 0000000000..eadabe93aa
--- /dev/null
+++ b/pkg/controller/logstorage/users/users_controller_suite_test.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package users
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
"github.com/onsi/gomega" + + "github.com/onsi/ginkgo/reporters" + uzap "go.uber.org/zap" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestStatus(t *testing.T) { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(uzap.NewAtomicLevelAt(uzap.DebugLevel)))) + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("../../../report/ut/logstorage_users_controller_suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "pkg/controller/logstorage/users Suite", []Reporter{junitReporter}) +} diff --git a/pkg/controller/logstorage/users/users_controller_test.go b/pkg/controller/logstorage/users/users_controller_test.go new file mode 100644 index 0000000000..48fc513290 --- /dev/null +++ b/pkg/controller/logstorage/users/users_controller_test.go @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package users + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + operatorv1 "github.com/tigera/operator/api/v1" + tigeraelastic "github.com/tigera/operator/pkg/controller/logstorage/elastic" + "github.com/tigera/operator/pkg/controller/utils" + apiv1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var _ = Describe("LogStorage cleanup controller", func() { + var ( + cli client.Client + ) + + BeforeEach(func() { + scheme := runtime.NewScheme() + Expect(operatorv1.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(corev1.AddToScheme(scheme)).NotTo(HaveOccurred()) + cli = fake.NewClientBuilder().WithScheme(scheme).Build() + }) + + It("should clean up Elastic users for tenants that no longer exist", func() { + t := &testing.T{} + ctrl := UsersCleanupController{ + client: cli, + esClientFn: tigeraelastic.MockESCLICreator, + } + testESClient := tigeraelastic.MockESClient{} + ctx := context.WithValue(context.Background(), tigeraelastic.MockESClientKey("mockESClient"), &testESClient) + + clusterID1 := "cluster1" + clusterID2 := "cluster2" + + tenantID1 := "tenant1" + tenantID2 := "tenant2" + + staleUser := utils.LinseedUser(clusterID1, tenantID1) + + esTestUsers := []utils.User{ + *staleUser, + *utils.LinseedUser(clusterID1, tenantID2), + *utils.LinseedUser(clusterID2, tenantID1), + *utils.LinseedUser(clusterID2, tenantID2), + } + + testESClient.On("GetUsers", ctx).Return(esTestUsers, nil) + testESClient.On("DeleteUser", ctx, staleUser).Return(nil) + testESClient.On("DeleteRoles", ctx, staleUser.Roles).Return(nil) + + cluster1IDConfigMap := corev1.ConfigMap{ + ObjectMeta: apiv1.ObjectMeta{ + Name: "cluster-info", + Namespace: "tigera-operator", + }, + Data: map[string]string{ + "cluster-id": clusterID1, + }, + } + err := cli.Create(ctx, &cluster1IDConfigMap) + 
+		Expect(err).NotTo(HaveOccurred())
+
+		cluster1Tenant2 := operatorv1.Tenant{
+			ObjectMeta: apiv1.ObjectMeta{
+				Name: "default",
+			},
+			Spec: operatorv1.TenantSpec{
+				ID: tenantID2,
+			},
+		}
+
+		err = cli.Create(ctx, &cluster1Tenant2)
+		Expect(err).NotTo(HaveOccurred())
+
+		logr := logf.Log.WithName("cleanup-controller-test")
+		err = ctrl.cleanupStaleUsers(ctx, logr)
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(testESClient.AssertExpectations(t)).To(BeTrue())
+	})
+})
diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go
index a82a4dfd9e..33b406e310 100644
--- a/pkg/controller/manager/manager_controller.go
+++ b/pkg/controller/manager/manager_controller.go
@@ -360,7 +360,7 @@ func (r *ReconcileManager) Reconcile(ctx context.Context, request reconcile.Requ
 	}
 
 	// Get or create a certificate for the manager pod to use within the cluster.
-	dnsNames := dns.GetServiceDNSNames(render.ManagerServiceName, render.ManagerNamespace, r.clusterDomain)
+	dnsNames := dns.GetServiceDNSNames(render.ManagerServiceName, helper.InstallNamespace(), r.clusterDomain)
 	internalTrafficSecret, err := certificateManager.GetOrCreateKeyPair(
 		r.client,
 		render.ManagerInternalTLSSecretName,
diff --git a/pkg/controller/utils/elasticsearch.go b/pkg/controller/utils/elasticsearch.go
index ea850b6d3b..74440840ac 100644
--- a/pkg/controller/utils/elasticsearch.go
+++ b/pkg/controller/utils/elasticsearch.go
@@ -108,6 +108,8 @@ type ElasticsearchClientCreator func(client client.Client, ctx context.Context,
 type ElasticClient interface {
 	SetILMPolicies(context.Context, *operatorv1.LogStorage) error
 	CreateUser(context.Context, *User) error
+	DeleteUser(context.Context, *User) error
+	GetUsers(ctx context.Context) ([]User, error)
 }
 
 type esClient struct {
@@ -149,22 +151,8 @@ func NewElasticClient(client client.Client, ctx context.Context, elasticHTTPSEnd
 	return &esClient{client: esCli}, err
 }
 
-func formatName(name string, clusterName, tenantID string, management bool) string {
-	if tenantID != "" {
-		return fmt.Sprintf("%s_%s", name, tenantID)
-	}
-
-	var formattedName string
-
-	if management {
-		formattedName = string(name)
-	} else {
-		formattedName = fmt.Sprintf("%s-%s", string(name), clusterName)
-	}
-
-	// Add the secure suffix before returning.
-	formattedName = fmt.Sprintf("%s-secure", formattedName)
-	return formattedName
+func formatName(name, clusterID, tenantID string) string {
+	return fmt.Sprintf("%s_%s_%s", name, clusterID, tenantID)
 }
 
 func indexPattern(prefix, cluster, suffix, tenant string) string {
@@ -177,8 +165,8 @@ func indexPattern(prefix, cluster, suffix, tenant string) string {
 // Name for the linseed user in ES.
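// For example, with the new formatName scheme the Linseed user for cluster
// "abc123" and tenant "t1" is named "tigera-ee-linseed_abc123_t1".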
var ElasticsearchUserNameLinseed = "tigera-ee-linseed"
 
-func LinseedUser(tenant string) *User {
-	username := formatName(ElasticsearchUserNameLinseed, "cluster", tenant, true)
+func LinseedUser(clusterID, tenant string) *User {
+	username := formatName(ElasticsearchUserNameLinseed, clusterID, tenant)
 	return &User{
 		Username: username,
 		Roles: []Role{
@@ -243,9 +231,9 @@ type Application struct {
 }
 
 // CreateRoles wraps createRoles to make creating multiple rows slightly more convenient
-func (es *esClient) CreateRoles(roles ...Role) error {
+func (es *esClient) CreateRoles(ctx context.Context, roles ...Role) error {
 	for _, role := range roles {
-		if err := es.createRole(role); err != nil {
+		if err := es.createRole(ctx, role); err != nil {
 			return err
 		}
 	}
@@ -254,12 +242,12 @@
 }
 
 // createRole attempts to create (or updated) the given Elasticsearch role.
-func (es *esClient) createRole(role Role) error {
+func (es *esClient) createRole(ctx context.Context, role Role) error {
 	if role.Name == "" {
 		return fmt.Errorf("can't create a role with an empty name")
 	}
 
-	_, err := es.client.XPackSecurityPutRole(role.Name).Body(role.Definition).Do(context.TODO())
+	_, err := es.client.XPackSecurityPutRole(role.Name).Body(role.Definition).Do(ctx)
 	if err != nil {
 		return err
 	}
@@ -276,7 +264,7 @@ func (es *esClient) CreateUser(ctx context.Context, user *User) error {
 	}
 
 	if len(rolesToCreate) > 0 {
-		if err := es.CreateRoles(rolesToCreate...); err != nil {
+		if err := es.CreateRoles(ctx, rolesToCreate...); err != nil {
 			return err
 		}
 	}
@@ -295,6 +283,70 @@ func (es *esClient) CreateUser(ctx context.Context, user *User) error {
 	return nil
 }
 
+// DeleteRoles wraps deleteRoles to make deleting multiple roles slightly more convenient
+func (es *esClient) DeleteRoles(ctx context.Context, roles []Role) error {
+	for _, role := range roles {
+		if err := es.deleteRole(ctx, role); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// deleteRole attempts to delete the given Elasticsearch role.
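+// Elasticsearch does not cascade-delete a user's roles when the user is
+// removed, which is why DeleteUser below deletes the roles first.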
+func (es *esClient) deleteRole(ctx context.Context, role Role) error { + if role.Name == "" { + return fmt.Errorf("can't delete a role with an empty name") + } + + _, err := es.client.XPackSecurityDeleteRole(role.Name).Do(ctx) + if err != nil { + return err + } + + return nil +} + +func (es *esClient) DeleteUser(ctx context.Context, user *User) error { + if err := es.DeleteRoles(ctx, user.Roles); err != nil { + return err + } + + _, err := es.client.XPackSecurityDeleteUser(user.Username).Do(ctx) + if err != nil { + log.Error(err, "Error deleting user") + return err + } + + return nil +} + +// GetUsers returns all users stored in ES +func (es *esClient) GetUsers(ctx context.Context) ([]User, error) { + usersResponse, err := es.client.XPackSecurityGetUser("").Do(ctx) + if err != nil { + log.Error(err, "Error getting users") + return []User{}, err + } + + users := []User{} + for name, data := range *usersResponse { + user := User{ + Username: name, + } + for _, roleName := range data.Roles { + role := Role{ + Name: roleName, + } + user.Roles = append(user.Roles, role) + } + users = append(users, user) + } + + return users, nil +} + // SetILMPolicies creates ILM policies for each timeseries based index using the retention period and storage size in LogStorage func (es *esClient) SetILMPolicies(ctx context.Context, ls *operatorv1.LogStorage) error { policyList := es.listILMPolicies(ls) diff --git a/pkg/controller/utils/utils_test.go b/pkg/controller/utils/utils_test.go index 33d4d19c5a..7735c13134 100644 --- a/pkg/controller/utils/utils_test.go +++ b/pkg/controller/utils/utils_test.go @@ -272,6 +272,42 @@ var _ = Describe("PopulateK8sServiceEndPoint", func() { }) +var _ = Describe("Utils ElasticSearch test", func() { + var ( + userPrefix = "test-es-prefix" + clusterID = "clusterUUID" + tenantID = "tenantID" + ) + It("should generate usernames in expected format", func() { + generatedESUsername := formatName(userPrefix, clusterID, tenantID) + expectedESUsername := fmt.Sprintf("%s_%s_%s", userPrefix, clusterID, tenantID) + Expect(generatedESUsername).To(Equal(expectedESUsername)) + }) + + It("should generate Linseed ElasticUser with expected username and roles", func() { + linseedUser := LinseedUser(clusterID, tenantID) + expectedLinseedESName := fmt.Sprintf("%s_%s_%s", ElasticsearchUserNameLinseed, clusterID, tenantID) + + Expect(linseedUser.Username).To(Equal(expectedLinseedESName)) + Expect(len(linseedUser.Roles)).To(Equal(1)) + linseedRole := linseedUser.Roles[0] + Expect(linseedRole.Name).To(Equal(expectedLinseedESName)) + + expectedLinseedRoleDef := RoleDefinition{ + Cluster: []string{"monitor", "manage_index_templates", "manage_ilm"}, + Indices: []RoleIndex{ + { + // Include both single-index and multi-index name formats. 
Names:      []string{indexPattern("tigera_secure_ee_*", "*", ".*", tenantID), "calico_*"},
+					Privileges: []string{"create_index", "write", "manage", "read"},
+				},
+			},
+		}
+
+		Expect(*linseedRole.Definition).To(Equal(expectedLinseedRoleDef))
+	})
+})
+
 type fakeClient struct {
 	discovery discovery.DiscoveryInterface
 	kubernetes.Interface
}
diff --git a/pkg/render/common/networkpolicy/networkpolicy.go b/pkg/render/common/networkpolicy/networkpolicy.go
index fc3b53bee4..47b113ab21 100644
--- a/pkg/render/common/networkpolicy/networkpolicy.go
+++ b/pkg/render/common/networkpolicy/networkpolicy.go
@@ -88,7 +88,7 @@ func CreateEntityRule(namespace string, deploymentName string, ports ...uint16)
 func CreateSourceEntityRule(namespace string, deploymentName string) v3.EntityRule {
 	return v3.EntityRule{
 		Selector:          fmt.Sprintf("k8s-app == '%s'", deploymentName),
-		NamespaceSelector: fmt.Sprintf("name == '%s'", namespace),
+		NamespaceSelector: fmt.Sprintf("projectcalico.org/name == '%s'", namespace),
 	}
 }
diff --git a/pkg/render/fluentd.go b/pkg/render/fluentd.go
index 7146fe6ff7..3d2b19abb0 100644
--- a/pkg/render/fluentd.go
+++ b/pkg/render/fluentd.go
@@ -17,7 +17,6 @@ package render
 import (
 	"crypto/x509"
 	"fmt"
-	"strconv"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -147,11 +146,13 @@ type EksCloudwatchLogConfig struct {
 
 // FluentdConfiguration contains all the config information needed to render the component.
 type FluentdConfiguration struct {
-	LogCollector    *operatorv1.LogCollector
+	LogCollector   *operatorv1.LogCollector
+	S3Credential   *S3Credential
+	SplkCredential *SplunkCredential
+	Filters        *FluentdFilters
+	// ESClusterConfig is only populated when EKSConfig
+	// is also defined.
 	ESClusterConfig *relasticsearch.ClusterConfig
-	S3Credential    *S3Credential
-	SplkCredential  *SplunkCredential
-	Filters         *FluentdFilters
 	EKSConfig       *EksCloudwatchLogConfig
 	PullSecrets     []*corev1.Secret
 	Installation    *operatorv1.InstallationSpec
@@ -790,24 +791,7 @@ func (c *fluentdComponent) envvars() []corev1.EnvVar {
 		}
 	}
 
-	envs = append(envs,
-		corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_L7_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-
-		corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.FlowShards())},
-		corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_L7_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_SHARDS", Value:
strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "CA_CRT_PATH", Value: c.trustedBundlePath()}, - ) + envs = append(envs, corev1.EnvVar{Name: "CA_CRT_PATH", Value: c.trustedBundlePath()}) return envs } diff --git a/pkg/render/fluentd_test.go b/pkg/render/fluentd_test.go index b0f576da96..c83d8908f0 100644 --- a/pkg/render/fluentd_test.go +++ b/pkg/render/fluentd_test.go @@ -41,7 +41,6 @@ import ( ) var _ = Describe("Tigera Secure Fluentd rendering tests", func() { - var esConfigMap *relasticsearch.ClusterConfig var cfg *render.FluentdConfiguration expectedFluentdPolicyForUnmanaged := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/fluentd_unmanaged.json") @@ -51,7 +50,6 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { BeforeEach(func() { // Initialize a default instance to use. Each test can override this to its // desired configuration. - esConfigMap = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) scheme := runtime.NewScheme() Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) cli := fake.NewClientBuilder().WithScheme(scheme).Build() @@ -62,10 +60,9 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { metricsSecret, err := certificateManager.GetOrCreateKeyPair(cli, render.FluentdPrometheusTLSSecretName, common.OperatorNamespace(), []string{""}) Expect(err).NotTo(HaveOccurred()) cfg = &render.FluentdConfiguration{ - LogCollector: &operatorv1.LogCollector{}, - ESClusterConfig: esConfigMap, - ClusterDomain: dns.DefaultClusterDomain, - OSType: rmeta.OSTypeLinux, + LogCollector: &operatorv1.LogCollector{}, + ClusterDomain: dns.DefaultClusterDomain, + OSType: rmeta.OSTypeLinux, Installation: &operatorv1.InstallationSpec{ KubernetesProvider: operatorv1.ProviderNone, }, @@ -823,6 +820,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { GroupName: "dummy-eks-cluster-cloudwatch-log-group", FetchInterval: fetchInterval, } + cfg.ESClusterConfig = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) t := corev1.Toleration{ Key: "foo", Operator: corev1.TolerationOpEqual, diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index 59abd7294b..712f28d4f6 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -309,6 +309,7 @@ func (l *linseed) linseedDeployment() *appsv1.Deployment { // If a tenant was provided, set the expected tenant ID and enable the shared index backend. 
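		// The new multi-cluster forwarding endpoint below points Linseed at the
		// manager service in the tenant's own namespace.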
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_EXPECTED_TENANT_ID", Value: l.cfg.Tenant.Spec.ID}) envVars = append(envVars, corev1.EnvVar{Name: "BACKEND", Value: "elastic-single-index"}) + envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", l.cfg.Tenant.Namespace)}) } var initContainers []corev1.Container diff --git a/pkg/render/logstorage/linseed/linseed_test.go b/pkg/render/logstorage/linseed/linseed_test.go index 8b0ce8dfbd..24b65f0e8c 100644 --- a/pkg/render/logstorage/linseed/linseed_test.go +++ b/pkg/render/logstorage/linseed/linseed_test.go @@ -342,7 +342,7 @@ var _ = Describe("Linseed rendering tests", func() { Expect(cr.Rules).To(ContainElements(expectedRules)) }) - It("should render MANAGEMENT_OPERATOR_NS environment variable", func() { + It("should render multi-tenant environment variables", func() { cfg.ManagementCluster = true component := Linseed(cfg) Expect(component).NotTo(BeNil()) @@ -350,6 +350,9 @@ var _ = Describe("Linseed rendering tests", func() { d := rtest.GetResource(resources, DeploymentName, cfg.Namespace, appsv1.GroupName, "v1", "Deployment").(*appsv1.Deployment) envs := d.Spec.Template.Spec.Containers[0].Env Expect(envs).To(ContainElement(corev1.EnvVar{Name: "MANAGEMENT_OPERATOR_NS", Value: "tigera-operator"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "BACKEND", Value: "elastic-single-index"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "LINSEED_EXPECTED_TENANT_ID", Value: cfg.Tenant.Spec.ID})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", cfg.Tenant.Namespace)})) }) }) }) diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 1bf78c67f8..72b262ddb7 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -534,6 +534,8 @@ func (c *managerComponent) voltronContainer() corev1.Container { if c.cfg.Tenant != nil { env = append(env, corev1.EnvVar{Name: "VOLTRON_TENANT_NAMESPACE", Value: c.cfg.Tenant.Namespace}) + env = append(env, corev1.EnvVar{Name: "VOLTRON_TENANT_ID", Value: c.cfg.Tenant.Spec.ID}) + env = append(env, corev1.EnvVar{Name: "VOLTRON_LINSEED_ENDPOINT", Value: fmt.Sprintf("https://tigera-linseed.%s.svc", c.cfg.Tenant.Namespace)}) } return corev1.Container{ diff --git a/pkg/render/manager_test.go b/pkg/render/manager_test.go index 4025d3f9a5..7a889baaf7 100644 --- a/pkg/render/manager_test.go +++ b/pkg/render/manager_test.go @@ -965,6 +965,31 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { }, })) }) + + It("should render multi-tenant environment variables", func() { + tenantAResources := renderObjects(renderConfig{ + oidc: false, + managementCluster: nil, + installation: installation, + compliance: compliance, + complianceFeatureActive: true, + ns: tenantANamespace, + tenant: &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tenant", + Namespace: tenantANamespace, + }, + Spec: operatorv1.TenantSpec{ + ID: "tenant-a", + }, + }, + }) + d := rtest.GetResource(tenantAResources, "tigera-manager", tenantANamespace, appsv1.GroupName, "v1", "Deployment").(*appsv1.Deployment) + envs := d.Spec.Template.Spec.Containers[2].Env + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_NAMESPACE", Value: tenantANamespace})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_ID", Value: "tenant-a"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: 
"VOLTRON_LINSEED_ENDPOINT", Value: fmt.Sprintf("https://tigera-linseed.%s.svc", tenantANamespace)})) + }) }) }) @@ -977,6 +1002,7 @@ type renderConfig struct { openshift bool ns string bindingNamespaces []string + tenant *operatorv1.Tenant } func renderObjects(roc renderConfig) []client.Object { @@ -1042,6 +1068,7 @@ func renderObjects(roc renderConfig) []client.Object { Namespace: roc.ns, BindingNamespaces: roc.bindingNamespaces, TruthNamespace: common.OperatorNamespace(), + Tenant: roc.tenant, } component, err := render.Manager(cfg) Expect(err).To(BeNil(), "Expected Manager to create successfully %s", err) diff --git a/pkg/render/testutils/expected_policies/compliance-server.json b/pkg/render/testutils/expected_policies/compliance-server.json index f4fcdf9f77..2d209adae4 100644 --- a/pkg/render/testutils/expected_policies/compliance-server.json +++ b/pkg/render/testutils/expected_policies/compliance-server.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/compliance-server_ocp.json b/pkg/render/testutils/expected_policies/compliance-server_ocp.json index b0990c66a3..2eb145170e 100644 --- a/pkg/render/testutils/expected_policies/compliance-server_ocp.json +++ b/pkg/render/testutils/expected_policies/compliance-server_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/dex.json b/pkg/render/testutils/expected_policies/dex.json index c97eabcc74..d628e21af8 100644 --- a/pkg/render/testutils/expected_policies/dex.json +++ b/pkg/render/testutils/expected_policies/dex.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -50,7 +50,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-packetcapture'", - "namespaceSelector": "name == 'tigera-packetcapture'" + "namespaceSelector": "projectcalico.org/name == 'tigera-packetcapture'" } }, { diff --git a/pkg/render/testutils/expected_policies/dex_ocp.json b/pkg/render/testutils/expected_policies/dex_ocp.json index b56972487f..8f25c64d04 100644 --- a/pkg/render/testutils/expected_policies/dex_ocp.json +++ b/pkg/render/testutils/expected_policies/dex_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 
'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -50,7 +50,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-packetcapture'", - "namespaceSelector": "name == 'tigera-packetcapture'" + "namespaceSelector": "projectcalico.org/name == 'tigera-packetcapture'" } }, { diff --git a/pkg/render/testutils/expected_policies/elasticsearch.json b/pkg/render/testutils/expected_policies/elasticsearch.json index 9e066fb348..d790ad850f 100644 --- a/pkg/render/testutils/expected_policies/elasticsearch.json +++ b/pkg/render/testutils/expected_policies/elasticsearch.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure'", - "namespaceSelector": "name == 'tigera-kibana'" + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -45,7 +45,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-linseed'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -64,7 +64,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { diff --git a/pkg/render/testutils/expected_policies/elasticsearch_ocp.json b/pkg/render/testutils/expected_policies/elasticsearch_ocp.json index 0cea857134..29a8697368 100644 --- a/pkg/render/testutils/expected_policies/elasticsearch_ocp.json +++ b/pkg/render/testutils/expected_policies/elasticsearch_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure'", - "namespaceSelector": "name == 'tigera-kibana'" + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -45,7 +45,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-linseed'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { diff --git a/pkg/render/testutils/expected_policies/es-gateway.json b/pkg/render/testutils/expected_policies/es-gateway.json index 27716a6b7b..c5c4ec766a 100644 --- a/pkg/render/testutils/expected_policies/es-gateway.json +++ b/pkg/render/testutils/expected_policies/es-gateway.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { 
"selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { diff --git a/pkg/render/testutils/expected_policies/es-gateway_ocp.json b/pkg/render/testutils/expected_policies/es-gateway_ocp.json index 134c581081..48a124a2f4 100644 --- a/pkg/render/testutils/expected_policies/es-gateway_ocp.json +++ b/pkg/render/testutils/expected_policies/es-gateway_ocp.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": 
"projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { diff --git a/pkg/render/testutils/expected_policies/guardian.json b/pkg/render/testutils/expected_policies/guardian.json index 0f06002a74..472669255d 100644 --- a/pkg/render/testutils/expected_policies/guardian.json +++ b/pkg/render/testutils/expected_policies/guardian.json @@ -36,7 +36,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-benchmarker'" } }, @@ -49,7 +49,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-reporter'" } }, @@ -62,7 +62,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-snapshotter'" } }, @@ -75,7 +75,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-controller'" } }, diff --git a/pkg/render/testutils/expected_policies/guardian_ocp.json b/pkg/render/testutils/expected_policies/guardian_ocp.json index 40ff5f0b2a..33ee571e34 100644 --- a/pkg/render/testutils/expected_policies/guardian_ocp.json +++ b/pkg/render/testutils/expected_policies/guardian_ocp.json @@ -36,7 +36,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-benchmarker'" } }, @@ -49,7 +49,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-reporter'" } }, @@ -62,7 +62,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": 
"projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-snapshotter'" } }, @@ -75,7 +75,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-controller'" } }, diff --git a/pkg/render/testutils/expected_policies/kibana.json b/pkg/render/testutils/expected_policies/kibana.json index cd1592bd15..94d063d357 100644 --- a/pkg/render/testutils/expected_policies/kibana.json +++ b/pkg/render/testutils/expected_policies/kibana.json @@ -47,7 +47,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -65,7 +65,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } } ], diff --git a/pkg/render/testutils/expected_policies/kibana_ocp.json b/pkg/render/testutils/expected_policies/kibana_ocp.json index 20e2dcb17b..1244a594da 100644 --- a/pkg/render/testutils/expected_policies/kibana_ocp.json +++ b/pkg/render/testutils/expected_policies/kibana_ocp.json @@ -47,7 +47,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -65,7 +65,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } } ], diff --git a/pkg/render/testutils/expected_policies/linseed.json b/pkg/render/testutils/expected_policies/linseed.json index e659953415..b84e9f036c 100644 --- a/pkg/render/testutils/expected_policies/linseed.json +++ b/pkg/render/testutils/expected_policies/linseed.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 
'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } } ], diff --git a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json index 3e9aa082f8..5b2688c8d5 100644 --- a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - 
"namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } }, { diff --git a/pkg/render/testutils/expected_policies/linseed_ocp.json b/pkg/render/testutils/expected_policies/linseed_ocp.json index db18a18da7..94416ebcb7 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } } 
], diff --git a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json index 4118f48943..ee9e65fb24 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } }, { diff --git a/pkg/render/testutils/expected_policies/packetcapture.json b/pkg/render/testutils/expected_policies/packetcapture.json index 9bf25105f7..3794a3c929 100644 --- a/pkg/render/testutils/expected_policies/packetcapture.json +++ b/pkg/render/testutils/expected_policies/packetcapture.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git 
a/pkg/render/testutils/expected_policies/packetcapture_managed.json b/pkg/render/testutils/expected_policies/packetcapture_managed.json index f8914268e0..44fda24f0e 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_managed.json +++ b/pkg/render/testutils/expected_policies/packetcapture_managed.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-guardian'", - "namespaceSelector": "name == 'tigera-guardian'" + "namespaceSelector": "projectcalico.org/name == 'tigera-guardian'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json b/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json index cc6d8d1eb5..bee37d027c 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json +++ b/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-guardian'", - "namespaceSelector": "name == 'tigera-guardian'" + "namespaceSelector": "projectcalico.org/name == 'tigera-guardian'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/packetcapture_ocp.json b/pkg/render/testutils/expected_policies/packetcapture_ocp.json index 4f97d8680b..2232364ab6 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_ocp.json +++ b/pkg/render/testutils/expected_policies/packetcapture_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/test/mainline_test.go b/test/mainline_test.go index 46579209c0..5cc9b17355 100644 --- a/test/mainline_test.go +++ b/test/mainline_test.go @@ -21,6 +21,9 @@ import ( "strings" "time" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/render" + rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -347,6 +350,33 @@ func installResourceCRD(c client.Client, mgr manager.Manager, ctx context.Contex err := c.Create(context.Background(), instance) Expect(err).NotTo(HaveOccurred()) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: common.CalicoNamespace, Namespace: common.CalicoNamespace}, + } + err = c.Create(context.Background(), ns) + if err != nil && !kerror.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + + labels := make(map[string]string) + labels["foo"] = "bar" + ds := &apps.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName, Image: render.CalicoNodeObjectName}}, + }, + }, + }, + } + err = c.Create(context.Background(), ds) + Expect(err).NotTo(HaveOccurred()) + By("Running the operator") return RunOperator(mgr, ctx) }
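-- 
Reviewer notes (editorial, not part of the diff above):

The linseed.go and manager.go hunks share one pattern: when a Tenant is present, each container gains tenant-scoped environment variables whose service endpoints embed the tenant's namespace, so components in one tenant's namespace only address that tenant's services. A minimal sketch of the pattern follows, using a hypothetical Config stand-in rather than the patch's actual render configuration structs:

package example

import (
	"fmt"

	operatorv1 "github.com/tigera/operator/api/v1"
	corev1 "k8s.io/api/core/v1"
)

// Config is a pared-down, hypothetical stand-in for the render
// configuration structs touched by this patch; only Tenant matters here.
type Config struct {
	Tenant *operatorv1.Tenant
}

// tenantEnv mirrors the conditional-append pattern used by the linseed and
// voltron containers: no Tenant, no extra variables.
func tenantEnv(cfg Config) []corev1.EnvVar {
	if cfg.Tenant == nil {
		return nil
	}
	return []corev1.EnvVar{
		{Name: "LINSEED_EXPECTED_TENANT_ID", Value: cfg.Tenant.Spec.ID},
		// The hostname embeds the tenant's namespace, keeping
		// cross-tenant traffic off this endpoint.
		{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT",
			Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", cfg.Tenant.Namespace)},
	}
}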
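The expected-policy churn across the JSON fixtures is mechanical: every namespaceSelector moves from the bare "name == '<ns>'" form to "projectcalico.org/name == '<ns>'", the label Calico stamps on each namespace (matching the one-line change to pkg/render/common/networkpolicy/networkpolicy.go in the diffstat). A sketch of the kind of helper that would produce these selector strings; the function name is illustrative, not the patch's actual API:

package example

import "fmt"

// namespaceSelector renders a Calico namespaceSelector expression for the
// given namespace. Matching on the projectcalico.org/name label rather than
// the bare name key follows how Calico itself labels namespaces.
func namespaceSelector(namespace string) string {
	return fmt.Sprintf("projectcalico.org/name == '%s'", namespace)
}

For example, namespaceSelector("tigera-manager") yields exactly the string the updated fixtures expect, which is why every fixture hunk is a one-line substitution.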
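Finally, the mainline_test.go hunk seeds the test cluster with a calico-node DaemonSet (and its namespace) so the BPF-upgrade reconcile logic added earlier in this patch has a DaemonSet to inspect. A rough sketch, under the assumption that it runs inside the existing Ginkgo suite with the same client, of how a test could confirm the fixture before starting the operator; the helper name is hypothetical:

package example

import (
	"context"

	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// assertNodeDaemonSetSeeded is a hypothetical check that the fixture created
// by installResourceCRD exists; the namespace and name literals mirror the
// common.CalicoNamespace and common.NodeDaemonSetName constants used above.
func assertNodeDaemonSetSeeded(c client.Client) {
	ds := &appsv1.DaemonSet{}
	key := types.NamespacedName{Namespace: "calico-system", Name: "calico-node"}
	Expect(c.Get(context.Background(), key, ds)).NotTo(HaveOccurred())
	Expect(ds.Spec.Template.Spec.Containers).NotTo(BeEmpty())
}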