From e65ad907720ff317eeda20e375bb2c47ea9a272e Mon Sep 17 00:00:00 2001 From: Steven Boland Date: Tue, 10 Oct 2023 09:34:14 +0100 Subject: [PATCH] BPF Upgrade without disruption --- pkg/common/common.go | 11 + pkg/controller/installation/bpf.go | 170 ++++++ pkg/controller/installation/bpf_test.go | 553 ++++++++++++++++++ .../installation/core_controller.go | 37 +- .../installation/core_controller_test.go | 77 ++- .../logcollector/logcollector_controller.go | 21 +- .../elastic/elastic_controller_test.go | 42 +- pkg/controller/logstorage/elastic/mock.go | 74 +++ .../logstorage/users/user_controller.go | 150 ++++- .../users/users_controller_suite_test.go | 34 ++ .../logstorage/users/users_controller_test.go | 106 ++++ pkg/controller/manager/manager_controller.go | 2 +- pkg/controller/migration/convert/bpf_test.go | 20 +- .../migration/convert/felix_vars_test.go | 16 +- pkg/controller/migration/convert/k8s.go | 7 +- pkg/controller/utils/elasticsearch.go | 98 +++- pkg/controller/utils/utils_test.go | 36 ++ .../common/networkpolicy/networkpolicy.go | 2 +- pkg/render/fluentd.go | 30 +- pkg/render/fluentd_test.go | 10 +- pkg/render/logstorage/linseed/linseed.go | 1 + pkg/render/logstorage/linseed/linseed_test.go | 5 +- pkg/render/manager.go | 2 + pkg/render/manager_test.go | 27 + pkg/render/node.go | 33 +- pkg/render/node_test.go | 2 +- .../expected_policies/compliance-server.json | 2 +- .../compliance-server_ocp.json | 2 +- .../testutils/expected_policies/dex.json | 8 +- .../testutils/expected_policies/dex_ocp.json | 8 +- .../expected_policies/elasticsearch.json | 8 +- .../expected_policies/elasticsearch_ocp.json | 8 +- .../expected_policies/es-gateway.json | 20 +- .../expected_policies/es-gateway_ocp.json | 20 +- .../testutils/expected_policies/guardian.json | 8 +- .../expected_policies/guardian_ocp.json | 8 +- .../testutils/expected_policies/kibana.json | 4 +- .../expected_policies/kibana_ocp.json | 4 +- .../testutils/expected_policies/linseed.json | 22 +- .../linseed_dpi_enabled.json | 22 +- .../expected_policies/linseed_ocp.json | 22 +- .../linseed_ocp_dpi_enabled.json | 22 +- .../expected_policies/packetcapture.json | 2 +- .../packetcapture_managed.json | 2 +- .../packetcapture_managed_ocp.json | 2 +- .../expected_policies/packetcapture_ocp.json | 2 +- test/mainline_test.go | 30 + 47 files changed, 1531 insertions(+), 261 deletions(-) create mode 100644 pkg/controller/installation/bpf.go create mode 100644 pkg/controller/installation/bpf_test.go create mode 100644 pkg/controller/logstorage/elastic/mock.go create mode 100644 pkg/controller/logstorage/users/users_controller_suite_test.go create mode 100644 pkg/controller/logstorage/users/users_controller_test.go diff --git a/pkg/common/common.go b/pkg/common/common.go index d8fea3ef14..d9cceb52a7 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -14,6 +14,10 @@ package common +import ( + operatorv1 "github.com/tigera/operator/api/v1" +) + const ( CalicoNamespace = "calico-system" TyphaDeploymentName = "calico-typha" @@ -41,4 +45,11 @@ const ( // references with any that already exist on the object rather than replace the owner references. Further // the controller in the owner reference will not be set. 
MultipleOwnersLabel = "operator.tigera.io/multipleOwners"
+	BPFVolumeName = "bpffs"
 )
+
+func BpfDataplaneEnabled(installation *operatorv1.InstallationSpec) bool {
+	return installation.CalicoNetwork != nil &&
+		installation.CalicoNetwork.LinuxDataplane != nil &&
+		*installation.CalicoNetwork.LinuxDataplane == operatorv1.LinuxDataplaneBPF
+}
diff --git a/pkg/controller/installation/bpf.go b/pkg/controller/installation/bpf.go
new file mode 100644
index 0000000000..bea5a53d95
--- /dev/null
+++ b/pkg/controller/installation/bpf.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2019-2023 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package installation
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/go-logr/logr"
+	operator "github.com/tigera/operator/api/v1"
+	crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1"
+	"github.com/tigera/operator/pkg/common"
+	"github.com/tigera/operator/pkg/controller/migration/convert"
+	"github.com/tigera/operator/pkg/render"
+	appsv1 "k8s.io/api/apps/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func bpfUpgradeDaemonsetEnvVar(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) error {
+
+	// Query the calico-node DaemonSet: if the FELIX_BPFENABLED env var is set and FelixConfiguration's
+	// bpfEnabled is unset, mirror the env var value into FelixConfiguration.
+	patchFelixConfig, err := processDaemonsetEnvVar(r, ctx, ds, fc, reqLogger)
+	if err != nil {
+		return err
+	}
+
+	// Attempt to patch Felix Config now.
+	return patchFelixConfigurationImpl(r, ctx, install, fc, reqLogger, patchFelixConfig)
+}
+
+func bpfUpgradeWithoutDisruption(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) error {
+
+	var patchFelixConfig bool
+
+	// Determine whether the installation's dataplane mode is BPF or iptables.
+	installBpfEnabled := common.BpfDataplaneEnabled(&install.Spec)
+
+	// Handle the edge case where the user has externally patched FelixConfiguration's bpfEnabled;
+	// the resulting conflict prevents the operator from switching the dataplane.
+	if fc.Spec.BPFEnabled != nil {
+
+		fcBPFEnabled := *fc.Spec.BPFEnabled
+		if installBpfEnabled != fcBPFEnabled {
+
+			// If the operator's annotation matches the desired value but the spec does not, the spec
+			// must have been modified externally since the operator last patched it.
+			if fc.Annotations[render.BpfOperatorAnnotation] == strconv.FormatBool(installBpfEnabled) {
+				text := fmt.Sprintf("An error occurred while attempting to patch Felix Config bpfEnabled: '%s' as Felix Config has been modified externally to '%s'",
+					strconv.FormatBool(installBpfEnabled),
+					strconv.FormatBool(fcBPFEnabled))
+				err := errors.New(text)
+				return err
+			}
+		}
+	}
+
+	if !installBpfEnabled {
+		// iptables dataplane:
+		// Only patch Felix Config once to prevent log spamming.
+		if fc.Spec.BPFEnabled == nil || *fc.Spec.BPFEnabled {
+			patchFelixConfig = true
+		}
+	} else {
+		// BPF dataplane:
+		// Check the daemonset rollout is complete before patching.
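+		// A nil bpfEnabled means the operator has never patched FelixConfiguration,
+		// and false means the dataplane is still iptables. In either case the flag
+		// is only flipped once every calico-node pod has mounted the bpffs volume
+		// and the DaemonSet rollout has fully converged, so no node is switched
+		// over mid-rollout.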
+		if fc.Spec.BPFEnabled == nil || !(*fc.Spec.BPFEnabled) {
+			patchFelixConfig = checkDaemonsetRolloutComplete(ds)
+		}
+	}
+
+	// Attempt to patch Felix Config now.
+	return patchFelixConfigurationImpl(r, ctx, install, fc, reqLogger, patchFelixConfig)
+}
+
+func processDaemonsetEnvVar(r *ReconcileInstallation, ctx context.Context, ds *appsv1.DaemonSet, fc *crdv1.FelixConfiguration, reqLogger logr.Logger) (bool, error) {
+
+	dsBpfEnabledEnvVar, err := convert.GetEnv(ctx, r.client, ds.Spec.Template.Spec, convert.ComponentCalicoNode, common.NodeDaemonSetName, "FELIX_BPFENABLED")
+	if err != nil {
+		reqLogger.Error(err, "An error occurred when querying the calico-node environment variable FELIX_BPFENABLED")
+		return false, err
+	}
+
+	dsBpfEnabledStatus := false
+	if dsBpfEnabledEnvVar != nil {
+		dsBpfEnabledStatus, err = strconv.ParseBool(*dsBpfEnabledEnvVar)
+		if err != nil {
+			reqLogger.Error(err, "An error occurred when parsing the calico-node environment variable FELIX_BPFENABLED")
+			return false, err
+		}
+	}
+
+	return dsBpfEnabledStatus && fc.Spec.BPFEnabled == nil, nil
+}
+
+func checkDaemonsetRolloutComplete(ds *appsv1.DaemonSet) bool {
+
+	if ds.Spec.Template.Spec.Volumes == nil {
+		return false
+	}
+
+	for _, volume := range ds.Spec.Template.Spec.Volumes {
+		if volume.Name == common.BPFVolumeName {
+			return ds.Status.CurrentNumberScheduled == ds.Status.UpdatedNumberScheduled && ds.Status.CurrentNumberScheduled == ds.Status.NumberAvailable
+		}
+	}
+
+	return false
+}
+
+func patchFelixConfigurationImpl(r *ReconcileInstallation, ctx context.Context, install *operator.Installation, fc *crdv1.FelixConfiguration, reqLogger logr.Logger, patchFelixConfig bool) error {
+
+	if patchFelixConfig {
+
+		installBpfEnabled := common.BpfDataplaneEnabled(&install.Spec)
+		if err := patchFelixConfiguration(r, ctx, fc, reqLogger, installBpfEnabled); err != nil {
+			return err
+		}
+
+		// The patch succeeded; log the value now in effect.
+		if fc.Spec.BPFEnabled != nil {
+			msg := fmt.Sprintf("Successfully patched Felix Config bpfEnabled='%s'", strconv.FormatBool(*fc.Spec.BPFEnabled))
+			reqLogger.Info(msg)
+		}
+	}
+
+	return nil
+}
+
+func patchFelixConfiguration(r *ReconcileInstallation, ctx context.Context, fc *crdv1.FelixConfiguration, reqLogger logr.Logger, patchBpfEnabled bool) error {
+
+	// Obtain the original FelixConfig to patch.
+	patchFrom := client.MergeFrom(fc.DeepCopy())
+	patchText := strconv.FormatBool(patchBpfEnabled)
+
+	// Record operator ownership of this field with an annotation ("managed fields" light).
+	var fcAnnotations map[string]string
+	if fc.Annotations == nil {
+		fcAnnotations = make(map[string]string)
+	} else {
+		fcAnnotations = fc.Annotations
+	}
+	fcAnnotations[render.BpfOperatorAnnotation] = patchText
+	fc.SetAnnotations(fcAnnotations)
+
+	fc.Spec.BPFEnabled = &patchBpfEnabled
+	if err := r.client.Patch(ctx, fc, patchFrom); err != nil {
+		msg := fmt.Sprintf("An error occurred when attempting to patch Felix configuration BPF Enabled: '%s'", patchText)
+		reqLogger.Error(err, msg)
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/controller/installation/bpf_test.go b/pkg/controller/installation/bpf_test.go
new file mode 100644
index 0000000000..eca021585c
--- /dev/null
+++ b/pkg/controller/installation/bpf_test.go
@@ -0,0 +1,553 @@
+// Copyright (c) 2019-2023 Tigera, Inc. All rights reserved.
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package installation + +import ( + "context" + "time" + + crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/types" + + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/render" + "github.com/tigera/operator/pkg/render/monitor" + "github.com/tigera/operator/pkg/tls" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + operator "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/apis" + "github.com/tigera/operator/pkg/controller/status" + "github.com/tigera/operator/pkg/controller/utils" + "github.com/tigera/operator/test" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + schedv1 "k8s.io/api/scheduling/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var _ = Describe("Testing BPF Upgrade without disruption during core-controller installation", func() { + + var c client.Client + var cs *kfake.Clientset + var ctx context.Context + var cancel context.CancelFunc + var r ReconcileInstallation + var scheme *runtime.Scheme + var mockStatus *status.MockStatus + var reqLogger logr.Logger + + ready := &utils.ReadyFlag{} + ready.MarkAsReady() + + Context("Reconcile tests BPF Upgrade without disruption", func() { + + BeforeEach(func() { + // The schema contains all objects that should be known to the fake client when the test runs. + scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(schedv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(operator.SchemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(storagev1.SchemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + + // Create a client that will have a crud interface of k8s objects. + c = fake.NewClientBuilder().WithScheme(scheme).Build() + ctx, cancel = context.WithCancel(context.Background()) + + // Create a fake clientset for the autoscaler. 
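+			// The typha autoscaler below consumes this clientset via the node
+			// informer and the calico-typha Deployment listwatch.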
+			var replicas int32 = 1
+			objs := []runtime.Object{
+				&corev1.Node{
+					TypeMeta: metav1.TypeMeta{},
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "node1",
+						Labels: map[string]string{"kubernetes.io/os": "linux"},
+					},
+					Spec: corev1.NodeSpec{},
+				},
+				&appsv1.Deployment{
+					TypeMeta:   metav1.TypeMeta{},
+					ObjectMeta: metav1.ObjectMeta{Name: "calico-typha", Namespace: "calico-system"},
+					Spec:       appsv1.DeploymentSpec{Replicas: &replicas},
+				},
+			}
+			cs = kfake.NewSimpleClientset(objs...)
+
+			// Create an object we can use throughout the test to do the core reconcile loops.
+			mockStatus = &status.MockStatus{}
+			mockStatus.On("AddDaemonsets", mock.Anything).Return()
+			mockStatus.On("AddDeployments", mock.Anything).Return()
+			mockStatus.On("IsAvailable").Return(true)
+			mockStatus.On("OnCRFound").Return()
+			mockStatus.On("ClearDegraded")
+			mockStatus.On("AddCertificateSigningRequests", mock.Anything)
+			mockStatus.On("ReadyToMonitor")
+			mockStatus.On("SetMetaData", mock.Anything).Return()
+
+			// Create the indexer and informer used by the typhaAutoscaler
+			nlw := test.NewNodeListWatch(cs)
+			nodeIndexInformer := cache.NewSharedIndexInformer(nlw, &corev1.Node{}, 0, cache.Indexers{})
+
+			go nodeIndexInformer.Run(ctx.Done())
+			// Wait for the informer cache to sync before running the tests.
+			for !nodeIndexInformer.HasSynced() {
+				time.Sleep(100 * time.Millisecond)
+			}
+
+			// As the parameters in the client change, we expect the outcomes of the reconcile loops to change.
+			r = ReconcileInstallation{
+				config:               nil, // there is no fake for config
+				client:               c,
+				scheme:               scheme,
+				autoDetectedProvider: operator.ProviderNone,
+				status:               mockStatus,
+				typhaAutoscaler:      newTyphaAutoscaler(cs, nodeIndexInformer, test.NewTyphaListWatch(cs), mockStatus),
+				namespaceMigration:   &fakeNamespaceMigration{},
+				amazonCRDExists:      true,
+				enterpriseCRDsExist:  true,
+				migrationChecked:     true,
+				tierWatchReady:       ready,
+			}
+
+			r.typhaAutoscaler.start(ctx)
+			certificateManager, err := certificatemanager.Create(c, nil, "", common.OperatorNamespace(), certificatemanager.AllowCACreation())
+			Expect(err).NotTo(HaveOccurred())
+			prometheusTLS, err := certificateManager.GetOrCreateKeyPair(c, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace(), []string{monitor.PrometheusTLSSecretName})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred())
+			Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred())
+
+			// Create the logger
+			reqLogger = log.WithValues("Request.Namespace", "test-namespace", "Request.Name", "test-name")
+		})
+
+		AfterEach(func() {
+			cancel()
+		})
+
+		It("should query calico-node DS and if FELIX_BPFENABLED true and FelixConfig unset then set BPF enabled true", func() {
+
+			// Arrange.
+			// The FELIX_BPFENABLED env var is only set in the BPF dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneBPF)
+
+			// Create calico-node Daemonset with FELIX_BPFENABLED env var set.
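+			// FELIX_BPFENABLED is the pre-operator way of turning on BPF; the
+			// controller is expected to mirror it into FelixConfiguration so the
+			// operator owns the setting from then on.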
+			envVars := []corev1.EnvVar{{Name: "FELIX_BPFENABLED", Value: "true"}}
+			container := corev1.Container{
+				Name: render.CalicoNodeObjectName,
+				Env:  envVars,
+			}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{container},
+						},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeDaemonsetEnvVar(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			bpfEnabled := true
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).NotTo(BeNil())
+			Expect(fc.Spec.BPFEnabled).To(Equal(&bpfEnabled))
+			Expect(fc.Annotations[render.BpfOperatorAnnotation]).To(Equal("true"))
+		})
+
+		It("should query calico-node DS in BPF dataplane and if DS status not set then verify rollout not complete", func() {
+
+			// Arrange.
+			// Upgrade cluster from the iptables to the BPF dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneBPF)
+
+			// Create a calico-node Daemonset with no bpffs volume or status, so the rollout cannot be complete.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).To(BeNil())
+		})
+
+		It("should query calico-node DS in BPF dataplane and if DS status rolling out then verify rollout not complete", func() {
+
+			// Arrange.
+			// Upgrade cluster from the iptables to the BPF dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneBPF)
+
+			// Create a calico-node Daemonset whose status shows an update still rolling out, so the rollout is not complete.
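+			// NumberAvailable deliberately lags CurrentNumberScheduled below, so
+			// checkDaemonsetRolloutComplete must report the rollout as unfinished
+			// and leave FelixConfiguration unpatched.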
+			volume := corev1.Volume{
+				Name: "bpffs",
+			}
+			dsAnnotations := make(map[string]string)
+			dsAnnotations[render.BpfOperatorAnnotation] = "true"
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        common.NodeDaemonSetName,
+					Namespace:   common.CalicoNamespace,
+					Annotations: dsAnnotations,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec: corev1.PodSpec{
+							Volumes:    []corev1.Volume{volume},
+							Containers: []corev1.Container{container},
+						},
+					},
+				},
+				Status: appsv1.DaemonSetStatus{
+					CurrentNumberScheduled: 2,
+					UpdatedNumberScheduled: 2,
+					NumberAvailable:        1,
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).To(BeNil())
+		})
+
+		It("should query calico-node DS in BPF dataplane and if DS status rollout complete then patch Felix Config", func() {
+
+			// Arrange.
+			// Upgrade cluster from the iptables to the BPF dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneBPF)
+
+			// Create calico-node Daemonset status updated to indicate the rollout is complete.
+			volume := corev1.Volume{
+				Name: "bpffs",
+			}
+			dsAnnotations := make(map[string]string)
+			dsAnnotations[render.BpfOperatorAnnotation] = "true"
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        common.NodeDaemonSetName,
+					Namespace:   common.CalicoNamespace,
+					Annotations: dsAnnotations,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec: corev1.PodSpec{
+							Volumes:    []corev1.Volume{volume},
+							Containers: []corev1.Container{container},
+						},
+					},
+				},
+				Status: appsv1.DaemonSetStatus{
+					CurrentNumberScheduled: 4,
+					UpdatedNumberScheduled: 4,
+					NumberAvailable:        4,
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			bpfEnabled := false
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			fc.Spec.BPFEnabled = &bpfEnabled
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			bpfEnabled = true
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).NotTo(BeNil())
+			Expect(fc.Spec.BPFEnabled).To(Equal(&bpfEnabled))
+			Expect(fc.Annotations[render.BpfOperatorAnnotation]).To(Equal("true"))
+		})
+
+		It("should query calico-node DS in BPF dataplane and error Felix Config when bpfEnabled false and an opposing FC annotation set", func() {
+
+			// Arrange.
+			// Install requests the BPF dataplane, but FelixConfiguration has been modified externally.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneBPF)
+
+			// Create a minimal calico-node Daemonset.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config externally patched to bpfEnabled false, with the operator annotation still "true".
+			bpfEnabled := false
+			fcAnnotations := make(map[string]string)
+			fcAnnotations[render.BpfOperatorAnnotation] = "true"
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default", Annotations: fcAnnotations}}
+
+			fc.Spec.BPFEnabled = &bpfEnabled
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+
+			// Assert.
+			Expect(err).Should(HaveOccurred())
+		})
+
+		It("should query calico-node DS in Iptables dataplane and patch Felix Config when bpfEnabled empty", func() {
+
+			// Arrange.
+			// Upgrade cluster from the BPF to the iptables dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneIptables)
+
+			// Create a minimal calico-node Daemonset.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			bpfEnabled := false
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).NotTo(BeNil())
+			Expect(fc.Spec.BPFEnabled).To(Equal(&bpfEnabled))
+			Expect(fc.Annotations[render.BpfOperatorAnnotation]).To(Equal("false"))
+		})
+
+		It("should query calico-node DS in Iptables dataplane and not patch Felix Config when bpfEnabled already false", func() {
+
+			// Arrange.
+			// Upgrade cluster from the BPF to the iptables dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneIptables)
+
+			// Create a minimal calico-node Daemonset.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			bpfEnabled := false
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			fc.Spec.BPFEnabled = &bpfEnabled
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).NotTo(BeNil())
+			Expect(fc.Spec.BPFEnabled).To(Equal(&bpfEnabled))
+		})
+
+		It("should query calico-node DS in Iptables dataplane and patch Felix Config when bpfEnabled true and no FC annotations set", func() {
+
+			// Arrange.
+			// Upgrade cluster from the BPF to the iptables dataplane.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneIptables)
+
+			// Create a minimal calico-node Daemonset.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config
+			bpfEnabled := true
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default"}}
+			fc.Spec.BPFEnabled = &bpfEnabled
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+			Expect(err).ShouldNot(HaveOccurred())
+
+			// Assert.
+			bpfEnabled = false
+			err = c.Get(ctx, types.NamespacedName{Name: "default"}, fc)
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(fc.Spec.BPFEnabled).NotTo(BeNil())
+			Expect(fc.Spec.BPFEnabled).To(Equal(&bpfEnabled))
+		})
+
+		It("should query calico-node DS in Iptables dataplane and error Felix Config when bpfEnabled true and an opposing FC annotation set", func() {
+
+			// Arrange.
+			// Install requests the iptables dataplane, but FelixConfiguration has been modified externally.
+			cr := createInstallation(c, ctx, operator.LinuxDataplaneIptables)
+
+			// Create a minimal calico-node Daemonset.
+			container := corev1.Container{Name: render.CalicoNodeObjectName}
+			ds := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      common.NodeDaemonSetName,
+					Namespace: common.CalicoNamespace,
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec:       corev1.PodSpec{Containers: []corev1.Container{container}},
+					},
+				},
+			}
+			Expect(c.Create(ctx, ds)).NotTo(HaveOccurred())
+
+			// Create felix config externally patched to bpfEnabled true, with the operator annotation still "false".
+			bpfEnabled := true
+			fcAnnotations := make(map[string]string)
+			fcAnnotations[render.BpfOperatorAnnotation] = "false"
+			fc := &crdv1.FelixConfiguration{ObjectMeta: metav1.ObjectMeta{Name: "default", Annotations: fcAnnotations}}
+			fc.Spec.BPFEnabled = &bpfEnabled
+			Expect(c.Create(ctx, fc)).NotTo(HaveOccurred())
+
+			// Act.
+			err := bpfUpgradeWithoutDisruption(&r, ctx, cr, ds, fc, reqLogger)
+
+			// Assert.
+			Expect(err).Should(HaveOccurred())
+		})
+	})
+
+})
+
+func createInstallation(c client.Client, ctx context.Context, dp operator.LinuxDataplaneOption) *operator.Installation {
+	ca, err := tls.MakeCA("test")
+	Expect(err).NotTo(HaveOccurred())
+	cert, _, _ := ca.Config.GetPEMBytes() // create a valid pem block
+
+	// We start off with a 'standard' installation, with nothing special except setting the dataplane.
+	cr := &operator.Installation{
+		ObjectMeta: metav1.ObjectMeta{Name: "default"},
+		Spec: operator.InstallationSpec{
+			Variant:               operator.Calico,
+			Registry:              "some.registry.org/",
+			CertificateManagement: &operator.CertificateManagement{CACert: cert},
+			CalicoNetwork: &operator.CalicoNetworkSpec{
+				LinuxDataplane: &dp,
+			},
+		},
+	}
+
+	Expect(c.Create(ctx, cr)).NotTo(HaveOccurred())
+	return cr
+}
diff --git a/pkg/controller/installation/core_controller.go b/pkg/controller/installation/core_controller.go
index 1f9309361d..e089ca7c37 100644
--- a/pkg/controller/installation/core_controller.go
+++ b/pkg/controller/installation/core_controller.go
@@ -1418,6 +1418,23 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile
 		return reconcile.Result{}, err
 	}
 
+	// BPF Upgrade env var initial check:
+	calicoNodeDaemonset := appsv1.DaemonSet{}
+
+	err = r.client.Get(ctx, types.NamespacedName{Namespace: common.CalicoNamespace, Name: common.NodeDaemonSetName}, &calicoNodeDaemonset)
+	if err != nil {
+		reqLogger.Error(err, "An error occurred when querying the calico-node daemonset")
+		return reconcile.Result{}, err
+	}
+
+	// Mirror any legacy FELIX_BPFENABLED env var setting into FelixConfiguration before rendering.
+	err = bpfUpgradeDaemonsetEnvVar(r, ctx, instance, &calicoNodeDaemonset, felixConfiguration, reqLogger)
+	if err != nil {
+		reqLogger.Error(err, "An error occurred when attempting to process BPF Upgrade Calico-Node DS env var")
+		return reconcile.Result{}, err
+	}
+
 	// Create a component handler to create or update the rendered components.
 	handler := utils.NewComponentHandler(log, r.client, r.scheme, instance)
 	for _, component := range components {
@@ -1429,8 +1446,8 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile
 
 	// TODO: We handle too many components in this controller at the moment. Once we are done consolidating,
 	// we can have the CreateOrUpdate logic handle this for us.
-	r.status.AddDaemonsets([]types.NamespacedName{{Name: "calico-node", Namespace: "calico-system"}})
-	r.status.AddDeployments([]types.NamespacedName{{Name: "calico-kube-controllers", Namespace: "calico-system"}})
+	r.status.AddDaemonsets([]types.NamespacedName{{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}})
+	r.status.AddDeployments([]types.NamespacedName{{Name: common.KubeControllersDeploymentName, Namespace: common.CalicoNamespace}})
 	certificateManager.AddToStatusManager(r.status, render.CSRLabelCalicoSystem)
 
 	// Run this after we have rendered our components so the new (operator created)
@@ -1499,6 +1516,22 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile
 	// Tell the status manager that we're ready to monitor the resources we've told it about and receive statuses.
 	r.status.ReadyToMonitor()
 
+	// BPF Upgrade without disruption:
+	// First get the calico-node daemonset.
+	calicoNodeDaemonset = appsv1.DaemonSet{}
+	err = r.client.Get(ctx, types.NamespacedName{Namespace: common.CalicoNamespace, Name: common.NodeDaemonSetName}, &calicoNodeDaemonset)
+	if err != nil {
+		reqLogger.Error(err, "An error occurred when querying the calico-node daemonset")
+		return reconcile.Result{}, err
+	}
+
+	// Now drive the dataplane switch from the current state of the installation and the rollout.
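+	// This runs after ReadyToMonitor so the decision is based on the freshly
+	// fetched DaemonSet; returning an error requeues the reconcile, and the
+	// switch is retried once the rollout has progressed further.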
+ err = bpfUpgradeWithoutDisruption(r, ctx, instance, &calicoNodeDaemonset, felixConfiguration, reqLogger) + if err != nil { + reqLogger.Error(err, "An error occurred when attempting to process BPF Upgrade without disruption") + return reconcile.Result{}, err + } + // We can clear the degraded state now since as far as we know everything is in order. r.status.ClearDegraded() diff --git a/pkg/controller/installation/core_controller_test.go b/pkg/controller/installation/core_controller_test.go index adb0fd858a..e8b50cc6b8 100644 --- a/pkg/controller/installation/core_controller_test.go +++ b/pkg/controller/installation/core_controller_test.go @@ -82,6 +82,16 @@ func (f *fakeNamespaceMigration) CleanupMigration(ctx context.Context) error { } var _ = Describe("Testing core-controller installation", func() { + + var c client.Client + var cs *kfake.Clientset + var ctx context.Context + var cancel context.CancelFunc + var r ReconcileInstallation + var cr *operator.Installation + var scheme *runtime.Scheme + var mockStatus *status.MockStatus + table.DescribeTable("checking rendering configuration", func(detectedProvider, configuredProvider operator.Provider, expectedErr error) { configuredInstallation := &operator.Installation{} @@ -317,13 +327,6 @@ var _ = Describe("Testing core-controller installation", func() { ready.MarkAsReady() Context("image reconciliation tests", func() { - var c client.Client - var cs *kfake.Clientset - var ctx context.Context - var cancel context.CancelFunc - var r ReconcileInstallation - var scheme *runtime.Scheme - var mockStatus *status.MockStatus BeforeEach(func() { // The schema contains all objects that should be known to the fake client when the test runs. @@ -423,6 +426,20 @@ var _ = Describe("Testing core-controller installation", func() { }, }, })).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() @@ -694,15 +711,6 @@ var _ = Describe("Testing core-controller installation", func() { ) Context("management cluster exists", func() { - var c client.Client - var cs *kfake.Clientset - var ctx context.Context - var cancel context.CancelFunc - var r ReconcileInstallation - var cr *operator.Installation - - var scheme *runtime.Scheme - var mockStatus *status.MockStatus var expectedDNSNames []string var certificateManager certificatemanager.CertificateManager @@ -816,6 +824,20 @@ var _ = Describe("Testing core-controller installation", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() @@ -885,15 +907,6 @@ var _ = Describe("Testing core-controller installation", func() { }) 
Context("Reconcile tests", func() { - var c client.Client - var cs *kfake.Clientset - var ctx context.Context - var cancel context.CancelFunc - var r ReconcileInstallation - var scheme *runtime.Scheme - var mockStatus *status.MockStatus - - var cr *operator.Installation BeforeEach(func() { // The schema contains all objects that should be known to the fake client when the test runs. @@ -999,6 +1012,20 @@ var _ = Describe("Testing core-controller installation", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) Expect(c.Create(ctx, &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}})).NotTo(HaveOccurred()) + + Expect(c.Create( + ctx, + &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName}}, + }, + }, + }, + })).NotTo(HaveOccurred()) }) AfterEach(func() { cancel() diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go index 501abe2c04..93761f7c7e 100644 --- a/pkg/controller/logcollector/logcollector_controller.go +++ b/pkg/controller/logcollector/logcollector_controller.go @@ -351,16 +351,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, err } - esClusterConfig, err := utils.GetElasticsearchClusterConfig(ctx, r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) - return reconcile.Result{}, nil - } - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) - return reconcile.Result{}, err - } - pullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving pull secrets", err, reqLogger) @@ -541,10 +531,21 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } var eksConfig *render.EksCloudwatchLogConfig + var esClusterConfig *relasticsearch.ClusterConfig if installation.KubernetesProvider == operatorv1.ProviderEKS { log.Info("Managed kubernetes EKS found, getting necessary credentials and config") if instance.Spec.AdditionalSources != nil { if instance.Spec.AdditionalSources.EksCloudwatchLog != nil { + esClusterConfig, err = utils.GetElasticsearchClusterConfig(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) + return reconcile.Result{}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) + return reconcile.Result{}, err + } + eksConfig, err = getEksCloudwatchLogConfig(r.client, instance.Spec.AdditionalSources.EksCloudwatchLog.FetchInterval, instance.Spec.AdditionalSources.EksCloudwatchLog.Region, diff --git a/pkg/controller/logstorage/elastic/elastic_controller_test.go b/pkg/controller/logstorage/elastic/elastic_controller_test.go index ba057e5e3f..3e15abeec2 100644 --- 
a/pkg/controller/logstorage/elastic/elastic_controller_test.go +++ b/pkg/controller/logstorage/elastic/elastic_controller_test.go @@ -198,7 +198,7 @@ var _ = Describe("LogStorage controller", func() { Context("LogStorage is nil", func() { // Run the reconciler, expect no error. - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) _, err = r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -215,7 +215,7 @@ var _ = Describe("LogStorage controller", func() { }) It("returns an error if the LogStorage resource exists and is not marked for deletion", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceValidationError, "LogStorage validation failed - cluster type is managed but LogStorage CR still exists", mock.Anything, mock.Anything).Return() result, err := r.Reconcile(ctx, reconcile.Request{}) @@ -232,7 +232,7 @@ var _ = Describe("LogStorage controller", func() { mockStatus.On("ReadyToMonitor") // mockStatus.On("SetMetaData", mock.Anything).Return() - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) ls := &operatorv1.LogStorage{} @@ -348,7 +348,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -463,7 +463,7 @@ var _ = Describe("LogStorage controller", func() { ObjectMeta: metav1.ObjectMeta{Namespace: render.ElasticsearchNamespace, Name: render.OIDCUsersESSecretName}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -591,7 +591,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, 
operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) // Elasticsearch and kibana secrets are good. @@ -635,7 +635,7 @@ var _ = Describe("LogStorage controller", func() { Expect(err).ShouldNot(HaveOccurred()) Expect(cli.Update(ctx, kbSecret)).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -719,7 +719,7 @@ var _ = Describe("LogStorage controller", func() { Expect(cli.Create(ctx, rec)).ShouldNot(HaveOccurred()) } - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for curator secrets to become available", mock.Anything, mock.Anything).Return() @@ -759,7 +759,7 @@ var _ = Describe("LogStorage controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) mockStatus.On("SetDegraded", operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", mock.Anything, mock.Anything).Return() @@ -830,7 +830,7 @@ var _ = Describe("LogStorage controller", func() { }) It("should use default images", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -933,7 +933,7 @@ var _ = Describe("LogStorage controller", func() { }, }, })).ToNot(HaveOccurred()) - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -1056,7 +1056,7 @@ var _ = Describe("LogStorage controller", func() { // mockStatus.On("SetMetaData", mock.Anything).Return() var err error - r, err = NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err = NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) 
Expect(err).ShouldNot(HaveOccurred()) }) @@ -1065,7 +1065,7 @@ var _ = Describe("LogStorage controller", func() { }) It("should wait if tier watch is not ready", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, &utils.ReadyFlag{}) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, &utils.ReadyFlag{}) Expect(err).ShouldNot(HaveOccurred()) utils.ExpectWaitForTierWatch(ctx, r, mockStatus) }) @@ -1122,7 +1122,7 @@ var _ = Describe("LogStorage controller", func() { }) It("deletes Elasticsearch and Kibana then removes the finalizers on the LogStorage CR", func() { - r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, mockESCLICreator, dns.DefaultClusterDomain, readyFlag) + r, err := NewReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, MockESCLICreator, dns.DefaultClusterDomain, readyFlag) Expect(err).ShouldNot(HaveOccurred()) esAdminUserSecret := &corev1.Secret{ @@ -1313,17 +1313,3 @@ func CreateLogStorage(client client.Client, ls *operatorv1.LogStorage) { // Create the LogStorage object. ExpectWithOffset(1, client.Create(context.Background(), ls)).ShouldNot(HaveOccurred()) } - -type mockESClient struct{} - -func mockESCLICreator(client client.Client, ctx context.Context, elasticHTTPSEndpoint string) (utils.ElasticClient, error) { - return &mockESClient{}, nil -} - -func (m *mockESClient) CreateUser(ctx context.Context, user *utils.User) error { - return fmt.Errorf("CreateUser not implemented in mock client") -} - -func (*mockESClient) SetILMPolicies(ctx context.Context, ls *operatorv1.LogStorage) error { - return nil -} diff --git a/pkg/controller/logstorage/elastic/mock.go b/pkg/controller/logstorage/elastic/mock.go new file mode 100644 index 0000000000..0c2829f1a4 --- /dev/null +++ b/pkg/controller/logstorage/elastic/mock.go @@ -0,0 +1,74 @@ +// Copyright (c) 2020-2023 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastic + +import ( + "context" + "fmt" + + "github.com/stretchr/testify/mock" + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/controller/utils" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type MockESClientKey string + +type MockESClient struct { + mock.Mock +} + +func MockESCLICreator(_ client.Client, ctx context.Context, _ string) (utils.ElasticClient, error) { + if esCli := ctx.Value(MockESClientKey("mockESClient")); esCli != nil { + return esCli.(*MockESClient), nil + } + return &MockESClient{}, nil +} + +func (m *MockESClient) CreateUser(_ context.Context, _ *utils.User) error { + return fmt.Errorf("CreateUser not implemented in mock client") +} + +func (m *MockESClient) SetILMPolicies(_ context.Context, _ *operatorv1.LogStorage) error { + return nil +} + +func (m *MockESClient) DeleteRoles(ctx context.Context, roles []utils.Role) error { + var ret mock.Arguments + for _, role := range roles { + ret = m.MethodCalled("deleteRole", ctx, role) + if ret.Error(0) != nil { + return ret.Error(0) + } + } + + ret = m.Called(ctx, roles) + return ret.Error(0) +} + +func (m *MockESClient) DeleteUser(ctx context.Context, u *utils.User) error { + ret := m.MethodCalled("DeleteRoles", ctx, u.Roles) + if ret.Error(0) != nil { + return ret.Error(0) + } + + ret = m.Called(ctx, u) + return ret.Error(0) +} + +func (m *MockESClient) GetUsers(ctx context.Context) ([]utils.User, error) { + ret := m.Called(ctx) + return ret.Get(0).([]utils.User), ret.Error(1) +} diff --git a/pkg/controller/logstorage/users/user_controller.go b/pkg/controller/logstorage/users/user_controller.go index 10405bdafa..3949a23c19 100644 --- a/pkg/controller/logstorage/users/user_controller.go +++ b/pkg/controller/logstorage/users/user_controller.go @@ -17,13 +17,13 @@ package users import ( "context" "fmt" + "strings" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + "github.com/go-logr/logr" operatorv1 "github.com/tigera/operator/api/v1" corev1 "k8s.io/api/core/v1" - "github.com/go-logr/logr" - "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" @@ -49,9 +49,16 @@ type UserController struct { client client.Client scheme *runtime.Scheme status status.StatusManager + esClientFn utils.ElasticsearchClientCreator multiTenant bool } +type UsersCleanupController struct { + client client.Client + scheme *runtime.Scheme + esClientFn utils.ElasticsearchClientCreator +} + func Add(mgr manager.Manager, opts options.AddOptions) error { if !opts.EnterpriseCRDExists { return nil @@ -68,6 +75,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { scheme: mgr.GetScheme(), multiTenant: opts.MultiTenant, status: status.New(mgr.GetClient(), "log-storage-users", opts.KubernetesVersion), + esClientFn: utils.NewElasticClient, } r.status.Run(opts.ShutdownContext) @@ -115,6 +123,28 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("log-storage-user-controller failed to create periodic reconcile watch: %w", err) } + // Now that the users controller is set up, we can also set up the controller that cleans up stale users + usersCleanupReconciler := &UsersCleanupController{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + esClientFn: utils.NewElasticClient, + } + + // Create a controller using the reconciler and register it with the manager to receive reconcile calls. 
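+	// The cleanup controller deletes Elasticsearch users whose embedded tenant ID no
+	// longer matches any Tenant in the cluster; both Tenant events and the periodic
+	// reconcile below trigger it.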
+ usersCleanupController, err := controller.New("log-storage-cleanup-controller", mgr, controller.Options{Reconciler: usersCleanupReconciler}) + if err != nil { + return err + } + + if err = usersCleanupController.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil { + return fmt.Errorf("log-storage-cleanup-controller failed to watch Tenant resource: %w", err) + } + + err = utils.AddPeriodicReconcile(usersCleanupController, utils.PeriodicReconcileTime, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("log-storage-cleanup-controller failed to create periodic reconcile watch: %w", err) + } + return nil } @@ -172,13 +202,37 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, nil } + clusterIDConfigMap := corev1.ConfigMap{} + clusterIDConfigMapKey := client.ObjectKey{Name: "cluster-info", Namespace: "tigera-operator"} + err = r.client.Get(ctx, clusterIDConfigMapKey, &clusterIDConfigMap) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Waiting for ConfigMap %s/%s to be available", clusterIDConfigMapKey.Namespace, clusterIDConfigMapKey.Name), + nil, reqLogger) + return reconcile.Result{}, err + } + + clusterID, ok := clusterIDConfigMap.Data["cluster-id"] + if !ok { + err = fmt.Errorf("%s/%s ConfigMap does not contain expected 'cluster-id' key", + clusterIDConfigMap.Namespace, clusterIDConfigMap.Name) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("%v", err), err, reqLogger) + return reconcile.Result{}, err + } + + if clusterID == "" { + err = fmt.Errorf("%s/%s ConfigMap value for key 'cluster-id' must be non-empty", + clusterIDConfigMap.Namespace, clusterIDConfigMap.Name) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("%v", err), err, reqLogger) + return reconcile.Result{}, err + } + // Query any existing username and password for this Linseed instance. If one already exists, we'll simply // use that. Otherwise, generate a new one. - linseedUser := utils.LinseedUser(tenantID) + linseedUser := utils.LinseedUser(clusterID, tenantID) basicCreds := corev1.Secret{} var credentialSecrets []client.Object key := types.NamespacedName{Name: render.ElasticsearchLinseedUserSecret, Namespace: helper.TruthNamespace()} - if err := r.client.Get(ctx, key, &basicCreds); err != nil && !errors.IsNotFound(err) { + if err = r.client.Get(ctx, key, &basicCreds); err != nil && !errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Error getting Secret %s", key), err, reqLogger) return reconcile.Result{}, err } else if errors.IsNotFound(err) { @@ -203,13 +257,13 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques } else { hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) } - if err := hdler.CreateOrUpdateOrDelete(ctx, credentialComponent, r.status); err != nil { + if err = hdler.CreateOrUpdateOrDelete(ctx, credentialComponent, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating Linseed user secret", err, reqLogger) return reconcile.Result{}, err } // Now that the secret has been created, also provision the user in ES. 
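+	// Linseed usernames now embed the cluster ID as well as the tenant ID, which is
+	// what lets the cleanup controller distinguish this cluster's users from another
+	// cluster's in a shared Elasticsearch.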
- if err := r.createLinseedLogin(ctx, tenantID, &basicCreds, reqLogger); err != nil { + if err = r.createLinseedLogin(ctx, clusterID, tenantID, &basicCreds, reqLogger); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create Linseed user in ES", err, reqLogger) return reconcile.Result{}, err } @@ -219,8 +273,8 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, nil } -func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string, secret *corev1.Secret, reqLogger logr.Logger) error { - esClient, err := utils.NewElasticClient(r.client, ctx, relasticsearch.ElasticEndpoint()) +func (r *UserController) createLinseedLogin(ctx context.Context, clusterID, tenantID string, secret *corev1.Secret, reqLogger logr.Logger) error { + esClient, err := r.esClientFn(r.client, ctx, relasticsearch.ElasticEndpoint()) if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to connect to Elasticsearch - failed to create the Elasticsearch client", err, reqLogger) return err @@ -236,7 +290,7 @@ func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string } // Create the user in ES. - user := utils.LinseedUser(tenantID) + user := utils.LinseedUser(clusterID, tenantID) user.Password = password if err = esClient.CreateUser(ctx, user); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create or update Elasticsearch user", err, reqLogger) @@ -245,3 +299,81 @@ func (r *UserController) createLinseedLogin(ctx context.Context, tenantID string return nil } + +func (r *UsersCleanupController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + helper := utils.NewNamespaceHelper(true, render.ElasticsearchNamespace, request.Namespace) + reqLogger := logf.Log.WithName("controller_logstorage_users_cleanup").WithValues("Request.Namespace", + request.Namespace, "Request.Name", request.Name, "installNS", helper.InstallNamespace(), "truthNS", helper.TruthNamespace()) + reqLogger.Info("Reconciling LogStorage - Cleanup") + + // Wait for Elasticsearch to be installed and available. 
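+	// If Elasticsearch is not ready yet, return without error; the periodic
+	// reconcile registered at controller setup will retry the cleanup later.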
+	elasticsearch, err := utils.GetElasticsearch(ctx, r.client)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+	if elasticsearch == nil || elasticsearch.Status.Phase != esv1.ElasticsearchReadyPhase {
+		return reconcile.Result{}, nil
+	}
+
+	// Clean up any stale users that may have been left behind by a previous tenant
+	if err := r.cleanupStaleUsers(ctx, reqLogger); err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (r *UsersCleanupController) cleanupStaleUsers(ctx context.Context, logger logr.Logger) error {
+	esClient, err := r.esClientFn(r.client, ctx, relasticsearch.ElasticEndpoint())
+	if err != nil {
+		return fmt.Errorf("failed to connect to Elasticsearch - failed to create the Elasticsearch client: %w", err)
+	}
+
+	allESUsers, err := esClient.GetUsers(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to fetch users from Elasticsearch: %w", err)
+	}
+
+	tenants := operatorv1.TenantList{}
+	err = r.client.List(ctx, &tenants)
+	if err != nil {
+		return fmt.Errorf("failed to fetch TenantList: %w", err)
+	}
+
+	clusterIDConfigMap := corev1.ConfigMap{}
+	err = r.client.Get(ctx, client.ObjectKey{Name: "cluster-info", Namespace: "tigera-operator"}, &clusterIDConfigMap)
+	if err != nil {
+		return fmt.Errorf("failed to fetch cluster-info ConfigMap: %w", err)
+	}
+
+	clusterID, ok := clusterIDConfigMap.Data["cluster-id"]
+	if !ok {
+		return fmt.Errorf("%s/%s ConfigMap does not contain expected 'cluster-id' key",
+			clusterIDConfigMap.Namespace, clusterIDConfigMap.Name)
+	}
+
+	if clusterID == "" {
+		return fmt.Errorf("%s/%s ConfigMap value for key 'cluster-id' must be non-empty",
+			clusterIDConfigMap.Namespace, clusterIDConfigMap.Name)
+	}
+
+	for _, user := range allESUsers {
+		if strings.HasPrefix(user.Username, fmt.Sprintf("%s_%s_", utils.ElasticsearchUserNameLinseed, clusterID)) {
+			active := false
+			for _, t := range tenants.Items {
+				if strings.Contains(user.Username, t.Spec.ID) {
+					active = true
+					break
+				}
+			}
+			if !active {
+				err = esClient.DeleteUser(ctx, &user)
+				if err != nil {
+					logger.Error(err, "Failed to delete elastic user")
+				}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/controller/logstorage/users/users_controller_suite_test.go b/pkg/controller/logstorage/users/users_controller_suite_test.go
new file mode 100644
index 0000000000..eadabe93aa
--- /dev/null
+++ b/pkg/controller/logstorage/users/users_controller_suite_test.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package users
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + + "github.com/onsi/ginkgo/reporters" + uzap "go.uber.org/zap" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestStatus(t *testing.T) { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(uzap.NewAtomicLevelAt(uzap.DebugLevel)))) + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("../../../report/ut/logstorage_users_controller_suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "pkg/controller/logstorage/users Suite", []Reporter{junitReporter}) +} diff --git a/pkg/controller/logstorage/users/users_controller_test.go b/pkg/controller/logstorage/users/users_controller_test.go new file mode 100644 index 0000000000..48fc513290 --- /dev/null +++ b/pkg/controller/logstorage/users/users_controller_test.go @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package users + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + operatorv1 "github.com/tigera/operator/api/v1" + tigeraelastic "github.com/tigera/operator/pkg/controller/logstorage/elastic" + "github.com/tigera/operator/pkg/controller/utils" + apiv1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var _ = Describe("LogStorage cleanup controller", func() { + var ( + cli client.Client + ) + + BeforeEach(func() { + scheme := runtime.NewScheme() + Expect(operatorv1.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(corev1.AddToScheme(scheme)).NotTo(HaveOccurred()) + cli = fake.NewClientBuilder().WithScheme(scheme).Build() + }) + + It("should clean up Elastic users for tenants that no longer exist", func() { + t := &testing.T{} + ctrl := UsersCleanupController{ + client: cli, + esClientFn: tigeraelastic.MockESCLICreator, + } + testESClient := tigeraelastic.MockESClient{} + ctx := context.WithValue(context.Background(), tigeraelastic.MockESClientKey("mockESClient"), &testESClient) + + clusterID1 := "cluster1" + clusterID2 := "cluster2" + + tenantID1 := "tenant1" + tenantID2 := "tenant2" + + staleUser := utils.LinseedUser(clusterID1, tenantID1) + + esTestUsers := []utils.User{ + *staleUser, + *utils.LinseedUser(clusterID1, tenantID2), + *utils.LinseedUser(clusterID2, tenantID1), + *utils.LinseedUser(clusterID2, tenantID2), + } + + testESClient.On("GetUsers", ctx).Return(esTestUsers, nil) + testESClient.On("DeleteUser", ctx, staleUser).Return(nil) + testESClient.On("DeleteRoles", ctx, staleUser.Roles).Return(nil) + + cluster1IDConfigMap := corev1.ConfigMap{ + ObjectMeta: apiv1.ObjectMeta{ + Name: "cluster-info", + Namespace: "tigera-operator", + }, + Data: map[string]string{ + "cluster-id": clusterID1, + }, + } + err := cli.Create(ctx, &cluster1IDConfigMap) + 
Expect(err).NotTo(HaveOccurred())
+
+		cluster1Tenant2 := operatorv1.Tenant{
+			ObjectMeta: apiv1.ObjectMeta{
+				Name: "default",
+			},
+			Spec: operatorv1.TenantSpec{
+				ID: tenantID2,
+			},
+		}
+
+		err = cli.Create(ctx, &cluster1Tenant2)
+		Expect(err).NotTo(HaveOccurred())
+
+		logr := logf.Log.WithName("cleanup-controller-test")
+		err = ctrl.cleanupStaleUsers(ctx, logr)
+		Expect(err).NotTo(HaveOccurred())
+
+		Expect(testESClient.AssertExpectations(t)).To(BeTrue())
+	})
+})
diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go
index a82a4dfd9e..33b406e310 100644
--- a/pkg/controller/manager/manager_controller.go
+++ b/pkg/controller/manager/manager_controller.go
@@ -360,7 +360,7 @@ func (r *ReconcileManager) Reconcile(ctx context.Context, request reconcile.Requ
 	}
 
 	// Get or create a certificate for the manager pod to use within the cluster.
-	dnsNames := dns.GetServiceDNSNames(render.ManagerServiceName, render.ManagerNamespace, r.clusterDomain)
+	dnsNames := dns.GetServiceDNSNames(render.ManagerServiceName, helper.InstallNamespace(), r.clusterDomain)
 	internalTrafficSecret, err := certificateManager.GetOrCreateKeyPair(
 		r.client,
 		render.ManagerInternalTLSSecretName,
diff --git a/pkg/controller/migration/convert/bpf_test.go b/pkg/controller/migration/convert/bpf_test.go
index 89451d6148..283ec635f1 100644
--- a/pkg/controller/migration/convert/bpf_test.go
+++ b/pkg/controller/migration/convert/bpf_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2022 Tigera, Inc. All rights reserved.
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
 
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -110,7 +110,7 @@ var _ = Describe("convert bpf config", func() {
 		Expect(data).To(Equal(cmData))
 	})
 
-	It("converts bpfenabled env var set to true", func() {
+	It("converts dataplane to BPF given bpfenabled env var set to true", func() {
 		comps.client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(endPointCM, f).Build()
 		comps.node.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{{
 			Name:  "FELIX_BPFENABLED",
 			Value: "true",
 		}}
@@ -125,7 +125,7 @@ var _ = Describe("convert bpf config", func() {
 		Expect(data).To(Equal(cmData))
 	})
 
-	It("converts bpfenabled env var set to false", func() {
+	It("converts dataplane to empty given bpfenabled env var set to false", func() {
 		comps.client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(endPointCM, f).Build()
 		comps.node.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{{
 			Name: "FELIX_BPFENABLED",
@@ -142,6 +142,20 @@
 		Expect(data).To(BeNil())
 	})
 
+	It("converts dataplane to empty given bpfenabled env var not set", func() {
+		comps.client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(endPointCM, f).Build()
+		comps.node.Spec.Template.Spec.Containers[0].Env = nil
+		err := handleBPF(&comps, i)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(i.Spec.CalicoNetwork).To(BeNil())
+		err, data := getEndPointCM(&comps, "kube-system")
+		Expect(err).ToNot(HaveOccurred())
+		Expect(data).To(Equal(cmData))
+		err, data = getEndPointCM(&comps, common.OperatorNamespace())
+		Expect(err).To(HaveOccurred())
+		Expect(data).To(BeNil())
+	})
+
 	It("returns error when configmap is not present", func() {
 		bpfEnabled := true
 		f.Spec.BPFEnabled = &bpfEnabled
diff --git a/pkg/controller/migration/convert/felix_vars_test.go b/pkg/controller/migration/convert/felix_vars_test.go
index 68643a9eb3..a1f9a7ed2a 100644
--- 
a/pkg/controller/migration/convert/felix_vars_test.go
+++ b/pkg/controller/migration/convert/felix_vars_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2022 Tigera, Inc. All rights reserved.
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
 
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -137,20 +137,6 @@ var _ = Describe("felix env parser", func() {
 		c.client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(emptyFelixConfig()).Build()
 	})
 
-	It("sets a boolean", func() {
-		c.node.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{{
-			Name:  "FELIX_BPFENABLED",
-			Value: "true",
-		}}
-
-		Expect(handleFelixVars(&c)).ToNot(HaveOccurred())
-
-		f := crdv1.FelixConfiguration{}
-		Expect(c.client.Get(ctx, types.NamespacedName{Name: "default"}, &f)).ToNot(HaveOccurred())
-		Expect(f.Spec.BPFEnabled).ToNot(BeNil())
-		Expect(*f.Spec.BPFEnabled).To(BeTrue())
-	})
-
 	It("handles 'none' failsafe inbound ports", func() {
 		c.node.Spec.Template.Spec.Containers[0].Env = []v1.EnvVar{{
 			Name: "FELIX_FAILSAFEINBOUNDHOSTPORTS",
diff --git a/pkg/controller/migration/convert/k8s.go b/pkg/controller/migration/convert/k8s.go
index 36ae0bdf65..5ad56275b1 100644
--- a/pkg/controller/migration/convert/k8s.go
+++ b/pkg/controller/migration/convert/k8s.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2022 Tigera, Inc. All rights reserved.
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
 
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -167,6 +167,11 @@ func (r *CheckedDaemonSet) ignoreEnv(container, key string) {
 	r.checkedVars[container].envVars[key] = true
 }
 
+// GetEnv exposes getEnv as a public API so that it can be re-used externally.
+func GetEnv(ctx context.Context, client client.Client, pts v1.PodSpec, component, container, key string) (*string, error) {
+	return getEnv(ctx, client, pts, component, container, key)
+}
+
 // getEnv gets the value of an environment variable.
 func getEnv(ctx context.Context, client client.Client, pts v1.PodSpec, component, container, key string) (*string, error) {
 	c := getContainer(pts, container)
diff --git a/pkg/controller/utils/elasticsearch.go b/pkg/controller/utils/elasticsearch.go
index ea850b6d3b..74440840ac 100644
--- a/pkg/controller/utils/elasticsearch.go
+++ b/pkg/controller/utils/elasticsearch.go
@@ -108,6 +108,8 @@ type ElasticsearchClientCreator func(client client.Client, ctx context.Context,
 type ElasticClient interface {
 	SetILMPolicies(context.Context, *operatorv1.LogStorage) error
 	CreateUser(context.Context, *User) error
+	DeleteUser(context.Context, *User) error
+	GetUsers(ctx context.Context) ([]User, error)
 }
 
 type esClient struct {
@@ -149,22 +151,8 @@ func NewElasticClient(client client.Client, ctx context.Context, elasticHTTPSEnd
 	return &esClient{client: esCli}, err
 }
 
-func formatName(name string, clusterName, tenantID string, management bool) string {
-	if tenantID != "" {
-		return fmt.Sprintf("%s_%s", name, tenantID)
-	}
-
-	var formattedName string
-
-	if management {
-		formattedName = string(name)
-	} else {
-		formattedName = fmt.Sprintf("%s-%s", string(name), clusterName)
-	}
-
-	// Add the secure suffix before returning. 
-	formattedName = fmt.Sprintf("%s-secure", formattedName)
-	return formattedName
+func formatName(name, clusterID, tenantID string) string {
+	return fmt.Sprintf("%s_%s_%s", name, clusterID, tenantID)
 }
 
 func indexPattern(prefix, cluster, suffix, tenant string) string {
@@ -177,8 +165,8 @@ func indexPattern(prefix, cluster, suffix, tenant string) string {
 // Name for the linseed user in ES.
 var ElasticsearchUserNameLinseed = "tigera-ee-linseed"
 
-func LinseedUser(tenant string) *User {
-	username := formatName(ElasticsearchUserNameLinseed, "cluster", tenant, true)
+func LinseedUser(clusterID, tenant string) *User {
+	username := formatName(ElasticsearchUserNameLinseed, clusterID, tenant)
 	return &User{
 		Username: username,
 		Roles: []Role{
@@ -243,9 +231,9 @@ type Application struct {
 }
 
 // CreateRoles wraps createRoles to make creating multiple rows slightly more convenient
-func (es *esClient) CreateRoles(roles ...Role) error {
 	for _, role := range roles {
-		if err := es.createRole(role); err != nil {
+		if err := es.createRole(ctx, role); err != nil {
 			return err
 		}
 	}
@@ -254,12 +242,12 @@ func (es *esClient) CreateRoles(roles ...Role) error {
 }
 
 // createRole attempts to create (or updated) the given Elasticsearch role.
-func (es *esClient) createRole(role Role) error {
+func (es *esClient) createRole(ctx context.Context, role Role) error {
 	if role.Name == "" {
 		return fmt.Errorf("can't create a role with an empty name")
 	}
 
-	_, err := es.client.XPackSecurityPutRole(role.Name).Body(role.Definition).Do(context.TODO())
+	_, err := es.client.XPackSecurityPutRole(role.Name).Body(role.Definition).Do(ctx)
 	if err != nil {
 		return err
 	}
@@ -276,7 +264,7 @@ func (es *esClient) CreateUser(ctx context.Context, user *User) error {
 	}
 
 	if len(rolesToCreate) > 0 {
-		if err := es.CreateRoles(rolesToCreate...); err != nil {
+		if err := es.CreateRoles(ctx, rolesToCreate...); err != nil {
 			return err
 		}
 	}
@@ -295,6 +283,70 @@ func (es *esClient) CreateUser(ctx context.Context, user *User) error {
 	return nil
 }
 
+// DeleteRoles wraps deleteRoles to make deleting multiple roles slightly more convenient
+func (es *esClient) DeleteRoles(ctx context.Context, roles []Role) error {
+	for _, role := range roles {
+		if err := es.deleteRole(ctx, role); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// deleteRole attempts to delete the given Elasticsearch role.
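With formatName collapsed to one underscore-delimited scheme, each Linseed user now carries both its cluster and its tenant in the name itself, which is what the cleanup controller keys on. An illustration with made-up IDs (activeTenantIDs is a hypothetical set of the tenant IDs currently present in the cluster):

	// formatName("tigera-ee-linseed", "0b24ae13", "tenant-a")
	//   => "tigera-ee-linseed_0b24ae13_tenant-a"
	prefix := fmt.Sprintf("%s_%s_", utils.ElasticsearchUserNameLinseed, clusterID)
	if strings.HasPrefix(user.Username, prefix) && !activeTenantIDs[strings.TrimPrefix(user.Username, prefix)] {
		// The user belongs to this cluster but to no live tenant: stale.
	}
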
+func (es *esClient) deleteRole(ctx context.Context, role Role) error { + if role.Name == "" { + return fmt.Errorf("can't delete a role with an empty name") + } + + _, err := es.client.XPackSecurityDeleteRole(role.Name).Do(ctx) + if err != nil { + return err + } + + return nil +} + +func (es *esClient) DeleteUser(ctx context.Context, user *User) error { + if err := es.DeleteRoles(ctx, user.Roles); err != nil { + return err + } + + _, err := es.client.XPackSecurityDeleteUser(user.Username).Do(ctx) + if err != nil { + log.Error(err, "Error deleting user") + return err + } + + return nil +} + +// GetUsers returns all users stored in ES +func (es *esClient) GetUsers(ctx context.Context) ([]User, error) { + usersResponse, err := es.client.XPackSecurityGetUser("").Do(ctx) + if err != nil { + log.Error(err, "Error getting users") + return []User{}, err + } + + users := []User{} + for name, data := range *usersResponse { + user := User{ + Username: name, + } + for _, roleName := range data.Roles { + role := Role{ + Name: roleName, + } + user.Roles = append(user.Roles, role) + } + users = append(users, user) + } + + return users, nil +} + // SetILMPolicies creates ILM policies for each timeseries based index using the retention period and storage size in LogStorage func (es *esClient) SetILMPolicies(ctx context.Context, ls *operatorv1.LogStorage) error { policyList := es.listILMPolicies(ls) diff --git a/pkg/controller/utils/utils_test.go b/pkg/controller/utils/utils_test.go index 33d4d19c5a..7735c13134 100644 --- a/pkg/controller/utils/utils_test.go +++ b/pkg/controller/utils/utils_test.go @@ -272,6 +272,42 @@ var _ = Describe("PopulateK8sServiceEndPoint", func() { }) +var _ = Describe("Utils ElasticSearch test", func() { + var ( + userPrefix = "test-es-prefix" + clusterID = "clusterUUID" + tenantID = "tenantID" + ) + It("should generate usernames in expected format", func() { + generatedESUsername := formatName(userPrefix, clusterID, tenantID) + expectedESUsername := fmt.Sprintf("%s_%s_%s", userPrefix, clusterID, tenantID) + Expect(generatedESUsername).To(Equal(expectedESUsername)) + }) + + It("should generate Linseed ElasticUser with expected username and roles", func() { + linseedUser := LinseedUser(clusterID, tenantID) + expectedLinseedESName := fmt.Sprintf("%s_%s_%s", ElasticsearchUserNameLinseed, clusterID, tenantID) + + Expect(linseedUser.Username).To(Equal(expectedLinseedESName)) + Expect(len(linseedUser.Roles)).To(Equal(1)) + linseedRole := linseedUser.Roles[0] + Expect(linseedRole.Name).To(Equal(expectedLinseedESName)) + + expectedLinseedRoleDef := RoleDefinition{ + Cluster: []string{"monitor", "manage_index_templates", "manage_ilm"}, + Indices: []RoleIndex{ + { + // Include both single-index and multi-index name formats. 
+				Names:      []string{indexPattern("tigera_secure_ee_*", "*", ".*", tenantID), "calico_*"},
+				Privileges: []string{"create_index", "write", "manage", "read"},
+			},
+		},
+	}
+
+		Expect(*linseedRole.Definition).To(Equal(expectedLinseedRoleDef))
+	})
+})
+
 type fakeClient struct {
 	discovery discovery.DiscoveryInterface
 	kubernetes.Interface
diff --git a/pkg/render/common/networkpolicy/networkpolicy.go b/pkg/render/common/networkpolicy/networkpolicy.go
index fc3b53bee4..47b113ab21 100644
--- a/pkg/render/common/networkpolicy/networkpolicy.go
+++ b/pkg/render/common/networkpolicy/networkpolicy.go
@@ -88,7 +88,7 @@ func CreateEntityRule(namespace string, deploymentName string, ports ...uint16)
 func CreateSourceEntityRule(namespace string, deploymentName string) v3.EntityRule {
 	return v3.EntityRule{
 		Selector:          fmt.Sprintf("k8s-app == '%s'", deploymentName),
-		NamespaceSelector: fmt.Sprintf("name == '%s'", namespace),
+		NamespaceSelector: fmt.Sprintf("projectcalico.org/name == '%s'", namespace),
 	}
 }
 
diff --git a/pkg/render/fluentd.go b/pkg/render/fluentd.go
index 7146fe6ff7..3d2b19abb0 100644
--- a/pkg/render/fluentd.go
+++ b/pkg/render/fluentd.go
@@ -17,7 +17,6 @@ package render
 import (
 	"crypto/x509"
 	"fmt"
-	"strconv"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -147,11 +146,13 @@ type EksCloudwatchLogConfig struct {
 
 // FluentdConfiguration contains all the config information needed to render the component.
 type FluentdConfiguration struct {
-	LogCollector    *operatorv1.LogCollector
+	LogCollector   *operatorv1.LogCollector
+	S3Credential   *S3Credential
+	SplkCredential *SplunkCredential
+	Filters        *FluentdFilters
+	// ESClusterConfig is only populated when EKSConfig
+	// is also defined
 	ESClusterConfig *relasticsearch.ClusterConfig
-	S3Credential    *S3Credential
-	SplkCredential  *SplunkCredential
-	Filters         *FluentdFilters
 	EKSConfig       *EksCloudwatchLogConfig
 	PullSecrets     []*corev1.Secret
 	Installation    *operatorv1.InstallationSpec
@@ -790,24 +791,7 @@ func (c *fluentdComponent) envvars() []corev1.EnvVar {
 		}
 	}
 
-	envs = append(envs,
-		corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_L7_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-		corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())},
-
-		corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.FlowShards())},
-		corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_L7_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())},
-		corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_SHARDS", Value: 
strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "CA_CRT_PATH", Value: c.trustedBundlePath()}, - ) + envs = append(envs, corev1.EnvVar{Name: "CA_CRT_PATH", Value: c.trustedBundlePath()}) return envs } diff --git a/pkg/render/fluentd_test.go b/pkg/render/fluentd_test.go index b0f576da96..c83d8908f0 100644 --- a/pkg/render/fluentd_test.go +++ b/pkg/render/fluentd_test.go @@ -41,7 +41,6 @@ import ( ) var _ = Describe("Tigera Secure Fluentd rendering tests", func() { - var esConfigMap *relasticsearch.ClusterConfig var cfg *render.FluentdConfiguration expectedFluentdPolicyForUnmanaged := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/fluentd_unmanaged.json") @@ -51,7 +50,6 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { BeforeEach(func() { // Initialize a default instance to use. Each test can override this to its // desired configuration. - esConfigMap = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) scheme := runtime.NewScheme() Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) cli := fake.NewClientBuilder().WithScheme(scheme).Build() @@ -62,10 +60,9 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { metricsSecret, err := certificateManager.GetOrCreateKeyPair(cli, render.FluentdPrometheusTLSSecretName, common.OperatorNamespace(), []string{""}) Expect(err).NotTo(HaveOccurred()) cfg = &render.FluentdConfiguration{ - LogCollector: &operatorv1.LogCollector{}, - ESClusterConfig: esConfigMap, - ClusterDomain: dns.DefaultClusterDomain, - OSType: rmeta.OSTypeLinux, + LogCollector: &operatorv1.LogCollector{}, + ClusterDomain: dns.DefaultClusterDomain, + OSType: rmeta.OSTypeLinux, Installation: &operatorv1.InstallationSpec{ KubernetesProvider: operatorv1.ProviderNone, }, @@ -823,6 +820,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { GroupName: "dummy-eks-cluster-cloudwatch-log-group", FetchInterval: fetchInterval, } + cfg.ESClusterConfig = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) t := corev1.Toleration{ Key: "foo", Operator: corev1.TolerationOpEqual, diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index 59abd7294b..712f28d4f6 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -309,6 +309,7 @@ func (l *linseed) linseedDeployment() *appsv1.Deployment { // If a tenant was provided, set the expected tenant ID and enable the shared index backend. 
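In the multi-tenant branch below, Linseed is also pointed at the tenant's own manager service for multi-cluster forwarding. The endpoint is derived purely from the Tenant's namespace; for example, with a hypothetical namespace "tenant-a-ns":

	endpoint := fmt.Sprintf("https://tigera-manager.%s.svc:9443", "tenant-a-ns")
	// => "https://tigera-manager.tenant-a-ns.svc:9443"
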
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_EXPECTED_TENANT_ID", Value: l.cfg.Tenant.Spec.ID}) envVars = append(envVars, corev1.EnvVar{Name: "BACKEND", Value: "elastic-single-index"}) + envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", l.cfg.Tenant.Namespace)}) } var initContainers []corev1.Container diff --git a/pkg/render/logstorage/linseed/linseed_test.go b/pkg/render/logstorage/linseed/linseed_test.go index 8b0ce8dfbd..24b65f0e8c 100644 --- a/pkg/render/logstorage/linseed/linseed_test.go +++ b/pkg/render/logstorage/linseed/linseed_test.go @@ -342,7 +342,7 @@ var _ = Describe("Linseed rendering tests", func() { Expect(cr.Rules).To(ContainElements(expectedRules)) }) - It("should render MANAGEMENT_OPERATOR_NS environment variable", func() { + It("should render multi-tenant environment variables", func() { cfg.ManagementCluster = true component := Linseed(cfg) Expect(component).NotTo(BeNil()) @@ -350,6 +350,9 @@ var _ = Describe("Linseed rendering tests", func() { d := rtest.GetResource(resources, DeploymentName, cfg.Namespace, appsv1.GroupName, "v1", "Deployment").(*appsv1.Deployment) envs := d.Spec.Template.Spec.Containers[0].Env Expect(envs).To(ContainElement(corev1.EnvVar{Name: "MANAGEMENT_OPERATOR_NS", Value: "tigera-operator"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "BACKEND", Value: "elastic-single-index"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "LINSEED_EXPECTED_TENANT_ID", Value: cfg.Tenant.Spec.ID})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", cfg.Tenant.Namespace)})) }) }) }) diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 1bf78c67f8..72b262ddb7 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -534,6 +534,8 @@ func (c *managerComponent) voltronContainer() corev1.Container { if c.cfg.Tenant != nil { env = append(env, corev1.EnvVar{Name: "VOLTRON_TENANT_NAMESPACE", Value: c.cfg.Tenant.Namespace}) + env = append(env, corev1.EnvVar{Name: "VOLTRON_TENANT_ID", Value: c.cfg.Tenant.Spec.ID}) + env = append(env, corev1.EnvVar{Name: "VOLTRON_LINSEED_ENDPOINT", Value: fmt.Sprintf("https://tigera-linseed.%s.svc", c.cfg.Tenant.Namespace)}) } return corev1.Container{ diff --git a/pkg/render/manager_test.go b/pkg/render/manager_test.go index 4025d3f9a5..7a889baaf7 100644 --- a/pkg/render/manager_test.go +++ b/pkg/render/manager_test.go @@ -965,6 +965,31 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { }, })) }) + + It("should render multi-tenant environment variables", func() { + tenantAResources := renderObjects(renderConfig{ + oidc: false, + managementCluster: nil, + installation: installation, + compliance: compliance, + complianceFeatureActive: true, + ns: tenantANamespace, + tenant: &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tenant", + Namespace: tenantANamespace, + }, + Spec: operatorv1.TenantSpec{ + ID: "tenant-a", + }, + }, + }) + d := rtest.GetResource(tenantAResources, "tigera-manager", tenantANamespace, appsv1.GroupName, "v1", "Deployment").(*appsv1.Deployment) + envs := d.Spec.Template.Spec.Containers[2].Env + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_NAMESPACE", Value: tenantANamespace})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_ID", Value: "tenant-a"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: 
"VOLTRON_LINSEED_ENDPOINT", Value: fmt.Sprintf("https://tigera-linseed.%s.svc", tenantANamespace)})) + }) }) }) @@ -977,6 +1002,7 @@ type renderConfig struct { openshift bool ns string bindingNamespaces []string + tenant *operatorv1.Tenant } func renderObjects(roc renderConfig) []client.Object { @@ -1042,6 +1068,7 @@ func renderObjects(roc renderConfig) []client.Object { Namespace: roc.ns, BindingNamespaces: roc.bindingNamespaces, TruthNamespace: common.OperatorNamespace(), + Tenant: roc.tenant, } component, err := render.Manager(cfg) Expect(err).To(BeNil(), "Expected Manager to create successfully %s", err) diff --git a/pkg/render/node.go b/pkg/render/node.go index 7954b53b5c..66f8771de4 100644 --- a/pkg/render/node.go +++ b/pkg/render/node.go @@ -45,11 +45,14 @@ import ( ) const ( - BirdTemplatesConfigMapName = "bird-templates" - birdTemplateHashAnnotation = "hash.operator.tigera.io/bird-templates" - nodeCniConfigAnnotation = "hash.operator.tigera.io/cni-config" - bgpLayoutHashAnnotation = "hash.operator.tigera.io/bgp-layout" - bgpBindModeHashAnnotation = "hash.operator.tigera.io/bgp-bind-mode" + BirdTemplatesConfigMapName = "bird-templates" + birdTemplateHashAnnotation = "hash.operator.tigera.io/bird-templates" + BpfOperatorAnnotation = "operator.tigera.io/bpfEnabled" + + nodeCniConfigAnnotation = "hash.operator.tigera.io/cni-config" + bgpLayoutHashAnnotation = "hash.operator.tigera.io/bgp-layout" + bgpBindModeHashAnnotation = "hash.operator.tigera.io/bgp-bind-mode" + CSRLabelCalicoSystem = "calico-system" BGPLayoutConfigMapName = "bgp-layout" BGPLayoutConfigMapKey = "earlyNetworkConfiguration" @@ -934,13 +937,20 @@ func (c *nodeComponent) nodeDaemonset(cniCfgMap *corev1.ConfigMap) *appsv1.Daemo annotations[bgpBindModeHashAnnotation] = rmeta.AnnotationHash(c.cfg.BindMode) } + // Include the annotation to indicate whether BPF is configured or not. + dsAnnotations := make(map[string]string) + if c.bpfDataplaneEnabled() { + dsAnnotations[BpfOperatorAnnotation] = "true" + } + // Determine the name to use for the calico/node daemonset. For mixed-mode, we run the enterprise DaemonSet // with its own name so as to not conflict. 
ds := appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ - Name: common.NodeDaemonSetName, - Namespace: common.CalicoNamespace, + Name: common.NodeDaemonSetName, + Namespace: common.CalicoNamespace, + Annotations: dsAnnotations, }, Spec: appsv1.DaemonSetSpec{ Template: corev1.PodTemplateSpec{ @@ -1110,9 +1120,7 @@ func (c *nodeComponent) nodeVolumes() []corev1.Volume { } func (c *nodeComponent) bpfDataplaneEnabled() bool { - return c.cfg.Installation.CalicoNetwork != nil && - c.cfg.Installation.CalicoNetwork.LinuxDataplane != nil && - *c.cfg.Installation.CalicoNetwork.LinuxDataplane == operatorv1.LinuxDataplaneBPF + return common.BpfDataplaneEnabled(c.cfg.Installation) } func (c *nodeComponent) vppDataplaneEnabled() bool { @@ -1298,7 +1306,7 @@ func (c *nodeComponent) nodeVolumeMounts() []corev1.VolumeMount { ) } if c.bpfDataplaneEnabled() { - nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/sys/fs/bpf", Name: "bpffs"}) + nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/sys/fs/bpf", Name: common.BPFVolumeName}) } if c.vppDataplaneEnabled() { nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/usr/local/bin/felix-plugins", Name: "felix-plugins", ReadOnly: true}) @@ -1495,9 +1503,6 @@ func (c *nodeComponent) nodeEnvVars() []corev1.EnvVar { } } - if c.bpfDataplaneEnabled() { - nodeEnv = append(nodeEnv, corev1.EnvVar{Name: "FELIX_BPFENABLED", Value: "true"}) - } if c.vppDataplaneEnabled() { nodeEnv = append(nodeEnv, corev1.EnvVar{ Name: "FELIX_USEINTERNALDATAPLANEDRIVER", diff --git a/pkg/render/node_test.go b/pkg/render/node_test.go index d1bafa9fb5..fa5d43f6fa 100644 --- a/pkg/render/node_test.go +++ b/pkg/render/node_test.go @@ -505,6 +505,7 @@ var _ = Describe("Node rendering tests", func() { // The DaemonSet should have the correct configuration. 
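Dropping FELIX_BPFENABLED from the pod spec is the heart of the no-disruption upgrade: dataplane selection now travels through FelixConfiguration, which can change without re-rendering the DaemonSet, while common.BpfDataplaneEnabled gives render code and controllers a single definition to agree on. Usage stays a one-liner wherever an InstallationSpec is available:

	if common.BpfDataplaneEnabled(&install.Spec) {
		mounts = append(mounts, corev1.VolumeMount{MountPath: "/sys/fs/bpf", Name: common.BPFVolumeName})
	}
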
ds := dsResource.(*appsv1.DaemonSet) + Expect(ds.Annotations[render.BpfOperatorAnnotation]).To(Equal("true")) if enableIPv4 { rtest.ExpectEnv(ds.Spec.Template.Spec.Containers[0].Env, "CALICO_IPV4POOL_CIDR", "192.168.1.0/16") } @@ -556,7 +557,6 @@ var _ = Describe("Node rendering tests", func() { {Name: "CALICO_MANAGE_CNI", Value: "true"}, {Name: "CALICO_DISABLE_FILE_LOGGING", Value: "false"}, {Name: "CLUSTER_TYPE", Value: "k8s,operator,bgp"}, - {Name: "FELIX_BPFENABLED", Value: "true"}, {Name: "FELIX_DEFAULTENDPOINTTOHOSTACTION", Value: "ACCEPT"}, {Name: "FELIX_HEALTHENABLED", Value: "true"}, {Name: "FELIX_HEALTHPORT", Value: "9099"}, diff --git a/pkg/render/testutils/expected_policies/compliance-server.json b/pkg/render/testutils/expected_policies/compliance-server.json index f4fcdf9f77..2d209adae4 100644 --- a/pkg/render/testutils/expected_policies/compliance-server.json +++ b/pkg/render/testutils/expected_policies/compliance-server.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/compliance-server_ocp.json b/pkg/render/testutils/expected_policies/compliance-server_ocp.json index b0990c66a3..2eb145170e 100644 --- a/pkg/render/testutils/expected_policies/compliance-server_ocp.json +++ b/pkg/render/testutils/expected_policies/compliance-server_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/dex.json b/pkg/render/testutils/expected_policies/dex.json index c97eabcc74..d628e21af8 100644 --- a/pkg/render/testutils/expected_policies/dex.json +++ b/pkg/render/testutils/expected_policies/dex.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -50,7 +50,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-packetcapture'", - "namespaceSelector": "name == 'tigera-packetcapture'" + "namespaceSelector": "projectcalico.org/name == 'tigera-packetcapture'" } }, { diff --git a/pkg/render/testutils/expected_policies/dex_ocp.json b/pkg/render/testutils/expected_policies/dex_ocp.json index b56972487f..8f25c64d04 100644 --- a/pkg/render/testutils/expected_policies/dex_ocp.json +++ b/pkg/render/testutils/expected_policies/dex_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { 
"selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -50,7 +50,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-packetcapture'", - "namespaceSelector": "name == 'tigera-packetcapture'" + "namespaceSelector": "projectcalico.org/name == 'tigera-packetcapture'" } }, { diff --git a/pkg/render/testutils/expected_policies/elasticsearch.json b/pkg/render/testutils/expected_policies/elasticsearch.json index 9e066fb348..d790ad850f 100644 --- a/pkg/render/testutils/expected_policies/elasticsearch.json +++ b/pkg/render/testutils/expected_policies/elasticsearch.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure'", - "namespaceSelector": "name == 'tigera-kibana'" + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -45,7 +45,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-linseed'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -64,7 +64,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { diff --git a/pkg/render/testutils/expected_policies/elasticsearch_ocp.json b/pkg/render/testutils/expected_policies/elasticsearch_ocp.json index 0cea857134..29a8697368 100644 --- a/pkg/render/testutils/expected_policies/elasticsearch_ocp.json +++ b/pkg/render/testutils/expected_policies/elasticsearch_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure'", - "namespaceSelector": "name == 'tigera-kibana'" + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'" }, "destination": { "ports": [ @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -45,7 +45,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-linseed'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -63,7 +63,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { diff --git a/pkg/render/testutils/expected_policies/es-gateway.json b/pkg/render/testutils/expected_policies/es-gateway.json index 27716a6b7b..c5c4ec766a 100644 --- a/pkg/render/testutils/expected_policies/es-gateway.json +++ b/pkg/render/testutils/expected_policies/es-gateway.json @@ -32,7 +32,7 @@ 
"protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { diff --git a/pkg/render/testutils/expected_policies/es-gateway_ocp.json b/pkg/render/testutils/expected_policies/es-gateway_ocp.json index 134c581081..48a124a2f4 100644 --- a/pkg/render/testutils/expected_policies/es-gateway_ocp.json +++ b/pkg/render/testutils/expected_policies/es-gateway_ocp.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + 
"namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { diff --git a/pkg/render/testutils/expected_policies/guardian.json b/pkg/render/testutils/expected_policies/guardian.json index 0f06002a74..472669255d 100644 --- a/pkg/render/testutils/expected_policies/guardian.json +++ b/pkg/render/testutils/expected_policies/guardian.json @@ -36,7 +36,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-benchmarker'" } }, @@ -49,7 +49,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-reporter'" } }, @@ -62,7 +62,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-snapshotter'" } }, @@ -75,7 +75,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-controller'" } }, diff --git a/pkg/render/testutils/expected_policies/guardian_ocp.json b/pkg/render/testutils/expected_policies/guardian_ocp.json index 40ff5f0b2a..33ee571e34 100644 --- a/pkg/render/testutils/expected_policies/guardian_ocp.json +++ b/pkg/render/testutils/expected_policies/guardian_ocp.json @@ -36,7 +36,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-benchmarker'" } }, @@ -49,7 +49,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-reporter'" } }, @@ -62,7 +62,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + 
"namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-snapshotter'" } }, @@ -75,7 +75,7 @@ }, "protocol": "TCP", "source": { - "namespaceSelector": "name == 'tigera-compliance'", + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'", "selector": "k8s-app == 'compliance-controller'" } }, diff --git a/pkg/render/testutils/expected_policies/kibana.json b/pkg/render/testutils/expected_policies/kibana.json index cd1592bd15..94d063d357 100644 --- a/pkg/render/testutils/expected_policies/kibana.json +++ b/pkg/render/testutils/expected_policies/kibana.json @@ -47,7 +47,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -65,7 +65,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } } ], diff --git a/pkg/render/testutils/expected_policies/kibana_ocp.json b/pkg/render/testutils/expected_policies/kibana_ocp.json index 20e2dcb17b..1244a594da 100644 --- a/pkg/render/testutils/expected_policies/kibana_ocp.json +++ b/pkg/render/testutils/expected_policies/kibana_ocp.json @@ -47,7 +47,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -65,7 +65,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } } ], diff --git a/pkg/render/testutils/expected_policies/linseed.json b/pkg/render/testutils/expected_policies/linseed.json index e659953415..b84e9f036c 100644 --- a/pkg/render/testutils/expected_policies/linseed.json +++ b/pkg/render/testutils/expected_policies/linseed.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": 
"projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } } ], diff --git a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json index 3e9aa082f8..5b2688c8d5 100644 --- a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 
'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } }, { diff --git a/pkg/render/testutils/expected_policies/linseed_ocp.json b/pkg/render/testutils/expected_policies/linseed_ocp.json index db18a18da7..94416ebcb7 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 
'tigera-policy-recommendation'" } } ], diff --git a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json index 4118f48943..ee9e65fb24 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json @@ -32,7 +32,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'eks-log-forwarder'", - "namespaceSelector": "name == 'tigera-fluentd'" + "namespaceSelector": "projectcalico.org/name == 'tigera-fluentd'" }, "destination": { "ports": [ @@ -58,7 +58,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-curator'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { "ports": [ @@ -71,7 +71,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ @@ -89,7 +89,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-benchmarker'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -102,7 +102,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-controller'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -115,7 +115,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-server'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -128,7 +128,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-snapshotter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -141,7 +141,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'compliance-reporter'", - "namespaceSelector": "name == 'tigera-compliance'" + "namespaceSelector": "projectcalico.org/name == 'tigera-compliance'" } }, { @@ -167,7 +167,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'elastic-operator'", - "namespaceSelector": "name == 'tigera-eck-operator'" + "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } }, { @@ -180,7 +180,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-elasticsearch-metrics'", - "namespaceSelector": "name == 'tigera-elasticsearch'" + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" } }, { @@ -193,7 +193,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-policy-recommendation'", - "namespaceSelector": "name == 'tigera-policy-recommendation'" + "namespaceSelector": "projectcalico.org/name == 'tigera-policy-recommendation'" } }, { diff --git a/pkg/render/testutils/expected_policies/packetcapture.json b/pkg/render/testutils/expected_policies/packetcapture.json index 9bf25105f7..3794a3c929 100644 --- a/pkg/render/testutils/expected_policies/packetcapture.json +++ b/pkg/render/testutils/expected_policies/packetcapture.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git 
a/pkg/render/testutils/expected_policies/packetcapture_managed.json b/pkg/render/testutils/expected_policies/packetcapture_managed.json index f8914268e0..44fda24f0e 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_managed.json +++ b/pkg/render/testutils/expected_policies/packetcapture_managed.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-guardian'", - "namespaceSelector": "name == 'tigera-guardian'" + "namespaceSelector": "projectcalico.org/name == 'tigera-guardian'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json b/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json index cc6d8d1eb5..bee37d027c 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json +++ b/pkg/render/testutils/expected_policies/packetcapture_managed_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-guardian'", - "namespaceSelector": "name == 'tigera-guardian'" + "namespaceSelector": "projectcalico.org/name == 'tigera-guardian'" }, "destination": { "ports": [ diff --git a/pkg/render/testutils/expected_policies/packetcapture_ocp.json b/pkg/render/testutils/expected_policies/packetcapture_ocp.json index 4f97d8680b..2232364ab6 100644 --- a/pkg/render/testutils/expected_policies/packetcapture_ocp.json +++ b/pkg/render/testutils/expected_policies/packetcapture_ocp.json @@ -19,7 +19,7 @@ "protocol": "TCP", "source": { "selector": "k8s-app == 'tigera-manager'", - "namespaceSelector": "name == 'tigera-manager'" + "namespaceSelector": "projectcalico.org/name == 'tigera-manager'" }, "destination": { "ports": [ diff --git a/test/mainline_test.go b/test/mainline_test.go index 46579209c0..5cc9b17355 100644 --- a/test/mainline_test.go +++ b/test/mainline_test.go @@ -21,6 +21,9 @@ import ( "strings" "time" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/render" + rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -347,6 +350,33 @@ func installResourceCRD(c client.Client, mgr manager.Manager, ctx context.Contex err := c.Create(context.Background(), instance) Expect(err).NotTo(HaveOccurred()) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: common.CalicoNamespace, Namespace: common.CalicoNamespace}, + } + err = c.Create(context.Background(), ns) + if err != nil && !kerror.IsAlreadyExists(err) { + Expect(err).NotTo(HaveOccurred()) + } + + labels := make(map[string]string) + labels["foo"] = "bar" + ds := &apps.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: common.NodeDaemonSetName, Namespace: common.CalicoNamespace}, + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: render.CalicoNodeObjectName, Image: render.CalicoNodeObjectName}}, + }, + }, + }, + } + err = c.Create(context.Background(), ds) + Expect(err).NotTo(HaveOccurred()) + By("Running the operator") return RunOperator(mgr, ctx) }
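
Note on the expected-policy fixtures above: every hunk swaps the key used in the
namespaceSelector expression, matching peer namespaces on the
projectcalico.org/name label that Calico stamps onto every namespace rather
than on the bare "name" key. As a rough, self-contained sketch of how such a
selector string can be assembled (the namespaceSelector helper below is
hypothetical, named here for illustration; it is not the operator's actual
rendering API):

    package main

    import "fmt"

    // namespaceSelector returns a Calico selector expression that matches a
    // namespace by its projectcalico.org/name label. Hypothetical helper for
    // illustration only; the operator's real rendering code may differ.
    func namespaceSelector(namespace string) string {
        return fmt.Sprintf("projectcalico.org/name == '%s'", namespace)
    }

    func main() {
        // Prints: projectcalico.org/name == 'tigera-compliance'
        fmt.Println(namespaceSelector("tigera-compliance"))
    }

The mainline_test.go hunk, for its part, seeds the calico-system namespace and
a stand-in calico-node DaemonSet before starting the operator, so that
reconciliation paths which read the calico-node DaemonSet have an object to
find during the test run.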