From 63b1a66e79daad4c939128192f4c3df74f020b03 Mon Sep 17 00:00:00 2001
From: Alina Militaru <41362174+asincu@users.noreply.github.com>
Date: Fri, 19 Jan 2024 17:25:00 -0800
Subject: [PATCH] Migrate job installer to run inside elasticsearch namespace

---
 controllers/logstorage_controller.go          |   8 +-
 .../dashboards/dashboards_controller.go       | 376 ++++
 .../dashboards/dashboards_controller_test.go  | 511 +++++++++++++
 .../dashboards/dashboards_suite_test.go       |  34 +
 .../common/networkpolicy/networkpolicy.go     |   8 +
 pkg/render/intrusion_detection.go             |  78 +-
 pkg/render/intrusion_detection_test.go        |  71 +-
 pkg/render/logstorage.go                      |  13 +-
 .../logstorage/dashboards/dashboards.go       | 379 ++++
 .../dashboards/dashboards_suite_test.go       |  30 +
 .../logstorage/dashboards/dashboards_test.go  | 680 ++++++++++++++++++
 pkg/render/logstorage/linseed/linseed.go      |   6 -
 .../expected_policies/dashboards.json         |  40 ++
 .../expected_policies/dashboards_ocp.json     |  51 ++
 .../testutils/expected_policies/kibana.json   |  22 +
 .../expected_policies/kibana_ocp.json         |  22 +
 .../testutils/expected_policies/linseed.json  |  13 -
 .../linseed_dpi_enabled.json                  |  13 -
 .../expected_policies/linseed_ocp.json        |  13 -
 .../linseed_ocp_dpi_enabled.json              |  13 -
 20 files changed, 2177 insertions(+), 204 deletions(-)
 create mode 100644 pkg/controller/logstorage/dashboards/dashboards_controller.go
 create mode 100644 pkg/controller/logstorage/dashboards/dashboards_controller_test.go
 create mode 100644 pkg/controller/logstorage/dashboards/dashboards_suite_test.go
 create mode 100644 pkg/render/logstorage/dashboards/dashboards.go
 create mode 100644 pkg/render/logstorage/dashboards/dashboards_suite_test.go
 create mode 100644 pkg/render/logstorage/dashboards/dashboards_test.go
 create mode 100644 pkg/render/testutils/expected_policies/dashboards.json
 create mode 100644 pkg/render/testutils/expected_policies/dashboards_ocp.json

diff --git a/controllers/logstorage_controller.go b/controllers/logstorage_controller.go
index 92e7afeaa0..f5764a3680 100644
--- a/controllers/logstorage_controller.go
+++ b/controllers/logstorage_controller.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020,2023 Tigera, Inc. All rights reserved.
+// Copyright (c) 2020,2024 Tigera, Inc. All rights reserved.
 /*
 Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +18,7 @@ package controllers
 import (
 	"github.com/go-logr/logr"
+	"github.com/tigera/operator/pkg/controller/logstorage/dashboards"
 	"github.com/tigera/operator/pkg/controller/logstorage/esmetrics"
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -84,6 +85,11 @@ func (r *LogStorageReconciler) SetupWithManager(mgr ctrl.Manager, opts options.A
 		return err
 	}
+	// The dashboards controller installs dashboards into Kibana.
+	if err := dashboards.Add(mgr, opts); err != nil {
+		return err
+	}
+
 	// The managed cluster controller runs on managed clusters only, and installs the necessary services for managed cluster components
 	// to talk to the management cluster, as well as the necessary RBAC for management cluster components to talk
 	// to the managed cluster.
diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go
new file mode 100644
index 0000000000..de8e4782f3
--- /dev/null
+++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go
@@ -0,0 +1,376 @@
+// Copyright (c) 2024 Tigera, Inc. All rights reserved.
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dashboards + +import ( + "context" + "fmt" + "net/url" + + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/controller/utils/imageset" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/render/logstorage/dashboards" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/controller/options" + "github.com/tigera/operator/pkg/controller/status" + "github.com/tigera/operator/pkg/controller/utils" + "github.com/tigera/operator/pkg/render" + "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller_logstorage_dashboards") + +const ( + tigeraStatusName = "log-storage-dashboards" +) + +type DashboardsSubController struct { + client client.Client + scheme *runtime.Scheme + status status.StatusManager + provider operatorv1.Provider + clusterDomain string + usePSP bool + multiTenant bool + elasticExternal bool + tierWatchReady *utils.ReadyFlag +} + +func Add(mgr manager.Manager, opts options.AddOptions) error { + if !opts.EnterpriseCRDExists { + return nil + } + + r := &DashboardsSubController{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + status: status.New(mgr.GetClient(), tigeraStatusName, opts.KubernetesVersion), + clusterDomain: opts.ClusterDomain, + provider: opts.DetectedProvider, + tierWatchReady: &utils.ReadyFlag{}, + } + r.status.Run(opts.ShutdownContext) + + // Create a controller using the reconciler and register it with the manager to receive reconcile calls. + c, err := controller.New("log-storage-dashboards-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Determine how to handle watch events for cluster-scoped resources. For multi-tenant clusters, + // we should update all tenants whenever one changes. For single-tenant clusters, we can just queue the object. + var eventHandler handler.EventHandler = &handler.EnqueueRequestForObject{} + if opts.MultiTenant { + eventHandler = utils.EnqueueAllTenants(mgr.GetClient()) + } + + // Configure watches for operator.tigera.io APIs this controller cares about. 
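+	// Note: the eventHandler configured above fans these cluster-scoped events out to every Tenant in
+	// multi-tenant mode; in single-tenant mode it simply enqueues the changed object.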
+	if err = c.Watch(&source.Kind{Type: &operatorv1.LogStorage{}}, eventHandler); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch LogStorage resource: %w", err)
+	}
+	if err = c.Watch(&source.Kind{Type: &operatorv1.Installation{}}, eventHandler); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch Installation resource: %w", err)
+	}
+	if err = c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, eventHandler); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch ManagementClusterConnection resource: %w", err)
+	}
+	if err = utils.AddTigeraStatusWatch(c, "log-storage-dashboards"); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch LogStorage TigeraStatus: %w", err)
+	}
+	if opts.MultiTenant {
+		if err = c.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil {
+			return fmt.Errorf("log-storage-dashboards-controller failed to watch Tenant resource: %w", err)
+		}
+	}
+
+	// The namespace(s) we need to monitor depend upon what tenancy mode we're running in.
+	// For single-tenant, everything is installed in the tigera-elasticsearch namespace.
+	// Make a helper for determining which namespaces to use based on tenancy mode.
+	helper := utils.NewNamespaceHelper(opts.MultiTenant, render.ElasticsearchNamespace, "")
+
+	// Watch secrets this controller cares about.
+	secretsToWatch := []string{
+		dashboards.ElasticCredentialsSecret,
+	}
+
+	// Determine namespaces to watch.
+	namespacesToWatch := []string{helper.TruthNamespace(), helper.InstallNamespace()}
+	if helper.TruthNamespace() == helper.InstallNamespace() {
+		namespacesToWatch = []string{helper.InstallNamespace()}
+	}
+	for _, ns := range namespacesToWatch {
+		for _, name := range secretsToWatch {
+			if err := utils.AddSecretsWatch(c, name, ns); err != nil {
+				return fmt.Errorf("log-storage-dashboards-controller failed to watch Secret: %w", err)
+			}
+		}
+	}
+
+	// Catch if something modifies the resources that this controller consumes.
+	if err := utils.AddServiceWatch(c, render.KibanaServiceName, helper.InstallNamespace()); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err)
+	}
+	if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), &handler.EnqueueRequestForObject{}); err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch the ConfigMap resource: %w", err)
+	}
+
+	// Check if something modifies resources this controller creates.
+	err = c.Watch(&source.Kind{Type: &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
+		Namespace: helper.InstallNamespace(),
+		Name:      dashboards.Name,
+	}}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to watch installer job: %w", err)
+	}
+
+	k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
+	if err != nil {
+		return fmt.Errorf("log-storage-dashboards-controller failed to establish a connection to k8s: %w", err)
+	}
+
+	go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, c, k8sClient, log, r.tierWatchReady)
+	go utils.WaitToAddNetworkPolicyWatches(c, k8sClient, log, []types.NamespacedName{
+		{Name: dashboards.PolicyName, Namespace: helper.InstallNamespace()},
+	})
+
+	// Perform periodic reconciliation.
This acts as a backstop to catch reconcile issues, + // and also makes sure we spot when things change that might not trigger a reconciliation. + err = utils.AddPeriodicReconcile(c, utils.PeriodicReconcileTime, eventHandler) + if err != nil { + return fmt.Errorf("log-storage-dashboards-controller failed to create periodic reconcile watch: %w", err) + } + + return nil +} + +func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + helper := utils.NewNamespaceHelper(d.multiTenant, render.ElasticsearchNamespace, request.Namespace) + reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name, "installNS", helper.InstallNamespace(), "truthNS", helper.TruthNamespace()) + reqLogger.Info("Reconciling LogStorage - Dashboards") + + // We skip requests without a namespace specified in multi-tenant setups. + if d.multiTenant && request.Namespace == "" { + return reconcile.Result{}, nil + } + + // When running in multi-tenant mode, we need to install Dashboards in tenant Namespaces. + // We use the tenant API to determine the set of namespaces that should have a K8S job that installs dashboards. + tenant, _, err := utils.GetTenant(ctx, d.multiTenant, d.client, request.Namespace) + if errors.IsNotFound(err) { + reqLogger.Info("No Tenant in this Namespace, skip") + return reconcile.Result{}, nil + } else if err != nil { + d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Tenant", err, reqLogger) + return reconcile.Result{}, err + } + + // Get Installation resource. + variant, install, err := utils.GetInstallation(context.Background(), d.client) + if err != nil { + if errors.IsNotFound(err) { + d.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) + return reconcile.Result{}, err + } + d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Installation", err, reqLogger) + return reconcile.Result{}, err + } + + // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. + if !d.tierWatchReady.IsReady() { + d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } + + // Ensure the allow-tigera tier exists, before rendering any network policies within it. + if err := d.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + if errors.IsNotFound(err) { + d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created", err, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } else { + d.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } + + managementClusterConnection, err := utils.GetManagementClusterConnection(ctx, d.client) + if err != nil { + d.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementClusterConnection", err, reqLogger) + return reconcile.Result{}, err + } + if managementClusterConnection != nil { + // Dashboard job installer is only relevant for management and standalone clusters. If this is a managed cluster, we can + // simply return early. 
+		reqLogger.V(1).Info("Not installing dashboard job installer on managed cluster")
+		return reconcile.Result{}, nil
+	}
+
+	pullSecrets, err := utils.GetNetworkingPullSecrets(install, d.client)
+	if err != nil {
+		d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while retrieving the pull secrets", err, reqLogger)
+		return reconcile.Result{}, err
+	}
+
+	// Get LogStorage resource.
+	logStorage := &operatorv1.LogStorage{}
+	key := utils.DefaultTSEEInstanceKey
+	err = d.client.Get(ctx, key, logStorage)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			d.status.OnCRNotFound()
+			return reconcile.Result{}, nil
+		}
+		d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying LogStorage", err, reqLogger)
+		return reconcile.Result{}, err
+	}
+
+	// Determine where to access Kibana.
+	kibanaHost := "tigera-secure-kb-http.tigera-kibana.svc"
+	kibanaPort := "5601"
+	kibanaScheme := "https"
+
+	var externalKibanaSecret *corev1.Secret
+	if !d.elasticExternal {
+		// Wait for Elasticsearch to be installed and available.
+		elasticsearch, err := utils.GetElasticsearch(ctx, d.client)
+		if err != nil {
+			d.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Elasticsearch", err, reqLogger)
+			return reconcile.Result{}, err
+		}
+		if elasticsearch == nil || elasticsearch.Status.Phase != esv1.ElasticsearchReadyPhase {
+			d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", nil, reqLogger)
+			return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
+		}
+	} else {
+		// If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint.
+		if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" {
+			reqLogger.Error(nil, "Kibana URL must be specified for this tenant")
+			d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL must be specified for this tenant", nil, reqLogger)
+			return reconcile.Result{}, nil
+		}
+
+		// Determine the host and port from the URL.
+		url, err := url.Parse(tenant.Spec.Elastic.KibanaURL)
+		if err != nil {
+			reqLogger.Error(err, "Kibana URL is invalid")
+			d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL is invalid", err, reqLogger)
+			return reconcile.Result{}, nil
+		}
+		kibanaScheme = url.Scheme
+		kibanaHost = url.Hostname()
+		kibanaPort = url.Port()
+
+		if tenant.ElasticMTLS() {
+			// If mTLS is enabled, get the secret containing the CA and client certificate.
+			externalKibanaSecret = &corev1.Secret{}
+			err = d.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, externalKibanaSecret)
+			if err != nil {
+				reqLogger.Error(err, "Failed to read external Kibana client certificate secret")
+				d.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Kibana client certificate secret to be available", err, reqLogger)
+				return reconcile.Result{}, err
+			}
+		}
+	}
+
+	// Query the username and password this Dashboards Installer instance should use to authenticate with Elasticsearch.
+	// For multi-tenant systems, credentials are created by the elasticsearch users controller.
+	// For single-tenant systems, these are created by es-kube-controllers.
+ key = types.NamespacedName{Name: dashboards.ElasticCredentialsSecret, Namespace: helper.InstallNamespace()} + credentials := corev1.Secret{} + if err = d.client.Get(ctx, key, &credentials); err != nil && !errors.IsNotFound(err) { + d.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Error getting Secret %s", key), err, reqLogger) + return reconcile.Result{}, err + } else if errors.IsNotFound(err) { + d.status.SetDegraded(operatorv1.ResourceNotFound, fmt.Sprintf("Waiting for Dashboards credential Secret %s", key), err, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } + + // Collect the certificates we need to provision Dashboards. These will have been provisioned already by the ES secrets controller. + opts := []certificatemanager.Option{ + certificatemanager.WithLogger(reqLogger), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(d.client, install, d.clusterDomain, helper.TruthNamespace(), opts...) + if err != nil { + d.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) + return reconcile.Result{}, err + } + + // Query the trusted bundle from the namespace. + trustedBundle, err := cm.LoadTrustedBundle(ctx, d.client, helper.InstallNamespace()) + if err != nil { + d.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger) + return reconcile.Result{}, err + } + + cfg := &dashboards.Config{ + Installation: install, + PullSecrets: pullSecrets, + Namespace: helper.InstallNamespace(), + TrustedBundle: trustedBundle, + ClusterDomain: d.clusterDomain, + UsePSP: d.usePSP, + IsManaged: managementClusterConnection != nil, + Tenant: tenant, + KibanaHost: kibanaHost, + KibanaScheme: kibanaScheme, + KibanaPort: kibanaPort, + ExternalKibanaClientSecret: externalKibanaSecret, + Credentials: []*corev1.Secret{&credentials}, + } + dashboardsComponent := dashboards.Dashboards(cfg) + + if err := imageset.ApplyImageSet(ctx, d.client, variant, dashboardsComponent); err != nil { + d.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + return reconcile.Result{}, err + } + + // In standard installs, the LogStorage owns the dashboards. For multi-tenant, it's owned by the Tenant instance. + var hdler utils.ComponentHandler + if d.multiTenant { + hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, tenant) + } else { + hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, logStorage) + } + if err := hdler.CreateOrUpdateOrDelete(ctx, dashboardsComponent, d.status); err != nil { + d.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating / deleting resource", err, reqLogger) + return reconcile.Result{}, err + } + + d.status.ReadyToMonitor() + d.status.ClearDegraded() + + return reconcile.Result{}, nil +} diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller_test.go b/pkg/controller/logstorage/dashboards/dashboards_controller_test.go new file mode 100644 index 0000000000..8b71ef796e --- /dev/null +++ b/pkg/controller/logstorage/dashboards/dashboards_controller_test.go @@ -0,0 +1,511 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dashboards + +import ( + "context" + "fmt" + + "github.com/tigera/operator/pkg/render/logstorage/dashboards" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/stretchr/testify/mock" + + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/test" + admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/tigera/operator/pkg/apis" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/components" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/controller/logstorage/initializer" + "github.com/tigera/operator/pkg/controller/options" + "github.com/tigera/operator/pkg/controller/status" + "github.com/tigera/operator/pkg/controller/utils" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/render" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var successResult = reconcile.Result{} + +func NewDashboardsControllerWithShims( + cli client.Client, + scheme *runtime.Scheme, + status status.StatusManager, + provider operatorv1.Provider, + clusterDomain string, + multiTenant bool, + externalElastic bool, +) (*DashboardsSubController, error) { + opts := options.AddOptions{ + DetectedProvider: provider, + ClusterDomain: clusterDomain, + ShutdownContext: context.TODO(), + MultiTenant: multiTenant, + ElasticExternal: externalElastic, + } + + r := &DashboardsSubController{ + client: cli, + scheme: scheme, + status: status, + clusterDomain: opts.ClusterDomain, + multiTenant: opts.MultiTenant, + elasticExternal: opts.ElasticExternal, + tierWatchReady: &utils.ReadyFlag{}, + } + r.tierWatchReady.MarkAsReady() + r.status.Run(opts.ShutdownContext) + return r, nil +} + +var _ = Describe("LogStorage Dashboards controller", func() { + var ( + cli client.Client + scheme *runtime.Scheme + ctx context.Context + install *operatorv1.Installation + mockStatus *status.MockStatus + r *DashboardsSubController + ) + + BeforeEach(func() { + // This BeforeEach contains common preparation for all tests - both single-tenant and multi-tenant. + // Any test-specific preparation should be done in subsequen BeforeEach blocks in the Contexts below. 
+ scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(storagev1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(batchv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(admissionv1beta1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + ctx = context.Background() + cli = fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create a basic Installation. + var replicas int32 = 2 + install = &operatorv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Status: operatorv1.InstallationStatus{ + Variant: operatorv1.TigeraSecureEnterprise, + Computed: &operatorv1.InstallationSpec{}, + }, + Spec: operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + Variant: operatorv1.TigeraSecureEnterprise, + Registry: "some.registry.org/", + }, + } + Expect(cli.Create(ctx, install)).ShouldNot(HaveOccurred()) + + // Create a basic LogStorage. + ls := &operatorv1.LogStorage{} + ls.Name = "tigera-secure" + ls.Status.State = operatorv1.TigeraStatusReady + initializer.FillDefaults(ls) + Expect(cli.Create(ctx, ls)).ShouldNot(HaveOccurred()) + + // Create a basic Elasticsearch instance. + es := &esv1.Elasticsearch{} + es.Name = "tigera-secure" + es.Namespace = render.ElasticsearchNamespace + es.Status.Phase = esv1.ElasticsearchReadyPhase + Expect(cli.Create(ctx, es)).ShouldNot(HaveOccurred()) + + // Create the allow-tigera Tier, since the controller blocks on its existence. + tier := &v3.Tier{ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}} + Expect(cli.Create(ctx, tier)).ShouldNot(HaveOccurred()) + }) + + Context("Zero tenant", func() { + BeforeEach(func() { + mockStatus = &status.MockStatus{} + mockStatus.On("Run").Return() + mockStatus.On("AddDaemonsets", mock.Anything) + mockStatus.On("AddDeployments", mock.Anything) + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return() + mockStatus.On("AddCronJobs", mock.Anything) + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") + mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockStatus.On("ClearDegraded") + + // Create a CA secret for the test, and create its KeyPair. + cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, common.OperatorNamespace(), certificatemanager.AllowCACreation()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, cm.KeyPair().Secret(common.OperatorNamespace()))).ShouldNot(HaveOccurred()) + + // Create secrets needed for successful installation. + bundle := cm.CreateTrustedBundle() + Expect(cli.Create(ctx, bundle.ConfigMap(render.ElasticsearchNamespace))).ShouldNot(HaveOccurred()) + + // Create the ES user secret. Generally this is created by either es-kube-controllers or the user controller in this operator. + userSecret := &corev1.Secret{} + userSecret.Name = dashboards.ElasticCredentialsSecret + userSecret.Namespace = render.ElasticsearchNamespace + userSecret.Data = map[string][]byte{"username": []byte("test-username"), "password": []byte("test-password")} + Expect(cli.Create(ctx, userSecret)).ShouldNot(HaveOccurred()) + + // Create the reconciler for the tests. 
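+			// The final two arguments are multiTenant=false and externalElastic=false,
+			// i.e. the zero-tenant case backed by an in-cluster Elasticsearch.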
+ r, err = NewDashboardsControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false, false) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should wait for the cluster CA to be provisioned", func() { + // Delete the CA secret for this test. + caSecret := &corev1.Secret{} + caSecret.Name = certificatemanagement.CASecretName + caSecret.Namespace = common.OperatorNamespace() + Expect(cli.Delete(ctx, caSecret)).ShouldNot(HaveOccurred()) + + // Run the reconciler. + _, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("CA secret")) + }) + + It("should reconcile resources for a standalone cluster/management cluster", func() { + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // Check that K8s Job was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: render.ElasticsearchNamespace, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + }) + + It("should not reconcile resources for a managed cluster", func() { + managementClusterConnection := &operatorv1.ManagementClusterConnection{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tigera-secure", + }, + } + Expect(cli.Create(ctx, managementClusterConnection)).ShouldNot(HaveOccurred()) + + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // Check that K8s Job was not created + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: render.ElasticsearchNamespace, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(HaveOccurred()) + }) + + It("should use images from ImageSet", func() { + Expect(cli.Create(ctx, &operatorv1.ImageSet{ + ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, + Spec: operatorv1.ImageSetSpec{ + Images: []operatorv1.Image{ + {Image: "tigera/intrusion-detection-job-installer", Digest: "sha256:dashboardhash"}, + {Image: "tigera/key-cert-provisioner", Digest: "sha256:deadbeef0123456789"}, + }, + }, + })).ToNot(HaveOccurred()) + + // Reconcile the resources + result, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: render.ElasticsearchNamespace, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + dashboardInstaller := test.GetContainer(dashboardJob.Spec.Template.Spec.Containers, dashboards.Name) + Expect(dashboardInstaller).ToNot(BeNil()) + Expect(dashboardInstaller.Image).To(Equal(fmt.Sprintf("some.registry.org/%s@%s", components.ComponentElasticTseeInstaller.Image, "sha256:dashboardhash"))) + }) + }) + + Context("Multi-tenant", func() { + var tenantNS string + var tenant *operatorv1.Tenant + + BeforeEach(func() { + // Create the tenant Namespace. 
+ tenantNS = "tenant-namespace" + Expect(cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tenantNS}})).ShouldNot(HaveOccurred()) + + // Create the Tenant object. + tenant = &operatorv1.Tenant{} + tenant.Name = "default" + tenant.Namespace = tenantNS + tenant.Spec.ID = "test-tenant-id" + tenant.Spec.Indices = []operatorv1.Index{ + {BaseIndexName: "calico_alerts", DataType: operatorv1.DataTypeAlerts}, + {BaseIndexName: "calico_auditlogs", DataType: operatorv1.DataTypeAuditLogs}, + {BaseIndexName: "calico_bgplogs", DataType: operatorv1.DataTypeBGPLogs}, + {BaseIndexName: "calico_compliance_benchmarks", DataType: operatorv1.DataTypeComplianceBenchmarks}, + {BaseIndexName: "calico_compliance_reports", DataType: operatorv1.DataTypeComplianceReports}, + {BaseIndexName: "calico_compliance_snapshots", DataType: operatorv1.DataTypeComplianceSnapshots}, + {BaseIndexName: "calico_dnslogs", DataType: operatorv1.DataTypeDNSLogs}, + {BaseIndexName: "calico_flowlogs", DataType: operatorv1.DataTypeFlowLogs}, + {BaseIndexName: "calico_L7logs", DataType: operatorv1.DataTypeL7Logs}, + {BaseIndexName: "calico_runtime_reports", DataType: operatorv1.DataTypeRuntimeReports}, + {BaseIndexName: "calico_threat_feeds_domain_name_set", DataType: operatorv1.DataTypeThreatFeedsDomainSet}, + {BaseIndexName: "calico_threat_feeds_ip_set", DataType: operatorv1.DataTypeThreatFeedsIPSet}, + {BaseIndexName: "calico_waf", DataType: operatorv1.DataTypeWAFLogs}, + } + Expect(cli.Create(ctx, tenant)).ShouldNot(HaveOccurred()) + + mockStatus = &status.MockStatus{} + mockStatus.On("Run").Return() + mockStatus.On("AddDaemonsets", mock.Anything) + mockStatus.On("AddDeployments", mock.Anything) + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return() + mockStatus.On("AddCronJobs", mock.Anything) + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") + mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockStatus.On("ClearDegraded") + + // Create a CA secret for the test, and create its KeyPair. + opts := []certificatemanager.Option{ + certificatemanager.AllowCACreation(), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, tenantNS, opts...) + Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, cm.KeyPair().Secret(tenantNS))).ShouldNot(HaveOccurred()) + + // Create secrets needed for successful installation. + bundle := cm.CreateTrustedBundle() + Expect(cli.Create(ctx, bundle.ConfigMap(tenantNS))).ShouldNot(HaveOccurred()) + + // Create the ES user secret. Generally this is created by either es-kube-controllers or the user controller in this operator. + userSecret := &corev1.Secret{} + userSecret.Name = dashboards.ElasticCredentialsSecret + userSecret.Namespace = tenantNS + userSecret.Data = map[string][]byte{"username": []byte("test-username"), "password": []byte("test-password")} + Expect(cli.Create(ctx, userSecret)).ShouldNot(HaveOccurred()) + + // Create the reconciler for the test. + r, err = NewDashboardsControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true, false) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should wait for the tenant CA to be provisioned", func() { + // Delete the CA secret for this test. 
+ caSecret := &corev1.Secret{} + caSecret.Name = certificatemanagement.TenantCASecretName + caSecret.Namespace = tenantNS + Expect(cli.Delete(ctx, caSecret)).ShouldNot(HaveOccurred()) + + // Run the reconciler. + _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("CA secret")) + }) + + It("should not reconcile any resources if no Namespace was given", func() { + // Run the reconciler, passing in a Request with no Namespace. It should return successfully. + _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default"}}) + Expect(err).ShouldNot(HaveOccurred()) + + // Check that nothing was installed on the cluster. + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: render.ElasticsearchNamespace, + }, + } + err = cli.Get(ctx, types.NamespacedName{Name: dashboardJob.Name, Namespace: dashboardJob.Namespace}, &dashboardJob) + Expect(err).Should(HaveOccurred()) + Expect(errors.IsNotFound(err)).Should(BeTrue()) + + // Check that OnCRFound was not called. + mockStatus.AssertNotCalled(GinkgoT(), "OnCRFound") + }) + + It("should reconcile resources for a tenant", func() { + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // Check that Dashboards was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: tenantNS, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + }) + + It("should use images from ImageSet", func() { + Expect(cli.Create(ctx, &operatorv1.ImageSet{ + ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, + Spec: operatorv1.ImageSetSpec{ + Images: []operatorv1.Image{ + {Image: "tigera/intrusion-detection-job-installer", Digest: "sha256:dashboardhash"}, + {Image: "tigera/key-cert-provisioner", Digest: "sha256:deadbeef0123456789"}, + }, + }, + })).ToNot(HaveOccurred()) + + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: tenantNS, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + job := test.GetContainer(dashboardJob.Spec.Template.Spec.Containers, dashboards.Name) + Expect(job).ToNot(BeNil()) + Expect(job.Image).To(Equal(fmt.Sprintf("some.registry.org/%s@%s", components.ComponentElasticTseeInstaller.Image, "sha256:dashboardhash"))) + }) + + Context("External Kibana mode", func() { + BeforeEach(func() { + // Delete the Elasticsearch instance, since this is only used for ECK mode. 
+ es := &esv1.Elasticsearch{} + es.Name = "tigera-secure" + es.Namespace = render.ElasticsearchNamespace + Expect(cli.Delete(ctx, es)).ShouldNot(HaveOccurred()) + + // Set the reconcile to run in external ES mode. + r.elasticExternal = true + r.multiTenant = true + + // Set the elasticsearch configuration for the tenant. + tenant.Spec.Elastic = &operatorv1.TenantElasticSpec{KibanaURL: "https://external-kibana:5601"} + Expect(cli.Update(ctx, tenant)).ShouldNot(HaveOccurred()) + }) + + It("should reconcile resources", func() { + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tenant.Name, Namespace: tenant.Namespace}}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // SetDegraded should not have been called. + mockStatus.AssertNumberOfCalls(GinkgoT(), "SetDegraded", 0) + + // Check that Dashboards was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: tenantNS, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + + // Check that the correct External ES environment variables are set. + dashboards := test.GetContainer(dashboardJob.Spec.Template.Spec.Containers, dashboards.Name) + Expect(dashboards).ToNot(BeNil()) + Expect(dashboards.Env).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SCHEME", Value: "https"})) + Expect(dashboards.Env).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "external-kibana"})) + Expect(dashboards.Env).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "5601"})) + }) + + It("should reconcile with mTLS enabled", func() { + // Update the tenant with mTLS + tenant.Spec.Elastic.MutualTLS = true + tenant.Spec.Elastic.KibanaURL = "https://external-kibana:5601" + Expect(cli.Update(ctx, tenant)).ShouldNot(HaveOccurred()) + + // Create a dummy secret mocking the client certificates. + esClientSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, + Data: map[string][]byte{"client.crt": []byte("cert"), "client.key": []byte("key")}, + } + Expect(cli.Create(ctx, esClientSecret)).ShouldNot(HaveOccurred()) + + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tenant.Name, Namespace: tenant.Namespace}}) + Expect(err).NotTo(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // SetDegraded should not have been called. + mockStatus.AssertNumberOfCalls(GinkgoT(), "SetDegraded", 0) + + // Check that Dashboards was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. + dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: tenantNS, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + + // Expect the correct volume and mounts to be present. 
+ job := test.GetContainer(dashboardJob.Spec.Template.Spec.Containers, dashboards.Name) + Expect(job).ToNot(BeNil()) + Expect(job.VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: "tigera-secure-external-es-certs", + MountPath: "/certs/kibana/mtls", + ReadOnly: true, + })) + }) + }) + }) +}) diff --git a/pkg/controller/logstorage/dashboards/dashboards_suite_test.go b/pkg/controller/logstorage/dashboards/dashboards_suite_test.go new file mode 100644 index 0000000000..db60801bcd --- /dev/null +++ b/pkg/controller/logstorage/dashboards/dashboards_suite_test.go @@ -0,0 +1,34 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dashboards + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/reporters" + uzap "go.uber.org/zap" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestStatus(t *testing.T) { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(uzap.NewAtomicLevelAt(uzap.DebugLevel)))) + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("../../../report/ut/logstorage_dashboards_controller_suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "pkg/controller/logstorage/dashboards Suite", []Reporter{junitReporter}) +} diff --git a/pkg/render/common/networkpolicy/networkpolicy.go b/pkg/render/common/networkpolicy/networkpolicy.go index 88e947615f..a98fe05871 100644 --- a/pkg/render/common/networkpolicy/networkpolicy.go +++ b/pkg/render/common/networkpolicy/networkpolicy.go @@ -242,6 +242,14 @@ func (h *NetworkPolicyHelper) LinseedSourceEntityRule() v3.EntityRule { return CreateSourceEntityRule(h.namespace("tigera-elasticsearch"), "tigera-linseed") } +func (h *NetworkPolicyHelper) DashboardInstallerEntityRule() v3.EntityRule { + return CreateEntityRule(h.namespace("tigera-elasticsearch"), "dashboards-installer") +} + +func (h *NetworkPolicyHelper) DashboardInstallerSourceEntityRule() v3.EntityRule { + return CreateSourceEntityRule(h.namespace("tigera-elasticsearch"), "dashboards-installer") +} + func (h *NetworkPolicyHelper) LinseedServiceSelectorEntityRule() v3.EntityRule { return CreateServiceSelectorEntityRule(h.namespace("tigera-elasticsearch"), "tigera-linseed") } diff --git a/pkg/render/intrusion_detection.go b/pkg/render/intrusion_detection.go index 6a11fc8335..dd824565df 100644 --- a/pkg/render/intrusion_detection.go +++ b/pkg/render/intrusion_detection.go @@ -20,10 +20,9 @@ import ( "strings" "time" - "github.com/tigera/operator/pkg/ptr" + batchv1 "k8s.io/api/batch/v1" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" @@ -151,12 +150,6 @@ func (c *intrusionDetectionComponent) ResolveImages(is *operatorv1.ImageSet) err prefix := c.cfg.Installation.ImagePrefix var errMsgs []string var err error - if !c.cfg.ManagedCluster { - 
c.jobInstallerImage, err = components.GetReference(components.ComponentElasticTseeInstaller, reg, path, prefix, is) - if err != nil { - errMsgs = append(errMsgs, err.Error()) - } - } c.controllerImage, err = components.GetReference(components.ComponentIntrusionDetectionController, reg, path, prefix, is) if err != nil { @@ -195,7 +188,6 @@ func (c *intrusionDetectionComponent) Objects() ([]client.Object, []client.Objec objs = append(objs, c.intrusionDetectionServiceAccount(), - c.intrusionDetectionJobServiceAccount(), c.intrusionDetectionClusterRole(), c.intrusionDetectionClusterRoleBinding(), c.intrusionDetectionRole(), @@ -276,15 +268,12 @@ func (c *intrusionDetectionComponent) Objects() ([]client.Object, []client.Objec // When FIPS mode is enabled, we currently disable our python based images. if !c.cfg.ManagedCluster { idsObjs := []client.Object{ + c.intrusionDetectionJobServiceAccount(), c.intrusionDetectionElasticsearchAllowTigeraPolicy(), c.intrusionDetectionElasticsearchJob(), } - if !operatorv1.IsFIPSModeEnabled(c.cfg.Installation.FIPSMode) { - objs = append(objs, idsObjs...) - } else { - objsToDelete = append(objsToDelete, idsObjs...) - } + objsToDelete = append(objsToDelete, idsObjs...) } if c.cfg.UsePSP { @@ -317,53 +306,12 @@ func (c *intrusionDetectionComponent) Ready() bool { } func (c *intrusionDetectionComponent) intrusionDetectionElasticsearchJob() *batchv1.Job { - podTemplate := relasticsearch.DecorateAnnotations(&corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"job-name": IntrusionDetectionInstallerJobName}, - }, - Spec: corev1.PodSpec{ - Tolerations: c.cfg.Installation.ControlPlaneTolerations, - NodeSelector: c.cfg.Installation.ControlPlaneNodeSelector, - // This value needs to be set to never. The PodFailurePolicy will still ensure that this job will run until completion. - RestartPolicy: corev1.RestartPolicyNever, - ImagePullSecrets: secret.GetReferenceList(c.cfg.PullSecrets), - Containers: []corev1.Container{ - c.intrusionDetectionJobContainer(), - }, - Volumes: []corev1.Volume{c.cfg.TrustedCertBundle.Volume()}, - ServiceAccountName: IntrusionDetectionInstallerJobName, - }, - }, c.cfg.ESSecrets).(*corev1.PodTemplateSpec) - return &batchv1.Job{ TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: IntrusionDetectionInstallerJobName, Namespace: IntrusionDetectionNamespace, }, - Spec: batchv1.JobSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "job-name": IntrusionDetectionInstallerJobName, - }, - }, - Template: *podTemplate, - // PodFailurePolicy is not available for k8s < 1.26; setting BackoffLimit to a higher number (default is 6) - // to lessen the frequency of installation failures when responses from Elastic Search takes more time. - BackoffLimit: ptr.Int32ToPtr(30), - PodFailurePolicy: &batchv1.PodFailurePolicy{ - Rules: []batchv1.PodFailurePolicyRule{ - // We don't want the job to fail, so we keep retrying by ignoring incrementing the backoff. 
- { - Action: "Ignore", - OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{ - Operator: "NotIn", - Values: []int32{0}, - }, - }, - }, - }, - }, } } @@ -1196,11 +1144,6 @@ func (c *intrusionDetectionComponent) intrusionDetectionPSPClusterRoleBinding() Name: IntrusionDetectionName, Namespace: IntrusionDetectionNamespace, }, - { - Kind: "ServiceAccount", - Name: IntrusionDetectionInstallerJobName, - Namespace: IntrusionDetectionNamespace, - }, }, } } @@ -1429,27 +1372,12 @@ func (c *intrusionDetectionComponent) intrusionDetectionControllerAllowTigeraPol } func (c *intrusionDetectionComponent) intrusionDetectionElasticsearchAllowTigeraPolicy() *v3.NetworkPolicy { - egressRules := []v3.Rule{} - egressRules = networkpolicy.AppendDNSEgressRules(egressRules, c.cfg.Openshift) - egressRules = append(egressRules, v3.Rule{ - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), - }) - return &v3.NetworkPolicy{ TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: IntrusionDetectionInstallerPolicyName, Namespace: IntrusionDetectionNamespace, }, - Spec: v3.NetworkPolicySpec{ - Order: &networkpolicy.HighPrecedenceOrder, - Tier: networkpolicy.TigeraComponentTierName, - Selector: fmt.Sprintf("job-name == '%s'", IntrusionDetectionInstallerJobName), - Types: []v3.PolicyType{v3.PolicyTypeEgress}, - Egress: egressRules, - }, } } diff --git a/pkg/render/intrusion_detection_test.go b/pkg/render/intrusion_detection_test.go index e8be89a329..b4ba03378c 100644 --- a/pkg/render/intrusion_detection_test.go +++ b/pkg/render/intrusion_detection_test.go @@ -27,7 +27,6 @@ import ( . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime" @@ -72,8 +71,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { expectedIDPolicyForManaged := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/intrusion-detection-controller_managed.json") expectedIDPolicyForUnmanagedOCP := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/intrusion-detection-controller_unmanaged_ocp.json") expectedIDPolicyForManagedOCP := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/intrusion-detection-controller_managed_ocp.json") - expectedIDInstallerPolicy := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/intrusion-detection-elastic.json") - expectedIDInstallerPolicyForOCP := testutils.GetExpectedPolicyFromFile("testutils/expected_policies/intrusion-detection-elastic_ocp.json") BeforeEach(func() { scheme := runtime.NewScheme() @@ -120,7 +117,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "allow-tigera.intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "allow-tigera.default-deny", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-controller", 
ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, @@ -136,8 +132,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "network.lateral.originate", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, {name: "dns.servfail", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, {name: "dns.dos", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, - {name: "allow-tigera.intrusion-detection-elastic", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "batch", version: "v1", kind: "Job"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, @@ -155,7 +149,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { // Should mount ManagerTLSSecret for non-managed clusters idc := rtest.GetResource(resources, "intrusion-detection-controller", render.IntrusionDetectionNamespace, "apps", "v1", "Deployment").(*appsv1.Deployment) - idji := rtest.GetResource(resources, "intrusion-detection-es-job-installer", render.IntrusionDetectionNamespace, "batch", "v1", "Job").(*batchv1.Job) Expect(idc.Spec.Template.Spec.Containers).To(HaveLen(2)) idcExpectedEnvVars := []corev1.EnvVar{ {Name: "MULTI_CLUSTER_FORWARDING_CA", Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt"}, @@ -174,56 +167,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {Name: "ELASTIC_CA", Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt"}, } Expect(idc.Spec.Template.Spec.Containers[0].Env).To(Equal(idcExpectedEnvVars)) - - Expect(idji.Spec.Template.Spec.Containers).To(HaveLen(1)) - idjiExpectedEnvVars := []corev1.EnvVar{ - {Name: "KIBANA_HOST", Value: "tigera-secure-es-gateway-http.tigera-elasticsearch.svc"}, - {Name: "KIBANA_PORT", Value: "5601", ValueFrom: nil}, - {Name: "KIBANA_SCHEME", Value: "https"}, - {Name: "START_XPACK_TRIAL", Value: "false"}, - { - Name: "USER", ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "tigera-ee-installer-elasticsearch-access", - }, - Key: "username", - }, - }, - }, - { - Name: "PASSWORD", ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "tigera-ee-installer-elasticsearch-access", - }, - Key: "password", - }, - }, - }, - {Name: "KB_CA_CERT", Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt"}, - {Name: "FIPS_MODE_ENABLED", Value: "false"}, - {Name: "ELASTIC_INDEX_SUFFIX", Value: "clusterTestName"}, - {Name: "ELASTIC_USER", ValueFrom: secret.GetEnvVarSource(render.ElasticsearchIntrusionDetectionJobUserSecret, "username", false)}, - {Name: "ELASTIC_PASSWORD", ValueFrom: secret.GetEnvVarSource(render.ElasticsearchIntrusionDetectionJobUserSecret, "password", false)}, - } - Expect(idji.Spec.Template.Spec.Containers[0].Env).To(Equal(idjiExpectedEnvVars)) - - 
Expect(*idji.Spec.Template.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation).To(BeFalse()) - Expect(*idji.Spec.Template.Spec.Containers[0].SecurityContext.Privileged).To(BeFalse()) - Expect(*idji.Spec.Template.Spec.Containers[0].SecurityContext.RunAsGroup).To(BeEquivalentTo(10001)) - Expect(*idji.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot).To(BeTrue()) - Expect(*idji.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser).To(BeEquivalentTo(10001)) - Expect(idji.Spec.Template.Spec.Containers[0].SecurityContext.Capabilities).To(Equal( - &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - )) - Expect(idji.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile).To(Equal( - &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - })) - Expect(idc.Spec.Template.Spec.Containers[0].VolumeMounts).To(HaveLen(2)) Expect(idc.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name).To(Equal("tigera-ca-bundle")) Expect(idc.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath).To(Equal("/etc/pki/tls/certs")) @@ -359,7 +302,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "allow-tigera.intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "allow-tigera.default-deny", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, @@ -375,8 +317,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "network.lateral.originate", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, {name: "dns.servfail", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, {name: "dns.dos", ns: "", group: "projectcalico.org", version: "v3", kind: "GlobalAlertTemplate"}, - {name: "allow-tigera.intrusion-detection-elastic", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "batch", version: "v1", kind: "Job"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-psp", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, @@ -427,7 +367,7 @@ var _ = Describe("Intrusion Detection rendering tests", func() { })) }) - It("should not render intrusion-detection-es-job-installer and should disable GlobalAlert controller when cluster is managed", func() { + It("should disable GlobalAlert controller when cluster is managed", func() { cfg.Openshift = notOpenshift cfg.ManagedCluster = managedCluster @@ -446,7 +386,6 @@ var _ = 
Describe("Intrusion Detection rendering tests", func() { {name: "allow-tigera.intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "allow-tigera.default-deny", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, @@ -524,9 +463,7 @@ var _ = Describe("Intrusion Detection rendering tests", func() { component := render.IntrusionDetection(cfg) resources, _ := component.Objects() idc := rtest.GetResource(resources, "intrusion-detection-controller", render.IntrusionDetectionNamespace, "apps", "v1", "Deployment").(*appsv1.Deployment) - job := rtest.GetResource(resources, render.IntrusionDetectionInstallerJobName, render.IntrusionDetectionNamespace, "batch", "v1", "Job").(*batchv1.Job) Expect(idc.Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) - Expect(job.Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) }) It("should apply controlPlaneTolerations correctly", func() { @@ -541,9 +478,7 @@ var _ = Describe("Intrusion Detection rendering tests", func() { component := render.IntrusionDetection(cfg) resources, _ := component.Objects() idc := rtest.GetResource(resources, "intrusion-detection-controller", render.IntrusionDetectionNamespace, "apps", "v1", "Deployment").(*appsv1.Deployment) - job := rtest.GetResource(resources, render.IntrusionDetectionInstallerJobName, render.IntrusionDetectionNamespace, "batch", "v1", "Job").(*batchv1.Job) Expect(idc.Spec.Template.Spec.Tolerations).To(ConsistOf(t)) - Expect(job.Spec.Template.Spec.Tolerations).To(ConsistOf(t)) }) Context("allow-tigera rendering", func() { @@ -560,8 +495,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { expectedIDPolicyForManaged, expectedIDPolicyForManagedOCP, ) - } else if !scenario.ManagedCluster && policyName.Name == "allow-tigera.intrusion-detection-elastic" { - return testutils.SelectPolicyByProvider(scenario, expectedIDInstallerPolicy, expectedIDInstallerPolicyForOCP) } return nil @@ -613,7 +546,6 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "allow-tigera.intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "allow-tigera.default-deny", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, {name: "intrusion-detection-controller", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, {name: "intrusion-detection-controller", ns: "", group: 
"rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, {name: "intrusion-detection-controller", ns: "tigera-intrusion-detection", group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, @@ -702,6 +634,7 @@ var _ = Describe("Intrusion Detection rendering tests", func() { {name: "tigera.io.detectors.training", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "PodTemplate"}, {name: "tigera.io.detectors.detection", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "PodTemplate"}, {name: "anomaly-detection-api", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "", version: "v1", kind: "ServiceAccount"}, {name: "allow-tigera.intrusion-detection-elastic", ns: "tigera-intrusion-detection", group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, {name: "intrusion-detection-es-job-installer", ns: "tigera-intrusion-detection", group: "batch", version: "v1", kind: "Job"}, {name: "tigera-linseed", ns: "tigera-intrusion-detection", group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, diff --git a/pkg/render/logstorage.go b/pkg/render/logstorage.go index 820daa0bee..a4409de8ea 100644 --- a/pkg/render/logstorage.go +++ b/pkg/render/logstorage.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020-2023 Tigera, Inc. All rights reserved. +// Copyright (c) 2020-2024 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -1760,6 +1760,11 @@ func (es *elasticsearchComponent) kibanaAllowTigeraPolicy() *v3.NetworkPolicy { Protocol: &networkpolicy.TCPProtocol, Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), }, + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: networkpolicy.DefaultHelper().DashboardInstallerEntityRule(), + }, }...) kibanaPortIngressDestination := v3.EntityRule{ @@ -1801,6 +1806,12 @@ func (es *elasticsearchComponent) kibanaAllowTigeraPolicy() *v3.NetworkPolicy { Source: networkpolicy.DefaultHelper().ESGatewaySourceEntityRule(), Destination: kibanaPortIngressDestination, }, + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: networkpolicy.DefaultHelper().DashboardInstallerSourceEntityRule(), + Destination: kibanaPortIngressDestination, + }, { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, diff --git a/pkg/render/logstorage/dashboards/dashboards.go b/pkg/render/logstorage/dashboards/dashboards.go new file mode 100644 index 0000000000..a6b1f40a75 --- /dev/null +++ b/pkg/render/logstorage/dashboards/dashboards.go @@ -0,0 +1,379 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dashboards + +import ( + "fmt" + "strings" + + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/components" + "github.com/tigera/operator/pkg/ptr" + "github.com/tigera/operator/pkg/render" + relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" + rmeta "github.com/tigera/operator/pkg/render/common/meta" + "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" + "github.com/tigera/operator/pkg/render/common/secret" + "github.com/tigera/operator/pkg/render/common/securitycontext" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + Name string = "dashboards-installer" + ElasticCredentialsSecret string = "tigera-ee-installer-elasticsearch-access-gateway" + PolicyName = networkpolicy.TigeraComponentPolicyPrefix + Name +) + +func Dashboards(c *Config) render.Component { + return &dashboards{ + cfg: c, + namespace: c.Namespace, + } +} + +type dashboards struct { + image string + csrImage string + cfg *Config + + // Namespace in which to provision the namespaced resources. + namespace string +} + +// Config contains all the information needed to render the Dashboards component. +type Config struct { + // CustomResources provided by the user. + Installation *operatorv1.InstallationSpec + + // Pull secrets provided by the user. + PullSecrets []*corev1.Secret + + // Trusted bundle to use when validating client certificates. + TrustedBundle certificatemanagement.TrustedBundleRO + + // ClusterDomain to use when building service URLs. + ClusterDomain string + + // Whether this is a managed cluster + IsManaged bool + + // Whether the cluster supports pod security policies. + UsePSP bool + + // Namespace to install into. + Namespace string + + // Tenant configuration, if running for a particular tenant. + Tenant *operatorv1.Tenant + + // Secret containing client certificate and key for connecting to the Kibana. If configured, + // mTLS is used between Dashboards and the external Kibana. + ExternalKibanaClientSecret *corev1.Secret + + // Kibana service definition + KibanaHost string + KibanaPort string + KibanaScheme string + + // Credentials are used to provide annotations for elastic search users + Credentials []*corev1.Secret +} + +func (d *dashboards) ResolveImages(is *operatorv1.ImageSet) error { + reg := d.cfg.Installation.Registry + path := d.cfg.Installation.ImagePath + prefix := d.cfg.Installation.ImagePrefix + var err error + errMsgs := []string{} + + // Calculate the image(s) to use for Dashboards, given user registry configuration. 
+ d.image, err = components.GetReference(components.ComponentElasticTseeInstaller, reg, path, prefix, is) + if err != nil { + errMsgs = append(errMsgs, err.Error()) + } + + if d.cfg.Installation.CertificateManagement != nil { + d.csrImage, err = certificatemanagement.ResolveCSRInitImage(d.cfg.Installation, is) + if err != nil { + errMsgs = append(errMsgs, err.Error()) + } + } + if len(errMsgs) != 0 { + return fmt.Errorf(strings.Join(errMsgs, ",")) + } + return nil +} + +func (d *dashboards) Objects() (objsToCreate, objsToDelete []client.Object) { + if d.cfg.IsManaged || operatorv1.IsFIPSModeEnabled(d.cfg.Installation.FIPSMode) { + return nil, d.resources() + } + + return d.resources(), nil +} + +func (d *dashboards) resources() []client.Object { + resources := []client.Object{ + d.AllowTigeraPolicy(), + d.ServiceAccount(), + d.Job(), + } + + if d.cfg.UsePSP { + resources = append(resources, + d.PSPClusterRole(), + d.PSPClusterRoleBinding(), + d.PodSecurityPolicy(), + ) + } + return resources +} + +func (d *dashboards) AllowTigeraPolicy() *v3.NetworkPolicy { + egressRules := []v3.Rule{} + egressRules = networkpolicy.AppendDNSEgressRules(egressRules, d.cfg.Installation.KubernetesProvider == operatorv1.ProviderOpenShift) + egressRules = append(egressRules, v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: render.KibanaEntityRule, + }) + + return &v3.NetworkPolicy{ + TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, + ObjectMeta: metav1.ObjectMeta{ + Name: PolicyName, + Namespace: d.cfg.Namespace, + }, + Spec: v3.NetworkPolicySpec{ + Order: &networkpolicy.HighPrecedenceOrder, + Tier: networkpolicy.TigeraComponentTierName, + Selector: fmt.Sprintf("job-name == '%s'", Name), + Types: []v3.PolicyType{v3.PolicyTypeEgress}, + Egress: egressRules, + }, + } +} + +func (d *dashboards) Job() *batchv1.Job { + annotations := d.cfg.TrustedBundle.HashAnnotations() + if d.cfg.ExternalKibanaClientSecret != nil { + annotations["hash.operator.tigera.io/kibana-client-secret"] = rmeta.SecretsAnnotationHash(d.cfg.ExternalKibanaClientSecret) + } + + volumeMounts := append( + d.cfg.TrustedBundle.VolumeMounts(d.SupportedOSType()), + ) + + volumes := []corev1.Volume{ + d.cfg.TrustedBundle.Volume(), + } + + secretName := ElasticCredentialsSecret + + envVars := []corev1.EnvVar{ + { + Name: "KIBANA_HOST", + Value: d.cfg.KibanaHost, + }, + { + Name: "KIBANA_PORT", + Value: d.cfg.KibanaPort, + }, + { + Name: "KIBANA_SCHEME", + Value: d.cfg.KibanaScheme, + }, + { + // We no longer need to start the xpack trial from the installer pod. Logstorage + // now takes care of this in combination with the ECK operator (v1). + Name: "START_XPACK_TRIAL", + Value: "false", + }, + { + Name: "USER", + ValueFrom: secret.GetEnvVarSource(secretName, "username", false), + }, + { + Name: "PASSWORD", + ValueFrom: secret.GetEnvVarSource(secretName, "password", false), + }, + { + Name: "KB_CA_CERT", + Value: d.cfg.TrustedBundle.MountPath(), + }, + { + Name: "FIPS_MODE_ENABLED", + Value: operatorv1.IsFIPSModeEnabledString(d.cfg.Installation.FIPSMode), + }, + relasticsearch.ElasticUserEnvVar(ElasticCredentialsSecret), + relasticsearch.ElasticPasswordEnvVar(ElasticCredentialsSecret), + } + + if d.cfg.Tenant != nil { + envVars = append(envVars, corev1.EnvVar{ + Name: "KIBANA_SPACE_ID", + Value: d.cfg.Tenant.Spec.ID, + }) + } + + if d.cfg.ExternalKibanaClientSecret != nil { + // Add a volume for the required client certificate and key. 
+ volumes = append(volumes, corev1.Volume{ + Name: logstorage.ExternalCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: logstorage.ExternalCertsSecret, + }, + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: logstorage.ExternalCertsVolumeName, + MountPath: "/certs/kibana/mtls", + ReadOnly: true, + }) + + // Configure Dashboards to use the mounted client certificate and key. + envVars = append(envVars, corev1.EnvVar{Name: "KIBANA_MTLS_ENABLED", Value: "true"}) + envVars = append(envVars, corev1.EnvVar{Name: "KIBANA_CLIENT_KEY", Value: "/certs/kibana/mtls/client.key"}) + envVars = append(envVars, corev1.EnvVar{Name: "KIBANA_CLIENT_CERT", Value: "/certs/kibana/mtls/client.crt"}) + } + + podTemplate := relasticsearch.DecorateAnnotations(&corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"job-name": Name, "k8s-app": Name}, + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + Tolerations: d.cfg.Installation.ControlPlaneTolerations, + NodeSelector: d.cfg.Installation.ControlPlaneNodeSelector, + // This value needs to be set to Never. The PodFailurePolicy will still ensure that this job runs until completion. + RestartPolicy: corev1.RestartPolicyNever, + ImagePullSecrets: secret.GetReferenceList(d.cfg.PullSecrets), + Containers: []corev1.Container{ + { + Name: Name, + Image: d.image, + ImagePullPolicy: render.ImagePullPolicy(), + Env: envVars, + SecurityContext: securitycontext.NewNonRootContext(), + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + ServiceAccountName: Name, + }, + }, d.cfg.Credentials).(*corev1.PodTemplateSpec) + + return &batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + Namespace: d.namespace, + }, + Spec: batchv1.JobSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "job-name": Name, + }, + }, + Template: *podTemplate, + // PodFailurePolicy is not available for k8s < 1.26; setting BackoffLimit to a higher number (default is 6) + // to lessen the frequency of installation failures when responses from Elasticsearch take more time. + BackoffLimit: ptr.Int32ToPtr(30), + PodFailurePolicy: &batchv1.PodFailurePolicy{ + Rules: []batchv1.PodFailurePolicyRule{ + // We don't want the job to fail, so we keep retrying; failed pods are ignored rather than counted towards the backoff limit. 
+ { + Action: "Ignore", + OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{ + Operator: "NotIn", + Values: []int32{0}, + }, + }, + }, + }, + }, + } +} + +func (d *dashboards) ServiceAccount() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + Namespace: d.namespace, + }, + } +} + +func (d *dashboards) PSPClusterRole() *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + }, + Rules: []rbacv1.PolicyRule{ + { + // Allow access to the pod security policy in case this is enforced on the cluster + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + Verbs: []string{"use"}, + ResourceNames: []string{Name}, + }, + }, + } +} + +func (d *dashboards) PSPClusterRoleBinding() *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: Name, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: Name, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: Name, + Namespace: d.namespace, + }, + }, + } +} + +func (d *dashboards) PodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { + return podsecuritypolicy.NewBasePolicy(Name) +} + +func (d *dashboards) Ready() bool { + return true +} + +func (d *dashboards) SupportedOSType() rmeta.OSType { + return rmeta.OSTypeLinux +} diff --git a/pkg/render/logstorage/dashboards/dashboards_suite_test.go b/pkg/render/logstorage/dashboards/dashboards_suite_test.go new file mode 100644 index 0000000000..6cd0f9be05 --- /dev/null +++ b/pkg/render/logstorage/dashboards/dashboards_suite_test.go @@ -0,0 +1,30 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dashboards + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/onsi/ginkgo/reporters" +) + +func TestRender(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("../../../../report/ut/dashboards_suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "pkg/logstorage/dashboards Suite", []Reporter{junitReporter}) +} diff --git a/pkg/render/logstorage/dashboards/dashboards_test.go b/pkg/render/logstorage/dashboards/dashboards_test.go new file mode 100644 index 0000000000..a8dab84ccd --- /dev/null +++ b/pkg/render/logstorage/dashboards/dashboards_test.go @@ -0,0 +1,680 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dashboards + +import ( + "context" + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/tigera/operator/pkg/render/logstorage" + batchv1 "k8s.io/api/batch/v1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/apis" + "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/ptr" + "github.com/tigera/operator/pkg/render" + rtest "github.com/tigera/operator/pkg/render/common/test" + "github.com/tigera/operator/pkg/render/testutils" + "github.com/tigera/operator/pkg/tls/certificatemanagement" +) + +type resourceTestObj struct { + name string + ns string + typ runtime.Object + f func(resource runtime.Object) +} + +var _ = Describe("Dashboards rendering tests", func() { + Context("zero-tenant rendering", func() { + var installation *operatorv1.InstallationSpec + var replicas int32 + var cfg *Config + clusterDomain := "cluster.local" + expectedPolicy := testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/dashboards.json") + expectedPolicyForOpenshift := testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/dashboards_ocp.json") + + expectedResources := []resourceTestObj{ + {PolicyName, render.ElasticsearchNamespace, &v3.NetworkPolicy{}, nil}, + {Name, render.ElasticsearchNamespace, &corev1.ServiceAccount{}, nil}, + {Name, render.ElasticsearchNamespace, &batchv1.Job{}, nil}, + {Name, "", &rbacv1.ClusterRole{}, nil}, + {Name, "", &rbacv1.ClusterRoleBinding{}, nil}, + {Name, "", &policyv1beta1.PodSecurityPolicy{}, nil}, + } + + BeforeEach(func() { + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } + + replicas = 2 + bundle := getBundle(installation) + + cfg = &Config{ + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + TrustedBundle: bundle, + ClusterDomain: clusterDomain, + UsePSP: true, + Namespace: render.ElasticsearchNamespace, + KibanaHost: "tigera-secure-kb-http.tigera-kibana.svc", + KibanaScheme: "https", + KibanaPort: "5601", + } + }) + + It("should render a Dashboards Jobs and all supporting resources", func() { + component := Dashboards(cfg) + createResources, _ := component.Objects() + compareResources(createResources, expectedResources) + }) + + It("should render properly when PSP is not supported by the cluster", func() { + cfg.UsePSP = false + component := Dashboards(cfg) + 
Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + + // Should not contain any PodSecurityPolicies + for _, r := range resources { + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("PodSecurityPolicy")) + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("ClusterRole")) + Expect(r.GetObjectKind().GroupVersionKind().Kind).NotTo(Equal("ClusterRoleBinding")) + } + }) + + It("should apply controlPlaneNodeSelector correctly", func() { + installation.ControlPlaneNodeSelector = map[string]string{"foo": "bar"} + + component := Dashboards(cfg) + + resources, _ := component.Objects() + job, ok := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, "batch", "v1", "Job").(*batchv1.Job) + Expect(ok).To(BeTrue(), "Job not found") + Expect(job.Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + }) + + It("should apply controlPlaneTolerations correctly", func() { + t := corev1.Toleration{ + Key: "foo", + Operator: corev1.TolerationOpEqual, + Value: "bar", + } + + installation.ControlPlaneTolerations = []corev1.Toleration{t} + component := Dashboards(cfg) + + resources, _ := component.Objects() + job, ok := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, "batch", "v1", "Job").(*batchv1.Job) + Expect(ok).To(BeTrue(), "Job not found") + Expect(job.Spec.Template.Spec.Tolerations).To(ConsistOf(t)) + }) + + Context("allow-tigera rendering", func() { + policyName := types.NamespacedName{Name: "allow-tigera.dashboards-installer", Namespace: "tigera-elasticsearch"} + + getExpectedPolicy := func(scenario testutils.AllowTigeraScenario) *v3.NetworkPolicy { + if scenario.ManagedCluster { + return nil + } + + return testutils.SelectPolicyByProvider(scenario, expectedPolicy, expectedPolicyForOpenshift) + } + + DescribeTable("should render allow-tigera policy", + func(scenario testutils.AllowTigeraScenario) { + if scenario.Openshift { + cfg.Installation.KubernetesProvider = operatorv1.ProviderOpenShift + } else { + cfg.Installation.KubernetesProvider = operatorv1.ProviderNone + } + component := Dashboards(cfg) + resources, _ := component.Objects() + + policy := testutils.GetAllowTigeraPolicyFromResources(policyName, resources) + expectedPolicy := getExpectedPolicy(scenario) + if !cmp.Equal(policy, expectedPolicy) { + fmt.Fprintf(GinkgoWriter, "Rendered policy does not match the expected policy:\n%s\n", cmp.Diff(policy, expectedPolicy)) + } + Expect(policy).To(Equal(expectedPolicy)) + }, + // Dashboards only renders in the presence of a LogStorage CR and the absence of a ManagementClusterConnection CR, and therefore + // does not have a config option for managed clusters. 
+ Entry("for management/standalone, kube-dns", testutils.AllowTigeraScenario{ManagedCluster: false, Openshift: false}), + Entry("for management/standalone, openshift-dns", testutils.AllowTigeraScenario{ManagedCluster: false, Openshift: true}), + ) + }) + + It("should not render when FIPS mode is enabled", func() { + bundle := getBundle(installation) + enabled := operatorv1.FIPSModeEnabled + installation.FIPSMode = &enabled + component := Dashboards(&Config{ + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + TrustedBundle: bundle, + ClusterDomain: clusterDomain, + Namespace: render.ElasticsearchNamespace, + KibanaHost: "tigera-secure-kb-http.tigera-kibana.svc", + KibanaScheme: "https", + KibanaPort: "5601", + }) + + resources, _ := component.Objects() + _, ok := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, "batch", "v1", "Job").(*batchv1.Job) + Expect(ok).To(BeFalse(), "Job should not be rendered when FIPS mode is enabled") + }) + }) + + Context("multi-tenant rendering", func() { + var installation *operatorv1.InstallationSpec + var tenant *operatorv1.Tenant + var replicas int32 + var cfg *Config + clusterDomain := "cluster.local" + + BeforeEach(func() { + replicas = 2 + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tenant", + Namespace: "test-tenant-ns", + }, + Spec: operatorv1.TenantSpec{ + ID: "test-tenant", + Elastic: &operatorv1.TenantElasticSpec{ + KibanaURL: "https://external-kibana:443", + MutualTLS: true, + }, + }, + } + bundle := getBundle(installation) + cfg = &Config{ + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + TrustedBundle: bundle, + ClusterDomain: clusterDomain, + Namespace: "tenant-test-tenant", + Tenant: tenant, + KibanaHost: "external-kibana", + KibanaScheme: "https", + KibanaPort: "443", + } + }) + + It("should support an external kibana endpoint", func() { + cfg.ExternalKibanaClientSecret = &corev1.Secret{ + TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: logstorage.ExternalCertsSecret, + Namespace: cfg.Namespace, + }, + Data: map[string][]byte{ + "client.crt": {1, 2, 3}, + "client.key": {4, 5, 6}, + }, + } + component := Dashboards(cfg) + createResources, _ := component.Objects() + d, ok := rtest.GetResource(createResources, Name, cfg.Namespace, "batch", "v1", "Job").(*batchv1.Job) + Expect(ok).To(BeTrue(), "Job not found") + + // The job should have the hash annotation set, as well as a volume and volume mount for the client secret. + Expect(d.Spec.Template.Annotations["hash.operator.tigera.io/kibana-client-secret"]).To(Equal("ae1a6776a81bf1fc0ee4aac936a90bd61a07aea7")) + Expect(d.Spec.Template.Spec.Volumes).To(ContainElement(corev1.Volume{ + Name: logstorage.ExternalCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: logstorage.ExternalCertsSecret, + }, + }, + })) + Expect(d.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: logstorage.ExternalCertsVolumeName, + MountPath: "/certs/kibana/mtls", + ReadOnly: true, + })) + + // Should expect mTLS env vars set. 
+ Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_CLIENT_KEY", Value: "/certs/kibana/mtls/client.key", + })) + Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_CLIENT_CERT", Value: "/certs/kibana/mtls/client.crt", + })) + Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_MTLS_ENABLED", Value: "true", + })) + }) + + It("should render resources in the tenant namespace", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + job := rtest.GetResource(resources, Name, cfg.Namespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + Expect(job).NotTo(BeNil()) + sa := rtest.GetResource(resources, Name, cfg.Namespace, corev1.GroupName, "v1", "ServiceAccount").(*corev1.ServiceAccount) + Expect(sa).NotTo(BeNil()) + netPol := rtest.GetResource(resources, fmt.Sprintf("allow-tigera.%s", Name), cfg.Namespace, "projectcalico.org", "v3", "NetworkPolicy").(*v3.NetworkPolicy) + Expect(netPol).NotTo(BeNil()) + }) + + It("should render multi-tenant environment variables", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + job := rtest.GetResource(resources, Name, cfg.Namespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + envs := job.Spec.Template.Spec.Containers[0].Env + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SPACE_ID", Value: cfg.Tenant.Spec.ID})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SCHEME", Value: "https"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "external-kibana"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "443"})) + }) + }) + + Context("single-tenant with external elastic rendering", func() { + var installation *operatorv1.InstallationSpec + var tenant *operatorv1.Tenant + var replicas int32 + var cfg *Config + clusterDomain := "cluster.local" + + BeforeEach(func() { + replicas = 2 + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tenant", + }, + Spec: operatorv1.TenantSpec{ + ID: "test-tenant", + }, + } + bundle := getBundle(installation) + cfg = &Config{ + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + TrustedBundle: bundle, + ClusterDomain: clusterDomain, + Namespace: render.ElasticsearchNamespace, + Tenant: tenant, + KibanaHost: "external-kibana", + KibanaScheme: "https", + KibanaPort: "443", + } + }) + + It("should support an external kibana endpoint", func() { + cfg.ExternalKibanaClientSecret = &corev1.Secret{ + TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: logstorage.ExternalCertsSecret, + Namespace: render.ElasticsearchNamespace, + }, + Data: map[string][]byte{ + "client.crt": {1, 2, 3}, + "client.key": {4, 5, 6}, + }, + } + component := Dashboards(cfg) + createResources, _ := component.Objects() + d, ok := rtest.GetResource(createResources, Name, render.ElasticsearchNamespace, "batch", "v1", "Job").(*batchv1.Job) + Expect(ok).To(BeTrue(), "Job not found") + + // The deployment should have the hash annotation set, as well as a volume and volume mount for the client secret. 
+ Expect(d.Spec.Template.Annotations["hash.operator.tigera.io/kibana-client-secret"]).To(Equal("ae1a6776a81bf1fc0ee4aac936a90bd61a07aea7")) + Expect(d.Spec.Template.Spec.Volumes).To(ContainElement(corev1.Volume{ + Name: logstorage.ExternalCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: logstorage.ExternalCertsSecret, + }, + }, + })) + Expect(d.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ + Name: logstorage.ExternalCertsVolumeName, + MountPath: "/certs/kibana/mtls", + ReadOnly: true, + })) + + // Should expect mTLS env vars set. + Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_CLIENT_KEY", Value: "/certs/kibana/mtls/client.key", + })) + Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_CLIENT_CERT", Value: "/certs/kibana/mtls/client.crt", + })) + Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ + Name: "KIBANA_MTLS_ENABLED", Value: "true", + })) + }) + + It("should render resources in the elasticsearch namespace", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + job := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + Expect(job).NotTo(BeNil()) + sa := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, corev1.GroupName, "v1", "ServiceAccount").(*corev1.ServiceAccount) + Expect(sa).NotTo(BeNil()) + netPol := rtest.GetResource(resources, fmt.Sprintf("allow-tigera.%s", Name), render.ElasticsearchNamespace, "projectcalico.org", "v3", "NetworkPolicy").(*v3.NetworkPolicy) + Expect(netPol).NotTo(BeNil()) + }) + + It("should render single-tenant environment variables", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + d := rtest.GetResource(resources, Name, cfg.Namespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + envs := d.Spec.Template.Spec.Containers[0].Env + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SPACE_ID", Value: cfg.Tenant.Spec.ID})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SCHEME", Value: "https"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "external-kibana"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "443"})) + }) + }) + + Context("single-tenant with internal elastic rendering", func() { + var installation *operatorv1.InstallationSpec + var tenant *operatorv1.Tenant + var replicas int32 + var cfg *Config + clusterDomain := "cluster.local" + + BeforeEach(func() { + replicas = 2 + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tenant", + }, + Spec: operatorv1.TenantSpec{ + ID: "test-tenant", + }, + } + bundle := getBundle(installation) + cfg = &Config{ + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + TrustedBundle: bundle, + ClusterDomain: clusterDomain, + Namespace: render.ElasticsearchNamespace, + Tenant: tenant, + KibanaHost: "tigera-secure-kb-http.tigera-kibana.svc", + KibanaScheme: "https", + KibanaPort: "5601", + } + }) + + It("should render resources in the elasticsearch 
namespace", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + job := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + Expect(job).NotTo(BeNil()) + sa := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, corev1.GroupName, "v1", "ServiceAccount").(*corev1.ServiceAccount) + Expect(sa).NotTo(BeNil()) + netPol := rtest.GetResource(resources, fmt.Sprintf("allow-tigera.%s", Name), render.ElasticsearchNamespace, "projectcalico.org", "v3", "NetworkPolicy").(*v3.NetworkPolicy) + Expect(netPol).NotTo(BeNil()) + }) + + It("should render single-tenant environment variables", func() { + component := Dashboards(cfg) + Expect(component).NotTo(BeNil()) + resources, _ := component.Objects() + d := rtest.GetResource(resources, Name, cfg.Namespace, batchv1.GroupName, "v1", "Job").(*batchv1.Job) + envs := d.Spec.Template.Spec.Containers[0].Env + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SPACE_ID", Value: cfg.Tenant.Spec.ID})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SCHEME", Value: "https"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "tigera-secure-kb-http.tigera-kibana.svc"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "5601"})) + }) + }) +}) + +func getBundle(installation *operatorv1.InstallationSpec) certificatemanagement.TrustedBundle { + scheme := runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) + cli := fake.NewClientBuilder().WithScheme(scheme).Build() + + certificateManager, err := certificatemanager.Create(cli, installation, dns.DefaultClusterDomain, common.OperatorNamespace(), certificatemanager.AllowCACreation()) + Expect(err).NotTo(HaveOccurred()) + + trustedBundle := certificateManager.CreateTrustedBundle() + Expect(cli.Create(context.Background(), certificateManager.KeyPair().Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) + + return trustedBundle +} + +func compareResources(resources []client.Object, expectedResources []resourceTestObj) { + Expect(resources).To(HaveLen(len(expectedResources))) + for i, expectedResource := range expectedResources { + resource := resources[i] + actualName := resource.(metav1.ObjectMetaAccessor).GetObjectMeta().GetName() + actualNS := resource.(metav1.ObjectMetaAccessor).GetObjectMeta().GetNamespace() + + Expect(actualName).To(Equal(expectedResource.name), fmt.Sprintf("Rendered resource has wrong name (position %d, name %s, namespace %s)", i, actualName, actualNS)) + Expect(actualNS).To(Equal(expectedResource.ns), fmt.Sprintf("Rendered resource has wrong namespace (position %d, name %s, namespace %s)", i, actualName, actualNS)) + Expect(resource).Should(BeAssignableToTypeOf(expectedResource.typ)) + if expectedResource.f != nil { + expectedResource.f(resource) + } + } + + // Check job + job := rtest.GetResource(resources, Name, render.ElasticsearchNamespace, "batch", "v1", "Job").(*batchv1.Job) + ExpectWithOffset(1, job).NotTo(BeNil()) + + // Check containers + expected := expectedContainers() + actual := job.Spec.Template.Spec.Containers + ExpectWithOffset(1, len(actual)).To(Equal(len(expected))) + ExpectWithOffset(1, actual[0].Env).To(ConsistOf(expected[0].Env)) + ExpectWithOffset(1, actual[0].EnvFrom).To(ConsistOf(expected[0].EnvFrom)) + ExpectWithOffset(1, actual[0].VolumeMounts).To(ConsistOf(expected[0].VolumeMounts)) + ExpectWithOffset(1, 
actual[0].ReadinessProbe).To(Equal(expected[0].ReadinessProbe)) + ExpectWithOffset(1, actual[0].LivenessProbe).To(Equal(expected[0].LivenessProbe)) + ExpectWithOffset(1, actual[0].SecurityContext).To(Equal(expected[0].SecurityContext)) + ExpectWithOffset(1, actual[0].Name).To(Equal(expected[0].Name)) + ExpectWithOffset(1, actual[0].Resources).To(Equal(expected[0].Resources)) + ExpectWithOffset(1, actual[0].Image).To(Equal(expected[0].Image)) + ExpectWithOffset(1, actual[0].Ports).To(Equal(expected[0].Ports)) + ExpectWithOffset(1, actual).To(ConsistOf(expected)) + + // Check volumeMounts + ExpectWithOffset(1, job.Spec.Template.Spec.Volumes).To(ConsistOf(expectedVolumes())) + + // Check annotations + ExpectWithOffset(1, job.Spec.Template.Annotations).To(HaveKeyWithValue("tigera-operator.hash.operator.tigera.io/tigera-ca-private", Not(BeEmpty()))) + ExpectWithOffset(1, job.Spec.Template.Annotations).To(HaveKeyWithValue("hash.operator.tigera.io/elasticsearch-secrets", Not(BeEmpty()))) + + // Check permissions + clusterRole := rtest.GetResource(resources, Name, "", "rbac.authorization.k8s.io", "v1", "ClusterRole").(*rbacv1.ClusterRole) + Expect(clusterRole.Rules).To(ConsistOf([]rbacv1.PolicyRule{ + + { + APIGroups: []string{"policy"}, + Resources: []string{"podsecuritypolicies"}, + ResourceNames: []string{Name}, + Verbs: []string{"use"}, + }, + })) + clusterRoleBinding := rtest.GetResource(resources, Name, "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding").(*rbacv1.ClusterRoleBinding) + Expect(clusterRoleBinding.RoleRef.Name).To(Equal(Name)) + Expect(clusterRoleBinding.Subjects).To(ConsistOf([]rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: Name, + Namespace: render.ElasticsearchNamespace, + }, + })) +} + +func expectedVolumes() []corev1.Volume { + return []corev1.Volume{ + { + Name: "tigera-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "tigera-ca-bundle", + }, + }, + }, + }, + } +} + +func expectedContainers() []corev1.Container { + return []corev1.Container{ + { + Name: Name, + ImagePullPolicy: render.ImagePullPolicy(), + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}}, + AllowPrivilegeEscalation: ptr.BoolToPtr(false), + Privileged: ptr.BoolToPtr(false), + RunAsNonRoot: ptr.BoolToPtr(true), + RunAsGroup: ptr.Int64ToPtr(10001), + RunAsUser: ptr.Int64ToPtr(10001), + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, + }, + Env: []corev1.EnvVar{ + { + Name: "KIBANA_HOST", + Value: "tigera-secure-kb-http.tigera-kibana.svc", + }, + { + Name: "KIBANA_PORT", + Value: "5601", + }, + { + Name: "KIBANA_SCHEME", + Value: "https", + }, + { + Name: "START_XPACK_TRIAL", + Value: "false", + }, + { + Name: "USER", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ElasticCredentialsSecret, + }, + Key: "username", + }, + }, + }, + { + Name: "PASSWORD", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ElasticCredentialsSecret, + }, + Key: "password", + }, + }, + }, + { + Name: "KB_CA_CERT", + Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt", + }, + { + Name: "FIPS_MODE_ENABLED", + Value: "false", + }, + { + Name: "ELASTIC_USER", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + 
SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ElasticCredentialsSecret, + }, + Key: "username", + }, + }, + }, + { + Name: "ELASTIC_PASSWORD", + Value: "", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ElasticCredentialsSecret, + }, + Key: "password", + }, + }, + }}, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "tigera-ca-bundle", + MountPath: "/etc/pki/tls/certs", + ReadOnly: true, + }, + }, + }, + } +} diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index 2888f1e98e..3a52aad27b 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -594,12 +594,6 @@ func (l *linseed) linseedAllowTigeraPolicy() *v3.NetworkPolicy { Source: render.EKSLogForwarderEntityRule, Destination: linseedIngressDestinationEntityRule, }, - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: render.IntrusionDetectionInstallerSourceEntityRule, - Destination: linseedIngressDestinationEntityRule, - }, { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, diff --git a/pkg/render/testutils/expected_policies/dashboards.json b/pkg/render/testutils/expected_policies/dashboards.json new file mode 100644 index 0000000000..d74bb3558a --- /dev/null +++ b/pkg/render/testutils/expected_policies/dashboards.json @@ -0,0 +1,40 @@ +{ + "apiVersion": "projectcalico.org/v3", + "kind": "NetworkPolicy", + "metadata": { + "name": "allow-tigera.dashboards-installer", + "namespace": "tigera-elasticsearch" + }, + "spec": { + "order": 1, + "tier": "allow-tigera", + "selector": "job-name == 'dashboards-installer'", + "types": [ + "Egress" + ], + "egress": [ + { + "action": "Allow", + "protocol": "UDP", + "destination": { + "namespaceSelector": "projectcalico.org/name == 'kube-system'", + "selector": "k8s-app == 'kube-dns'", + "ports": [ + 53 + ] + } + }, + { + "action": "Allow", + "protocol": "TCP", + "destination": { + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'", + "selector": "k8s-app == 'tigera-secure'", + "ports": [ + 5601 + ] + } + } + ] + } +} diff --git a/pkg/render/testutils/expected_policies/dashboards_ocp.json b/pkg/render/testutils/expected_policies/dashboards_ocp.json new file mode 100644 index 0000000000..bcdf5e1e7b --- /dev/null +++ b/pkg/render/testutils/expected_policies/dashboards_ocp.json @@ -0,0 +1,51 @@ +{ + "apiVersion": "projectcalico.org/v3", + "kind": "NetworkPolicy", + "metadata": { + "name": "allow-tigera.dashboards-installer", + "namespace": "tigera-elasticsearch" + }, + "spec": { + "order": 1, + "tier": "allow-tigera", + "selector": "job-name == 'dashboards-installer'", + "types": [ + "Egress" + ], + "egress": [ + { + "action": "Allow", + "protocol": "UDP", + "destination": { + "namespaceSelector": "projectcalico.org/name == 'openshift-dns'", + "selector": "dns.operator.openshift.io/daemonset-dns == 'default'", + "ports": [ + 5353 + ] + } + }, + { + "action": "Allow", + "protocol": "TCP", + "destination": { + "namespaceSelector": "projectcalico.org/name == 'openshift-dns'", + "selector": "dns.operator.openshift.io/daemonset-dns == 'default'", + "ports": [ + 5353 + ] + } + }, + { + "action": "Allow", + "protocol": "TCP", + "destination": { + "namespaceSelector": "projectcalico.org/name == 'tigera-kibana'", + "selector": "k8s-app == 'tigera-secure'", + "ports": [ + 5601 + ] + } + } + ] + } +} diff --git 
a/pkg/render/testutils/expected_policies/kibana.json b/pkg/render/testutils/expected_policies/kibana.json index 94d063d357..6f3675cd2d 100644 --- a/pkg/render/testutils/expected_policies/kibana.json +++ b/pkg/render/testutils/expected_policies/kibana.json @@ -55,6 +55,19 @@ ] } }, + { + "action": "Allow", + "protocol": "TCP", + "source": { + "selector": "k8s-app == 'dashboards-installer'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" + }, + "destination": { + "ports": [ + 5601 + ] + } + }, { "action": "Allow", "destination": { @@ -114,6 +127,15 @@ "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "protocol": "TCP" + }, + { + "action": "Allow", + "destination": { + "selector": "k8s-app == 'dashboards-installer'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", + "ports" : [] + }, + "protocol": "TCP" } ] } diff --git a/pkg/render/testutils/expected_policies/kibana_ocp.json b/pkg/render/testutils/expected_policies/kibana_ocp.json index 1244a594da..c9572ce1ba 100644 --- a/pkg/render/testutils/expected_policies/kibana_ocp.json +++ b/pkg/render/testutils/expected_policies/kibana_ocp.json @@ -55,6 +55,19 @@ ] } }, + { + "action": "Allow", + "protocol": "TCP", + "source": { + "selector": "k8s-app == 'dashboards-installer'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" + }, + "destination": { + "ports": [ + 5601 + ] + } + }, { "action": "Allow", "destination": { @@ -125,6 +138,15 @@ "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "protocol": "TCP" + }, + { + "action": "Allow", + "destination": { + "selector": "k8s-app == 'dashboards-installer'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", + "ports" : [] + }, + "protocol": "TCP" } ] } diff --git a/pkg/render/testutils/expected_policies/linseed.json b/pkg/render/testutils/expected_policies/linseed.json index 0fc95c4e8d..df7584285a 100644 --- a/pkg/render/testutils/expected_policies/linseed.json +++ b/pkg/render/testutils/expected_policies/linseed.json @@ -40,19 +40,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "job-name == 'intrusion-detection-es-job-installer'", - "namespaceSelector": "projectcalico.org/name == 'tigera-intrusion-detection'" - }, - "destination": { - "ports": [ - 8444 - ] - } - }, { "action": "Allow", "protocol": "TCP", diff --git a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json index db7bc2d53f..1f1ae9617c 100644 --- a/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_dpi_enabled.json @@ -40,19 +40,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "job-name == 'intrusion-detection-es-job-installer'", - "namespaceSelector": "projectcalico.org/name == 'tigera-intrusion-detection'" - }, - "destination": { - "ports": [ - 8444 - ] - } - }, { "action": "Allow", "protocol": "TCP", diff --git a/pkg/render/testutils/expected_policies/linseed_ocp.json b/pkg/render/testutils/expected_policies/linseed_ocp.json index a71fee19d3..b2abab5a14 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp.json @@ -40,19 +40,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "job-name == 'intrusion-detection-es-job-installer'", - "namespaceSelector": 
"projectcalico.org/name == 'tigera-intrusion-detection'" - }, - "destination": { - "ports": [ - 8444 - ] - } - }, { "action": "Allow", "protocol": "TCP", diff --git a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json index 8c9a449322..9d59dea70f 100644 --- a/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json +++ b/pkg/render/testutils/expected_policies/linseed_ocp_dpi_enabled.json @@ -40,19 +40,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "job-name == 'intrusion-detection-es-job-installer'", - "namespaceSelector": "projectcalico.org/name == 'tigera-intrusion-detection'" - }, - "destination": { - "ports": [ - 8444 - ] - } - }, { "action": "Allow", "protocol": "TCP",