diff --git a/Makefile b/Makefile
index 924eb0cdfd5..2a39b2c6b9f 100644
--- a/Makefile
+++ b/Makefile
@@ -142,9 +142,7 @@ endef
 .PHONY: manifests
 manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
-# TODO: enable below when we do webhook
-# $(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-	$(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd:ignoreUnexportedFields=true paths="./..." output:crd:artifacts:config=config/crd/bases
+	$(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd:ignoreUnexportedFields=true webhook paths="./..." output:crd:artifacts:config=config/crd/bases
 	$(call fetch-external-crds,github.com/openshift/api,route/v1)
 	$(call fetch-external-crds,github.com/openshift/api,user/v1)
diff --git a/PROJECT b/PROJECT
index cbb40d6a5b2..4bfc6c97792 100644
--- a/PROJECT
+++ b/PROJECT
@@ -21,6 +21,9 @@ resources:
   kind: DSCInitialization
   path: github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1
   version: v1
+  webhooks:
+    validation: true
+    webhookVersion: v1
 - api:
     crdVersion: v1
     namespaced: false
@@ -30,4 +33,7 @@ resources:
   kind: DataScienceCluster
   path: github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1
   version: v1
+  webhooks:
+    validation: true
+    webhookVersion: v1
 version: "3"
diff --git a/bundle/manifests/redhat-ods-operator-webhook-service_v1_service.yaml b/bundle/manifests/redhat-ods-operator-webhook-service_v1_service.yaml
new file mode 100644
index 00000000000..218c45fefcc
--- /dev/null
+++ b/bundle/manifests/redhat-ods-operator-webhook-service_v1_service.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    service.beta.openshift.io/inject-cabundle: "true"
+    service.beta.openshift.io/serving-cert-secret-name: redhat-ods-operator-controller-webhook-cert
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/component: webhook
+    app.kubernetes.io/created-by: rhods-operator
+    app.kubernetes.io/instance: webhook-service
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/name: service
+    app.kubernetes.io/part-of: rhods-operator
+  name: redhat-ods-operator-webhook-service
+spec:
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 9443
+  selector:
+    control-plane: controller-manager
+status:
+  loadBalancer: {}
diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml
index 4a35574e1fe..e07d80f7cd3 100644
--- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml
@@ -1684,6 +1684,10 @@ spec:
                   initialDelaySeconds: 15
                   periodSeconds: 20
                 name: manager
+                ports:
+                - containerPort: 9443
+                  name: webhook-server
+                  protocol: TCP
                 readinessProbe:
                   httpGet:
                     path: /readyz
@@ -1702,10 +1706,19 @@ spec:
                  capabilities:
                    drop:
                    - ALL
+                volumeMounts:
+                - mountPath: /tmp/k8s-webhook-server/serving-certs
+                  name: cert
+                  readOnly: true
              securityContext:
                runAsNonRoot: true
              serviceAccountName: redhat-ods-operator-controller-manager
              terminationGracePeriodSeconds: 10
+              volumes:
+              - name: cert
+                secret:
+                  defaultMode: 420
+                  secretName: redhat-ods-operator-controller-webhook-cert
    strategy: deployment
  installModes:
  - supported: false
@@ -1733,3 +1746,26 @@ spec:
   provider:
     name: Red Hat
   version: 2.9.0
+  webhookdefinitions:
+  - admissionReviewVersions:
+    - v1
+    containerPort: 443
+    deploymentName: redhat-ods-operator-controller-manager
+    failurePolicy: Fail
+    generateName: operator.opendatahub.io
+    rules:
+    - apiGroups:
+      - datasciencecluster.opendatahub.io
+      - dscinitialization.opendatahub.io
+      apiVersions:
+      - v1
+      operations:
+      - CREATE
+      - UPDATE
+      resources:
+      - datascienceclusters
+      - dscinitializations
+    sideEffects: None
+    targetPort: 9443
+    type: ValidatingAdmissionWebhook
+    webhookPath: /validate-opendatahub-io-v1
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index fc1b6912725..1a7fe00e8c9 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -21,7 +21,7 @@ resources:
 - ../manager
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
-#- ../webhook
+- ../webhook
 # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
 #- ../certmanager
 # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
@@ -37,6 +37,7 @@ resources:
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
+# Moved below to patches
 #- manager_webhook_patch.yaml
 # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
@@ -76,3 +77,5 @@ resources:
 patches:
 - path: manager_auth_proxy_patch.yaml
+# [WEBHOOK]
+- path: manager_webhook_patch.yaml
diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml
new file mode 100644
index 00000000000..d76a0133d0d
--- /dev/null
+++ b/config/default/manager_webhook_patch.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: controller-manager
+  namespace: system
+spec:
+  template:
+    spec:
+      containers:
+      - name: manager
+        ports:
+        - containerPort: 9443
+          name: webhook-server
+          protocol: TCP
+        volumeMounts:
+        - mountPath: /tmp/k8s-webhook-server/serving-certs
+          name: cert
+          readOnly: true
+      volumes:
+      - name: cert
+        secret:
+          defaultMode: 420
+          secretName: redhat-ods-operator-controller-webhook-cert
diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml
new file mode 100644
index 00000000000..8428859f524
--- /dev/null
+++ b/config/webhook/kustomization.yaml
@@ -0,0 +1,9 @@
+resources:
+- manifests.yaml
+- service.yaml
+
+commonAnnotations:
+  service.beta.openshift.io/inject-cabundle: "true"
+
+configurations:
+- kustomizeconfig.yaml
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml
new file mode 100644
index 00000000000..25e21e3c963
--- /dev/null
+++ b/config/webhook/kustomizeconfig.yaml
@@ -0,0 +1,25 @@
+# the following config is for teaching kustomize where to look at when substituting vars.
+# It requires kustomize v2.1.0 or newer to work properly.
+nameReference:
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - kind: MutatingWebhookConfiguration
+    group: admissionregistration.k8s.io
+    path: webhooks/clientConfig/service/name
+  - kind: ValidatingWebhookConfiguration
+    group: admissionregistration.k8s.io
+    path: webhooks/clientConfig/service/name
+
+namespace:
+- kind: MutatingWebhookConfiguration
+  group: admissionregistration.k8s.io
+  path: webhooks/clientConfig/service/namespace
+  create: true
+- kind: ValidatingWebhookConfiguration
+  group: admissionregistration.k8s.io
+  path: webhooks/clientConfig/service/namespace
+  create: true
+
+varReference:
+- path: metadata/annotations
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
new file mode 100644
index 00000000000..5595314bffb
--- /dev/null
+++ b/config/webhook/manifests.yaml
@@ -0,0 +1,29 @@
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  name: validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: webhook-service
+      namespace: system
+      path: /validate-opendatahub-io-v1
+  failurePolicy: Fail
+  name: operator.opendatahub.io
+  rules:
+  - apiGroups:
+    - datasciencecluster.opendatahub.io
+    - dscinitialization.opendatahub.io
+    apiVersions:
+    - v1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - datascienceclusters
+    - dscinitializations
+  sideEffects: None
diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml
new file mode 100644
index 00000000000..72c652ddaae
--- /dev/null
+++ b/config/webhook/service.yaml
@@ -0,0 +1,22 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app.kubernetes.io/name: service
+    app.kubernetes.io/instance: webhook-service
+    app.kubernetes.io/component: webhook
+    app.kubernetes.io/created-by: rhods-operator
+    app.kubernetes.io/part-of: rhods-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: webhook-service
+  namespace: system
+  annotations:
+    service.beta.openshift.io/serving-cert-secret-name: redhat-ods-operator-controller-webhook-cert
+spec:
+  ports:
+    - port: 443
+      protocol: TCP
+      targetPort: 9443
+  selector:
+    control-plane: controller-manager
diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go
index 19bd0d7176d..dd1f5c6b861 100644
--- a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go
+++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go
@@ -67,9 +67,6 @@ func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ct
 		return ctrl.Result{}, nil
 	case 1:
 		dsciInstance = &dsciInstances.Items[0]
-	default:
-		message := "only one instance of DSCInitialization object is allowed"
-		return ctrl.Result{}, errors.New(message)
 	}
 	if dsciInstance.Spec.TrustedCABundle.ManagementState != operatorv1.Managed {
diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go
index d75811ae559..d82f6fb6d6c 100644
--- a/controllers/datasciencecluster/datasciencecluster_controller.go
+++ b/controllers/datasciencecluster/datasciencecluster_controller.go
@@ -19,7 +19,6 @@ package datasciencecluster
 import (
 	"context"
-	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -138,19 +137,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
 		return reconcile.Result{Requeue: true}, nil
 	}
-	if len(instances.Items) > 1 {
-		message := fmt.Sprintf("only one instance of DataScienceCluster object is allowed. Update existing instance %s", req.Name)
-		err := errors.New(message)
-		_ = r.reportError(err, instance, message)
-
-		_, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsc.DataScienceCluster) {
-			status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDataScienceCluster, message)
-			saved.Status.Phase = status.PhaseError
-		})
-
-		return ctrl.Result{}, err
-	}
-
 	// Verify a valid DSCInitialization instance is created
 	dsciInstances := &dsci.DSCInitializationList{}
 	err = r.Client.List(ctx, dsciInstances)
@@ -161,7 +147,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
 	}
 	// Update phase to error state if DataScienceCluster is created without valid DSCInitialization
-	switch len(dsciInstances.Items) {
+	switch len(dsciInstances.Items) { // only handle number as 0 or 1, others won't be existed since webhook block creation
 	case 0:
 		reason := status.ReconcileFailed
 		message := "Failed to get a valid DSCInitialization instance, please create a DSCI instance"
@@ -178,13 +164,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
 	case 1:
 		dscInitializationSpec := dsciInstances.Items[0].Spec
 		dscInitializationSpec.DeepCopyInto(r.DataScienceCluster.DSCISpec)
-	default:
-		message := "only one instance of DSCInitialization object is allowed"
-		_, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsc.DataScienceCluster) {
-			status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDSCInitialization, message)
-			saved.Status.Phase = status.PhaseError
-		})
-		return ctrl.Result{}, errors.New(message)
 	}
 	if instance.ObjectMeta.DeletionTimestamp.IsZero() {
diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go
index d7d5adb12e5..20a5f22ad61 100644
--- a/controllers/dscinitialization/dscinitialization_controller.go
+++ b/controllers/dscinitialization/dscinitialization_controller.go
@@ -19,8 +19,6 @@ package dscinitialization
 import (
 	"context"
-	"errors"
-	"fmt"
 	"path/filepath"
 	"reflect"
@@ -89,33 +87,11 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	}
 	var instance *dsciv1.DSCInitialization
-	switch {
+	switch { // only handle number as 0 or 1, others won't be existed since webhook block creation
 	case len(instances.Items) == 0:
 		return ctrl.Result{}, nil
 	case len(instances.Items) == 1:
 		instance = &instances.Items[0]
-	case len(instances.Items) > 1:
-		// find out the one by created timestamp and use it as the default one
-		earliestDSCI := &instances.Items[0]
-		for _, instance := range instances.Items {
-			currentDSCI := instance
-			if currentDSCI.CreationTimestamp.Before(&earliestDSCI.CreationTimestamp) {
-				earliestDSCI = &currentDSCI
-			}
-		}
-		message := fmt.Sprintf("only one instance of DSCInitialization object is allowed. Please delete other instances than %s", earliestDSCI.Name)
-		// update all instances Message and Status
-		for _, deletionInstance := range instances.Items {
-			deletionInstance := deletionInstance
-			if deletionInstance.Name != earliestDSCI.Name {
-				_, _ = status.UpdateWithRetry(ctx, r.Client, &deletionInstance, func(saved *dsciv1.DSCInitialization) {
-					status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDSCInitialization, message)
-					saved.Status.Phase = status.PhaseError
-				})
-			}
-		}
-
-		return ctrl.Result{}, errors.New(message)
 	}
 	if instance.ObjectMeta.DeletionTimestamp.IsZero() {
diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go
index 6a726a1998b..17b510c4d2c 100644
--- a/controllers/dscinitialization/dscinitialization_test.go
+++ b/controllers/dscinitialization/dscinitialization_test.go
@@ -20,6 +20,7 @@ import (
 const (
 	workingNamespace     = "test-operator-ns"
+	applicationName      = "default-dsci"
 	applicationNamespace = "test-application-ns"
 	configmapName        = "odh-common-config"
 	monitoringNamespace  = "test-monitoring-ns"
@@ -29,10 +30,10 @@ var _ = Describe("DataScienceCluster initialization", func() {
 	Context("Creation of related resources", func() {
 		// must be default as instance name, or it will break
-		const applicationName = "default-dsci"
+
 		BeforeEach(func() {
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
+			desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -119,7 +120,7 @@ var _ = Describe("DataScienceCluster initialization", func() {
 		const applicationName = "default-dsci"
 		It("Should not create monitoring namespace if monitoring is disabled", func() {
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Removed, operatorv1.Managed, monitoringNamespace2)
+			desiredDsci := createDSCI(operatorv1.Removed, operatorv1.Managed, monitoringNamespace2)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -135,7 +136,7 @@ var _ = Describe("DataScienceCluster initialization", func() {
 		})
 		It("Should create default monitoring namespace if monitoring enabled", func() {
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace2)
+			desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace2)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -154,22 +155,6 @@ var _ = Describe("DataScienceCluster initialization", func() {
 	Context("Handling existing resources", func() {
 		AfterEach(cleanupResources)
-		const applicationName = "default-dsci"
-
-		It("Should not have more than one DSCI instance in the cluster", func() {
-
-			anotherApplicationName := "default2"
-			// given
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
-			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
-			// when
-			desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
-			// then
-			Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2)).
-				WithTimeout(timeout).
-				WithPolling(interval).
-				Should(BeFalse())
-		})
 		It("Should not update rolebinding if it exists", func() {
 			applicationName := envtestutil.AppendRandomNameTo("rolebinding-test")
@@ -199,7 +184,7 @@ var _ = Describe("DataScienceCluster initialization", func() {
 				Should(BeTrue())
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
+			desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -240,7 +225,7 @@ var _ = Describe("DataScienceCluster initialization", func() {
 				Should(BeTrue())
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
+			desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -277,7 +262,7 @@ var _ = Describe("DataScienceCluster initialization", func() {
 				Should(BeTrue())
 			// when
-			desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
+			desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace)
 			Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
 			foundDsci := &dsci.DSCInitialization{}
 			Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)).
@@ -349,14 +334,14 @@ func objectExists(ns string, name string, obj client.Object) func() bool { //nol
 	}
 }
-func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization {
+func createDSCI(enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsci.DSCInitialization {
 	return &dsci.DSCInitialization{
 		TypeMeta: metav1.TypeMeta{
 			Kind:       "DSCInitialization",
 			APIVersion: "v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      appName,
+			Name:      applicationName,
 			Namespace: workingNamespace,
 		},
 		Spec: dsci.DSCInitializationSpec{
diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go
new file mode 100644
index 00000000000..f730dc78703
--- /dev/null
+++ b/controllers/webhook/webhook.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	admissionv1 "k8s.io/api/admission/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+var log = ctrl.Log.WithName("odh-controller-webhook")
+
+//+kubebuilder:webhook:path=/validate-opendatahub-io-v1,mutating=false,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io;dscinitialization.opendatahub.io,resources=datascienceclusters;dscinitializations,verbs=create;update,versions=v1,name=operator.opendatahub.io,admissionReviewVersions=v1
+//nolint:lll
+
+type OpenDataHubWebhook struct {
+	client  client.Client
+	decoder *admission.Decoder
+}
+
+func (w *OpenDataHubWebhook) SetupWithManager(mgr ctrl.Manager) {
+	hookServer := mgr.GetWebhookServer()
+	odhWebhook := &webhook.Admission{
+		Handler: w,
+	}
+	hookServer.Register("/validate-opendatahub-io-v1", odhWebhook)
+}
+
+func (w *OpenDataHubWebhook) InjectDecoder(d *admission.Decoder) error {
+	w.decoder = d
+	return nil
+}
+
+func (w *OpenDataHubWebhook) InjectClient(c client.Client) error {
+	w.client = c
+	return nil
+}
+
+func (w *OpenDataHubWebhook) checkDupCreation(ctx context.Context, req admission.Request) admission.Response {
+	if req.Operation != admissionv1.Create {
+		return admission.Allowed(fmt.Sprintf("duplication check: skipping %v request", req.Operation))
+	}
+
+	switch req.Kind.Kind {
+	case "DataScienceCluster":
+	case "DSCInitialization":
+	default:
+		log.Info("Got wrong kind", "kind", req.Kind.Kind)
+		return admission.Errored(http.StatusBadRequest, nil)
+	}
+
+	gvk := schema.GroupVersionKind{
+		Group:   req.Kind.Group,
+		Version: req.Kind.Version,
+		Kind:    req.Kind.Kind,
+	}
+
+	list := &unstructured.UnstructuredList{}
+	list.SetGroupVersionKind(gvk)
+
+	if err := w.client.List(ctx, list); err != nil {
+		return admission.Errored(http.StatusBadRequest, err)
+	}
+
+	// if len == 1 now creation of #2 is being handled
+	if len(list.Items) > 0 {
+		return admission.Denied(fmt.Sprintf("Only one instance of %s object is allowed", req.Kind.Kind))
+	}
+
+	return admission.Allowed(fmt.Sprintf("%s duplication check passed", req.Kind.Kind))
+}
+
+func (w *OpenDataHubWebhook) Handle(ctx context.Context, req admission.Request) admission.Response {
+	var resp admission.Response
+
+	// Handle only Create and Update
+	if req.Operation == admissionv1.Delete || req.Operation == admissionv1.Connect {
+		msg := fmt.Sprintf("ODH skipping %v request", req.Operation)
+		log.Info(msg)
+		return admission.Allowed(msg)
+	}
+
+	resp = w.checkDupCreation(ctx, req)
+	if !resp.Allowed {
+		return resp
+	}
+
+	return admission.Allowed(fmt.Sprintf("%s allowed", req.Kind.Kind))
+}
diff --git a/controllers/webhook/webhook_suite_test.go b/controllers/webhook/webhook_suite_test.go
new file mode 100644
index 00000000000..95ce437c10c
--- /dev/null
+++ b/controllers/webhook/webhook_suite_test.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook_test
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net"
+	"path/filepath"
+	"testing"
+	"time"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1"
+	dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/kserve"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/ray"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai"
+	"github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches"
+	"github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+const (
+	namespace = "webhook-test-ns"
+	nameBase  = "webhook-test"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var ctx context.Context
+var cancel context.CancelFunc
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Webhook Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	ctx, cancel = context.WithCancel(context.TODO())
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: false,
+		WebhookInstallOptions: envtest.WebhookInstallOptions{
+			Paths: []string{filepath.Join("..", "..", "config", "webhook")},
+		},
+	}
+
+	var err error
+	// cfg is defined in this file globally.
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	scheme := runtime.NewScheme()
+	// DSCI
+	err = dsci.AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+	// DSC
+	err = dsc.AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+	// Webhook
+	err = admissionv1beta1.AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+	// start webhook server using Manager
+	webhookInstallOptions := &testEnv.WebhookInstallOptions
+	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+		Scheme:             scheme,
+		Host:               webhookInstallOptions.LocalServingHost,
+		Port:               webhookInstallOptions.LocalServingPort,
+		CertDir:            webhookInstallOptions.LocalServingCertDir,
+		LeaderElection:     false,
+		MetricsBindAddress: "0",
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	(&webhook.OpenDataHubWebhook{}).SetupWithManager(mgr)
+
+	//+kubebuilder:scaffold:webhook
+
+	go func() {
+		defer GinkgoRecover()
+		err = mgr.Start(ctx)
+		Expect(err).NotTo(HaveOccurred())
+	}()
+
+	// wait for the webhook server to get ready
+	dialer := &net.Dialer{Timeout: time.Second}
+	addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
+	Eventually(func() error {
+		conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) //nolint:gosec
+		if err != nil {
+			return err
+		}
+		conn.Close()
+		return nil
+	}).Should(Succeed())
+
+})
+
+var _ = AfterSuite(func() {
+	cancel()
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
+
+var _ = Describe("DSC/DSCI webhook", func() {
+	It("Should not have more than one DSCI instance in the cluster", func() {
+		desiredDsci := newDSCI(nameBase + "-dsci-1")
+		Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed())
+		desiredDsci2 := newDSCI(nameBase + "-dsci-2")
+		Expect(k8sClient.Create(context.Background(), desiredDsci2)).ShouldNot(Succeed())
+	})
+
+	It("Should block creation of second DSC instance", func() {
+		dscSpec := newDSC(nameBase+"-dsc-1", namespace)
+		Expect(k8sClient.Create(context.Background(), dscSpec)).Should(Succeed())
+		dscSpec = newDSC(nameBase+"-dsc-2", namespace)
+		Expect(k8sClient.Create(context.Background(), dscSpec)).ShouldNot(Succeed())
+	})
+})
+
+func newDSCI(appName string) *dsci.DSCInitialization {
+	monitoringNS := "monitoring-namespace"
+	return &dsci.DSCInitialization{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "DSCInitialization",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      appName,
+			Namespace: namespace,
+		},
+		Spec: dsci.DSCInitializationSpec{
+			ApplicationsNamespace: namespace,
+			Monitoring: dsci.Monitoring{
+				Namespace:       monitoringNS,
+				ManagementState: operatorv1.Managed,
+			},
+			TrustedCABundle: dsci.TrustedCABundleSpec{
+				ManagementState: operatorv1.Managed,
+			},
+		},
+	}
+}
+func newDSC(name string, namespace string) *dsc.DataScienceCluster {
+	return &dsc.DataScienceCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: dsc.DataScienceClusterSpec{
+			Components: dsc.Components{
+				Dashboard: dashboard.Dashboard{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				Workbenches: workbenches.Workbenches{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				ModelMeshServing: modelmeshserving.ModelMeshServing{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				DataSciencePipelines: datasciencepipelines.DataSciencePipelines{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				Kserve: kserve.Kserve{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				CodeFlare: codeflare.CodeFlare{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				Ray: ray.Ray{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+				TrustyAI: trustyai.TrustyAI{
+					Component: components.Component{
+						ManagementState: operatorv1.Removed,
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/main.go b/main.go
index 9e1e8d145ca..20a6268314d 100644
--- a/main.go
+++ b/main.go
@@ -55,6 +55,7 @@ import (
 	datascienceclustercontrollers "github.com/opendatahub-io/opendatahub-operator/v2/controllers/datasciencecluster"
 	dscicontr "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization"
 	"github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator"
+	"github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/common"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy"
 	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade"
@@ -141,6 +142,8 @@ func main() {
 		os.Exit(1)
 	}
+	(&webhook.OpenDataHubWebhook{}).SetupWithManager(mgr)
+
 	if err = (&dscicontr.DSCInitializationReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
@@ -206,8 +209,18 @@ func main() {
 	// Check if user opted for disabling DSC configuration
 	_, disableDSCConfig := os.LookupEnv("DISABLE_DSC_CONFIG")
 	if !disableDSCConfig {
-		if err = upgrade.CreateDefaultDSCI(setupClient, platform, dscApplicationsNamespace, dscMonitoringNamespace); err != nil {
-			setupLog.Error(err, "unable to create initial setup for the operator")
+		var createDefaultDSCIFunc manager.RunnableFunc = func(ctx context.Context) error {
+			err := upgrade.CreateDefaultDSCI(setupClient, platform, dscApplicationsNamespace, dscMonitoringNamespace)
+			if err != nil {
+				setupLog.Error(err, "unable to create initial setup for the operator")
+			}
+			return err
+		}
+
+		err := mgr.Add(createDefaultDSCIFunc)
+		if err != nil {
+			setupLog.Error(err, "error scheduling DSCI creation")
+			os.Exit(1)
 		}
 	}
diff --git a/tests/e2e/controller_setup_test.go b/tests/e2e/controller_setup_test.go
index a4b44aa8af5..d83f65d6155 100644
--- a/tests/e2e/controller_setup_test.go
+++ b/tests/e2e/controller_setup_test.go
@@ -77,9 +77,9 @@ func NewTestContext() (*testContext, error) { //nolint:golint,revive // Only use
 	}
 	// setup DSCI CR since we do not create automatically by operator
-	testDSCI := setupDSCICR()
+	testDSCI := setupDSCICR("e2e-test-dsci")
 	// Setup DataScienceCluster CR
-	testDSC := setupDSCInstance()
+	testDSC := setupDSCInstance("e2e-test")
 	return &testContext{
 		cfg:              config,
diff --git a/tests/e2e/dsc_creation_test.go b/tests/e2e/dsc_creation_test.go
index 99b0446a337..ae2368fb563 100644
--- a/tests/e2e/dsc_creation_test.go
+++ b/tests/e2e/dsc_creation_test.go
@@ -14,6 +14,9 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/util/retry"
@@ -40,10 +43,16 @@ func creationTestSuite(t *testing.T) {
 		err = testCtx.testDSCICreation()
 		require.NoError(t, err, "error creating DSCI CR")
 	})
+	t.Run("Creation of more than one of DSCInitialization instance", func(t *testing.T) {
+		testCtx.testDSCIDuplication(t)
+	})
 	t.Run("Creation of DataScienceCluster instance", func(t *testing.T) {
 		err = testCtx.testDSCCreation()
 		require.NoError(t, err, "error creating DataScienceCluster instance")
 	})
+	t.Run("Creation of more than one of DataScienceCluster instance", func(t *testing.T) {
+		testCtx.testDSCDuplication(t)
+	})
 	t.Run("Validate all deployed components", func(t *testing.T) {
 		err = testCtx.testAllApplicationCreation(t)
 		require.NoError(t, err, "error testing deployments for DataScienceCluster: "+testCtx.testDsc.Name)
@@ -167,6 +176,56 @@ func (tc *testContext) testDSCCreation() error {
 	return waitDSCReady(tc)
 }
+func (tc *testContext) requireInstalled(t *testing.T, gvk schema.GroupVersionKind) {
+	t.Helper()
+	list := &unstructured.UnstructuredList{}
+	list.SetGroupVersionKind(gvk)
+
+	err := tc.customClient.List(tc.ctx, list)
+	require.NoErrorf(t, err, "Could not get %s list", gvk.Kind)
+
+	require.Greaterf(t, len(list.Items), 0, "%s has not been installed", gvk.Kind)
+}
+
+func (tc *testContext) testDuplication(t *testing.T, gvk schema.GroupVersionKind, o any) {
+	t.Helper()
+	tc.requireInstalled(t, gvk)
+
+	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o)
+	require.NoErrorf(t, err, "Could not unstructure %s", gvk.Kind)
+
+	obj := &unstructured.Unstructured{
+		Object: u,
+	}
+	obj.SetGroupVersionKind(gvk)
+
+	err = tc.customClient.Create(tc.ctx, obj)
+
+	require.Errorf(t, err, "Could create second %s", gvk.Kind)
+}
+
+func (tc *testContext) testDSCIDuplication(t *testing.T) { //nolint:thelper
+	gvk := schema.GroupVersionKind{
+		Group:   "dscinitialization.opendatahub.io",
+		Version: "v1",
+		Kind:    "DSCInitialization",
+	}
+	dup := setupDSCICR("e2e-test-dsci-dup")
+
+	tc.testDuplication(t, gvk, dup)
+}
+
+func (tc *testContext) testDSCDuplication(t *testing.T) { //nolint:thelper
+	gvk := schema.GroupVersionKind{
+		Group:   "datasciencecluster.opendatahub.io",
+		Version: "v1",
+		Kind:    "DataScienceCluster",
+	}
+	dup := setupDSCInstance("e2e-test-dup")
+
+	tc.testDuplication(t, gvk, dup)
+}
+
 func (tc *testContext) testAllApplicationCreation(t *testing.T) error { //nolint:funlen,thelper
 	// Validate test instance is in Ready state
diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go
index a31fae68491..3622772c005 100644
--- a/tests/e2e/helper_test.go
+++ b/tests/e2e/helper_test.go
@@ -66,10 +66,10 @@ func (tc *testContext) waitForControllerDeployment(name string, replicas int32)
 	return err
 }
-func setupDSCICR() *dsci.DSCInitialization {
+func setupDSCICR(name string) *dsci.DSCInitialization {
 	dsciTest := &dsci.DSCInitialization{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "e2e-test-dsci",
+			Name: name,
 		},
 		Spec: dsci.DSCInitializationSpec{
 			ApplicationsNamespace: "opendatahub",
@@ -89,10 +89,10 @@ func setupDSCICR() *dsci.DSCInitialization {
 	return dsciTest
 }
-func setupDSCInstance() *dsc.DataScienceCluster {
+func setupDSCInstance(name string) *dsc.DataScienceCluster {
 	dscTest := &dsc.DataScienceCluster{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "e2e-test-dsc",
+			Name: name,
 		},
 		Spec: dsc.DataScienceClusterSpec{
 			Components: dsc.Components{