diff --git a/Makefile b/Makefile index be77f81f9a2..20a875edcd4 100644 --- a/Makefile +++ b/Makefile @@ -143,9 +143,7 @@ endef .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. -# TODO: enable below when we do webhook -# $(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases - $(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd:ignoreUnexportedFields=true paths="./..." output:crd:artifacts:config=config/crd/bases + $(CONTROLLER_GEN) rbac:roleName=controller-manager-role crd:ignoreUnexportedFields=true webhook paths="./..." output:crd:artifacts:config=config/crd/bases $(call fetch-external-crds,github.com/openshift/api,route/v1) $(call fetch-external-crds,github.com/openshift/api,user/v1) diff --git a/PROJECT b/PROJECT index cbb40d6a5b2..4bfc6c97792 100644 --- a/PROJECT +++ b/PROJECT @@ -21,6 +21,9 @@ resources: kind: DSCInitialization path: github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1 version: v1 + webhooks: + validation: true + webhookVersion: v1 - api: crdVersion: v1 namespaced: false @@ -30,4 +33,7 @@ resources: kind: DataScienceCluster path: github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1 version: v1 + webhooks: + validation: true + webhookVersion: v1 version: "3" diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index 87aab1c33b5..ad60f4b6c9b 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -99,7 +99,7 @@ metadata: categories: AI/Machine Learning, Big Data certified: "False" containerImage: quay.io/opendatahub/opendatahub-operator:v2.0.0 - createdAt: "2024-07-24T19:29:46Z" + createdAt: "2024-08-01T14:26:51Z" features.operators.openshift.io/disconnected: "true" 
features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" @@ -1729,6 +1729,10 @@ spec: initialDelaySeconds: 15 periodSeconds: 20 name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP readinessProbe: httpGet: path: /readyz @@ -1747,10 +1751,19 @@ spec: capabilities: drop: - ALL + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true securityContext: runAsNonRoot: true serviceAccountName: redhat-ods-operator-controller-manager terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: redhat-ods-operator-controller-webhook-cert strategy: deployment installModes: - supported: false @@ -1779,3 +1792,26 @@ spec: provider: name: Red Hat version: 2.12.0 + webhookdefinitions: + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: redhat-ods-operator-controller-manager + failurePolicy: Fail + generateName: operator.opendatahub.io + rules: + - apiGroups: + - datasciencecluster.opendatahub.io + - dscinitialization.opendatahub.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - datascienceclusters + - dscinitializations + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-opendatahub-io-v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index fc1b6912725..1a7fe00e8c9 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -21,7 +21,7 @@ resources: - ../manager # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -#- ../webhook +- ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 
@@ -37,6 +37,7 @@ resources: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml +# Moved below to patches #- manager_webhook_patch.yaml # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. @@ -76,3 +77,5 @@ resources: patches: - path: manager_auth_proxy_patch.yaml +# [WEBHOOK] +- path: manager_webhook_patch.yaml diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 00000000000..d76a0133d0d --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: redhat-ods-operator-controller-webhook-cert diff --git a/config/monitoring/networkpolicy/operator/operator.yaml b/config/monitoring/networkpolicy/operator/operator.yaml index a61b5a46f2a..95a53cec3f9 100644 --- a/config/monitoring/networkpolicy/operator/operator.yaml +++ b/config/monitoring/networkpolicy/operator/operator.yaml @@ -22,5 +22,8 @@ spec: - namespaceSelector: matchLabels: opendatahub.io/generated-namespace: "true" + - namespaceSelector: + matchLabels: + policy-group.network.openshift.io/host-network: "" policyTypes: - - Ingress \ No newline at end of file + - Ingress diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 00000000000..8428859f524 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,9 @@ +resources: +- manifests.yaml +- service.yaml + +commonAnnotations: + service.beta.openshift.io/inject-cabundle: "true" + +configurations: +- kustomizeconfig.yaml 
diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 00000000000..25e21e3c963 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000000..5595314bffb --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-opendatahub-io-v1 + failurePolicy: Fail + name: operator.opendatahub.io + rules: + - apiGroups: + - datasciencecluster.opendatahub.io + - dscinitialization.opendatahub.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - datascienceclusters + - dscinitializations + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 00000000000..72c652ddaae --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,22 @@ + 
+apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: service + app.kubernetes.io/instance: webhook-service + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: rhods-operator + app.kubernetes.io/part-of: rhods-operator + app.kubernetes.io/managed-by: kustomize + name: webhook-service + namespace: system + annotations: + service.beta.openshift.io/serving-cert-secret-name: redhat-ods-operator-controller-webhook-cert +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + control-plane: controller-manager diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go index b355cff49e5..87fb116326b 100644 --- a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -65,9 +65,6 @@ func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil case 1: dsciInstance = &dsciInstances.Items[0] - default: - message := "only one instance of DSCInitialization object is allowed" - return ctrl.Result{}, errors.New(message) } if skipApplyTrustCAConfig(dsciInstance.Spec.TrustedCABundle) { diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index c013c217b9d..a185a735e6c 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -144,19 +144,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R return reconcile.Result{Requeue: true}, nil } - if len(instances.Items) > 1 { - message := fmt.Sprintf("only one instance of DataScienceCluster object is allowed. 
Update existing instance %s", req.Name) - err := errors.New(message) - _ = r.reportError(err, instance, message) - - _, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDataScienceCluster, message) - saved.Status.Phase = status.PhaseError - }) - - return ctrl.Result{}, err - } - // Verify a valid DSCInitialization instance is created dsciInstances := &dsciv1.DSCInitializationList{} err = r.Client.List(ctx, dsciInstances) @@ -167,7 +154,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R } // Update phase to error state if DataScienceCluster is created without valid DSCInitialization - switch len(dsciInstances.Items) { + switch len(dsciInstances.Items) { // only handle number as 0 or 1, others won't be existed since webhook block creation case 0: reason := status.ReconcileFailed message := "Failed to get a valid DSCInitialization instance, please create a DSCI instance" @@ -186,13 +173,6 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R case 1: dscInitializationSpec := dsciInstances.Items[0].Spec dscInitializationSpec.DeepCopyInto(r.DataScienceCluster.DSCISpec) - default: - message := "only one instance of DSCInitialization object is allowed" - _, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDSCInitialization, message) - saved.Status.Phase = status.PhaseError - }) - return ctrl.Result{}, errors.New(message) } if instance.ObjectMeta.DeletionTimestamp.IsZero() { diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 7ca934682a5..962c4d8d9e2 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -19,8 
+19,6 @@ package dscinitialization import ( "context" - "errors" - "fmt" "path/filepath" "reflect" @@ -94,33 +92,11 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } var instance *dsciv1.DSCInitialization - switch { + switch { // only handle number as 0 or 1, others won't be existed since webhook block creation case len(instances.Items) == 0: return ctrl.Result{}, nil case len(instances.Items) == 1: instance = &instances.Items[0] - case len(instances.Items) > 1: - // find out the one by created timestamp and use it as the default one - earliestDSCI := &instances.Items[0] - for _, instance := range instances.Items { - currentDSCI := instance - if currentDSCI.CreationTimestamp.Before(&earliestDSCI.CreationTimestamp) { - earliestDSCI = ¤tDSCI - } - } - message := fmt.Sprintf("only one instance of DSCInitialization object is allowed. Please delete other instances than %s", earliestDSCI.Name) - // update all instances Message and Status - for _, deletionInstance := range instances.Items { - deletionInstance := deletionInstance - if deletionInstance.Name != earliestDSCI.Name { - _, _ = status.UpdateWithRetry(ctx, r.Client, &deletionInstance, func(saved *dsciv1.DSCInitialization) { - status.SetErrorCondition(&saved.Status.Conditions, status.DuplicateDSCInitialization, message) - saved.Status.Phase = status.PhaseError - }) - } - } - - return ctrl.Result{}, errors.New(message) } if instance.ObjectMeta.DeletionTimestamp.IsZero() { diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index d558f1e916d..ff391dcd2a6 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -166,24 +166,6 @@ var _ = Describe("DataScienceCluster initialization", func() { Context("Handling existing resources", func() { AfterEach(cleanupResources) - const applicationName = "default-dsci" - - It("Should not have more 
than one DSCI instance in the cluster", func(ctx context.Context) { - - anotherApplicationName := "default2" - // given - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) - Expect(k8sClient.Create(context.Background(), desiredDsci)).Should(Succeed()) - // when - desiredDsci2 := createDSCI(anotherApplicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) - // then - Eventually(dscInitializationIsReady(anotherApplicationName, workingNamespace, desiredDsci2)). - WithContext(ctx). - WithTimeout(timeout). - WithPolling(interval). - Should(BeFalse()) - }) - It("Should not update rolebinding if it exists", func(ctx context.Context) { applicationName := envtestutil.AppendRandomNameTo("rolebinding-test") diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go new file mode 100644 index 00000000000..bdb7283878b --- /dev/null +++ b/controllers/webhook/webhook.go @@ -0,0 +1,101 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook + +import ( + "context" + "fmt" + "net/http" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var log = ctrl.Log.WithName("rhoai-controller-webhook") + +//+kubebuilder:webhook:path=/validate-opendatahub-io-v1,mutating=false,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io;dscinitialization.opendatahub.io,resources=datascienceclusters;dscinitializations,verbs=create;update,versions=v1,name=operator.opendatahub.io,admissionReviewVersions=v1 +//nolint:lll + +type OpenDataHubWebhook struct { + Client client.Client + Decoder *admission.Decoder +} + +func (w *OpenDataHubWebhook) SetupWithManager(mgr ctrl.Manager) { + hookServer := mgr.GetWebhookServer() + odhWebhook := &webhook.Admission{ + Handler: w, + } + hookServer.Register("/validate-opendatahub-io-v1", odhWebhook) +} + +func (w *OpenDataHubWebhook) checkDupCreation(ctx context.Context, req admission.Request) admission.Response { + if req.Operation != admissionv1.Create { + return admission.Allowed(fmt.Sprintf("duplication check: skipping %v request", req.Operation)) + } + + switch req.Kind.Kind { + case "DataScienceCluster": + case "DSCInitialization": + default: + log.Info("Got wrong kind", "kind", req.Kind.Kind) + return admission.Errored(http.StatusBadRequest, fmt.Errorf("unexpected kind: %s", req.Kind.Kind)) + } + + gvk := schema.GroupVersionKind{ + Group: req.Kind.Group, + Version: req.Kind.Version, + Kind: req.Kind.Kind, + } + + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(gvk) + + if err := w.Client.List(ctx, list); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + // if len == 1 now creation of #2 is being handled + if len(list.Items) > 0 { + return 
admission.Denied(fmt.Sprintf("Only one instance of %s object is allowed", req.Kind.Kind)) + } + + return admission.Allowed(fmt.Sprintf("%s duplication check passed", req.Kind.Kind)) +} + +func (w *OpenDataHubWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + var resp admission.Response + + // Handle only Create and Update + if req.Operation == admissionv1.Delete || req.Operation == admissionv1.Connect { + msg := fmt.Sprintf("RHOAI skipping %v request", req.Operation) + log.Info(msg) + return admission.Allowed(msg) + } + + resp = w.checkDupCreation(ctx, req) + if !resp.Allowed { + return resp + } + + return admission.Allowed(fmt.Sprintf("%s allowed", req.Kind.Kind)) +} diff --git a/controllers/webhook/webhook_suite_test.go b/controllers/webhook/webhook_suite_test.go new file mode 100644 index 00000000000..2b83dd1ece1 --- /dev/null +++ b/controllers/webhook/webhook_suite_test.go @@ -0,0 +1,258 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare" + "github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard" + "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" + "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" + "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" + "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" + "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" + "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const ( + namespace = "webhook-test-ns" + nameBase = "webhook-test" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var gCtx context.Context +var gCancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + // can't use suite's context as the manager should survive the function + gCtx, gCancel = context.WithCancel(context.Background()) + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := runtime.NewScheme() + // DSCI + err = dsciv1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + // DSC + err = dscv1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + // Webhook + err = admissionv1beta1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + LeaderElection: false, + Metrics: ctrlmetrics.Options{ + BindAddress: "0", + CertDir: webhookInstallOptions.LocalServingCertDir, + }, + WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{ + Port: webhookInstallOptions.LocalServingPort, + TLSOpts: []func(*tls.Config){func(config *tls.Config) {}}, + Host: webhookInstallOptions.LocalServingHost, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + }) + 
Expect(err).NotTo(HaveOccurred()) + + (&webhook.OpenDataHubWebhook{ + Client: mgr.GetClient(), + Decoder: admission.NewDecoder(mgr.GetScheme()), + }).SetupWithManager(mgr) + + // +kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(gCtx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) //nolint:gosec + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + gCancel() + + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = Describe("DSC/DSCI webhook", func() { + It("Should not have more than one DSCI instance in the cluster", func(ctx context.Context) { + desiredDsci := newDSCI(nameBase + "-dsci-1") + Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) + desiredDsci2 := newDSCI(nameBase + "-dsci-2") + Expect(k8sClient.Create(ctx, desiredDsci2)).ShouldNot(Succeed()) + }) + + It("Should block creation of second DSC instance", func(ctx context.Context) { + dscSpec := newDSC(nameBase+"-dsc-1", namespace) + Expect(k8sClient.Create(ctx, dscSpec)).Should(Succeed()) + dscSpec = newDSC(nameBase+"-dsc-2", namespace) + Expect(k8sClient.Create(ctx, dscSpec)).ShouldNot(Succeed()) + }) +}) + +func newDSCI(appName string) *dsciv1.DSCInitialization { + monitoringNS := "monitoring-namespace" + return &dsciv1.DSCInitialization{ + TypeMeta: metav1.TypeMeta{ + Kind: "DSCInitialization", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: namespace, + }, + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: namespace, + Monitoring: dsciv1.Monitoring{ 
+ Namespace: monitoringNS, + ManagementState: operatorv1.Managed, + }, + TrustedCABundle: &dsciv1.TrustedCABundleSpec{ + ManagementState: operatorv1.Managed, + }, + }, + } +} +func newDSC(name string, namespace string) *dscv1.DataScienceCluster { + return &dscv1.DataScienceCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: dscv1.DataScienceClusterSpec{ + Components: dscv1.Components{ + Dashboard: dashboard.Dashboard{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + Workbenches: workbenches.Workbenches{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + ModelMeshServing: modelmeshserving.ModelMeshServing{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + DataSciencePipelines: datasciencepipelines.DataSciencePipelines{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + Kserve: kserve.Kserve{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + CodeFlare: codeflare.CodeFlare{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + Ray: ray.Ray{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + TrustyAI: trustyai.TrustyAI{ + Component: components.Component{ + ManagementState: operatorv1.Removed, + }, + }, + }, + }, + } +} diff --git a/main.go b/main.go index cdd31d5d92b..152de4f6b70 100644 --- a/main.go +++ b/main.go @@ -50,6 +50,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -58,6 +59,7 @@ import ( dscctrl 
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/datasciencecluster" dscictrl "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" @@ -151,6 +153,11 @@ func main() { os.Exit(1) } + (&webhook.OpenDataHubWebhook{ + Client: mgr.GetClient(), + Decoder: admission.NewDecoder(mgr.GetScheme()), + }).SetupWithManager(mgr) + if err = (&dscictrl.DSCInitializationReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), diff --git a/tests/e2e/creation_test.go b/tests/e2e/creation_test.go index d880d0bbba8..b73c64d8e08 100644 --- a/tests/e2e/creation_test.go +++ b/tests/e2e/creation_test.go @@ -14,6 +14,9 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" @@ -41,10 +44,9 @@ func creationTestSuite(t *testing.T) { require.NoError(t, err, "error creating DSCI CR") }) - // TODO: enable test when we have webhook in place - // t.Run("Creation of more than one of DSCInitialization instance", func(t *testing.T) { - // testCtx.testDSCIDuplication(t) - // }) + t.Run("Creation of more than one of DSCInitialization instance", func(t *testing.T) { + testCtx.testDSCIDuplication(t) + }) t.Run("Validate DSCInitialization instance", func(t *testing.T) { err = testCtx.validateDSCI() @@ -60,10 +62,9 @@ func creationTestSuite(t *testing.T) { err = testCtx.testDSCCreation() require.NoError(t, 
err, "error creating DataScienceCluster instance") }) - // TODO: enable test when we have webhook in place - // t.Run("Creation of more than one of DataScienceCluster instance", func(t *testing.T) { - // testCtx.testDSCDuplication(t) - // }) + t.Run("Creation of more than one of DataScienceCluster instance", func(t *testing.T) { + testCtx.testDSCDuplication(t) + }) t.Run("Validate Ownerrefrences exist", func(t *testing.T) { err = testCtx.testOwnerrefrences() require.NoError(t, err, "error getting all DataScienceCluster's Ownerrefrences") @@ -189,16 +190,6 @@ func (tc *testContext) validateDSCReady() error { return waitDSCReady(tc) } -// func (tc *testContext) requireInstalled(t *testing.T, gvk schema.GroupVersionKind) { -// t.Helper() -// list := &unstructured.UnstructuredList{} -// list.SetGroupVersionKind(gvk) -// err := tc.customClient.List(tc.ctx, list) -// require.NotEmptyf(t, err, "Could not get %s list", gvk.Kind) -// require.Greaterf(t, len(list.Items), 0, "%s has not been installed", gvk.Kind) -//} -// Verify DSC instance is in Ready phase when all components are up and running - func waitDSCReady(tc *testContext) error { // wait for 2 mins which is on the safe side, normally it should get ready once all components are ready err := tc.wait(func(ctx context.Context) (bool, error) { @@ -219,39 +210,55 @@ func waitDSCReady(tc *testContext) error { return nil } -// func (tc *testContext) testDuplication(t *testing.T, gvk schema.GroupVersionKind, o any) { -// t.Helper() -// tc.requireInstalled(t, gvk) -// u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o) -// require.NoErrorf(t, err, "Could not unstructure %s", gvk.Kind) -// obj := &unstructured.Unstructured{ -// Object: u, -// } -// obj.SetGroupVersionKind(gvk) -// err = tc.customClient.Create(tc.ctx, obj) -// require.Errorf(t, err, "Could create second %s", gvk.Kind) -//} - -// func (tc *testContext) testDSCIDuplication(t *testing.T) { //nolint:thelper -// gvk := schema.GroupVersionKind{ -// 
Group: "dscinitialization.opendatahub.io", -// Version: "v1", -// Kind: "DSCInitialization", -// } -// dup := setupDSCICR("e2e-test-dsci-dup") -// tc.testDuplication(t, gvk, dup) -// } - -// func (tc *testContext) testDSCDuplication(t *testing.T) { //nolint:thelper -// gvk := schema.GroupVersionKind{ -// Group: "datasciencecluster.opendatahub.io", -// Version: "v1", -// Kind: "DataScienceCluster", -// } -// -// dup := setupDSCInstance("e2e-test-dsc-dup") -// tc.testDuplication(t, gvk, dup) -//} +func (tc *testContext) requireInstalled(t *testing.T, gvk schema.GroupVersionKind) { + t.Helper() + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(gvk) + + err := tc.customClient.List(tc.ctx, list) + require.NoErrorf(t, err, "Could not get %s list", gvk.Kind) + + require.NotEmptyf(t, list.Items, "%s has not been installed", gvk.Kind) +} + +func (tc *testContext) testDuplication(t *testing.T, gvk schema.GroupVersionKind, o any) { + t.Helper() + tc.requireInstalled(t, gvk) + + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o) + require.NoErrorf(t, err, "Could not unstructure %s", gvk.Kind) + + obj := &unstructured.Unstructured{ + Object: u, + } + obj.SetGroupVersionKind(gvk) + + err = tc.customClient.Create(tc.ctx, obj) + + require.Errorf(t, err, "Could create second %s", gvk.Kind) +} + +func (tc *testContext) testDSCIDuplication(t *testing.T) { //nolint:thelper + gvk := schema.GroupVersionKind{ + Group: "dscinitialization.opendatahub.io", + Version: "v1", + Kind: "DSCInitialization", + } + dup := setupDSCICR("e2e-test-dsci-dup") + + tc.testDuplication(t, gvk, dup) +} + +func (tc *testContext) testDSCDuplication(t *testing.T) { //nolint:thelper + gvk := schema.GroupVersionKind{ + Group: "datasciencecluster.opendatahub.io", + Version: "v1", + Kind: "DataScienceCluster", + } + dup := setupDSCInstance("e2e-test-dsc-dup") + + tc.testDuplication(t, gvk, dup) +} func (tc *testContext) testAllComponentCreation(t *testing.T) error { 
//nolint:funlen,thelper // Validate all components are in Ready state