diff --git a/examples/coco/1-kcp-origin.yaml b/examples/coco/1-kcp-origin.yaml new file mode 100644 index 00000000..acd4fad0 --- /dev/null +++ b/examples/coco/1-kcp-origin.yaml @@ -0,0 +1,21 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-runtimeclass +spec: + mutateExistingOnPolicyUpdate: true + rules: + - name: add-runtimeclass-to-deployment + match: + resources: + kinds: + - Deployment + mutate: + targets: + - apiVersion: apps/v1 + kind: Deployment + patchStrategicMerge: + spec: + template: + spec: + runtimeClassName: kata-qemu \ No newline at end of file diff --git a/examples/coco/2-kcp-snp.yaml b/examples/coco/2-kcp-snp.yaml new file mode 100644 index 00000000..80c2b956 --- /dev/null +++ b/examples/coco/2-kcp-snp.yaml @@ -0,0 +1,21 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-runtimeclass +spec: + mutateExistingOnPolicyUpdate: true + rules: + - name: add-runtimeclass-to-deployment + match: + resources: + kinds: + - Deployment + mutate: + targets: + - apiVersion: apps/v1 + kind: Deployment + patchStrategicMerge: + spec: + template: + spec: + runtimeClassName: kata-qemu-snp \ No newline at end of file diff --git a/examples/coco/3-kcp-label.yaml b/examples/coco/3-kcp-label.yaml new file mode 100644 index 00000000..eb0a3636 --- /dev/null +++ b/examples/coco/3-kcp-label.yaml @@ -0,0 +1,24 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-runtimeclass +spec: + mutateExistingOnPolicyUpdate: true + rules: + - name: add-runtimeclass-to-deployment + match: + resources: + kinds: + - Deployment + selector: + matchLabels: + app: nginx-test + mutate: + targets: + - apiVersion: apps/v1 + kind: Deployment + patchStrategicMerge: + spec: + template: + spec: + runtimeClassName: kata-qemu-snp \ No newline at end of file diff --git a/examples/coco/csib-si-coco.yaml b/examples/coco/csib-si-coco.yaml new file mode 100644 index 00000000..00f92757 --- /dev/null +++ b/examples/coco/csib-si-coco.yaml @@ -0,0 +1,25 @@ +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: SecurityIntent +metadata: + name: coco-workload +spec: + intent: + id: cocoWorkload + description: "Ensure workload is encrypted by running the specified workload in a Confidential VM" + action: Block +--- +apiVersion: intent.security.nimbus.com/v1alpha1 +kind: ClusterSecurityIntentBinding +metadata: + name: coco-workload-binding +spec: + intents: + - name: coco-workload + selector: + nsSelector: + matchNames: + - default + workloadSelector: + matchLabels: + app: nginx-test + diff --git a/examples/coco/k8s-nginx-coco-deploy.yaml b/examples/coco/k8s-nginx-coco-deploy.yaml new file mode 100644 index 00000000..86860ada --- /dev/null +++ b/examples/coco/k8s-nginx-coco-deploy.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-test-coco + labels: + app: nginx-coco +spec: + replicas: 1 + selector: + matchLabels: + app: nginx-coco + template: + metadata: + labels: + app: nginx-coco + spec: + containers: + - name: nginx-coco + image: 1hcoj/nginx \ No newline at end of file diff --git a/examples/coco/k8s-nginx-test-deploy.yaml b/examples/coco/k8s-nginx-test-deploy.yaml new file mode 100644 index 00000000..b7cb0d18 --- /dev/null +++ b/examples/coco/k8s-nginx-test-deploy.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-test + labels: + app: nginx-test +spec: + replicas: 1 + selector: + matchLabels: + app: nginx-test + template: + metadata: + labels: + app: nginx-test + spec: + containers: + - name: 
nginx-test + image: 1hcoj/nginx \ No newline at end of file diff --git a/pkg/adapter/idpool/idpool.go b/pkg/adapter/idpool/idpool.go index 403e968f..1fd3e167 100644 --- a/pkg/adapter/idpool/idpool.go +++ b/pkg/adapter/idpool/idpool.go @@ -20,6 +20,7 @@ const ( DisallowChRoot = "disallowChRoot" DisallowCapabilities = "disallowCapabilities" ExploitPFA = "PreventExecutionFromTempOrLogsFolders" + CocoWorkload = "cocoWorkload" ) // KaIds are IDs supported by KubeArmor. @@ -46,6 +47,10 @@ var KyvIds = []string{ EscapeToHost, } +var CocoIds = []string{ + CocoWorkload, +} + // IsIdSupportedBy determines whether a given ID is supported by a security engine. func IsIdSupportedBy(id, securityEngine string) bool { switch strings.ToLower(securityEngine) { @@ -55,6 +60,8 @@ func IsIdSupportedBy(id, securityEngine string) bool { return in(id, NetPolIDs) case "kyverno": return in(id, KyvIds) + case "coco": + return in(id, CocoIds) default: return false } diff --git a/pkg/adapter/nimbus-coco/Makefile b/pkg/adapter/nimbus-coco/Makefile new file mode 100644 index 00000000..a22c17c8 --- /dev/null +++ b/pkg/adapter/nimbus-coco/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright 2023 Authors of Nimbus + +# Image URL to use all building/pushing image targets +IMG ?= 5gsec/nimbus-coco +# Image Tag to use all building/pushing image targets +TAG ?= v0.1 + +CONTAINER_TOOL ?= docker +BINARY ?= bin/nimbus-coco + +build: + @go build -ldflags="-w" -o ${BINARY} main.go + +run: build + @./${BINARY} + +.PHONY: docker-build +docker-build: + $(CONTAINER_TOOL) build -t ${IMG}:${TAG} -t ${IMG}:latest --build-arg VERSION=${TAG} -f ./Dockerfile ../../../ + +.PHONY: docker-push +docker-push: + $(CONTAINER_TOOL) push ${IMG}:${TAG} + $(CONTAINER_TOOL) push ${IMG}:latest + +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name project-v3-builder + $(CONTAINER_TOOL) buildx use project-v3-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --build-arg VERSION=${TAG} --tag ${IMG}:${TAG} -f Dockerfile.cross ../../../ || { $(CONTAINER_TOOL) buildx rm project-v3-builder; rm Dockerfile.cross; exit 1; } + - $(CONTAINER_TOOL) buildx rm project-v3-builder + rm Dockerfile.cross \ No newline at end of file diff --git a/pkg/adapter/nimbus-coco/go.mod b/pkg/adapter/nimbus-coco/go.mod new file mode 100644 index 00000000..324d7876 --- /dev/null +++ b/pkg/adapter/nimbus-coco/go.mod @@ -0,0 +1,5 @@ +module github.com/5GSEC/nimbus/pkg/adapter/nimbus-coco + +go 1.22.0 + +replace github.com/5GSEC/nimbus => ../../../../nimbus diff --git a/pkg/adapter/nimbus-coco/main.go b/pkg/adapter/nimbus-coco/main.go new file mode 100644 index 00000000..080336b0 --- /dev/null +++ b/pkg/adapter/nimbus-coco/main.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Authors of Nimbus + +package main + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/5GSEC/nimbus/pkg/adapter/nimbus-coco/manager" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func main() { + ctrl.SetLogger(zap.New()) + logger := ctrl.Log + + ctx, cancelFunc := context.WithCancel(context.Background()) + 
ctx = ctrl.LoggerInto(ctx, logger) + + go func() { + termChan := make(chan os.Signal, 1) + signal.Notify(termChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + <-termChan + logger.Info("Shutdown signal received, waiting for all workers to finish") + cancelFunc() + logger.Info("All workers finished, shutting down") + }() + + logger.Info("Coco adapter started") + manager.Run(ctx) +} diff --git a/pkg/adapter/nimbus-coco/manager/manager.go b/pkg/adapter/nimbus-coco/manager/manager.go new file mode 100644 index 00000000..a8492b55 --- /dev/null +++ b/pkg/adapter/nimbus-coco/manager/manager.go @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Authors of Nimbus +package manager + +import ( + "context" + "strings" + + "github.com/go-logr/logr" + kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1" + "github.com/5GSEC/nimbus/pkg/adapter/k8s" + "github.com/5GSEC/nimbus/pkg/adapter/nimbus-coco/processor" + "github.com/5GSEC/nimbus/pkg/adapter/nimbus-coco/watcher" + adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util" + globalwatcher "github.com/5GSEC/nimbus/pkg/adapter/watcher" +) + +var ( + scheme = runtime.NewScheme() + k8sClient client.Client +) + +func init() { + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(kyvernov1.AddToScheme(scheme)) + k8sClient = k8s.NewOrDie(scheme) +} + +func Run(ctx context.Context) { + clusterNpChan := make(chan string) + deletedClusterNpChan := make(chan string) + go globalwatcher.WatchClusterNimbusPolicies(ctx, clusterNpChan, deletedClusterNpChan) + + updatedKcpCh := make(chan string) + deletedKcpCh := make(chan string) + go watcher.WatchKcps(ctx, updatedKcpCh, deletedKcpCh) + + for { + select { + case <-ctx.Done(): + close(clusterNpChan) + close(deletedClusterNpChan) + close(updatedKcpCh) + close(deletedKcpCh) + return + case createdCnp := <-clusterNpChan: + createOrUpdateKcp(ctx, createdCnp) + case deletedCnp := <-deletedClusterNpChan: + deleteKcp(ctx, deletedCnp) + case updatedKcp := <-updatedKcpCh: + reconcileKcp(ctx, updatedKcp, false) + case deletedKcp := <-deletedKcpCh: + reconcileKcp(ctx, deletedKcp, true) + } + + } +} +func reconcileKcp(ctx context.Context, kcpName string, deleted bool) { + logger := log.FromContext(ctx) + cnpName := adapterutil.ExtractClusterNpName(kcpName) + var cnp v1alpha1.ClusterNimbusPolicy + err := k8sClient.Get(ctx, types.NamespacedName{Name: cnpName}, &cnp) + if err != nil { + if !errors.IsNotFound(err) { + logger.Error(err, "failed to get ClusterNimbusPolicy", "ClusterNimbusPolicy.Name", cnpName) + } + return + } + if deleted { + logger.V(2).Info("Reconciling deleted KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", kcpName) + } else { + logger.V(2).Info("Reconciling modified KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", kcpName) + } + createOrUpdateKcp(ctx, cnpName) +} + +func createOrUpdateKcp(ctx context.Context, cnpName string) { + logger := log.FromContext(ctx) + var cnp v1alpha1.ClusterNimbusPolicy + if err := k8sClient.Get(ctx, types.NamespacedName{Name: cnpName}, &cnp); err != nil { + logger.Error(err, "failed to get ClusterNimbusPolicy", "ClusterNimbusPolicy.Name", cnpName) + return + } + + if 
adapterutil.IsOrphan(cnp.GetOwnerReferences(), "ClusterSecurityIntentBinding") { + logger.V(4).Info("Ignoring orphan ClusterNimbusPolicy", "ClusterNimbusPolicy.Name", cnpName) + return + } + + deleteDanglingkcps(ctx, cnp, logger) + kcps := processor.BuildKcpsFrom(logger, &cnp) + + for idx := range kcps { + kcp := kcps[idx] + + // Set ClusterNimbusPolicy as the owner of the KCP + if err := ctrl.SetControllerReference(&cnp, &kcp, scheme); err != nil { + logger.Error(err, "failed to set OwnerReference on KyvernoClusterPolicy", "Name", kcp.Name) + return + } + + var existingKcp kyvernov1.ClusterPolicy + err := k8sClient.Get(ctx, types.NamespacedName{Name: kcp.Name}, &existingKcp) + if err != nil && !errors.IsNotFound(err) { + logger.Error(err, "failed to get existing KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", kcp.Name) + return + } + if err != nil { + if errors.IsNotFound(err) { + if err = k8sClient.Create(ctx, &kcp); err != nil { + logger.Error(err, "failed to create KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", kcp.Name) + return + } + logger.Info("KyvernoClusterPolicy created", "KyvernoClusterPolicy.Name", kcp.Name) + } + } else { + kcp.ObjectMeta.ResourceVersion = existingKcp.ObjectMeta.ResourceVersion + if err = k8sClient.Update(ctx, &kcp); err != nil { + logger.Error(err, "failed to configure existing KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", existingKcp.Name) + return + } + logger.Info("KyvernoClusterPolicy configured", "KyvernoClusterPolicy.Name", existingKcp.Name) + } + + if err = adapterutil.UpdateCnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, false); err != nil { + logger.Error(err, "failed to update KyvernoClusterPolicies status in ClusterNimbusPolicy") + } + } +} + +func deleteKcp(ctx context.Context, cnpName string) { + logger := log.FromContext(ctx) + var kcps kyvernov1.ClusterPolicyList + + if err := k8sClient.List(ctx, &kcps); err != nil { + logger.Error(err, "failed to list KyvernoClusterPolicies") + return + } + + // Kubernetes GC automatically deletes the child when the parent/owner is + // deleted. So, we don't need to do anything in this case since the ClusterNimbusPolicy is + // the owner and when it gets deleted the corresponding KCPs will be automatically + // deleted. 
+ for _, kcp := range kcps.Items { + logger.Info("KyvernoClusterPolicy already deleted due to ClusterNimbusPolicy deletion", + "KyvernoClusterPolicy.Name", kcp.Name, + "ClusterNimbusPolicy.Name", cnpName, + ) + } +} + +func deleteDanglingkcps(ctx context.Context, cnp v1alpha1.ClusterNimbusPolicy, logger logr.Logger) { + var existingkcps kyvernov1.ClusterPolicyList + if err := k8sClient.List(ctx, &existingkcps); err != nil { + logger.Error(err, "failed to list KyvernoClusterPolicies for cleanup") + return + } + + var kcpsOwnedByCnp []kyvernov1.ClusterPolicy + for _, kcp := range existingkcps.Items { + for _, ownerRef := range kcp.OwnerReferences { + if ownerRef.Name == cnp.Name && ownerRef.UID == cnp.UID { + kcpsOwnedByCnp = append(kcpsOwnedByCnp, kcp) + break + } + } + } + if len(kcpsOwnedByCnp) == 0 { + return + } + + kcpsToDelete := make(map[string]kyvernov1.ClusterPolicy) + + // Populate owned kcps + for _, kcpOwnedByCnp := range kcpsOwnedByCnp { + kcpsToDelete[kcpOwnedByCnp.Name] = kcpOwnedByCnp + } + + for _, nimbusRule := range cnp.Spec.NimbusRules { + kcpName := cnp.Name + "-" + strings.ToLower(nimbusRule.ID) + delete(kcpsToDelete, kcpName) + } + + for kcpName := range kcpsToDelete { + kcp := kcpsToDelete[kcpName] + if err := k8sClient.Delete(ctx, &kcp); err != nil { + logger.Error(err, "failed to delete dangling KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", kcp.Name) + continue + } + + logger.Info("Dangling KyvernoClusterPolicy deleted", "KyvernoClusterPolicy.Name", kcp.Name) + + if err := adapterutil.UpdateCnpStatus(ctx, k8sClient, "KyvernoClusterPolicy/"+kcp.Name, cnp.Name, true); err != nil { + logger.Error(err, "failed to update KyvernoClusterPolicy status in ClusterNimbusPolicy") + } + + } +} diff --git a/pkg/adapter/nimbus-coco/processor/kcpbuilder.go b/pkg/adapter/nimbus-coco/processor/kcpbuilder.go new file mode 100644 index 00000000..262d4392 --- /dev/null +++ b/pkg/adapter/nimbus-coco/processor/kcpbuilder.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Authors of Nimbus + +package processor + +import ( + "encoding/json" + "strings" + + v1alpha1 "github.com/5GSEC/nimbus/api/v1alpha1" + "github.com/5GSEC/nimbus/pkg/adapter/idpool" + "github.com/go-logr/logr" + kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func BuildKcpsFrom(logger logr.Logger, cnp *v1alpha1.ClusterNimbusPolicy) []kyvernov1.ClusterPolicy { + // Build KCPs based on given IDs + var kcps []kyvernov1.ClusterPolicy + for _, nimbusRule := range cnp.Spec.NimbusRules { + id := nimbusRule.ID + if idpool.IsIdSupportedBy(id, "coco") { + kcp := buildKcpFor(id, cnp) + kcp.Name = cnp.Name + "-" + strings.ToLower(id) + kcp.Annotations = make(map[string]string) + kcp.Annotations["policies.kyverno.io/description"] = nimbusRule.Description + if nimbusRule.Rule.RuleAction == "Block" { + kcp.Spec.ValidationFailureAction = kyvernov1.ValidationFailureAction("Enforce") + } else { + kcp.Spec.ValidationFailureAction = kyvernov1.ValidationFailureAction("Audit") + } + kcp.Spec.MutateExistingOnPolicyUpdate = true + addManagedByAnnotationForClusterScopedPolicy(&kcp) + kcps = append(kcps, kcp) + } else { + logger.Info("Coco does not support this ID", "ID", id, + "ClusterNimbusPolicy", cnp.Name) + } + } + return kcps +} + +// buildKcpFor builds a KyvernoClusterPolicy based on an intent ID supported by the Kyverno policy engine. 
+func buildKcpFor(id string, cnp *v1alpha1.ClusterNimbusPolicy) kyvernov1.ClusterPolicy { + switch id { + case idpool.CocoWorkload: + return clusterCocoWorkload(cnp) + default: + return kyvernov1.ClusterPolicy{} + } +} + +var nsBlackList = []string{"kube-system"} + +func clusterCocoWorkload(cnp *v1alpha1.ClusterNimbusPolicy) kyvernov1.ClusterPolicy { + var matchResource kyvernov1.ResourceDescription + var excludeFilters []kyvernov1.ResourceFilter + + // exclude kube-system + excludeFilters = append(excludeFilters, kyvernov1.ResourceFilter{ + ResourceDescription: kyvernov1.ResourceDescription{ + Namespaces: nsBlackList, + }, + }) + + if len(cnp.Spec.NsSelector.MatchNames) > 0 { + matchResource = kyvernov1.ResourceDescription{ + Kinds: []string{ + "Deployment", + }, + Namespaces: cnp.Spec.NsSelector.MatchNames, + } + if len(cnp.Spec.WorkloadSelector.MatchLabels) > 0 { + matchResource.Selector = &metav1.LabelSelector{ + MatchLabels: cnp.Spec.WorkloadSelector.MatchLabels, + } + } + } else if len(cnp.Spec.NsSelector.ExcludeNames) > 0 { + matchResource = kyvernov1.ResourceDescription{ + Kinds: []string{ + "Deployment", + }, + } + excludeFilters = append(excludeFilters, kyvernov1.ResourceFilter{ + ResourceDescription: kyvernov1.ResourceDescription{ + Namespaces: cnp.Spec.NsSelector.ExcludeNames, + }, + }) + } + + background := true + + targets := []kyvernov1.TargetResourceSpec{ + { + ResourceSpec: kyvernov1.ResourceSpec{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + } + + patchStrategicMerge := map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "runtimeClassName": "kata-qemu-snp", + }, + }, + }, + } + patchBytes, _ := json.Marshal(patchStrategicMerge) + + return kyvernov1.ClusterPolicy{ + Spec: kyvernov1.Spec{ + Background: &background, + Rules: []kyvernov1.Rule{ + { + Name: "coco-workload", + MatchResources: kyvernov1.MatchResources{ + ResourceDescription: matchResource, + }, + ExcludeResources: kyvernov1.MatchResources{ + Any: excludeFilters, + }, + Mutation: kyvernov1.Mutation{ + Targets: targets, + RawPatchStrategicMerge: &apiextv1.JSON{Raw: patchBytes}, + }, + }, + }, + }, + } +} + +func addManagedByAnnotationForClusterScopedPolicy(kcp *kyvernov1.ClusterPolicy) { + kcp.Annotations["app.kubernetes.io/managed-by"] = "nimbus-coco" +} diff --git a/pkg/adapter/nimbus-coco/utils/utils.go b/pkg/adapter/nimbus-coco/utils/utils.go new file mode 100644 index 00000000..650dee23 --- /dev/null +++ b/pkg/adapter/nimbus-coco/utils/utils.go @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Authors of Nimbus + +package utils + +import ( + "fmt" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func GetGVK(kind string) string { + // Map to store the mappings of kinds to their corresponding API versions + kindToAPIVersion := map[string]string{ + "deployment": "apps/v1", + "pod": "v1", + "statefulset": "apps/v1", + "daemonset": "apps/v1", + "replicaset": "apps/v1", + } + + // Convert kind to lowercase to handle case insensitivity + kind = strings.ToLower(kind) + + // Retrieve API version from the map + apiVersion, exists := kindToAPIVersion[kind] + if !exists { + return "" + } + + switch kind { + case "replicaset": + kind = "ReplicaSet" + case "statefulset": + kind = "StatefulSet" + case "daemonset": + kind = "DaemonSet" + default: + kind = Title(kind) + } + + // Combine API version and kind to form the GroupVersionKind string + return fmt.Sprintf("%s/%s", 
apiVersion, kind) +} + +func Title(input string) string { + toTitle := cases.Title(language.Und) + + return toTitle.String(input) +} diff --git a/pkg/adapter/nimbus-coco/watcher/kcpwatcher.go b/pkg/adapter/nimbus-coco/watcher/kcpwatcher.go new file mode 100644 index 00000000..3820c188 --- /dev/null +++ b/pkg/adapter/nimbus-coco/watcher/kcpwatcher.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2023 Authors of Nimbus + +package watcher + +import ( + "context" + "time" + + "github.com/5GSEC/nimbus/pkg/adapter/k8s" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/log" + + adapterutil "github.com/5GSEC/nimbus/pkg/adapter/util" +) + +var ( + factory dynamicinformer.DynamicSharedInformerFactory +) + +func init() { + factory = dynamicinformer.NewDynamicSharedInformerFactory(k8s.NewDynamicClient(), time.Minute) +} + +func kcpInformer() cache.SharedIndexInformer { + kcpGvr := schema.GroupVersionResource{ + Group: "kyverno.io", + Version: "v1", + Resource: "clusterpolicies", + } + informer := factory.ForResource(kcpGvr).Informer() + return informer +} + +// WatchKcps watches update and delete events for KyvernoClusterPolicies owned by a +// ClusterNimbusPolicy and puts their names on the respective channels. +func WatchKcps(ctx context.Context, updatedKcpCh, deletedKcpCh chan string) { + logger := log.FromContext(ctx) + informer := kcpInformer() + handlers := cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + oldU := oldObj.(*unstructured.Unstructured) + newU := newObj.(*unstructured.Unstructured) + + if adapterutil.IsOrphan(newU.GetOwnerReferences(), "ClusterNimbusPolicy") { + logger.V(4).Info("Ignoring orphan KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", oldU.GetName(), "Operation", "Update") + return + } + + if oldU.GetGeneration() == newU.GetGeneration() { + return + } + + kcpName := newU.GetName() + updatedKcpCh <- kcpName + }, + DeleteFunc: func(obj interface{}) { + u := obj.(*unstructured.Unstructured) + if adapterutil.IsOrphan(u.GetOwnerReferences(), "ClusterNimbusPolicy") { + logger.V(4).Info("Ignoring orphan KyvernoClusterPolicy", "KyvernoClusterPolicy.Name", u.GetName(), "Operation", "Delete") + return + } + kcpName := u.GetName() + deletedKcpCh <- kcpName + }, + } + _, err := informer.AddEventHandler(handlers) + if err != nil { + logger.Error(err, "failed to add event handlers") + return + } + logger.Info("KyvernoClusterPolicy watcher started") + informer.Run(ctx.Done()) +}
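For reference, a minimal sketch of the Kyverno ClusterPolicy that kcpbuilder.go is expected to render for the sample examples/coco/csib-si-coco.yaml above. It assumes the generated ClusterNimbusPolicy inherits the binding's name (coco-workload-binding), so the policy name and the description annotation shown here are illustrative rather than guaranteed:

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  # assumed name: <ClusterNimbusPolicy name>-<lowercased intent ID>
  name: coco-workload-binding-cocoworkload
  annotations:
    app.kubernetes.io/managed-by: nimbus-coco
    policies.kyverno.io/description: "Ensure workload is encrypted by running the specified workload in a Confidential VM"
spec:
  background: true
  mutateExistingOnPolicyUpdate: true
  validationFailureAction: Enforce   # action "Block" maps to Enforce, anything else to Audit
  rules:
    - name: coco-workload
      match:
        resources:
          kinds:
            - Deployment
          namespaces:
            - default
          selector:
            matchLabels:
              app: nginx-test
      exclude:
        any:
          - resources:
              namespaces:
                - kube-system
      mutate:
        targets:
          - apiVersion: apps/v1
            kind: Deployment
        patchStrategicMerge:
          spec:
            template:
              spec:
                runtimeClassName: kata-qemu-snp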