diff --git a/Makefile b/Makefile
index 5691636..3850ac8 100644
--- a/Makefile
+++ b/Makefile
@@ -69,7 +69,7 @@ KIND := $(TOOLS_BIN_DIR)/kind
 KUBECTL := $(TOOLS_BIN_DIR)/kubectl
 
 GOLANGCI_LINT_VERSION := "v1.62.2"
-CLUSTERCTL_VERSION := "v1.9.3"
+CLUSTERCTL_VERSION := "v1.9.4"
 
 KUSTOMIZE_VER := v5.3.0
 KUSTOMIZE_BIN := kustomize
@@ -308,7 +308,7 @@ deploy-projectsveltos: $(KUSTOMIZE)
 
 	@echo 'Install libsveltos CRDs'
 	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_debuggingconfigurations.yaml
-	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_sveltosclusters.yaml
+	$(KUBECTL) apply -f https://raw.githubusercontent.com/projectsveltos/libsveltos/$(TAG)/config/crd/bases/lib.projectsveltos.io_sveltosclusters.yaml
 
 	# Install projectsveltos sveltoscluster-manager components
 	@echo 'Install projectsveltos sveltoscluster-manager components'
@@ -316,4 +316,4 @@ deploy-projectsveltos: $(KUSTOMIZE)
 	$(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f-
 
 	@echo "Waiting for projectsveltos sveltoscluster-manager to be available..."
-	$(KUBECTL) wait --for=condition=Available deployment/sc-manager -n projectsveltos --timeout=$(TIMEOUT)
+	$(KUBECTL) wait --for=condition=Available deployment/sc-manager -n projectsveltos --timeout=$(TIMEOUT)
\ No newline at end of file
diff --git a/controllers/cluster_checks.go b/controllers/cluster_checks.go
new file mode 100644
index 0000000..04bf7cf
--- /dev/null
+++ b/controllers/cluster_checks.go
@@ -0,0 +1,291 @@
+/*
+Copyright 2024. projectsveltos.io. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/go-logr/logr"
+	lua "github.com/yuin/gopher-lua"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+
+	libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1"
+	logs "github.com/projectsveltos/libsveltos/lib/logsettings"
+	sveltoslua "github.com/projectsveltos/libsveltos/lib/lua"
+)
+
+func runChecks(ctx context.Context, remoteConfig *rest.Config, checks []libsveltosv1beta1.ClusterCheck,
+	logger logr.Logger) error {
+
+	for i := range checks {
+		pass, err := runCheck(ctx, remoteConfig, &checks[i], logger)
+		if err != nil {
+			return err
+		}
+		if !pass {
+			logger.V(logs.LogInfo).Info(fmt.Sprintf("cluster check %s failed", checks[i].Name))
+			return fmt.Errorf("cluster check %s failed", checks[i].Name)
+		}
+	}
+
+	return nil
+}
+
+func runCheck(ctx context.Context, remoteConfig *rest.Config, check *libsveltosv1beta1.ClusterCheck,
+	logger logr.Logger) (bool, error) {
+
+	resources, err := getResources(ctx, remoteConfig, check.ResourceSelectors, logger)
+	if err != nil {
+		return false, err
+	}
+
+	return validateCheck(check.Condition, resources, logger)
+}
+
+// getResources returns resources matching ResourceSelectors.
+func getResources(ctx context.Context, remoteConfig *rest.Config, resourceSelectors []libsveltosv1beta1.ResourceSelector,
+	logger logr.Logger) ([]*unstructured.Unstructured, error) {
+
+	resources := []*unstructured.Unstructured{}
+	for i := range resourceSelectors {
+		matching, err := getResourcesMatchingResourceSelector(ctx, remoteConfig, &resourceSelectors[i], logger)
+		if err != nil {
+			return nil, err
+		}
+
+		resources = append(resources, matching...)
+	}
+
+	return resources, nil
+}
+
+// getResourcesMatchingResourceSelector returns resources matching ResourceSelector.
+func getResourcesMatchingResourceSelector(ctx context.Context, remoteConfig *rest.Config, resourceSelector *libsveltosv1beta1.ResourceSelector,
+	logger logr.Logger) ([]*unstructured.Unstructured, error) {
+
+	gvk := schema.GroupVersionKind{
+		Group:   resourceSelector.Group,
+		Version: resourceSelector.Version,
+		Kind:    resourceSelector.Kind,
+	}
+
+	dc := discovery.NewDiscoveryClientForConfigOrDie(remoteConfig)
+	groupResources, err := restmapper.GetAPIGroupResources(dc)
+	if err != nil {
+		return nil, err
+	}
+	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)
+
+	mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		if meta.IsNoMatchError(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	resourceId := schema.GroupVersionResource{
+		Group:    gvk.Group,
+		Version:  gvk.Version,
+		Resource: mapping.Resource.Resource,
+	}
+
+	options := metav1.ListOptions{}
+
+	if len(resourceSelector.LabelFilters) > 0 {
+		labelFilter := ""
+		for i := range resourceSelector.LabelFilters {
+			if labelFilter != "" {
+				labelFilter += ","
+			}
+			f := resourceSelector.LabelFilters[i]
+			if f.Operation == libsveltosv1beta1.OperationEqual {
+				labelFilter += fmt.Sprintf("%s=%s", f.Key, f.Value)
+			} else {
+				labelFilter += fmt.Sprintf("%s!=%s", f.Key, f.Value)
+			}
+		}
+
+		options.LabelSelector = labelFilter
+	}
+
+	if resourceSelector.Namespace != "" {
+		options.FieldSelector += fmt.Sprintf("metadata.namespace=%s", resourceSelector.Namespace)
+	}
+
+	if resourceSelector.Name != "" {
+		if options.FieldSelector != "" {
+			options.FieldSelector += ","
+		}
+		options.FieldSelector += fmt.Sprintf("metadata.name=%s", resourceSelector.Name)
+	}
+
+	d := dynamic.NewForConfigOrDie(remoteConfig)
+	var list *unstructured.UnstructuredList
+	list, err = d.Resource(resourceId).List(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.V(logs.LogDebug).Info(fmt.Sprintf("found %d resources", len(list.Items)))
+
+	resources := []*unstructured.Unstructured{}
+	for i := range list.Items {
+		resource := &list.Items[i]
+		if !resource.GetDeletionTimestamp().IsZero() {
+			continue
+		}
+		isMatch, err := isMatchForEventSource(resource, resourceSelector.Evaluate, logger)
+		if err != nil {
+			return nil, err
+		}
+
+		if isMatch {
+			resources = append(resources, resource)
+		}
+	}
+
+	return resources, nil
+}
+
+func isMatchForEventSource(resource *unstructured.Unstructured, script string, logger logr.Logger) (bool, error) {
+	if script == "" {
+		return true, nil
+	}
+
+	l := lua.NewState()
+	defer l.Close()
+
+	obj := sveltoslua.MapToTable(resource.UnstructuredContent())
+
+	if err := l.DoString(script); err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("doString failed: %v", err))
+		return false, err
+	}
+
+	l.SetGlobal("obj", obj)
+
+	if err := l.CallByParam(lua.P{
+		Fn:      l.GetGlobal("evaluate"), // name of Lua function
+		NRet:    1,                       // number of returned values
+		Protect: true,                    // return err or panic
+	}, obj); err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to evaluate health for resource: %v", err))
+		return false, err
+	}
+
+	lv := l.Get(-1)
+	tbl, ok := lv.(*lua.LTable)
+	if !ok {
+		logger.V(logs.LogInfo).Info(sveltoslua.LuaTableError)
+		return false, fmt.Errorf("%s", sveltoslua.LuaTableError)
+	}
+
+	goResult := sveltoslua.ToGoValue(tbl)
+	resultJson, err := json.Marshal(goResult)
+	if err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to marshal result: %v", err))
+		return false, err
+	}
+
+	var result matchStatus
+	err = json.Unmarshal(resultJson, &result)
+	if err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to unmarshal result: %v", err))
+		return false, err
+	}
+
+	if result.Message != "" {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("message: %s", result.Message))
+	}
+
+	logger.V(logs.LogDebug).Info(fmt.Sprintf("is a match: %t", result.Matching))
+
+	return result.Matching, nil
+}
+
+func validateCheck(luaScript string, resources []*unstructured.Unstructured,
+	logger logr.Logger) (bool, error) {
+
+	if luaScript == "" {
+		return true, nil
+	}
+
+	// Create a new Lua state
+	l := lua.NewState()
+	defer l.Close()
+
+	// Load the Lua script
+	if err := l.DoString(luaScript); err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("doString failed: %v", err))
+		return false, err
+	}
+
+	// Create an argument table
+	argTable := l.NewTable()
+	for _, resource := range resources {
+		obj := sveltoslua.MapToTable(resource.UnstructuredContent())
+		argTable.Append(obj)
+	}
+
+	l.SetGlobal("resources", argTable)
+
+	if err := l.CallByParam(lua.P{
+		Fn:      l.GetGlobal("evaluate"), // name of Lua function
+		NRet:    1,                       // number of returned values
+		Protect: true,                    // return err or panic
+	}, argTable); err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to call evaluate function: %s", err.Error()))
+		return false, err
+	}
+
+	lv := l.Get(-1)
+	tbl, ok := lv.(*lua.LTable)
+	if !ok {
+		logger.V(logs.LogInfo).Info(sveltoslua.LuaTableError)
+		return false, fmt.Errorf("%s", sveltoslua.LuaTableError)
+	}
+
+	goResult := sveltoslua.ToGoValue(tbl)
+	resultJson, err := json.Marshal(goResult)
+	if err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to marshal result: %v", err))
+		return false, err
+	}
+
+	var result checkStatus
+	err = json.Unmarshal(resultJson, &result)
+	if err != nil {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to unmarshal result: %v", err))
+		return false, err
+	}
+
+	if result.Message != "" {
+		logger.V(logs.LogInfo).Info(fmt.Sprintf("message: %s", result.Message))
+	}
+
+	return result.Pass, nil
+}
diff --git a/controllers/sveltoscluster_controller.go b/controllers/sveltoscluster_controller.go
index 1639668..1178914 100644
--- a/controllers/sveltoscluster_controller.go
+++ b/controllers/sveltoscluster_controller.go
@@ -69,6 +69,16 @@ type SveltosClusterReconciler struct {
 	ShardKey string // when set, only clusters matching the ShardKey will be reconciled
 }
 
+type matchStatus struct {
+	Matching bool   `json:"matching"`
+	Message  string `json:"message"`
+}
+
+type checkStatus struct {
+	Pass    bool   `json:"pass"`
+	Message string `json:"message"`
+}
+
 //+kubebuilder:rbac:groups=lib.projectsveltos.io,resources=sveltosclusters,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=lib.projectsveltos.io,resources=sveltosclusters/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update
@@ -186,30 +196,36 @@ func (r *SveltosClusterReconciler) reconcileNormal(
 			logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get projectsveltos namespace: %v", err))
 			sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
 		} else {
-			sveltosClusterScope.SveltosCluster.Status.Ready = true
-			currentVersion, err := k8s_utils.GetKubernetesVersion(ctx, config, logger)
-			if err != nil {
-				logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get cluster kubernetes version %v", err))
-				errorMessage := err.Error()
-				sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
-			} else {
-				currentSemVersion, err := semver.NewVersion(currentVersion)
-				if err != nil {
-					logger.Error(err, "failed to get semver for current version %s", currentVersion)
-				} else {
-					kubernetesVersion := fmt.Sprintf("v%d.%d.%d", currentSemVersion.Major(), currentSemVersion.Minor(), currentSemVersion.Patch())
-					sveltosClusterScope.SetLabel(versionLabel,
-						kubernetesVersion)
-					updateKubernetesVersionMetric(string(libsveltosv1beta1.ClusterTypeSveltos), sveltosClusterScope.SveltosCluster.Namespace,
-						sveltosClusterScope.SveltosCluster.Name, kubernetesVersion, logger)
-				}
-				sveltosClusterScope.SveltosCluster.Status.Version = currentVersion
-				logger.V(logs.LogDebug).Info(fmt.Sprintf("cluster version %s", currentVersion))
-				if r.shouldRenewTokenRequest(sveltosClusterScope, logger) {
-					err = r.handleTokenRequestRenewal(ctx, sveltosClusterScope, config)
-					if err != nil {
-						errorMessage := err.Error()
-						sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
-					}
-				}
-			}
+			err = r.runChecks(ctx, config, sveltosClusterScope.SveltosCluster, logger)
+			if err != nil {
+				errorMessage := err.Error()
+				sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
+			} else {
+				sveltosClusterScope.SveltosCluster.Status.Ready = true
+				currentVersion, err := k8s_utils.GetKubernetesVersion(ctx, config, logger)
+				if err != nil {
+					logger.V(logs.LogInfo).Info(fmt.Sprintf("failed to get cluster kubernetes version %v", err))
+					errorMessage := err.Error()
+					sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
+				} else {
+					currentSemVersion, err := semver.NewVersion(currentVersion)
+					if err != nil {
+						logger.Error(err, "failed to get semver for current version %s", currentVersion)
+					} else {
+						kubernetesVersion := fmt.Sprintf("v%d.%d.%d", currentSemVersion.Major(), currentSemVersion.Minor(), currentSemVersion.Patch())
+						sveltosClusterScope.SetLabel(versionLabel,
+							kubernetesVersion)
+						updateKubernetesVersionMetric(string(libsveltosv1beta1.ClusterTypeSveltos), sveltosClusterScope.SveltosCluster.Namespace,
+							sveltosClusterScope.SveltosCluster.Name, kubernetesVersion, logger)
+					}
+					sveltosClusterScope.SveltosCluster.Status.Version = currentVersion
+					logger.V(logs.LogDebug).Info(fmt.Sprintf("cluster version %s", currentVersion))
+					if r.shouldRenewTokenRequest(sveltosClusterScope, logger) {
+						err = r.handleTokenRequestRenewal(ctx, sveltosClusterScope, config)
+						if err != nil {
+							errorMessage := err.Error()
+							sveltosClusterScope.SveltosCluster.Status.FailureMessage = &errorMessage
+						}
+					}
+				}
+			}
 		}
@@ -238,6 +254,7 @@ func updateConnectionStatus(sveltosClusterScope *scope.SveltosClusterScope, logg
 func (r *SveltosClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	_, err := ctrl.NewControllerManagedBy(mgr).
 		For(&libsveltosv1beta1.SveltosCluster{}).
+		WithEventFilter(SveltosClusterPredicates(ctrl.Log)).
 		WithOptions(controller.Options{
 			MaxConcurrentReconciles: r.ConcurrentReconciles,
 		}).
@@ -577,3 +594,15 @@ func (r *SveltosClusterReconciler) adjustTokenRequestRenewalOption(
 
 	return tokenRequestRenewalOption.RenewTokenRequestInterval
 }
+
+func (r *SveltosClusterReconciler) runChecks(ctx context.Context, remoteConfig *rest.Config,
+	sveltosCluster *libsveltosv1beta1.SveltosCluster, logger logr.Logger) error {
+
+	if !sveltosCluster.Status.Ready {
+		// Run ReadinessChecks
+		return runChecks(ctx, remoteConfig, sveltosCluster.Spec.ReadinessChecks, logger)
+	}
+
+	// Run LivenessChecks
+	return runChecks(ctx, remoteConfig, sveltosCluster.Spec.LivenessChecks, logger)
+}
diff --git a/controllers/sveltoscluster_predicate.go b/controllers/sveltoscluster_predicate.go
new file mode 100644
index 0000000..e12667e
--- /dev/null
+++ b/controllers/sveltoscluster_predicate.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2024. projectsveltos.io. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"reflect"
+
+	"github.com/go-logr/logr"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1"
+	logs "github.com/projectsveltos/libsveltos/lib/logsettings"
+)
+
+// SveltosClusterPredicates returns predicates for SveltosCluster. SveltosClusterReconciler watches SveltosCluster events
+// and reacts to them by reconciling based on the following predicates
+func SveltosClusterPredicates(logger logr.Logger) predicate.Funcs {
+	return predicate.Funcs{
+		UpdateFunc: func(e event.UpdateEvent) bool {
+			newCluster := e.ObjectNew.(*libsveltosv1beta1.SveltosCluster)
+			oldCluster := e.ObjectOld.(*libsveltosv1beta1.SveltosCluster)
+			log := logger.WithValues("predicate", "updateEvent",
+				"namespace", newCluster.Namespace,
+				"cluster", newCluster.Name,
+			)
+
+			if oldCluster == nil {
+				log.V(logs.LogVerbose).Info("Old SveltosCluster is nil. Reconcile.")
+				return true
+			}
+
+			// a Spec change might need to be processed
+			if !reflect.DeepEqual(oldCluster.Spec, newCluster.Spec) {
+				log.V(logs.LogVerbose).Info(
+					"SveltosCluster Spec changed. Will attempt to reconcile.",
+				)
+				return true
+			}
+
+			// otherwise, return false
+			log.V(logs.LogVerbose).Info(
+				`SveltosCluster did not match expected conditions.`)
+			return false
+		},
+		CreateFunc: func(e event.CreateEvent) bool {
+			return true
+		},
+		DeleteFunc: func(e event.DeleteEvent) bool {
+			return true
+		},
+		GenericFunc: func(e event.GenericEvent) bool {
+			return false
+		},
+	}
+}
diff --git a/go.mod b/go.mod
index 1f5fa2c..1ff4e67 100644
--- a/go.mod
+++ b/go.mod
@@ -9,10 +9,11 @@ require (
 	github.com/onsi/ginkgo/v2 v2.22.2
 	github.com/onsi/gomega v1.36.2
 	github.com/pkg/errors v0.9.1
-	github.com/projectsveltos/libsveltos v0.47.0
+	github.com/projectsveltos/libsveltos v0.47.1-0.20250215100318-b7070076693e
 	github.com/prometheus/client_golang v1.20.5
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/pflag v1.0.6
+	github.com/yuin/gopher-lua v1.1.1
 	k8s.io/api v0.32.1
 	k8s.io/apiextensions-apiserver v0.32.1
 	k8s.io/apimachinery v0.32.1
@@ -57,10 +58,12 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/layeh/gopher-json v0.0.0-20201124131017-552bb3c4c3bf // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/projectsveltos/lua-utils/glua-strings v0.0.0-20250208125623-8a0f9e2ba46b // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.60.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
diff --git a/go.sum b/go.sum
index 2389f9a..18d4530 100644
--- a/go.sum
+++ b/go.sum
@@ -106,6 +106,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/layeh/gopher-json v0.0.0-20201124131017-552bb3c4c3bf h1:bg6J/5S/AeTz7K9i/luJRj31BJ8f+LgYwKQBSOZxSEM=
+github.com/layeh/gopher-json v0.0.0-20201124131017-552bb3c4c3bf/go.mod h1:E/q28EyUVBgBQnONAVPIdwvEsv4Ve0vaCA9JWim4+3I=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -130,8 +132,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/projectsveltos/libsveltos v0.47.0 h1:DP5dpqjJE++SEAhbSgK0JaLziVpD2jPNus3LyDUzuV8=
-github.com/projectsveltos/libsveltos v0.47.0/go.mod h1:vWM0iJACIE49RSSGNVa6VY18LKlsh6b/8dubxdgYwzo=
+github.com/projectsveltos/libsveltos v0.47.1-0.20250215100318-b7070076693e h1:twacEhCaX3dJa1emU7pDzN+sahUrXeYnYkRSPRpb9lY=
+github.com/projectsveltos/libsveltos v0.47.1-0.20250215100318-b7070076693e/go.mod h1:vWM0iJACIE49RSSGNVa6VY18LKlsh6b/8dubxdgYwzo=
+github.com/projectsveltos/lua-utils/glua-strings v0.0.0-20250208125623-8a0f9e2ba46b h1:jqMq/LPJvTsZRAmkIqwPRbfUqfVZtGrn0+kuHDUutQ4=
+github.com/projectsveltos/lua-utils/glua-strings v0.0.0-20250208125623-8a0f9e2ba46b/go.mod h1:L5waR6GvgOHVQ/YnDxHW4p53DDQ/sF3ACZhtSpDARMw=
 github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
 github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -163,12 +167,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
+github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
 go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
diff --git a/test/fv/fv_suite_test.go b/test/fv/fv_suite_test.go
index 23c428c..0a90751 100644
--- a/test/fv/fv_suite_test.go
+++ b/test/fv/fv_suite_test.go
@@ -37,9 +37,11 @@ import (
 	"k8s.io/client-go/kubernetes"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/textlogger"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -116,6 +118,7 @@ var _ = BeforeSuite(func() {
 
 	waitForClusterMachineToBeReady()
 
+	// This allows a pod in the management cluster to reach the managed cluster.
 	remoteRestConfig := getManagedClusterRestConfig(kindWorkloadCluster)
 
 	// Creates:
@@ -131,7 +134,8 @@
 })
 
 func generateKubeconfigWithTokenRequest(remoteRestConfig *rest.Config) string {
-	remoteClient, err := client.New(remoteRestConfig, client.Options{Scheme: scheme})
+	// This client allows ginkgo, running from terminal, to reach the managed cluster
+	workloadClient, err := getKindWorkloadClusterClient()
 	Expect(err).To(BeNil())
 
 	projectsveltos := "projectsveltos"
@@ -141,7 +145,7 @@ func generateKubeconfigWithTokenRequest(remoteRestConfig *rest.Config) string {
 			Name: projectsveltos,
 		},
 	}
-	err = remoteClient.Create(context.TODO(), ns)
+	err = workloadClient.Create(context.TODO(), ns)
 	if err != nil {
 		Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
 	}
@@ -153,7 +157,7 @@ func generateKubeconfigWithTokenRequest(remoteRestConfig *rest.Config) string {
 			Name: projectsveltos,
 		},
 	}
-	err = remoteClient.Create(context.TODO(), serviceAccount)
+	err = workloadClient.Create(context.TODO(), serviceAccount)
 	if err != nil {
 		Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
 	}
@@ -175,7 +179,7 @@ func generateKubeconfigWithTokenRequest(remoteRestConfig *rest.Config) string {
 			},
 		},
 	}
-	err = remoteClient.Create(context.TODO(), clusterrole)
+	err = workloadClient.Create(context.TODO(), clusterrole)
 	if err != nil {
 		Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
 	}
@@ -199,18 +203,18 @@ func generateKubeconfigWithTokenRequest(remoteRestConfig *rest.Config) string {
 			},
 		},
 	}
-	err = remoteClient.Create(context.TODO(), clusterrolebinding)
+	err = workloadClient.Create(context.TODO(), clusterrolebinding)
 	if err != nil {
 		Expect(apierrors.IsAlreadyExists(err)).To(BeTrue())
 	}
 
-	tokenRequest := getServiceAccountTokenRequest(remoteRestConfig, projectsveltos, projectsveltos)
+	tokenRequest := getServiceAccountTokenRequest(projectsveltos, projectsveltos)
 	return getKubeconfigFromToken(remoteRestConfig, projectsveltos, projectsveltos, tokenRequest.Token)
 }
 
 // getServiceAccountTokenRequest returns token for a serviceaccount
-func getServiceAccountTokenRequest(restConfig *rest.Config,
-	serviceAccountNamespace, serviceAccountName string) *authenticationv1.TokenRequestStatus {
+func getServiceAccountTokenRequest(serviceAccountNamespace, serviceAccountName string,
+) *authenticationv1.TokenRequestStatus {
 
 	saExpirationInSecond := 365 * 24 * 60 * time.Minute
 	expiration := int64(saExpirationInSecond.Seconds())
@@ -221,7 +225,9 @@
 		},
 	}
 
-	clientset, err := kubernetes.NewForConfig(restConfig)
+	workloadClusterRestConfig, err := getKindWorkloadClusterRestConfig()
+	Expect(err).To(BeNil())
+	clientset, err := kubernetes.NewForConfig(workloadClusterRestConfig)
 	Expect(err).To(BeNil())
 
 	By(fmt.Sprintf("Create Token for ServiceAccount %s/%s", serviceAccountNamespace, serviceAccountName))
@@ -234,7 +240,7 @@
 }
 
 // getKubeconfigFromToken returns Kubeconfig to access management cluster from token.
-func getKubeconfigFromToken(restConfig *rest.Config, namespace, serviceAccountName, token string) string {
+func getKubeconfigFromToken(remoteRestConfig *rest.Config, namespace, serviceAccountName, token string) string {
 	template := `apiVersion: v1
 kind: Config
 clusters:
@@ -254,7 +260,7 @@ contexts:
   user: %s
 current-context: sveltos-context`
 
-	data := fmt.Sprintf(template, restConfig.Host, base64.StdEncoding.EncodeToString(restConfig.CAData),
+	data := fmt.Sprintf(template, remoteRestConfig.Host, base64.StdEncoding.EncodeToString(remoteRestConfig.CAData),
 		serviceAccountName, token, namespace, serviceAccountName)
 
 	return data
@@ -293,6 +299,29 @@ func createSveltosCluster(sveltosClusterNamespace, sveltosClusterName string) {
 			TokenRequestRenewalOption: &libsveltosv1beta1.TokenRequestRenewalOption{
 				RenewTokenRequestInterval: metav1.Duration{Duration: time.Minute},
 			},
+			ReadinessChecks: []libsveltosv1beta1.ClusterCheck{
+				{
+					Name: "worker-nodes",
+					ResourceSelectors: []libsveltosv1beta1.ResourceSelector{
+						{
+							Kind:    "Node",
+							Group:   "",
+							Version: "v1",
+						},
+					},
+					Condition: `function evaluate()
+   hs = {}
+   hs.pass = false
+
+   for _, resource in ipairs(resources) do
+     if not (resource.metadata.labels and resource.metadata.labels["node-role.kubernetes.io/control-plane"]) then
+       hs.pass = true
+     end
+   end
+   return hs
+end`,
+				},
+			},
 		},
 	}
 
@@ -333,3 +362,27 @@ func getManagedClusterRestConfig(workloadCluster *clusterv1.Cluster) *rest.Confi
 	Expect(err).To(BeNil())
 	return remoteRestConfig
 }
+
+// getKindWorkloadClusterClient returns client to access the kind cluster used as workload cluster
+func getKindWorkloadClusterClient() (client.Client, error) {
+	restConfig, err := getKindWorkloadClusterRestConfig()
+	if err != nil {
+		return nil, err
+	}
+	return client.New(restConfig, client.Options{Scheme: scheme})
+}
+
+// getKindWorkloadClusterRestConfig returns restConfig to access the kind cluster used as workload cluster
+func getKindWorkloadClusterRestConfig() (*rest.Config, error) {
+	kubeconfigPath := "workload_kubeconfig" // this file is created in this directory by Makefile during cluster creation
+	config, err := clientcmd.LoadFromFile(kubeconfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig()
+}
+
+func randomString() string {
+	const length = 10
+	return util.RandomString(length)
+}
diff --git a/test/fv/liveness_check_test.go b/test/fv/liveness_check_test.go
new file mode 100644
index 0000000..1059eae
--- /dev/null
+++ b/test/fv/liveness_check_test.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2024. projectsveltos.io. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fv_test
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1"
+)
+
+var _ = Describe("LivenessChecks", func() {
+	It("SveltosCluster livenessChecks", Label("FV"), func() {
+		namespace := randomString()
+
+		By("Create a SveltosCluster with a LivenessCheck asking for a specific namespace to exist")
+		sveltosCluster := &libsveltosv1beta1.SveltosCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      randomString(),
+				Namespace: kindWorkloadCluster.Namespace,
+			},
+			Spec: libsveltosv1beta1.SveltosClusterSpec{
+				ConsecutiveFailureThreshold: 1,
+				// fv-test creates a SveltosCluster. Here we simply reuse the Secret with Kubeconfig
+				KubeconfigName: "clusterapi-workload-sveltos-kubeconfig",
+				LivenessChecks: []libsveltosv1beta1.ClusterCheck{
+					{
+						Name: "failing-check",
+						ResourceSelectors: []libsveltosv1beta1.ResourceSelector{
+							{
+								Kind:    "Namespace",
+								Group:   "",
+								Version: "v1",
+								Name:    namespace,
+							},
+						},
+						Condition: `function evaluate()
+   hs = {}
+   hs.pass = false
+   if #resources == 1 then
+     -- The check passes only once the namespace selected in ResourceSelector exists
+     hs.pass = true
+   end
+   return hs
+end`,
+					},
+				},
+			},
+		}
+
+		Expect(k8sClient.Create(context.TODO(), sveltosCluster)).To(Succeed())
+
+		By("Verify SveltosCluster is ready")
+		// Verify SveltosCluster status moves to Ready
+		Eventually(func() bool {
+			currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+			err := k8sClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+				currentSveltosCluster)
+			if err != nil {
+				return false
+			}
+			return currentSveltosCluster.Status.Ready
+		}, timeout, pollingInterval).Should(BeTrue())
+
+		By("Verify SveltosCluster is not healthy")
+		// Verify SveltosCluster is not healthy
+		Eventually(func() bool {
+			currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+			err := k8sClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+				currentSveltosCluster)
+			if err != nil {
+				return false
+			}
+			return currentSveltosCluster.Status.ConnectionStatus == libsveltosv1beta1.ConnectionDown
+		}, timeout, pollingInterval).Should(BeTrue())
+
+		By("Create namespace in the managed cluster")
+		ns := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: namespace,
+			},
+		}
+
+		remoteClient, err := getKindWorkloadClusterClient()
+		Expect(err).To(BeNil())
+		Expect(remoteClient.Create(context.TODO(), ns)).To(Succeed())
+
+		By("Verify SveltosCluster is healthy")
+		// Verify SveltosCluster status is healthy
+		Eventually(func() bool {
+			currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+			err := k8sClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+				currentSveltosCluster)
+			if err != nil {
+				return false
+			}
+			return currentSveltosCluster.Status.ConnectionStatus == libsveltosv1beta1.ConnectionHealthy
+		}, timeout, pollingInterval).Should(BeTrue())
+
+		By("Delete SveltosCluster")
+		currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+		Expect(k8sClient.Get(context.TODO(),
+			types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+			currentSveltosCluster)).To(Succeed())
+		Expect(k8sClient.Delete(context.TODO(), currentSveltosCluster)).To(Succeed())
+	})
+})
diff --git a/test/fv/readiness_checks_test.go b/test/fv/readiness_checks_test.go
new file mode 100644
index 0000000..e1bcd1e
--- /dev/null
+++ b/test/fv/readiness_checks_test.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2024. projectsveltos.io. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fv_test
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1"
+)
+
+var _ = Describe("ReadinessCheck", func() {
+	It("SveltosCluster readinessChecks", Label("FV"), func() {
+		namespace := randomString()
+
+		By("Create a SveltosCluster with a ReadinessCheck asking for a specific namespace to exist")
+		sveltosCluster := &libsveltosv1beta1.SveltosCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      randomString(),
+				Namespace: kindWorkloadCluster.Namespace,
+			},
+			Spec: libsveltosv1beta1.SveltosClusterSpec{
+				// fv-test creates a SveltosCluster. Here we simply reuse the Secret with Kubeconfig
+				KubeconfigName: "clusterapi-workload-sveltos-kubeconfig",
+				ReadinessChecks: []libsveltosv1beta1.ClusterCheck{
+					{
+						Name: "failing-check",
+						ResourceSelectors: []libsveltosv1beta1.ResourceSelector{
+							{
+								Kind:    "Namespace",
+								Group:   "",
+								Version: "v1",
+								Name:    namespace,
+							},
+						},
+						Condition: `function evaluate()
+   hs = {}
+   hs.pass = false
+   if #resources == 1 then
+     -- The check passes only once the namespace selected in ResourceSelector exists
+     hs.pass = true
+   end
+   return hs
+end`,
+					},
+				},
+			},
+		}
+
+		Expect(k8sClient.Create(context.TODO(), sveltosCluster)).To(Succeed())
+
+		By("Verify SveltosCluster is not ready")
+		// Verify SveltosCluster status never moves to Ready
+		Eventually(func() bool {
+			currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+			err := k8sClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+				currentSveltosCluster)
+			if err != nil {
+				return false
+			}
+			if currentSveltosCluster.Status.Ready {
+				return false
+			}
+			return currentSveltosCluster.Status.FailureMessage != nil &&
+				*currentSveltosCluster.Status.FailureMessage == "cluster check failing-check failed"
+		}, timeout, pollingInterval).Should(BeTrue())
+
+		By("Create namespace in the managed cluster")
+		ns := &corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: namespace,
+			},
+		}
+
+		remoteClient, err := getKindWorkloadClusterClient()
+		Expect(err).To(BeNil())
+		Expect(remoteClient.Create(context.TODO(), ns)).To(Succeed())
+
+		By("Verify SveltosCluster is ready")
+		// Verify SveltosCluster status moves to Ready
+		Eventually(func() bool {
+			currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+			err := k8sClient.Get(context.TODO(),
+				types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+				currentSveltosCluster)
+			if err != nil {
+				return false
+			}
+			return currentSveltosCluster.Status.Ready
+		}, timeout, pollingInterval).Should(BeTrue())
+
+		By("Delete SveltosCluster")
+		currentSveltosCluster := &libsveltosv1beta1.SveltosCluster{}
+		Expect(k8sClient.Get(context.TODO(),
+			types.NamespacedName{Namespace: sveltosCluster.Namespace, Name: sveltosCluster.Name},
+			currentSveltosCluster)).To(Succeed())
+		Expect(k8sClient.Delete(context.TODO(), currentSveltosCluster)).To(Succeed())
+	})
+})