From fd16439ff73468b3acf17fcd5934447aaa87f180 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Tue, 1 Oct 2024 21:44:11 +0200
Subject: [PATCH] update

---
 .../clusterresourceset_controller_test.go    | 11 +++++++--
 exp/internal/controllers/suite_test.go       | 24 +++++++++++++++----
 .../machine/machine_controller_test.go       |  5 ++++
 .../machinedeployment_controller_test.go     |  5 ++++
 .../machinehealthcheck_controller_test.go    | 14 ++++-------
 internal/test/envtest/environment.go         |  2 +-
 6 files changed, 44 insertions(+), 17 deletions(-)

diff --git a/exp/addons/internal/controllers/clusterresourceset_controller_test.go b/exp/addons/internal/controllers/clusterresourceset_controller_test.go
index 73671af1012a..da67a5391023 100644
--- a/exp/addons/internal/controllers/clusterresourceset_controller_test.go
+++ b/exp/addons/internal/controllers/clusterresourceset_controller_test.go
@@ -110,8 +110,15 @@ metadata:
 		g.Expect(env.CreateAndWait(ctx, testCluster)).To(Succeed())
 		t.Log("Creating the remote Cluster kubeconfig")
 		g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
-		_, err = clusterCache.GetClient(ctx, client.ObjectKeyFromObject(testCluster))
-		g.Expect(err).ToNot(HaveOccurred())
+		// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+		patch := client.MergeFrom(testCluster.DeepCopy())
+		testCluster.Status.InfrastructureReady = true
+		g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
+
+		g.Eventually(func(g Gomega) {
+			_, err = clusterCache.GetClient(ctx, client.ObjectKeyFromObject(testCluster))
+			g.Expect(err).ToNot(HaveOccurred())
+		}, 1*time.Minute, 5*time.Second).Should(Succeed())
 
 		createConfigMapAndSecret(g, ns.Name, configmapName, secretName)
 		return ns
diff --git a/exp/internal/controllers/suite_test.go b/exp/internal/controllers/suite_test.go
index 93194d2f8361..815d65a750e9 100644
--- a/exp/internal/controllers/suite_test.go
+++ b/exp/internal/controllers/suite_test.go
@@ -26,6 +26,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 
 	"sigs.k8s.io/cluster-api/api/v1beta1/index"
+	"sigs.k8s.io/cluster-api/controllers/clustercache"
+	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/internal/test/envtest"
 )
 
@@ -42,12 +44,24 @@ func TestMain(m *testing.M) {
 	}
 
 	setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) {
-		machinePoolReconciler := MachinePoolReconciler{
-			Client:   mgr.GetClient(),
-			recorder: mgr.GetEventRecorderFor("machinepool-controller"),
-		}
-		err := machinePoolReconciler.SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1})
+		clusterCache, err := clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
+			SecretClient: mgr.GetClient(),
+			Cache: clustercache.CacheOptions{
+				Indexes: []clustercache.CacheOptionsIndex{clustercache.NodeProviderIDIndex},
+			},
+			Client: clustercache.ClientOptions{
+				UserAgent: remote.DefaultClusterAPIUserAgent("test-controller-manager"),
+			},
+		}, controller.Options{MaxConcurrentReconciles: 10})
 		if err != nil {
+			panic(fmt.Sprintf("Failed to create new cluster cache tracker: %v", err))
+		}
+
+		if err := (&MachinePoolReconciler{
+			Client:       mgr.GetClient(),
+			ClusterCache: clusterCache,
+			recorder:     mgr.GetEventRecorderFor("machinepool-controller"),
+		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
 			panic(fmt.Sprintf("Failed to set up machine pool reconciler: %v", err))
 		}
 	}
diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go
index f8d86dc49fcb..39d1b22a0833 100644
--- a/internal/controllers/machine/machine_controller_test.go
+++ b/internal/controllers/machine/machine_controller_test.go
@@ -2519,6 +2519,11 @@ func TestNodeToMachine(t *testing.T) {
 	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
 	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
 
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	testClusterOriginal := client.MergeFrom(testCluster.DeepCopy())
+	testCluster.Status.InfrastructureReady = true
+	g.Expect(env.Status().Patch(ctx, testCluster, testClusterOriginal)).To(Succeed())
+
 	g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
 	g.Expect(env.Create(ctx, targetNode)).To(Succeed())
 	g.Expect(env.Create(ctx, randomNode)).To(Succeed())
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
index 51e14f39d82c..2b5aecad8020 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go
@@ -62,6 +62,11 @@ func TestMachineDeploymentReconciler(t *testing.T) {
 		t.Log("Creating the Cluster Kubeconfig Secret")
 		g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
 
+		// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+		patch := client.MergeFrom(cluster.DeepCopy())
+		cluster.Status.InfrastructureReady = true
+		g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
+
 		return ns, cluster
 	}
 
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
index 37cd08653318..4e96fbbc1bbb 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go
@@ -2351,7 +2351,7 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster {
 		},
 	}
 
-	g.Expect(env.Create(ctx, cluster)).To(Succeed())
+	g.Expect(env.CreateAndWait(ctx, cluster)).To(Succeed())
 
 	// Make sure the cluster is in the cache before proceeding
 	g.Eventually(func() error {
@@ -2359,10 +2359,14 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster {
 		return env.Get(ctx, util.ObjectKey(cluster), &cl)
 	}, timeout, 100*time.Millisecond).Should(Succeed())
 
+	g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
+
 	// This is required for MHC to perform checks
 	patchHelper, err := patch.NewHelper(cluster, env.Client)
 	g.Expect(err).ToNot(HaveOccurred())
 	conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition)
+	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
+	cluster.Status.InfrastructureReady = true
 	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
 
 	// Wait for cluster in cache to be updated post-patch
@@ -2375,14 +2379,6 @@ func createCluster(g *WithT, namespaceName string) *clusterv1.Cluster {
 		return conditions.IsTrue(cluster, clusterv1.InfrastructureReadyCondition)
 	}, timeout, 100*time.Millisecond).Should(BeTrue())
 
-	g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
-
-	// Set InfrastructureReady to true so ClusterCache creates the clusterAccessor.
-	patchHelper, err = patch.NewHelper(cluster, env.Client)
-	g.Expect(err).ToNot(HaveOccurred())
-	cluster.Status.InfrastructureReady = true
-	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
-
 	return cluster
 }
 
diff --git a/internal/test/envtest/environment.go b/internal/test/envtest/environment.go
index 010edc5e9ef2..7365cd8eaff5 100644
--- a/internal/test/envtest/environment.go
+++ b/internal/test/envtest/environment.go
@@ -78,7 +78,7 @@ func init() {
 	// This would lead to race conditions because input.M.Run() writes os.Stderr
 	// while some go routines in controller-runtime use os.Stderr to write logs.
 	logOptions := logs.NewOptions()
-	logOptions.Verbosity = logsv1.VerbosityLevel(6) // FIXME: change to 2 before merge
+	logOptions.Verbosity = logsv1.VerbosityLevel(8) // FIXME(sbueringer): change to 2 before merge
 	if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
 		klog.ErrorS(err, "Unable to validate and apply log options")
 		os.Exit(1)