diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go index 2d164480234d..d85a8353fdb6 100644 --- a/test/e2e/quick_start_test.go +++ b/test/e2e/quick_start_test.go @@ -187,7 +187,12 @@ var _ = Describe("When following the Cluster API quick-start check finalizers re InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. - framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName) + framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, + framework.CoreFinalizersAssertion, + framework.KubeadmFinalizersAssertion, + framework.ExpFinalizersAssertion, + framework.InfraDockerFinalizersAssertion, + ) }, } }) @@ -205,7 +210,12 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass c InfrastructureProvider: ptr.To("docker"), PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. - framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName) + framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, + framework.CoreFinalizersAssertion, + framework.KubeadmFinalizersAssertion, + framework.ExpFinalizersAssertion, + framework.InfraDockerFinalizersAssertion, + ) }, } }) diff --git a/test/framework/cluster_topology_helpers.go b/test/framework/cluster_topology_helpers.go index 927aea913555..32726c8e2435 100644 --- a/test/framework/cluster_topology_helpers.go +++ b/test/framework/cluster_topology_helpers.go @@ -84,8 +84,6 @@ func UpgradeClusterTopologyAndWaitForUpgrade(ctx context.Context, input UpgradeC Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. 
input.ClusterProxy can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade") Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade") Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. input.ControlPlane can't be nil when calling UpgradeClusterTopologyAndWaitForUpgrade") - Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeClusterTopologyAndWaitForUpgrade") - Expect(input.MachinePools).ToNot(BeEmpty(), "Invalid argument. input.MachinePools can't be empty when calling UpgradeClusterTopologyAndWaitForUpgrade") Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling UpgradeClusterTopologyAndWaitForUpgrade") mgmtClient := input.ClusterProxy.GetClient() diff --git a/test/framework/finalizers_helpers.go b/test/framework/finalizers_helpers.go index 49eaa3ce2b84..00e7c244447e 100644 --- a/test/framework/finalizers_helpers.go +++ b/test/framework/finalizers_helpers.go @@ -32,28 +32,45 @@ import ( clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) -// finalizerAssertion contains a list of expected Finalizers corresponding to a resource Kind. 
-var finalizerAssertion = map[string][]string{ - "Cluster": {clusterv1.ClusterFinalizer}, - "Machine": {clusterv1.MachineFinalizer}, - "MachineSet": {clusterv1.MachineSetTopologyFinalizer}, - "MachineDeployment": {clusterv1.MachineDeploymentTopologyFinalizer}, - "ClusterResourceSet": {addonsv1.ClusterResourceSetFinalizer}, - "DockerMachine": {infrav1.MachineFinalizer}, - "DockerCluster": {infrav1.ClusterFinalizer}, +// CoreFinalizersAssertion maps Cluster API core types to their expected finalizers. +var CoreFinalizersAssertion = map[string][]string{ + "Cluster": {clusterv1.ClusterFinalizer}, + "Machine": {clusterv1.MachineFinalizer}, + "MachineSet": {clusterv1.MachineSetTopologyFinalizer}, + "MachineDeployment": {clusterv1.MachineDeploymentTopologyFinalizer}, +} + +// ExpFinalizersAssertion maps experimental resource types to their expected finalizers. +var ExpFinalizersAssertion = map[string][]string{ + "ClusterResourceSet": {addonsv1.ClusterResourceSetFinalizer}, + "MachinePool": {expv1.MachinePoolFinalizer}, +} + +// InfraDockerFinalizersAssertion maps docker infrastructure resource types to their expected finalizers. +var InfraDockerFinalizersAssertion = map[string][]string{ + "DockerMachine": {infrav1.MachineFinalizer}, + "DockerCluster": {infrav1.ClusterFinalizer}, + "DockerMachinePool": {infraexpv1.MachinePoolFinalizer}, +} + +// KubeadmFinalizersAssertion maps Kubeadm resource types to their expected finalizers. +var KubeadmFinalizersAssertion = map[string][]string{ "KubeadmControlPlane": {controlplanev1.KubeadmControlPlaneFinalizer}, } // ValidateFinalizersResilience checks that expected finalizers are in place, deletes them, and verifies that expected finalizers are properly added again. 
-func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string) { +func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, finalizerAssertions ...map[string][]string) { clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName} + allFinalizerAssertions := concatenateFinalizerAssertions(finalizerAssertions...) // Collect all objects where finalizers were initially set - objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace) + objectsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions) // Setting the paused property on the Cluster resource will pause reconciliations, thereby having no effect on Finalizers. // This also makes debugging easier. @@ -72,7 +89,7 @@ func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, names forceMachineDeploymentTopologyReconcile(ctx, proxy.GetClient(), clusterKey) // Check that the Finalizers are as expected after further reconciliations. - assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers) + assertFinalizersExist(ctx, proxy, namespace, objectsWithFinalizers, allFinalizerAssertions) } // removeFinalizers removes all Finalizers from objects in the owner graph. 
@@ -94,7 +111,7 @@ func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string) } } -func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string) map[string]*unstructured.Unstructured { +func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string) map[string]*unstructured.Unstructured { graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath()) Expect(err).ToNot(HaveOccurred()) @@ -111,6 +128,8 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace setFinalizers := obj.GetFinalizers() if len(setFinalizers) > 0 { + // Assert that the expected finalizers are set on the resource. + Expect(allFinalizerAssertions[node.Object.Kind]).To(Equal(setFinalizers), "for resource type %s", node.Object.Kind) objsWithFinalizers[fmt.Sprintf("%s/%s/%s", node.Object.Kind, node.Object.Namespace, node.Object.Name)] = obj } } @@ -119,10 +138,10 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace } // assertFinalizersExist ensures that current Finalizers match those in the initialObjectsWithFinalizers. 
-func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured) { +func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string) { Eventually(func() error { var allErrs []error - finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace) + finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions) for objKindNamespacedName, obj := range initialObjsWithFinalizers { // verify if finalizers for this resource were set on reconcile @@ -133,7 +152,7 @@ func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace st } // verify if this resource has the appropriate Finalizers set - expectedFinalizers, assert := finalizerAssertion[obj.GetKind()] + expectedFinalizers, assert := allFinalizerAssertions[obj.GetKind()] if !assert { continue } @@ -149,6 +168,18 @@ func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace st }).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed()) } +// concatenateFinalizerAssertions concatenates all finalizer assertions into one map. +func concatenateFinalizerAssertions(finalizerAssertions ...map[string][]string) map[string][]string { + allFinalizerAssertions := make(map[string][]string) + for i := range finalizerAssertions { + for kind, finalizers := range finalizerAssertions[i] { + allFinalizerAssertions[kind] = finalizers + } + } + + return allFinalizerAssertions +} + // forceMachineDeploymentTopologyReconcile forces reconciliation of the MachineDeployment. func forceMachineDeploymentTopologyReconcile(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) { mdList := &clusterv1.MachineDeploymentList{}