diff --git a/test/e2e/autoscaler.go b/test/e2e/autoscaler.go index 99f9a83c24da..5092dba69ce5 100644 --- a/test/e2e/autoscaler.go +++ b/test/e2e/autoscaler.go @@ -62,8 +62,17 @@ type AutoscalerSpecInput struct { InfrastructureMachineTemplateKind string InfrastructureMachinePoolTemplateKind string InfrastructureMachinePoolKind string + InfrastructureAPIGroup string AutoscalerVersion string + // InstallOnManagementCluster controls whether the autoscaler should get installed to the management or workload cluster. + // Depending on the CI environment, there may be no connectivity from the workload to the management cluster. + InstallOnManagementCluster bool + + // ScaleToAndFromZero enables tests to scale to and from zero. + // Note: This is only implemented for MachineDeployments. + ScaleToAndFromZero bool + + // Allows to inject a function to be run after test namespace is created. + // If not specified, this is a no-op. PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) @@ -115,6 +124,8 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) infrastructureProvider = *input.InfrastructureProvider } + hasMachinePool := input.InfrastructureMachinePoolTemplateKind != "" + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ @@ -137,6 +148,7 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) }, clusterResources) Expect(clusterResources.Cluster.Spec.Topology).NotTo(BeNil(), "Autoscaler test expected a Classy Cluster") + // Ensure the MachineDeploymentTopology has the autoscaler annotations. 
mdTopology := clusterResources.Cluster.Spec.Topology.Workers.MachineDeployments[0] Expect(mdTopology.Metadata.Annotations).NotTo(BeNil(), "MachineDeployment is expected to have autoscaler annotations") @@ -145,21 +157,27 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) mdNodeGroupMaxSize, ok := mdTopology.Metadata.Annotations[clusterv1.AutoscalerMaxSizeAnnotation] Expect(ok).To(BeTrue(), "MachineDeploymentTopology %s does not have the %q autoscaler annotation", mdTopology.Name, clusterv1.AutoscalerMaxSizeAnnotation) - // Ensure the MachinePoolTopology does NOT have the autoscaler annotations so we can test MachineDeployments first. - mpTopology := clusterResources.Cluster.Spec.Topology.Workers.MachinePools[0] - if mpTopology.Metadata.Annotations != nil { - _, ok = mpTopology.Metadata.Annotations[clusterv1.AutoscalerMinSizeAnnotation] - Expect(ok).To(BeFalse(), "MachinePoolTopology %s does have the %q autoscaler annotation", mpTopology.Name, clusterv1.AutoscalerMinSizeAnnotation) - _, ok = mpTopology.Metadata.Annotations[clusterv1.AutoscalerMaxSizeAnnotation] - Expect(ok).To(BeFalse(), "MachinePoolTopology %s does have the %q autoscaler annotation", mpTopology.Name, clusterv1.AutoscalerMaxSizeAnnotation) + if hasMachinePool { + // Ensure the MachinePoolTopology does NOT have the autoscaler annotations so we can test MachineDeployments first. 
+ mpTopology := clusterResources.Cluster.Spec.Topology.Workers.MachinePools[0] + if mpTopology.Metadata.Annotations != nil { + _, ok = mpTopology.Metadata.Annotations[clusterv1.AutoscalerMinSizeAnnotation] + Expect(ok).To(BeFalse(), "MachinePoolTopology %s does have the %q autoscaler annotation", mpTopology.Name, clusterv1.AutoscalerMinSizeAnnotation) + _, ok = mpTopology.Metadata.Annotations[clusterv1.AutoscalerMaxSizeAnnotation] + Expect(ok).To(BeFalse(), "MachinePoolTopology %s does have the %q autoscaler annotation", mpTopology.Name, clusterv1.AutoscalerMaxSizeAnnotation) + } } // Get a ClusterProxy so we can interact with the workload cluster workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, clusterResources.Cluster.Namespace, clusterResources.Cluster.Name) mdOriginalReplicas := *clusterResources.MachineDeployments[0].Spec.Replicas Expect(strconv.Itoa(int(mdOriginalReplicas))).To(Equal(mdNodeGroupMinSize), "MachineDeployment should have replicas as defined in %s", clusterv1.AutoscalerMinSizeAnnotation) - mpOriginalReplicas := *clusterResources.MachinePools[0].Spec.Replicas - Expect(int(mpOriginalReplicas)).To(Equal(1), "MachinePool should default to 1 replica via the MachinePool webhook") + + var mpOriginalReplicas int32 + if hasMachinePool { + mpOriginalReplicas = *clusterResources.MachinePools[0].Spec.Replicas + Expect(int(mpOriginalReplicas)).To(Equal(1), "MachinePool should default to 1 replica via the MachinePool webhook") + } By("Installing the autoscaler on the workload cluster") autoscalerWorkloadYAMLPath := input.E2EConfig.GetVariable(AutoscalerWorkloadYAMLPath) @@ -168,11 +186,13 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) InfrastructureMachineTemplateKind: input.InfrastructureMachineTemplateKind, InfrastructureMachinePoolTemplateKind: input.InfrastructureMachinePoolTemplateKind, InfrastructureMachinePoolKind: input.InfrastructureMachinePoolKind, + InfrastructureAPIGroup: 
input.InfrastructureAPIGroup, WorkloadYamlPath: autoscalerWorkloadYAMLPath, ManagementClusterProxy: input.BootstrapClusterProxy, WorkloadClusterProxy: workloadClusterProxy, Cluster: clusterResources.Cluster, AutoscalerVersion: input.AutoscalerVersion, + AutoscalerOnManagementCluster: input.InstallOnManagementCluster, }, input.E2EConfig.GetIntervals(specName, "wait-controllers")...) By("Creating workload that forces the system to scale up") @@ -226,6 +246,50 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) WaitForMachineDeployment: input.E2EConfig.GetIntervals(specName, "wait-controllers"), }) + if input.ScaleToAndFromZero { + By("Enabling autoscaler for the MachineDeployment to zero") + // Enable autoscaler on the MachineDeployment. + framework.EnableAutoscalerForMachineDeploymentTopologyAndWait(ctx, framework.EnableAutoscalerForMachineDeploymentTopologyAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + NodeGroupMinSize: "0", + NodeGroupMaxSize: mdNodeGroupMaxSize, + WaitForAnnotationsToBeAdded: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), + }) + + By("Scaling the MachineDeployment scale up deployment to zero") + framework.ScaleScaleUpDeploymentAndWait(ctx, framework.ScaleScaleUpDeploymentAndWaitInput{ + ClusterProxy: workloadClusterProxy, + // We need to sum up the expected number of MachineDeployment replicas and the current + // number of MachinePool replicas because otherwise the pods get scheduled on the MachinePool nodes. + Replicas: mpOriginalReplicas + 0, + }, input.E2EConfig.GetIntervals(specName, "wait-autoscaler")...) 
+ + By("Checking the MachineDeployment finished scaling down to zero") + framework.AssertMachineDeploymentReplicas(ctx, framework.AssertMachineDeploymentReplicasInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + MachineDeployment: clusterResources.MachineDeployments[0], + Replicas: 0, + WaitForMachineDeployment: input.E2EConfig.GetIntervals(specName, "wait-controllers"), + }) + + By("Scaling the MachineDeployment scale up deployment to 1") + framework.ScaleScaleUpDeploymentAndWait(ctx, framework.ScaleScaleUpDeploymentAndWaitInput{ + ClusterProxy: workloadClusterProxy, + // We need to sum up the expected number of MachineDeployment replicas and the current + // number of MachinePool replicas because otherwise the pods get scheduled on the MachinePool nodes. + Replicas: mpOriginalReplicas + 1, + }, input.E2EConfig.GetIntervals(specName, "wait-autoscaler")...) + + By("Checking the MachineDeployment finished scaling up") + framework.AssertMachineDeploymentReplicas(ctx, framework.AssertMachineDeploymentReplicasInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + MachineDeployment: clusterResources.MachineDeployments[0], + Replicas: 1, + WaitForMachineDeployment: input.E2EConfig.GetIntervals(specName, "wait-controllers"), + }) + } + By("Disabling the autoscaler for MachineDeployments to test MachinePools") framework.DisableAutoscalerForMachineDeploymentTopologyAndWait(ctx, framework.DisableAutoscalerForMachineDeploymentTopologyAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -239,67 +303,69 @@ func AutoscalerSpec(ctx context.Context, inputGetter func() AutoscalerSpecInput) WaitForDelete: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), }) - By("Enabling autoscaler for the MachinePool") - // Enable autoscaler on the MachinePool. 
- framework.EnableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.EnableAutoscalerForMachinePoolTopologyAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: clusterResources.Cluster, - NodeGroupMinSize: mpNodeGroupMinSize, - NodeGroupMaxSize: mpNodeGroupMaxSize, - WaitForAnnotationsToBeAdded: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), - }) - - By("Creating workload that forces the system to scale up") - framework.AddScaleUpDeploymentAndWait(ctx, framework.AddScaleUpDeploymentAndWaitInput{ - ClusterProxy: workloadClusterProxy, - }, input.E2EConfig.GetIntervals(specName, "wait-autoscaler")...) - - By("Checking the MachinePool is scaled up") - mpScaledUpReplicas := mpOriginalReplicas + 1 - framework.AssertMachinePoolReplicas(ctx, framework.AssertMachinePoolReplicasInput{ - Getter: input.BootstrapClusterProxy.GetClient(), - MachinePool: clusterResources.MachinePools[0], - Replicas: mpScaledUpReplicas, - WaitForMachinePool: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), - }) - - By("Disabling the autoscaler") - framework.DisableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.DisableAutoscalerForMachinePoolTopologyAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: clusterResources.Cluster, - WaitForAnnotationsToBeDropped: input.E2EConfig.GetIntervals(specName, "wait-controllers"), - }) - - By("Checking we can manually scale up the MachinePool") - // Scale up the MachinePool. Since autoscaler is disabled we should be able to do this. 
- mpExcessReplicas := mpScaledUpReplicas + 1 - framework.ScaleMachinePoolTopologyAndWait(ctx, framework.ScaleMachinePoolTopologyAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: clusterResources.Cluster, - Replicas: mpExcessReplicas, - WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), - Getter: input.BootstrapClusterProxy.GetClient(), - }) - - By("Checking enabling autoscaler will scale down the MachinePool to correct size") - // Enable autoscaler on the MachinePool. - framework.EnableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.EnableAutoscalerForMachinePoolTopologyAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: clusterResources.Cluster, - NodeGroupMinSize: mpNodeGroupMinSize, - NodeGroupMaxSize: mpNodeGroupMaxSize, - WaitForAnnotationsToBeAdded: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), - }) - - By("Checking the MachinePool is scaled down") - // Since we scaled up the MachinePool manually and the workload has not changed auto scaler - // should detect that there are unneeded nodes and scale down the MachinePool. - framework.AssertMachinePoolReplicas(ctx, framework.AssertMachinePoolReplicasInput{ - Getter: input.BootstrapClusterProxy.GetClient(), - MachinePool: clusterResources.MachinePools[0], - Replicas: mpScaledUpReplicas, - WaitForMachinePool: input.E2EConfig.GetIntervals(specName, "wait-controllers"), - }) + if hasMachinePool { + By("Enabling autoscaler for the MachinePool") + // Enable autoscaler on the MachinePool. 
+ framework.EnableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.EnableAutoscalerForMachinePoolTopologyAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + NodeGroupMinSize: mpNodeGroupMinSize, + NodeGroupMaxSize: mpNodeGroupMaxSize, + WaitForAnnotationsToBeAdded: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), + }) + + By("Creating workload that forces the system to scale up") + framework.AddScaleUpDeploymentAndWait(ctx, framework.AddScaleUpDeploymentAndWaitInput{ + ClusterProxy: workloadClusterProxy, + }, input.E2EConfig.GetIntervals(specName, "wait-autoscaler")...) + + By("Checking the MachinePool is scaled up") + mpScaledUpReplicas := mpOriginalReplicas + 1 + framework.AssertMachinePoolReplicas(ctx, framework.AssertMachinePoolReplicasInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + MachinePool: clusterResources.MachinePools[0], + Replicas: mpScaledUpReplicas, + WaitForMachinePool: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), + }) + + By("Disabling the autoscaler") + framework.DisableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.DisableAutoscalerForMachinePoolTopologyAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + WaitForAnnotationsToBeDropped: input.E2EConfig.GetIntervals(specName, "wait-controllers"), + }) + + By("Checking we can manually scale up the MachinePool") + // Scale up the MachinePool. Since autoscaler is disabled we should be able to do this. 
+ mpExcessReplicas := mpScaledUpReplicas + 1 + framework.ScaleMachinePoolTopologyAndWait(ctx, framework.ScaleMachinePoolTopologyAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + Replicas: mpExcessReplicas, + WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + Getter: input.BootstrapClusterProxy.GetClient(), + }) + + By("Checking enabling autoscaler will scale down the MachinePool to correct size") + // Enable autoscaler on the MachinePool. + framework.EnableAutoscalerForMachinePoolTopologyAndWait(ctx, framework.EnableAutoscalerForMachinePoolTopologyAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + NodeGroupMinSize: mpNodeGroupMinSize, + NodeGroupMaxSize: mpNodeGroupMaxSize, + WaitForAnnotationsToBeAdded: input.E2EConfig.GetIntervals(specName, "wait-autoscaler"), + }) + + By("Checking the MachinePool is scaled down") + // Since we scaled up the MachinePool manually and the workload has not changed auto scaler + // should detect that there are unneeded nodes and scale down the MachinePool. + framework.AssertMachinePoolReplicas(ctx, framework.AssertMachinePoolReplicasInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + MachinePool: clusterResources.MachinePools[0], + Replicas: mpScaledUpReplicas, + WaitForMachinePool: input.E2EConfig.GetIntervals(specName, "wait-controllers"), + }) + } By("PASSED!") }) diff --git a/test/framework/autoscaler_helpers.go b/test/framework/autoscaler_helpers.go index 4259b875281f..28b0f29a01df 100644 --- a/test/framework/autoscaler_helpers.go +++ b/test/framework/autoscaler_helpers.go @@ -52,6 +52,7 @@ type ApplyAutoscalerToWorkloadClusterInput struct { InfrastructureMachineTemplateKind string InfrastructureMachinePoolTemplateKind string InfrastructureMachinePoolKind string + InfrastructureAPIGroup string // WorkloadYamlPath should point the yaml that will be applied on the workload cluster. 
// The YAML file should: // - Be creating the autoscaler deployment in the workload cluster @@ -74,6 +75,8 @@ type ApplyAutoscalerToWorkloadClusterInput struct { ManagementClusterProxy ClusterProxy Cluster *clusterv1.Cluster WorkloadClusterProxy ClusterProxy + + AutoscalerOnManagementCluster bool } // ApplyAutoscalerToWorkloadCluster installs autoscaler on the workload cluster. @@ -88,11 +91,15 @@ func ApplyAutoscalerToWorkloadCluster(ctx context.Context, input ApplyAutoscaler workloadYamlTemplate, err := os.ReadFile(input.WorkloadYamlPath) Expect(err).ToNot(HaveOccurred(), "failed to load %s", workloadYamlTemplate) + if input.InfrastructureAPIGroup == "" { + input.InfrastructureAPIGroup = "infrastructure.cluster.x-k8s.io" + } + // Get a server address for the Management Cluster. // This address should be accessible from the workload cluster. serverAddr, mgtClusterCA := getServerAddrAndCA(ctx, input.ManagementClusterProxy) // Generate a token with the required permission that can be used by the autoscaler. 
- token := getAuthenticationTokenForAutoscaler(ctx, input.ManagementClusterProxy, input.Cluster.Namespace, input.Cluster.Name, input.InfrastructureMachineTemplateKind, input.InfrastructureMachinePoolTemplateKind, input.InfrastructureMachinePoolKind) + token := getAuthenticationTokenForAutoscaler(ctx, input.ManagementClusterProxy, input.Cluster.Namespace, input.Cluster.Name, input.InfrastructureAPIGroup, input.InfrastructureMachineTemplateKind, input.InfrastructureMachinePoolTemplateKind, input.InfrastructureMachinePoolKind) workloadYaml, err := ProcessYAML(&ProcessYAMLInput{ Template: workloadYamlTemplate, @@ -107,9 +114,8 @@ func ApplyAutoscalerToWorkloadCluster(ctx context.Context, input ApplyAutoscaler }, }) Expect(err).ToNot(HaveOccurred(), "failed to parse %s", workloadYamlTemplate) - Expect(input.WorkloadClusterProxy.CreateOrUpdate(ctx, workloadYaml)).To(Succeed(), "failed to apply %s", workloadYamlTemplate) - By("Wait for the autoscaler deployment and collect logs") + autoscalerProxy := input.WorkloadClusterProxy deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-autoscaler", @@ -117,19 +123,27 @@ func ApplyAutoscalerToWorkloadCluster(ctx context.Context, input ApplyAutoscaler }, } - Expect(input.WorkloadClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(deployment), deployment)).To(Succeed(), fmt.Sprintf("failed to get Deployment %s", klog.KObj(deployment))) + if input.AutoscalerOnManagementCluster { + autoscalerProxy = input.ManagementClusterProxy + deployment.Namespace = input.Cluster.Namespace + } + + Expect(autoscalerProxy.CreateOrUpdate(ctx, workloadYaml)).To(Succeed(), "failed to apply %s", workloadYamlTemplate) + + By("Wait for the autoscaler deployment and collect logs") + Expect(autoscalerProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(deployment), deployment)).To(Succeed(), fmt.Sprintf("failed to get Deployment %s", klog.KObj(deployment))) WaitForDeploymentsAvailable(ctx, 
WaitForDeploymentsAvailableInput{ - Getter: input.WorkloadClusterProxy.GetClient(), + Getter: autoscalerProxy.GetClient(), Deployment: deployment, }, intervals...) // Start streaming logs from the autoscaler deployment. WatchDeploymentLogsByName(ctx, WatchDeploymentLogsByNameInput{ - GetLister: input.WorkloadClusterProxy.GetClient(), - Cache: input.WorkloadClusterProxy.GetCache(ctx), - ClientSet: input.WorkloadClusterProxy.GetClientSet(), + GetLister: autoscalerProxy.GetClient(), + Cache: autoscalerProxy.GetCache(ctx), + ClientSet: autoscalerProxy.GetClientSet(), Deployment: deployment, - LogPath: filepath.Join(input.ArtifactFolder, "clusters", input.WorkloadClusterProxy.GetName(), "logs", deployment.GetNamespace()), + LogPath: filepath.Join(input.ArtifactFolder, "clusters", autoscalerProxy.GetName(), "logs", deployment.GetNamespace()), }) } @@ -143,7 +157,7 @@ type AddScaleUpDeploymentAndWaitInput struct { func AddScaleUpDeploymentAndWait(ctx context.Context, input AddScaleUpDeploymentAndWaitInput, intervals ...interface{}) { By("Create a scale up deployment with resource requests to force scale up") if input.ContainerImage == "" { - input.ContainerImage = "registry.k8s.io/pause" + input.ContainerImage = "registry.k8s.io/pause:3.10" } // gets the node size @@ -247,6 +261,34 @@ func DeleteScaleUpDeploymentAndWait(ctx context.Context, input DeleteScaleUpDepl }, input.WaitForDelete...).Should(Succeed()) } +// ScaleScaleUpDeploymentAndWaitInput is the input for ScaleScaleUpDeploymentAndWait. +type ScaleScaleUpDeploymentAndWaitInput struct { + ClusterProxy ClusterProxy + Name string + Replicas int32 +} + +// ScaleScaleUpDeploymentAndWait scales the scale up deployment to a given value and waits for it to become ready. 
+func ScaleScaleUpDeploymentAndWait(ctx context.Context, input ScaleScaleUpDeploymentAndWaitInput, intervals ...interface{}) { + By("Retrieving the scale up deployment") + deployment := &appsv1.Deployment{} + deploymentName := "scale-up" + if input.Name != "" { + deploymentName = input.Name + } + Expect(input.ClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: deploymentName, Namespace: metav1.NamespaceDefault}, deployment)).To(Succeed(), "failed to get the scale up deployment") + + By("Scaling the scale up deployment") + deployment.Spec.Replicas = &input.Replicas + Expect(input.ClusterProxy.GetClient().Update(ctx, deployment)).To(Succeed(), "failed to update the scale up deployment") + + By("Wait for the scale up deployment to become ready (this implies machines to be created)") + WaitForDeploymentsAvailable(ctx, WaitForDeploymentsAvailableInput{ + Getter: input.ClusterProxy.GetClient(), + Deployment: deployment, + }, intervals...) +} + type ProcessYAMLInput struct { Template []byte ClusterctlConfigPath string @@ -493,7 +535,7 @@ func EnableAutoscalerForMachinePoolTopologyAndWait(ctx context.Context, input En // getAuthenticationTokenForAutoscaler returns a bearer authenticationToken with minimal RBAC permissions that will be used // by the autoscaler running on the workload cluster to access the management cluster. 
-func getAuthenticationTokenForAutoscaler(ctx context.Context, managementClusterProxy ClusterProxy, namespace string, cluster string, infraMachineTemplateKind, infraMachinePoolTemplateKind, infraMachinePoolKind string) string { +func getAuthenticationTokenForAutoscaler(ctx context.Context, managementClusterProxy ClusterProxy, namespace string, cluster string, infraAPIGroup, infraMachineTemplateKind, infraMachinePoolTemplateKind, infraMachinePoolKind string) string { name := fmt.Sprintf("cluster-%s", cluster) sa := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -516,7 +558,7 @@ func getAuthenticationTokenForAutoscaler(ctx context.Context, managementClusterP }, { Verbs: []string{"get", "list"}, - APIGroups: []string{"infrastructure.cluster.x-k8s.io"}, + APIGroups: []string{infraAPIGroup}, Resources: []string{infraMachineTemplateKind, infraMachinePoolTemplateKind, infraMachinePoolKind}, }, }, diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go index 1da37edf203b..073212dc14c7 100644 --- a/test/framework/deployment_helpers.go +++ b/test/framework/deployment_helpers.go @@ -479,7 +479,7 @@ func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput) Containers: []corev1.Container{ { Name: "web", - Image: "registry.k8s.io/pause:latest", + Image: "registry.k8s.io/pause:3.10", }, }, }, diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index 3dc15faa8889..8a3f1ab422e2 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -648,5 +648,6 @@ func AssertMachineDeploymentReplicas(ctx context.Context, input AssertMachineDep g.Expect(input.Getter.Get(ctx, key, md)).To(Succeed(), fmt.Sprintf("failed to get MachineDeployment %s", klog.KObj(input.MachineDeployment))) g.Expect(md.Spec.Replicas).Should(Not(BeNil()), fmt.Sprintf("MachineDeployment %s replicas should not be nil", klog.KObj(md))) 
g.Expect(*md.Spec.Replicas).Should(Equal(input.Replicas), fmt.Sprintf("MachineDeployment %s replicas should match expected replicas", klog.KObj(md))) + g.Expect(md.Status.Replicas).Should(Equal(input.Replicas), fmt.Sprintf("MachineDeployment %s status.replicas should match expected replicas", klog.KObj(md))) }, input.WaitForMachineDeployment...).Should(Succeed()) } diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go index 760b942343d7..af143feae605 100644 --- a/test/framework/machinepool_helpers.go +++ b/test/framework/machinepool_helpers.go @@ -365,5 +365,6 @@ func AssertMachinePoolReplicas(ctx context.Context, input AssertMachinePoolRepli g.Expect(input.Getter.Get(ctx, key, mp)).To(Succeed(), fmt.Sprintf("failed to get MachinePool %s", klog.KObj(input.MachinePool))) g.Expect(mp.Spec.Replicas).Should(Not(BeNil()), fmt.Sprintf("MachinePool %s replicas should not be nil", klog.KObj(mp))) g.Expect(*mp.Spec.Replicas).Should(Equal(input.Replicas), fmt.Sprintf("MachinePool %s replicas should match expected replicas", klog.KObj(mp))) + g.Expect(mp.Status.Replicas).Should(Equal(input.Replicas), fmt.Sprintf("MachinePool %s status.replicas should match expected replicas", klog.KObj(mp))) }, input.WaitForMachinePool...).Should(Succeed()) }