diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go index cbbb04da1f8a..c1034fc1583e 100644 --- a/test/e2e/cluster_upgrade_runtimesdk.go +++ b/test/e2e/cluster_upgrade_runtimesdk.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" "time" @@ -90,9 +91,13 @@ type ClusterUpgradeWithRuntimeSDKSpecInput struct { // If not specified, this is a no-op. PostUpgrade func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string) + // ExtensionConfigName is the name of the ExtensionConfig. Defaults to "k8s-upgrade-with-runtimesdk". + ExtensionConfigName string + // ExtensionServiceNamespace is the namespace where the service for the Runtime SDK is located // and is used to configure in the test-namespace scoped ExtensionConfig. ExtensionServiceNamespace string + // ExtensionServiceName is the name of the service to configure in the test-namespace scoped ExtensionConfig. ExtensionServiceName string } @@ -133,6 +138,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl Expect(input.ExtensionServiceNamespace).ToNot(BeEmpty()) Expect(input.ExtensionServiceName).ToNot(BeEmpty()) + if input.ExtensionConfigName == "" { + input.ExtensionConfigName = specName + } if input.ControlPlaneMachineCount == nil { controlPlaneMachineCount = 1 @@ -161,8 +169,11 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl By("Deploy Test Extension ExtensionConfig") + // In this test we are defaulting all handlers to blocking because we expect the handlers to block the + // cluster lifecycle by default. Setting defaultAllHandlersToBlocking to true enforces that the test-extension + // automatically creates the ConfigMap with blocking preloaded responses. Expect(input.BootstrapClusterProxy.GetClient().Create(ctx, - extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName))). + extensionConfig(input.ExtensionConfigName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, true))). To(Succeed(), "Failed to create the extension config") By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation") @@ -304,8 +315,8 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl if !input.SkipCleanup { // Delete the extensionConfig first to ensure the BeforeDeleteCluster hook doesn't block deletion. Eventually(func() error { - return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName)) - }, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed") + return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, true)) + }, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed") Byf("Deleting cluster %s", klog.KObj(clusterResources.Cluster)) // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance @@ -429,8 +440,8 @@ func machineSetPreflightChecksTestHandler(ctx context.Context, c client.Client, // We make sure this cluster-wide object does not conflict with others by using a random generated // name and a NamespaceSelector selecting on the namespace of the current test. 
// Thus, this object is "namespaced" to the current test even though it's a cluster-wide object. -func extensionConfig(name, namespace, extensionServiceNamespace, extensionServiceName string) *runtimev1.ExtensionConfig { - return &runtimev1.ExtensionConfig{ +func extensionConfig(name, namespace, extensionServiceNamespace, extensionServiceName string, selectNamespace, defaultAllHandlersToBlocking bool) *runtimev1.ExtensionConfig { + cfg := &runtimev1.ExtensionConfig{ ObjectMeta: metav1.ObjectMeta{ // Note: We have to use a constant name here as we have to be able to reference it in the ClusterClass // when configuring external patches. @@ -448,25 +459,24 @@ func extensionConfig(name, namespace, extensionServiceNamespace, extensionServic Namespace: extensionServiceNamespace, }, }, - NamespaceSelector: &metav1.LabelSelector{ - // Note: we are limiting the test extension to be used by the namespace where the test is run. - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "kubernetes.io/metadata.name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{namespace}, - }, - }, - }, Settings: map[string]string{ - // In the E2E test we are defaulting all handlers to blocking because cluster_upgrade_runtimesdk_test - // expects the handlers to block the cluster lifecycle by default. - // Setting this value to true enforces that the test-extension automatically creates the ConfigMap with - // blocking preloaded responses. - "defaultAllHandlersToBlocking": "true", + "defaultAllHandlersToBlocking": strconv.FormatBool(defaultAllHandlersToBlocking), }, }, } + if selectNamespace { + cfg.Spec.NamespaceSelector = &metav1.LabelSelector{ + // Note: we are limiting the test extension to be used by the namespace where the test is run. + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{namespace}, + }, + }, + } + } + return cfg } // Check that each hook in hooks has been called at least once by checking if its actualResponseStatus is in the hook response configmap. 
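Note (not part of the patch): the NamespaceSelector that extensionConfig builds when selectNamespace is true scopes the otherwise cluster-wide ExtensionConfig to the test's namespace via the kubernetes.io/metadata.name label, which the API server sets on every Namespace. This is what keeps parallel e2e specs from triggering each other's lifecycle hooks. Below is a minimal standalone sketch of how such a selector evaluates; it assumes k8s.io/apimachinery is available, and the namespace name is a hypothetical stand-in for the randomly generated test namespace.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	testNamespace := "k8s-upgrade-with-runtimesdk-abc12" // hypothetical test namespace

	// The same selector shape that extensionConfig sets on spec.namespaceSelector
	// when selectNamespace is true.
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "kubernetes.io/metadata.name",
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{testNamespace},
		}},
	}
	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}

	// Only the test namespace matches, so hooks fire only for Clusters there.
	fmt.Println(s.Matches(labels.Set{"kubernetes.io/metadata.name": testNamespace}))      // true
	fmt.Println(s.Matches(labels.Set{"kubernetes.io/metadata.name": "other-namespace"})) // false
}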
diff --git a/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml index 61fe1299299d..4c5357ccb844 100644 --- a/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-inmemory/main/bases/cluster-with-topology.yaml @@ -12,6 +12,7 @@ spec: serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: class: in-memory + classNamespace: ${NAMESPACE} version: ${KUBERNETES_VERSION} controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} @@ -20,3 +21,8 @@ spec: - class: default-worker name: md-0 replicas: ${WORKER_MACHINE_COUNT} + variables: + - name: kubeadmControlPlaneMaxSurge + value: "1" + - name: imageRepository + value: "kindest" diff --git a/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml index 945a2cb2b891..d6c6fbd3cdd0 100644 --- a/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml @@ -1,56 +1,3 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: ClusterClass -metadata: - name: in-memory -spec: - controlPlane: - metadata: - annotations: - machineInfrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: InMemoryMachineTemplate - name: in-memory-control-plane - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlaneTemplate - name: in-memory-control-plane - machineHealthCheck: - unhealthyConditions: - - type: Ready - status: Unknown - timeout: 300s - - type: Ready - status: "False" - timeout: 300s - infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: InMemoryClusterTemplate - name: in-memory-cluster - workers: - machineDeployments: - - class: default-worker - template: - bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: in-memory-default-worker-bootstraptemplate - infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 - kind: InMemoryMachineTemplate - name: in-memory-default-worker-machinetemplate - machineHealthCheck: - unhealthyConditions: - - type: Ready - status: Unknown - timeout: 300s - - type: Ready - status: "False" - timeout: 300s ---- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: InMemoryClusterTemplate metadata: @@ -95,19 +42,19 @@ spec: behaviour: vm: provisioning: - startupDuration: "30s" + startupDuration: "10s" startupJitter: "0.2" node: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" apiServer: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" etcd: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 @@ -120,19 +67,19 @@ spec: behaviour: vm: provisioning: - startupDuration: "30s" + startupDuration: "10s" startupJitter: "0.2" node: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" apiServer: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" etcd: provisioning: - startupDuration: "10s" + startupDuration: "2s" startupJitter: "0.2" --- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 @@ -146,4 +93,62 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 
nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% \ No newline at end of file + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: in-memory +spec: + controlPlane: + metadata: + annotations: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryMachineTemplate + name: in-memory-control-plane + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: in-memory-control-plane + machineHealthCheck: + unhealthyConditions: + - type: Ready + status: Unknown + timeout: 300s + - type: Ready + status: "False" + timeout: 300s + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryClusterTemplate + name: in-memory-cluster + workers: + machineDeployments: + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: in-memory-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: InMemoryMachineTemplate + name: in-memory-default-worker-machinetemplate + machineHealthCheck: + unhealthyConditions: + - type: Ready + status: Unknown + timeout: 300s + - type: Ready + status: "False" + timeout: 300s + patches: + - name: test-patch + external: + generateExtension: generate-patches.scale + discoverVariablesExtension: discover-variables.scale diff --git a/test/e2e/scale.go b/test/e2e/scale.go index 56a2256777e7..1e1d60e15e19 100644 --- a/test/e2e/scale.go +++ b/test/e2e/scale.go @@ -18,6 +18,7 @@ package e2e import ( "bytes" + "cmp" "context" "fmt" "math" @@ -49,11 +50,14 @@ import ( ) const ( - scaleClusterCount = "CAPI_SCALE_CLUSTER_COUNT" - scaleConcurrency = "CAPI_SCALE_CONCURRENCY" - scaleControlPlaneMachineCount = "CAPI_SCALE_CONTROL_PLANE_MACHINE_COUNT" - scaleWorkerMachineCount = "CAPI_SCALE_WORKER_MACHINE_COUNT" - scaleMachineDeploymentCount = "CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT" + scaleClusterCount = "CAPI_SCALE_CLUSTER_COUNT" + scaleConcurrency = "CAPI_SCALE_CONCURRENCY" + scaleControlPlaneMachineCount = "CAPI_SCALE_CONTROL_PLANE_MACHINE_COUNT" + scaleWorkerPerMachineDeploymentCount = "CAPI_SCALE_WORKER_PER_MACHINE_DEPLOYMENT_COUNT" + scaleMachineDeploymentCount = "CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT" + scaleAdditionalClusterClassCount = "CAPI_SCALE_ADDITIONAL_CLUSTER_CLASS_COUNT" + scaleDeployClusterInSeparateNamespaces = "CAPI_SCALE_DEPLOY_CLUSTER_IN_SEPARATE_NAMESPACES" + scaleUseCrossNamespaceClusterClass = "CAPI_SCALE_USE_CROSS_NAMESPACE_CLUSTER_CLASS" // Note: Names must consist of lower case alphanumeric characters or '-'. scaleClusterNamePlaceholder = "scale-cluster-name-placeholder" @@ -84,10 +88,6 @@ type ScaleSpecInput struct { // Can be overridden by variable CAPI_SCALE_CLUSTER_COUNT. ClusterCount *int64 - // DeployClusterInSeparateNamespaces defines if each cluster should be deployed into its separate namespace. - // In this case The namespace name will be the name of the cluster. - DeployClusterInSeparateNamespaces bool - // Concurrency is the maximum concurrency of each of the scale operations. // If unspecified it defaults to 5. // Can be overridden by variable CAPI_SCALE_CONCURRENCY. @@ -98,12 +98,12 @@ type ScaleSpecInput struct { // Can be overridden by variable CAPI_SCALE_CONTROLPLANE_MACHINE_COUNT. 
 	ControlPlaneMachineCount *int64

-	// WorkerMachineCount defines number of worker machines per machine deployment of the workload cluster.
+	// WorkerPerMachineDeploymentCount defines the number of worker machines per machine deployment of the workload cluster.
 	// If not specified, 1 will be used.
-	// Can be overridden by variable CAPI_SCALE_WORKER_MACHINE_COUNT.
+	// Can be overridden by variable CAPI_SCALE_WORKER_PER_MACHINE_DEPLOYMENT_COUNT.
 	// The resulting number of worker nodes for each of the workload cluster will
-	// be MachineDeploymentCount*WorkerMachineCount (CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT*CAPI_SCALE_WORKER_MACHINE_COUNT).
-	WorkerMachineCount *int64
+	// be MachineDeploymentCount*WorkerPerMachineDeploymentCount (CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT*CAPI_SCALE_WORKER_PER_MACHINE_DEPLOYMENT_COUNT).
+	WorkerPerMachineDeploymentCount *int64

 	// MachineDeploymentCount defines the number of MachineDeployments to be used per workload cluster.
 	// If not specified, 1 will be used.
@@ -112,9 +112,40 @@
 	// It uses this machine deployment to create additional copies.
 	// Names of the MachineDeployments will be overridden to "md-1", "md-2", etc.
 	// The resulting number of worker nodes for each of the workload cluster will
-	// be MachineDeploymentCount*WorkerMachineCount (CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT*CAPI_SCALE_WORKER_MACHINE_COUNT).
+	// be MachineDeploymentCount*WorkerPerMachineDeploymentCount (CAPI_SCALE_MACHINE_DEPLOYMENT_COUNT*CAPI_SCALE_WORKER_PER_MACHINE_DEPLOYMENT_COUNT).
 	MachineDeploymentCount *int64

+	// AdditionalClusterClassCount is the number of additional copies of the ClusterClass that will be deployed.
+	// This can be used to test how Cluster API scales with a higher number of ClusterClasses.
+	// Can be overridden by variable CAPI_SCALE_ADDITIONAL_CLUSTER_CLASS_COUNT.
+	AdditionalClusterClassCount *int64
+
+	// ClusterClassName is the name of the ClusterClass.
+	// This is only required if AdditionalClusterClassCount is set and > 0.
+	ClusterClassName string
+
+	// DeployClusterInSeparateNamespaces defines if each cluster should be deployed into a separate namespace.
+	// In this case, the namespace name will be the name of the cluster.
+	// Can be overridden by variable CAPI_SCALE_DEPLOY_CLUSTER_IN_SEPARATE_NAMESPACES.
+	DeployClusterInSeparateNamespaces *bool
+
+	// UseCrossNamespaceClusterClass configures Clusters that are deployed into separate Namespaces to
+	// use a single ClusterClass instead of using a ClusterClass in their namespace.
+	// Note: This can only be true when DeployClusterInSeparateNamespaces is true.
+	// Can be overridden by variable CAPI_SCALE_USE_CROSS_NAMESPACE_CLUSTER_CLASS.
+	UseCrossNamespaceClusterClass *bool
+
+	// ExtensionConfigName is the name of the ExtensionConfig. Defaults to "scale".
+	ExtensionConfigName string
+
+	// ExtensionServiceNamespace is the namespace where the service for the Runtime Extension is located.
+	// Note: This should only be set if a Runtime Extension is used.
+	ExtensionServiceNamespace string
+
+	// ExtensionServiceName is the name of the service for the Runtime Extension.
+	// Note: This should only be set if a Runtime Extension is used.
+	ExtensionServiceName string
+
 	// Allows to inject a function to be run after test namespace is created.
 	// If not specified, this is a no-op.
PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) @@ -170,6 +201,12 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { + if input.ExtensionConfigName == "" { + input.ExtensionConfigName = specName + } + } + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. // We are pinning the namespace for the test to help with debugging and testing. // Example: Queries to look up state of the clusters can be re-used. @@ -200,67 +237,55 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { flavor = *input.Flavor } - controlPlaneMachineCount := ptr.To[int64](1) - if input.ControlPlaneMachineCount != nil { - controlPlaneMachineCount = input.ControlPlaneMachineCount - } - // If variable is defined that will take precedence. - if input.E2EConfig.HasVariable(scaleControlPlaneMachineCount) { - controlPlaneMachineCountStr := input.E2EConfig.GetVariable(scaleControlPlaneMachineCount) - controlPlaneMachineCountInt, err := strconv.Atoi(controlPlaneMachineCountStr) - Expect(err).ToNot(HaveOccurred()) - controlPlaneMachineCount = ptr.To[int64](int64(controlPlaneMachineCountInt)) - } - - workerMachineCount := ptr.To[int64](1) - if input.WorkerMachineCount != nil { - workerMachineCount = input.WorkerMachineCount - } - // If variable is defined that will take precedence. - if input.E2EConfig.HasVariable(scaleWorkerMachineCount) { - workerMachineCountStr := input.E2EConfig.GetVariable(scaleWorkerMachineCount) - workerMachineCountInt, err := strconv.Atoi(workerMachineCountStr) - Expect(err).ToNot(HaveOccurred()) - workerMachineCount = ptr.To[int64](int64(workerMachineCountInt)) - } - - machineDeploymentCount := ptr.To[int64](1) - if input.MachineDeploymentCount != nil { - machineDeploymentCount = input.MachineDeploymentCount - } - // If variable is defined that will take precedence. - if input.E2EConfig.HasVariable(scaleMachineDeploymentCount) { - machineDeploymentCountStr := input.E2EConfig.GetVariable(scaleMachineDeploymentCount) - machineDeploymentCountInt, err := strconv.Atoi(machineDeploymentCountStr) - Expect(err).ToNot(HaveOccurred()) - machineDeploymentCount = ptr.To[int64](int64(machineDeploymentCountInt)) - } - - clusterCount := int64(10) - if input.ClusterCount != nil { - clusterCount = *input.ClusterCount - } - // If variable is defined that will take precedence. 
-	if input.E2EConfig.HasVariable(scaleClusterCount) {
-		clusterCountStr := input.E2EConfig.GetVariable(scaleClusterCount)
-		var err error
-		clusterCount, err = strconv.ParseInt(clusterCountStr, 10, 64)
-		Expect(err).NotTo(HaveOccurred(), "%q value should be integer", scaleClusterCount)
+	clusterCount := *cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleClusterCount)),
+		input.ClusterCount, ptr.To[int64](10),
+	)
+	concurrency := *cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleConcurrency)),
+		input.Concurrency, ptr.To[int64](5),
+	)
+	controlPlaneMachineCount := cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleControlPlaneMachineCount)),
+		input.ControlPlaneMachineCount, ptr.To[int64](1),
+	)
+	machineDeploymentCount := *cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleMachineDeploymentCount)),
+		input.MachineDeploymentCount, ptr.To[int64](1),
+	)
+	workerPerMachineDeploymentCount := cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleWorkerPerMachineDeploymentCount)),
+		input.WorkerPerMachineDeploymentCount, ptr.To[int64](3),
+	)
+	additionalClusterClassCount := *cmp.Or(variableAsInt64(input.E2EConfig.GetVariableBestEffort(scaleAdditionalClusterClassCount)),
+		input.AdditionalClusterClassCount, ptr.To[int64](0),
+	)
+	deployClusterInSeparateNamespaces := *cmp.Or(variableAsBool(input.E2EConfig.GetVariableBestEffort(scaleDeployClusterInSeparateNamespaces)),
+		input.DeployClusterInSeparateNamespaces, ptr.To[bool](false),
+	)
+	useCrossNamespaceClusterClass := *cmp.Or(variableAsBool(input.E2EConfig.GetVariableBestEffort(scaleUseCrossNamespaceClusterClass)),
+		input.UseCrossNamespaceClusterClass, ptr.To[bool](false),
+	)
+	if useCrossNamespaceClusterClass {
+		Expect(deployClusterInSeparateNamespaces).To(BeTrue(), "deployClusterInSeparateNamespaces must be "+
+			"true if useCrossNamespaceClusterClass is true")
 	}

-	concurrency := int64(5)
-	if input.Concurrency != nil {
-		concurrency = *input.Concurrency
+	if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" {
+		// NOTE: test extension is already deployed in the management cluster. If for any reason in future we want
+		// to make this test more self-contained this test should be modified in order to create an additional
+		// management cluster; also the E2E test configuration should be modified introducing something like
+		// optional:true allowing to define which providers should not be installed by default in
+		// a management cluster.
+		By("Deploy Test Extension ExtensionConfig")
+
+		// In this test we are defaulting all handlers to non-blocking because we don't expect the handlers to block the
+		// cluster lifecycle by default. Setting defaultAllHandlersToBlocking to false enforces that the test-extension
+		// automatically creates the ConfigMap with non-blocking preloaded responses.
+		defaultAllHandlersToBlocking := false
+		// Select on the current namespace only if the Clusters are all deployed in the current namespace.
+		// This is necessary so that in CI this test doesn't influence other tests by enabling lifecycle hooks
+		// in other test namespaces.
+		selectNamespace := !deployClusterInSeparateNamespaces
+		Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
+			extensionConfig(input.ExtensionConfigName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName, selectNamespace, defaultAllHandlersToBlocking))).
+ To(Succeed(), "Failed to create the extension config") } - // If variable is defined that will take precedence. - if input.E2EConfig.HasVariable(scaleConcurrency) { - concurrencyStr := input.E2EConfig.GetVariable(scaleConcurrency) - var err error - concurrency, err = strconv.ParseInt(concurrencyStr, 10, 64) - Expect(err).NotTo(HaveOccurred(), "%q value should be integer", scaleConcurrency) - } - - // TODO(ykakarap): Follow-up: Add support for legacy cluster templates. By("Create the ClusterClass to be used by all workload clusters") @@ -278,7 +303,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { ClusterName: scaleClusterNamePlaceholder, KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), ControlPlaneMachineCount: controlPlaneMachineCount, - WorkerMachineCount: workerMachineCount, + WorkerMachineCount: workerPerMachineDeploymentCount, }) Expect(baseWorkloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template") @@ -291,17 +316,27 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { baseClusterClassYAML, baseClusterTemplateYAML := extractClusterClassAndClusterFromTemplate(baseWorkloadClusterTemplate) // Modify the baseClusterTemplateYAML so that it has the desired number of machine deployments. - baseClusterTemplateYAML = modifyMachineDeployments(baseClusterTemplateYAML, int(*machineDeploymentCount)) + baseClusterTemplateYAML = modifyMachineDeployments(baseClusterTemplateYAML, machineDeploymentCount) // If all clusters should be deployed in the same namespace (namespace.Name), // then deploy the ClusterClass in this namespace. - if !input.DeployClusterInSeparateNamespaces { + if !deployClusterInSeparateNamespaces || useCrossNamespaceClusterClass { if len(baseClusterClassYAML) > 0 { clusterClassYAML := bytes.Replace(baseClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespace.Name), -1) log.Logf("Apply ClusterClass") Eventually(func() error { return input.BootstrapClusterProxy.CreateOrUpdate(ctx, clusterClassYAML) }, 1*time.Minute).Should(Succeed()) + + // Create additional unused instances of the ClusterClass + for i := range additionalClusterClassCount { + additionalName := fmt.Sprintf("%s-%d", input.ClusterClassName, i+1) + log.Logf("Apply additional ClusterClass %s/%s", namespace.Name, additionalName) + additionalClassYAML := bytes.Replace(clusterClassYAML, []byte(input.ClusterClassName), []byte(additionalName), -1) + Eventually(func() error { + return input.BootstrapClusterProxy.CreateOrUpdate(ctx, additionalClassYAML) + }, 1*time.Minute).Should(Succeed()) + } } else { log.Logf("ClusterClass already exists. 
Skipping creation.") } @@ -341,7 +376,10 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { Concurrency: concurrency, FailFast: input.FailFast, WorkerFunc: func(ctx context.Context, inputChan chan string, resultChan chan workResult, wg *sync.WaitGroup) { - createClusterWorker(ctx, input.BootstrapClusterProxy, inputChan, resultChan, wg, namespace.Name, input.DeployClusterInSeparateNamespaces, baseClusterClassYAML, baseClusterTemplateYAML, creator, input.PostScaleClusterNamespaceCreated) + createClusterWorker(ctx, input.BootstrapClusterProxy, inputChan, resultChan, wg, namespace.Name, + deployClusterInSeparateNamespaces, useCrossNamespaceClusterClass, + baseClusterClassYAML, baseClusterTemplateYAML, creator, input.PostScaleClusterNamespaceCreated, + additionalClusterClassCount, input.ClusterClassName) }, }) if err != nil { @@ -381,7 +419,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { Concurrency: concurrency, FailFast: input.FailFast, WorkerFunc: func(ctx context.Context, inputChan chan string, resultChan chan workResult, wg *sync.WaitGroup) { - upgradeClusterAndWaitWorker(ctx, inputChan, resultChan, wg, namespace.Name, input.DeployClusterInSeparateNamespaces, baseClusterTemplateYAML, upgrader) + upgradeClusterAndWaitWorker(ctx, inputChan, resultChan, wg, namespace.Name, deployClusterInSeparateNamespaces, baseClusterTemplateYAML, upgrader) }, }) if err != nil { @@ -405,6 +443,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { } if input.SkipCleanup { + By("PASSED!") return } @@ -422,7 +461,7 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { wg, input.BootstrapClusterProxy.GetClient(), namespace.Name, - input.DeployClusterInSeparateNamespaces, + deployClusterInSeparateNamespaces, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")..., ) }, @@ -445,6 +484,13 @@ func ScaleSpec(ctx context.Context, inputGetter func() ScaleSpecInput) { }) AfterEach(func() { + if !input.SkipCleanup { + if input.ExtensionServiceNamespace != "" && input.ExtensionServiceName != "" { + Eventually(func() error { + return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, true)) + }, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed") + } + } cancelWatches() }) } @@ -589,7 +635,7 @@ func getClusterCreateFn(clusterProxy framework.ClusterProxy) clusterCreator { type PostScaleClusterNamespaceCreated func(clusterProxy framework.ClusterProxy, clusterNamespace string, clusterName string, clusterClassYAML []byte, clusterTemplateYAML []byte) ([]byte, []byte) -func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProxy, inputChan <-chan string, resultChan chan<- workResult, wg *sync.WaitGroup, defaultNamespace string, deployClusterInSeparateNamespaces bool, baseClusterClassYAML, baseClusterTemplateYAML []byte, create clusterCreator, postScaleClusterNamespaceCreated PostScaleClusterNamespaceCreated) { +func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProxy, inputChan <-chan string, resultChan chan<- workResult, wg *sync.WaitGroup, defaultNamespace string, deployClusterInSeparateNamespaces, enableCrossNamespaceClusterClass bool, baseClusterClassYAML, baseClusterTemplateYAML []byte, create clusterCreator, postScaleClusterNamespaceCreated PostScaleClusterNamespaceCreated, additionalClusterClasses int64, 
clusterClassName string) { defer wg.Done() for { @@ -626,7 +672,7 @@ func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProx // * Adjust namespace in ClusterClass YAML. // * Create new namespace. if deployClusterInSeparateNamespaces { - log.Logf("Create namespace %", namespaceName) + log.Logf("Create namespace %s", namespaceName) _ = framework.CreateNamespace(ctx, framework.CreateNamespaceInput{ Creator: clusterProxy.GetClient(), Name: namespaceName, @@ -645,16 +691,33 @@ func createClusterWorker(ctx context.Context, clusterProxy framework.ClusterProx // If every cluster should be deployed in a separate namespace: // * Deploy ClusterClass in new namespace. - if deployClusterInSeparateNamespaces { + if deployClusterInSeparateNamespaces && !enableCrossNamespaceClusterClass { log.Logf("Apply ClusterClass in namespace %s", namespaceName) clusterClassYAML := bytes.Replace(customizedClusterClassYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName), -1) Eventually(func() error { return clusterProxy.CreateOrUpdate(ctx, clusterClassYAML) }, 1*time.Minute).Should(Succeed()) + + // Create additional unused instances of the ClusterClass + for i := range additionalClusterClasses { + additionalName := fmt.Sprintf("%s-%d", clusterClassName, i+1) + log.Logf("Apply additional ClusterClass %s/%s", namespaceName, additionalName) + additionalClassYAML := bytes.Replace(clusterClassYAML, []byte(clusterClassName), []byte(additionalName), -1) + Eventually(func() error { + return clusterProxy.CreateOrUpdate(ctx, additionalClassYAML) + }, 1*time.Minute).Should(Succeed()) + } } // Adjust namespace and name in Cluster YAML - clusterTemplateYAML := bytes.Replace(customizedClusterTemplateYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName), -1) + clusterTemplateYAML := customizedClusterTemplateYAML + if enableCrossNamespaceClusterClass { + // Set classNamespace to the defaultNamespace where the ClusterClass is located. + clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, + []byte(fmt.Sprintf("classNamespace: %s", scaleClusterNamespacePlaceholder)), + []byte(fmt.Sprintf("classNamespace: %s", defaultNamespace)), -1) + } + clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, []byte(scaleClusterNamespacePlaceholder), []byte(namespaceName), -1) clusterTemplateYAML = bytes.Replace(clusterTemplateYAML, []byte(scaleClusterNamePlaceholder), []byte(clusterName), -1) // Deploy Cluster. @@ -843,7 +906,7 @@ type workResult struct { err any } -func modifyMachineDeployments(baseClusterTemplateYAML []byte, count int) []byte { +func modifyMachineDeployments(baseClusterTemplateYAML []byte, count int64) []byte { Expect(baseClusterTemplateYAML).NotTo(BeEmpty(), "Invalid argument. baseClusterTemplateYAML cannot be empty when calling modifyMachineDeployments") Expect(count).To(BeNumerically(">=", 0), "Invalid argument. count cannot be less than 0 when calling modifyMachineDeployments") @@ -863,7 +926,7 @@ func modifyMachineDeployments(baseClusterTemplateYAML []byte, count int) []byte baseMD := cluster.Spec.Topology.Workers.MachineDeployments[0] allMDs := make([]clusterv1.MachineDeploymentTopology, count) allMDDigits := 1 + int(math.Log10(float64(count))) - for i := 1; i <= count; i++ { + for i := int64(1); i <= count; i++ { md := baseMD.DeepCopy() // This ensures we always have the right number of leading zeros in our machine deployment names, e.g. // count=1000 will lead to machine deployment names like md-0001, md-0002, so on. 
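Note (not part of the patch): the leading-zero naming scheme described in the comment above is easy to check in isolation. In the sketch below, the width computation (1 + int(math.Log10(float64(count)))) is copied from modifyMachineDeployments, while the %0*d formatting verb is an illustrative stand-in for the elided name-building code.

package main

import (
	"fmt"
	"math"
)

func main() {
	count := int64(1000)
	// Width = number of digits in count, so every name has the same length
	// and the names sort lexicographically: md-0001 ... md-1000.
	allMDDigits := 1 + int(math.Log10(float64(count)))
	for _, i := range []int64{1, 42, 1000} {
		// %0*d pads with leading zeros to the computed width.
		fmt.Printf("md-%0*d\n", allMDDigits, i) // md-0001, md-0042, md-1000
	}
}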
@@ -878,3 +941,25 @@

 	return modifiedClusterYAML
 }
+
+func variableAsInt64(variableValue string) *int64 {
+	if variableValue == "" {
+		return nil
+	}
+
+	variableValueInt, err := strconv.ParseInt(variableValue, 10, 64)
+	Expect(err).ToNot(HaveOccurred())
+
+	return ptr.To[int64](variableValueInt)
+}
+
+func variableAsBool(variableValue string) *bool {
+	if variableValue == "" {
+		return nil
+	}
+
+	variableValueBool, err := strconv.ParseBool(variableValue)
+	Expect(err).ToNot(HaveOccurred())
+
+	return ptr.To[bool](variableValueBool)
+}
diff --git a/test/e2e/scale_test.go b/test/e2e/scale_test.go
index 541dee0ec56e..7ced2167f090 100644
--- a/test/e2e/scale_test.go
+++ b/test/e2e/scale_test.go
@@ -28,18 +28,28 @@ var _ = Describe("When testing the machinery for scale testing using in-memory p
 	// Note: This test does not support MachinePools.
 	ScaleSpec(ctx, func() ScaleSpecInput {
 		return ScaleSpecInput{
-			E2EConfig:                e2eConfig,
-			ClusterctlConfigPath:     clusterctlConfigPath,
-			InfrastructureProvider:   ptr.To("in-memory"),
-			BootstrapClusterProxy:    bootstrapClusterProxy,
-			ArtifactFolder:           artifactFolder,
-			ClusterCount:             ptr.To[int64](10),
-			Concurrency:              ptr.To[int64](5),
-			Flavor:                   ptr.To(""),
-			ControlPlaneMachineCount: ptr.To[int64](1),
-			MachineDeploymentCount:   ptr.To[int64](1),
-			WorkerMachineCount:       ptr.To[int64](3),
-			SkipCleanup:              skipCleanup,
+			E2EConfig:                         e2eConfig,
+			ClusterctlConfigPath:              clusterctlConfigPath,
+			InfrastructureProvider:            ptr.To("in-memory"),
+			BootstrapClusterProxy:             bootstrapClusterProxy,
+			ArtifactFolder:                    artifactFolder,
+			Flavor:                            ptr.To(""),
+			SkipCleanup:                       skipCleanup,
+			ClusterCount:                      ptr.To[int64](10),
+			Concurrency:                       ptr.To[int64](5),
+			ControlPlaneMachineCount:          ptr.To[int64](1),
+			MachineDeploymentCount:            ptr.To[int64](1),
+			WorkerPerMachineDeploymentCount:   ptr.To[int64](3),
+			AdditionalClusterClassCount:       ptr.To[int64](4),
+			ClusterClassName:                  "in-memory",
+			DeployClusterInSeparateNamespaces: ptr.To[bool](false),
+			UseCrossNamespaceClusterClass:     ptr.To[bool](false),
+			// The runtime extension gets deployed to the test-extension-system namespace and is exposed
+			// by the test-extension-webhook-service.
+			// The below values are used when creating the cluster-wide ExtensionConfig to refer to
+			// the actual service.
+			ExtensionServiceNamespace: "test-extension-system",
+			ExtensionServiceName:      "test-extension-webhook-service",
 		}
 	})
})
diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go
index 806742d5b636..6ad78a17a21f 100644
--- a/test/framework/clusterctl/clusterctl_helpers.go
+++ b/test/framework/clusterctl/clusterctl_helpers.go
@@ -17,6 +17,7 @@ limitations under the License.
 package clusterctl

 import (
+	"cmp"
 	"context"
 	"fmt"
 	"os"
@@ -446,11 +447,11 @@ func ApplyCustomClusterTemplateAndWait(ctx context.Context, input ApplyCustomClu
 	}, input.WaitForClusterIntervals...)
 	if result.Cluster.Spec.Topology != nil {
-		result.ClusterClass = framework.GetClusterClassByName(ctx, framework.GetClusterClassByNameInput{
-			Getter:    input.ClusterProxy.GetClient(),
-			Namespace: input.Namespace,
-			Name:      result.Cluster.Spec.Topology.Class,
-		})
+		result.ClusterClass = framework.GetClusterClassByName(ctx, framework.GetClusterClassByNameInput{
+			Getter:    input.ClusterProxy.GetClient(),
+			Namespace: cmp.Or(result.Cluster.Spec.Topology.ClassNamespace, result.Cluster.Namespace),
+			Name:      result.Cluster.Spec.Topology.Class,
+		})
 	}

 	log.Logf("Waiting for control plane of cluster %s to be initialized", klog.KRef(input.Namespace, input.ClusterName))
diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go
index b50440a902be..94e6de9d5372 100644
--- a/test/framework/clusterctl/e2e_config.go
+++ b/test/framework/clusterctl/e2e_config.go
@@ -800,6 +800,21 @@ func (c *E2EConfig) GetVariable(varName string) string {
 	return value
 }

+// GetVariableBestEffort returns a variable from environment variables or from the e2e config file.
+// If the variable cannot be found it returns an empty string and does not fail.
+func (c *E2EConfig) GetVariableBestEffort(varName string) string {
+	if value, ok := os.LookupEnv(varName); ok {
+		return value
+	}
+
+	value, ok := c.Variables[varName]
+	if ok {
+		return value
+	}
+
+	return ""
+}
+
 // GetInt64PtrVariable returns an Int64Ptr variable from the e2e config file.
 func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 {
 	wCountStr := c.GetVariable(varName)
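Note (not part of the patch): GetVariableBestEffort combines with the cmp.Or chains in scale.go to implement one consistent precedence rule — an environment or e2e-config variable (read best-effort) wins over the value wired into ScaleSpecInput, which wins over a hard-coded default, since cmp.Or (Go 1.22+) returns the first non-zero, i.e. non-nil, pointer. Below is a minimal standalone sketch of the pattern; os.Getenv stands in for GetVariableBestEffort, ptrTo stands in for k8s.io/utils/ptr, and the panic replaces the Gomega assertion used by the real helper.

package main

import (
	"cmp"
	"fmt"
	"os"
	"strconv"
)

// variableAsInt64 mirrors the helper added in scale.go: an unset variable
// yields nil so that cmp.Or can fall through to the next candidate.
func variableAsInt64(variableValue string) *int64 {
	if variableValue == "" {
		return nil
	}
	v, err := strconv.ParseInt(variableValue, 10, 64)
	if err != nil {
		panic(err) // the e2e helper fails the test via Expect instead
	}
	return &v
}

func ptrTo[T any](v T) *T { return &v }

func main() {
	// Stand-in for input.ClusterCount; nil means "not set by the test".
	var inputClusterCount *int64

	clusterCount := *cmp.Or(
		variableAsInt64(os.Getenv("CAPI_SCALE_CLUSTER_COUNT")), // highest precedence
		inputClusterCount, // spec input
		ptrTo[int64](10),  // hard-coded default, always non-nil
	)
	fmt.Println("clusterCount:", clusterCount) // 10 unless overridden
}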