package __hypershift

import (
	"context"
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"

	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
	manifestsutil "github.com/openshift/cluster-node-tuning-operator/pkg/util"
	testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
	testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
	testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

var _ = Describe("Multiple performance profile in hypershift", Ordered, func() {
	var profile, secondProfile *performancev2.PerformanceProfile
	var nodePools []*hypershiftv1beta1.NodePool
	var np *hypershiftv1beta1.NodePool
	var workerRTNodes []corev1.Node
	var err error

	nodeLabel := testutils.NodeSelectorLabels
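	// chkKubeletConfigFn reads the rendered kubelet configuration directly from the
	// node's filesystem, so the tests can assert on the kubelet parameters produced
	// by the performance profile attached to the node pool.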
	chkKubeletConfig := []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"}
	chkKubeletConfigFn := func(ctx context.Context, node *corev1.Node) (string, error) {
		out, err := nodes.ExecCommand(ctx, node, chkKubeletConfig)
		if err != nil {
			return "", err
		}
		return testutils.ToString(out), nil
	}

	BeforeAll(func() {
		By("Checking if discovery mode is enabled and performance profile is not found")
		if discovery.Enabled() && testutils.ProfileNotFound {
			Skip("Discovery mode enabled, performance profile not found")
		}
		workerRTNodes, err = nodes.GetByLabels(nodeLabel)
		Expect(err).ToNot(HaveOccurred())
		profile, err = profiles.GetByNodeLabels(nodeLabel)
		Expect(err).ToNot(HaveOccurred())
		hostedClusterName, err := hypershift.GetHostedClusterName()
		Expect(err).ToNot(HaveOccurred())
		nodePools, err = ListNodePools(context.TODO(), testclient.ControlPlaneClient, hostedClusterName)
		Expect(err).ToNot(HaveOccurred())
		Expect(nodePools).ToNot(BeEmpty(), "no node pools found")
		Expect(len(nodePools)).To(BeNumerically(">=", 2), "the test requires at least two node pools in the hosted cluster")
		PrintNodePoolProfiles(nodePools)
	})

	Context("Multiple Nodepool", Ordered, func() {
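		// CPU partitioning and topology manager policy applied to the second
		// profile when it is updated later in the suite.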
		isolated := performancev2.CPUSet("1-2")
		reserved := performancev2.CPUSet("0,3")
		policy := "best-effort"

		It("should verify support for different performance profiles on hosted cluster via multiple node pools", func() {
			By("Creating a deep copy of the performance profile for the second node pool")
			secondProfile = profile.DeepCopy()
			secondProfile.Name = "second-profile"
			np = nodePools[1]
			By("Creating the second performance profile in the control plane")
			Expect(testclient.ControlPlaneClient.Create(context.TODO(), secondProfile)).To(Succeed(), "Failed to create the performance profile")

			By("Attaching the tuning object to the second node pool")
			Expect(nodepools.AttachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, np)).To(Succeed())

			By("Waiting for the second node pool configuration to start updating")
			err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			By("Waiting for the second node pool configuration to be ready")
			err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			By("Checking if reserved CPUs are correctly set on the worker nodes")
			for _, node := range workerRTNodes {
				By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

				result, err := chkKubeletConfigFn(context.TODO(), &node)
				Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

				obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
				Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

				kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
				Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

				Expect(kc.ReservedSystemCPUs).To(Equal("0"), "ReservedSystemCPUs is not correctly set")
				Expect(kc.TopologyManagerPolicy).To(Equal("single-numa-node"), "TopologyManagerPolicy is not correctly set")
			}
		})

		It("should verify that Performance Profile update re-creates only target nodepool nodes", func() {
			By("Printing node pool profiles before updating the performance profile")
			PrintNodePoolProfiles(nodePools)

			By("Modifying the second profile CPU and NUMA configurations")
			secondProfile.Spec.CPU = &performancev2.CPU{
				BalanceIsolated: pointer.Bool(false),
				Reserved:        &reserved,
				Isolated:        &isolated,
			}
			secondProfile.Spec.NUMA = &performancev2.NUMA{
				TopologyPolicy: &policy,
			}

			By("Updating the second node pool performance profile")
			profiles.UpdateWithRetry(secondProfile)

			By("Waiting for the second node pool configuration to start updating")
			err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			By("Waiting for the second node pool configuration to be ready")
			err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			// Only the nodes of the updated node pool are expected to be re-created,
			// so scope the verification to the nodes carrying the second pool's label.
			nodePoolLabel := map[string]string{
				"hypershift.openshift.io/nodePool": np.Name,
			}
			workerRTNodes, err = nodes.GetByLabels(nodePoolLabel)
			Expect(err).ToNot(HaveOccurred())

			By("Verifying the kubelet parameters were correctly updated for the worker nodes")
			for _, node := range workerRTNodes {
				By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

				result, err := chkKubeletConfigFn(context.TODO(), &node)
				Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

				obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
				Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

				kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
				Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

				Expect(kc.ReservedSystemCPUs).To(Equal("0,3"), "ReservedSystemCPUs is not correctly set")
				Expect(kc.TopologyManagerPolicy).To(Equal("best-effort"), "TopologyManagerPolicy is not correctly set")
			}
		})

		AfterAll(func() {
			By("Detaching the second profile from the second node pool")
			Expect(nodepools.DeattachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, np)).To(Succeed())

			By("Waiting for the second node pool configuration to start updating after the profile is detached")
			err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			By("Waiting for the second node pool configuration to be ready after the profile is detached")
			err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
			Expect(err).ToNot(HaveOccurred())

			PrintNodePoolProfiles(nodePools)
		})
	})
})

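// ListNodePools returns the node pools that belong to the given hosted cluster,
// filtering out node pools owned by any other hosted cluster on the management
// cluster.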
func ListNodePools(ctx context.Context, c client.Client, hostedClusterName string) ([]*hypershiftv1beta1.NodePool, error) {
	npList := &hypershiftv1beta1.NodePoolList{}
	if err := c.List(ctx, npList); err != nil {
		return nil, err
	}

	var nodePools []*hypershiftv1beta1.NodePool
	for i := range npList.Items {
		np := &npList.Items[i]
		if np.Spec.ClusterName == hostedClusterName {
			nodePools = append(nodePools, np)
		}
	}

	if len(nodePools) == 0 {
		return nil, fmt.Errorf("no nodePools found for hosted cluster %q", hostedClusterName)
	}

	return nodePools, nil
}

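// PrintNodePoolProfiles logs the tuning config (performance profile) attached to
// each node pool, or notes that a node pool has no tuningConfig at all.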
func PrintNodePoolProfiles(nodePools []*hypershiftv1beta1.NodePool) {
	for _, np := range nodePools {
		for _, tuningConfig := range np.Spec.TuningConfig {
			testlog.Infof("NodePool %q is using profile: %q", np.Name, tuningConfig.Name)
		}
		if len(np.Spec.TuningConfig) == 0 {
			testlog.Infof("NodePool %q does not have a tuningConfig profile", np.Name)
		}
	}
}