
Commit 3f1036c

hypershift: Sanity test cases
- Added tests for multiple performance profiles under different node pools
- Adjusted the node pool utility to handle multiple node pools
- Added the test suite run to the Makefile

Signed-off-by: Sargun Narula <[email protected]>
1 parent 6d2e1ed commit 3f1036c

4 files changed: 284 additions & 17 deletions


Makefile

Lines changed: 1 addition & 1 deletion
@@ -250,7 +250,7 @@ pao-functests-mixedcpus: $(BINDATA)
 pao-functests-hypershift: $(BINDATA)
     @echo "Cluster Version"
     hack/show-cluster-version.sh
-    hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/1_performance ./test/e2e/performanceprofile/functests/3_performance_status ./test/e2e/performanceprofile/functests/6_mustgather_testing" -p "-vv --label-filter="!openshift" -r --fail-fast --flake-attempts=2 --timeout=2h --junit-report=report.xml" -m "Running Functional Tests over Hypershift"
+    hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/1_performance ./test/e2e/performanceprofile/functests/3_performance_status ./test/e2e/performanceprofile/functests/6_mustgather_testing ./test/e2e/performanceprofile/functests/12_hypershift " -p "-vv --label-filter="!openshift" -r --fail-fast --flake-attempts=2 --timeout=2h --junit-report=report.xml" -m "Running Functional Tests over Hypershift"

 .PHONY: cluster-clean-pao
 cluster-clean-pao:
Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
//go:build !unittests
// +build !unittests

package __hypershift_test

import (
    "context"
    "log"
    "os"
    "testing"
    "time"

    "github.com/go-logr/stdr"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/apimachinery/pkg/api/errors"

    ctrllog "sigs.k8s.io/controller-runtime/pkg/log"

    testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
    testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
    testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/namespaces"
    nodeinspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector"
)

var _ = BeforeSuite(func() {
    // create test namespace
    err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
    if errors.IsAlreadyExists(err) {
        testlog.Warning("test namespace already exists, that is unexpected")
        return
    }
    Expect(err).ToNot(HaveOccurred())
})

var _ = AfterSuite(func() {
    err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
    Expect(err).ToNot(HaveOccurred())
    Expect(namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)).To(Succeed())
    Expect(nodeinspector.Delete(context.TODO())).To(Succeed())
})

func TestHypershift(t *testing.T) {
    ctrllog.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))

    RegisterFailHandler(Fail)

    RunSpecs(t, "PAO Hypershift tests")
}
Lines changed: 200 additions & 0 deletions
@@ -0,0 +1,200 @@
package __hypershift

import (
    "context"
    "fmt"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/utils/pointer"
    "sigs.k8s.io/controller-runtime/pkg/client"

    performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
    manifestsutil "github.com/openshift/cluster-node-tuning-operator/pkg/util"
    testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
    testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
    testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
    hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
    kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

var _ = Describe("Multiple performance profile in hypershift", Ordered, func() {
    var profile, secondProfile *performancev2.PerformanceProfile
    var nodePools []*hypershiftv1beta1.NodePool
    var np *hypershiftv1beta1.NodePool
    var workerRTNodes []corev1.Node
    var err error

    nodeLabel := testutils.NodeSelectorLabels
    chkKubeletConfig := []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"}
    chkKubeletConfigFn := func(ctx context.Context, node *corev1.Node) (string, error) {
        out, err := nodes.ExecCommand(ctx, node, chkKubeletConfig)
        if err != nil {
            return "", err
        }
        output := testutils.ToString(out)
        return output, nil
    }

    BeforeAll(func() {
        By("Checking if discovery mode is enabled and performance profile is not found")
        if discovery.Enabled() && testutils.ProfileNotFound {
            Skip("Discovery mode enabled, performance profile not found")
        }
        workerRTNodes, err = nodes.GetByLabels(nodeLabel)
        Expect(err).ToNot(HaveOccurred())
        profile, err = profiles.GetByNodeLabels(nodeLabel)
        Expect(err).ToNot(HaveOccurred())
        hostedClusterName, err := hypershift.GetHostedClusterName()
        Expect(err).ToNot(HaveOccurred())
        nodePools, err = ListNodePools(context.TODO(), testclient.ControlPlaneClient, hostedClusterName)
        Expect(err).ToNot(HaveOccurred())
        Expect(nodePools).ToNot(BeEmpty(), "no node pools found")
        Expect(len(nodePools)).To(BeNumerically(">=", 2))
        PrintNodePoolProfiles(nodePools)
    })

    Context("Multiple Nodepool", Ordered, func() {
        isolated := performancev2.CPUSet("1-2")
        reserved := performancev2.CPUSet("0,3")
        policy := "best-effort"

        It("should verify support for different performance profiles on hosted cluster via multiple node pools", func() {
            By("Creating a deep copy of the performance profile for the second node pool")
            secondProfile = profile.DeepCopy()
            secondProfile.Name = "second-profile"
            np = nodePools[1]
            By("Creating the second performance profile in the control plane")
            Expect(testclient.ControlPlaneClient.Create(context.TODO(), secondProfile)).To(Succeed(), "Failed to create the performance profile")

            By("Attaching the tuning object to the second node pool")
            Expect(nodepools.AttachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, nodePools[1])).To(Succeed())

            By("Waiting for the second node pool configuration to start updating")
            err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            By("Waiting for the second node pool configuration to be ready")
            err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            By("Checking if reserved CPUs are correctly set on the worker nodes")
            for _, node := range workerRTNodes {
                By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

                result, err := chkKubeletConfigFn(context.TODO(), &node)
                Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

                obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
                Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

                kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
                Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

                Expect(kc.ReservedSystemCPUs).To(Equal("0"), "ReservedSystemCPUs is not correctly set")
                Expect(kc.TopologyManagerPolicy).To(Equal("single-numa-node"), "TopologyManagerPolicy is not correctly set")
            }
        })

        It("should verify that Performance Profile update re-creates only target nodepool nodes", func() {
            By("Printing node pool profiles before updating the performance profile")
            PrintNodePoolProfiles(nodePools)

            By("Modifying the second profile CPU and NUMA configurations")
            secondProfile.Spec.CPU = &performancev2.CPU{
                BalanceIsolated: pointer.Bool(false),
                Reserved:        &reserved,
                Isolated:        &isolated,
            }
            secondProfile.Spec.NUMA = &performancev2.NUMA{
                TopologyPolicy: &policy,
            }

            By("Updating the second node pool performance profile")
            profiles.UpdateWithRetry(secondProfile)

            By("Waiting for the second node pool configuration to start updating")
            err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            By("Waiting for the second node pool configuration to be ready")
            err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            NodePoolLabel := map[string]string{
                fmt.Sprintf("%s/%s", "hypershift.openshift.io", "nodePool"): nodePools[1].Name,
            }
            workerRTNodes, err = nodes.GetByLabels(NodePoolLabel)
            Expect(err).ToNot(HaveOccurred())

            By("Verifying the kubelet parameters were correctly updated for the worker nodes")
            for _, node := range workerRTNodes {
                By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

                result, err := chkKubeletConfigFn(context.TODO(), &node)
                Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

                obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
                Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

                kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
                Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

                Expect(kc.ReservedSystemCPUs).To(Equal("0,3"), "ReservedSystemCPUs is not correctly set")
                Expect(kc.TopologyManagerPolicy).To(Equal("best-effort"), "TopologyManagerPolicy is not correctly set")
            }
        })

        AfterAll(func() {
            By("Deleting the second Profile")
            Expect(nodepools.DeattachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, nodePools[1])).To(Succeed())

            By("Waiting for the second node pool configuration to start updating after profile deletion")
            err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            By("Waiting for the second node pool configuration to be ready after profile deletion")
            err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
            Expect(err).ToNot(HaveOccurred())

            PrintNodePoolProfiles(nodePools)
        })
    })
})

func ListNodePools(ctx context.Context, c client.Client, hostedClusterName string) ([]*hypershiftv1beta1.NodePool, error) {
    npList := &hypershiftv1beta1.NodePoolList{}
    if err := c.List(ctx, npList); err != nil {
        return nil, err
    }

    var nodePools []*hypershiftv1beta1.NodePool
    for i := range npList.Items {
        np := &npList.Items[i]
        if np.Spec.ClusterName == hostedClusterName {
            nodePools = append(nodePools, np)
        }
    }

    if len(nodePools) == 0 {
        return nil, fmt.Errorf("no nodePools found for hosted cluster %q", hostedClusterName)
    }

    return nodePools, nil
}

func PrintNodePoolProfiles(nodePools []*hypershiftv1beta1.NodePool) {
    for _, np := range nodePools {
        for _, tuningConfig := range np.Spec.TuningConfig {
            testlog.Infof("NodePool %q is using profile: %q", np.Name, tuningConfig.Name)
        }
        if len(np.Spec.TuningConfig) == 0 {
            testlog.Infof("NodePool %q does not have a tuningConfig profile", np.Name)
        }
    }
}

test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go

Lines changed: 32 additions & 16 deletions
@@ -67,14 +67,16 @@ func GetByClusterName(ctx context.Context, c client.Client, hostedClusterName st

 // AttachTuningObject is attaches a tuning object into the nodepool associated with the hosted-cluster
 // The function is idempotent
-func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {
-    hostedClusterName, err := hypershift.GetHostedClusterName()
-    if err != nil {
-        return err
-    }
-    np, err := GetByClusterName(ctx, cli, hostedClusterName)
-    if err != nil {
-        return err
+func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
+    var np *hypershiftv1beta1.NodePool
+    var err error
+    if len(nodePools) > 0 && nodePools[0] != nil {
+        np = nodePools[0]
+    } else {
+        np, err = GetNodePool(ctx, cli)
+        if err != nil {
+            return err
+        }
     }

     updatedTuningConfig := []corev1.LocalObjectReference{{Name: object.GetName()}}
@@ -91,14 +93,16 @@ func AttachTuningObject(ctx context.Context, cli client.Client, object client.Ob
     return nil
 }

-func DeattachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {
-    hostedClusterName, err := hypershift.GetHostedClusterName()
-    if err != nil {
-        return err
-    }
-    np, err := GetByClusterName(ctx, cli, hostedClusterName)
-    if err != nil {
-        return err
+func DeattachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
+    var np *hypershiftv1beta1.NodePool
+    var err error
+    if len(nodePools) > 0 && nodePools[0] != nil {
+        np = nodePools[0]
+    } else {
+        np, err = GetNodePool(ctx, cli)
+        if err != nil {
+            return err
+        }
     }
     for i := range np.Spec.TuningConfig {
         if np.Spec.TuningConfig[i].Name == object.GetName() {
@@ -111,3 +115,15 @@ func DeattachTuningObject(ctx context.Context, cli client.Client, object client.
     }
     return nil
 }
+
+func GetNodePool(ctx context.Context, cli client.Client) (*hypershiftv1beta1.NodePool, error) {
+    hostedClusterName, err := hypershift.GetHostedClusterName()
+    if err != nil {
+        return nil, err
+    }
+    np, err := GetByClusterName(ctx, cli, hostedClusterName)
+    if err != nil {
+        return nil, err
+    }
+    return np, nil
+}
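
To illustrate the call-site impact of the new variadic parameter, here is a minimal, hypothetical sketch (the ctx, cli, secondProfile, and targetPool names and the wrapper function are illustrative, not part of this commit): passing a node pool attaches the tuning object to that pool directly, while omitting the argument falls back to GetNodePool, which preserves the previous behaviour of resolving the hosted cluster's default node pool.

package example

import (
    "context"

    "sigs.k8s.io/controller-runtime/pkg/client"

    performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
    "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
    hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

// attachAndDetach is an illustrative helper, not part of the commit.
func attachAndDetach(ctx context.Context, cli client.Client, secondProfile *performancev2.PerformanceProfile, targetPool *hypershiftv1beta1.NodePool) error {
    // Explicit node pool: the tuning object is attached to targetPool only.
    if err := nodepools.AttachTuningObject(ctx, cli, secondProfile, targetPool); err != nil {
        return err
    }
    // No node pool argument: falls back to GetNodePool, i.e. the pool resolved
    // from the hosted cluster name (the pre-existing behaviour).
    if err := nodepools.AttachTuningObject(ctx, cli, secondProfile); err != nil {
        return err
    }
    // Detaching mirrors the same optional-pool pattern.
    return nodepools.DeattachTuningObject(ctx, cli, secondProfile, targetPool)
}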
