E2E: Add support for HyperShift to OVS dynamic pinning tests
* Check node pools instead of MCPs when the tests run on a HyperShift
(hosted control plane) cluster; see the sketch after the commit metadata below.

* Use the appropriate data-plane test client instead of the generic
testclient.Client.

Signed-off-by: Niranjan M.R <[email protected]>
Niranjan M.R committed Sep 2, 2024
1 parent 38a3a71 · commit 0175192
Showing 1 changed file with 47 additions and 49 deletions.
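
The core of the change is selecting the right rollout target for the cluster topology: on standalone OpenShift the performance profile rolls out through a MachineConfigPool, while on HyperShift it rolls out through the hosted cluster's NodePool, which has to be read with the control-plane client. Below is a minimal sketch of the two patterns the test now relies on, assuming the helper signatures that appear in the diff (hypershift, mcps, nodepools, profiles, profilesupdate, testclient, testlog from functests/utils); resourcePoolFor and applyAndWait are illustrative names, not helpers from the repository.

package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
	testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
	testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profilesupdate"
)

// resourcePoolFor resolves the pool a profile change rolls through:
// an MCP name on standalone clusters, a NodePool key on HyperShift.
func resourcePoolFor(ctx context.Context, profile *performancev2.PerformanceProfile) (string, error) {
	if !hypershift.IsHypershiftCluster() {
		// Standalone: the MCP that matches the profile's node selector.
		return mcps.GetByProfile(profile)
	}
	// HyperShift: NodePools live on the management cluster, so they are
	// read through the control-plane client, not the data-plane client.
	hostedClusterName, err := hypershift.GetHostedClusterName()
	if err != nil {
		return "", fmt.Errorf("resolving hosted cluster name: %w", err)
	}
	np, err := nodepools.GetByClusterName(ctx, testclient.ControlPlaneClient, hostedClusterName)
	if err != nil {
		return "", err
	}
	return client.ObjectKeyFromObject(np).String(), nil
}

// applyAndWait applies an updated profile and blocks until the pool
// (MCP or NodePool) reports that the rollout started and then finished;
// profilesupdate hides which condition type is being polled.
func applyAndWait(ctx context.Context, profile *performancev2.PerformanceProfile, resourcePool string) {
	profiles.UpdateWithRetry(profile)
	testlog.Infof("waiting until %s starts updating", resourcePool)
	profilesupdate.WaitForTuningUpdating(ctx, profile)
	testlog.Infof("waiting until %s finishes updating", resourcePool)
	profilesupdate.PostUpdateSync(ctx, profile)
}

With this split the test body only keeps the pool name for logging, and the wait logic itself is pool-agnostic.
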
@@ -20,24 +20,25 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/cpuset"
"sigs.k8s.io/controller-runtime/pkg/client"

machineconfigv1 "github.com/openshift/api/machineconfiguration/v1"
performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup"
testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/images"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/label"
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/mcps"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profilesupdate"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/systemd"
)

@@ -53,7 +54,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
workerRTNode *corev1.Node
workerRTNodes []corev1.Node
profile, initialProfile *performancev2.PerformanceProfile
performanceMCP string
resourcePool string
ovsSliceCgroup string
ctx context.Context = context.Background()
ovsSystemdServices []string
@@ -74,10 +75,17 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
Expect(err).ToNot(HaveOccurred())

performanceMCP, err = mcps.GetByProfile(profile)
Expect(err).ToNot(HaveOccurred())
if !hypershift.IsHypershiftCluster() {
resourcePool, err = mcps.GetByProfile(profile)
Expect(err).ToNot(HaveOccurred())
} else {
hostedClusterName, err := hypershift.GetHostedClusterName()
np, err := nodepools.GetByClusterName(ctx, testclient.ControlPlaneClient, hostedClusterName)
Expect(err).ToNot(HaveOccurred())
resourcePool = client.ObjectKeyFromObject(np).String()
}

isCgroupV2, err = cgroup.IsVersion2(ctx, testclient.Client)
isCgroupV2, err = cgroup.IsVersion2(ctx, testclient.DataPlaneClient)
Expect(err).ToNot(HaveOccurred())

ovsSystemdServices = ovsSystemdServicesOnOvsSlice(ctx, workerRTNode)
@@ -141,12 +149,12 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
initialProfile = profile.DeepCopy()
})
It("[test_id:64099] Activation file doesn't get deleted", func() {
performanceMCP, err := mcps.GetByProfile(profile)
Expect(err).ToNot(HaveOccurred())
//performanceMCP, err := mcps.GetByProfile(profile)
//Expect(err).ToNot(HaveOccurred())
policy := "best-effort"
// Need to make some changes to pp , causing system reboot
// and check if activation files is modified or deleted
profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
Expect(err).ToNot(HaveOccurred(), "Unable to fetch latest performance profile")
currentPolicy := profile.Spec.NUMA.TopologyPolicy
if *currentPolicy == "best-effort" {
@@ -157,10 +165,13 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
}
By("Updating the performance profile")
profiles.UpdateWithRetry(profile)
By("Applying changes in performance profile and waiting until mcp will start updating")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
By("Waiting for MCP being updated")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)

testlog.Infof("Applying changes in performance profile and waiting until %s will start updating", resourcePool)
profilesupdate.WaitForTuningUpdating(ctx, profile)

testlog.Infof("Waiting when %s finishes updates", resourcePool)
profilesupdate.PostUpdateSync(ctx, profile)

By("Checking Activation file")
cmd := []string{"ls", activation_file}
for _, node := range workerRTNodes {
Expand All @@ -176,22 +187,14 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
Expect(err).ToNot(HaveOccurred())
currentSpec, _ := json.Marshal(profile.Spec)
spec, _ := json.Marshal(initialProfile.Spec)
performanceMCP, err := mcps.GetByProfile(profile)
Expect(err).ToNot(HaveOccurred())
// revert only if the profile changes.
if !bytes.Equal(currentSpec, spec) {
Expect(testclient.Client.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
),
)).ToNot(HaveOccurred())
profiles.UpdateWithRetry(initialProfile)

By("Applying changes in performance profile and waiting until mcp will start updating")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
testlog.Infof("Applying changes in performance profile and waiting until %s will start updating", resourcePool)
profilesupdate.WaitForTuningUpdating(ctx, profile)

By("Waiting when mcp finishes updates")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
testlog.Infof("Waiting when %s finishes updates", resourcePool)
profilesupdate.PostUpdateSync(ctx, profile)
}
})
})
@@ -311,7 +314,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
},
}
testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
err = testclient.Client.Create(ctx, testpod)
err = testclient.DataPlaneClient.Create(ctx, testpod)
Expect(err).ToNot(HaveOccurred())
testpod, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute)
Expect(err).ToNot(HaveOccurred())
@@ -367,7 +370,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
}

testpod1.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
err = testclient.Client.Create(ctx, testpod1)
err = testclient.DataPlaneClient.Create(ctx, testpod1)
Expect(err).ToNot(HaveOccurred())
testpod1, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testpod1), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute)
Expect(err).ToNot(HaveOccurred())
@@ -387,7 +390,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
},
}
testpod2.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
err = testclient.Client.Create(ctx, testpod2)
err = testclient.DataPlaneClient.Create(ctx, testpod2)
Expect(err).ToNot(HaveOccurred())
testpod2, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testpod2), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute)
Expect(err).ToNot(HaveOccurred())
@@ -446,13 +449,13 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
testNode := make(map[string]string)
testNode["kubernetes.io/hostname"] = workerRTNode.Name
dp.Spec.Template.Spec.NodeSelector = testNode
err := testclient.Client.Create(ctx, dp)
err := testclient.DataPlaneClient.Create(ctx, dp)
Expect(err).ToNot(HaveOccurred(), "Unable to create Deployment")

defer func() {
// delete deployment
testlog.Infof("Deleting Deployment %v", dp.Name)
err := testclient.Client.Delete(ctx, dp)
err := testclient.DataPlaneClient.Delete(ctx, dp)
Expect(err).ToNot(HaveOccurred())
}()
ovnPod, err := ovnCnfNodePod(ctx, workerRTNode)
@@ -494,10 +497,12 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab

By("Updating the performance profile")
profiles.UpdateWithRetry(profile)
By("Applying changes in performance profile and waiting until mcp will start updating")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
By("Waiting for MCP being updated")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)

By("Applying changes in performance profile and waiting until mcp/nodepool will start updating")
profilesupdate.WaitForTuningUpdating(ctx, profile)

By("Waiting when mcp/nodepool finishes updates")
profilesupdate.PostUpdateSync(ctx, profile)

// After reboot we want the deployment to be ready before moving forward
desiredStatus := appsv1.DeploymentStatus{
@@ -558,7 +563,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
defer func() {
By("Delete Deployment")
testlog.Infof("Deleting Deployment %v", dp.Name)
err := testclient.Client.Delete(ctx, dp)
err := testclient.DataPlaneClient.Delete(ctx, dp)
Expect(err).ToNot(HaveOccurred())
}()

@@ -677,22 +682,15 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab
Expect(err).ToNot(HaveOccurred())
currentSpec, _ := json.Marshal(profile.Spec)
spec, _ := json.Marshal(initialProfile.Spec)
performanceMCP, err := mcps.GetByProfile(profile)
Expect(err).ToNot(HaveOccurred())
// revert only if the profile changes.
if !bytes.Equal(currentSpec, spec) {
Expect(testclient.Client.Patch(context.TODO(), profile,
client.RawPatch(
types.JSONPatchType,
[]byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)),
),
)).ToNot(HaveOccurred())
profiles.UpdateWithRetry(initialProfile)

By("Applying changes in performance profile and waiting until mcp will start updating")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue)
profilesupdate.WaitForTuningUpdating(ctx, profile)

By("Waiting when mcp finishes updates")
mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue)
profilesupdate.PostUpdateSync(ctx, profile)
}
})
})
@@ -736,12 +734,12 @@ func checkCpuCount(ctx context.Context, workerNode *corev1.Node) {
// deleteTestPod removes guaranteed pod
func deleteTestPod(ctx context.Context, testpod *corev1.Pod) {
// it possible that the pod already was deleted as part of the test, in this case we want to skip teardown
err := testclient.Client.Get(ctx, client.ObjectKeyFromObject(testpod), testpod)
err := testclient.DataPlaneClient.Get(ctx, client.ObjectKeyFromObject(testpod), testpod)
if errors.IsNotFound(err) {
return
}

err = testclient.Client.Delete(ctx, testpod)
err = testclient.DataPlaneClient.Delete(ctx, testpod)
Expect(err).ToNot(HaveOccurred())

err = pods.WaitForDeletion(ctx, testpod, pods.DefaultDeletionTimeout*time.Second)
@@ -755,7 +753,7 @@ func ovnCnfNodePod(ctx context.Context, workerNode *corev1.Node) (corev1.Pod, er
options := &client.ListOptions{
Namespace: "openshift-ovn-kubernetes",
}
err := testclient.Client.List(ctx, ovnpods, options)
err := testclient.DataPlaneClient.List(ctx, ovnpods, options)
if err != nil {
return ovnKubeNodePod, err
}
