hypershift: Sanity test cases
- Added tests for multiple performance profiles under different node pools
- Adjusted the node pool utility to handle multiple node pools
- Added the test suite run to the Makefile

Signed-off-by: Sargun Narula <[email protected]>
SargunNarula committed Oct 25, 2024
1 parent 6d2e1ed commit 3f1036c
Showing 4 changed files with 284 additions and 17 deletions.
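At a high level, the new suite clones the existing performance profile, attaches the clone to a second node pool, and then asserts the rendered kubelet configuration on that pool's nodes. The following is a minimal sketch of that flow, not the literal test code; the package name and the helper applySecondProfile are hypothetical, and cli, profile, and np stand in for the fixtures created in BeforeAll:

package sketch

import (
	"context"
	"fmt"

	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
	"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applySecondProfile clones an existing profile, creates the clone on the
// management (control-plane) cluster, and attaches it to one specific node pool.
func applySecondProfile(ctx context.Context, cli client.Client, profile *performancev2.PerformanceProfile, np *hypershiftv1beta1.NodePool) error {
	second := profile.DeepCopy()
	second.Name = "second-profile"
	second.ResourceVersion = "" // defensive: a clone cannot be created with a stale resourceVersion
	if err := cli.Create(ctx, second); err != nil {
		return fmt.Errorf("failed to create second profile: %w", err)
	}
	// AttachTuningObject gains an optional target node pool in this commit.
	return nodepools.AttachTuningObject(ctx, cli, second, np)
}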
2 changes: 1 addition & 1 deletion Makefile
@@ -250,7 +250,7 @@ pao-functests-mixedcpus: $(BINDATA)
pao-functests-hypershift: $(BINDATA)
@echo "Cluster Version"
hack/show-cluster-version.sh
-	hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/1_performance ./test/e2e/performanceprofile/functests/3_performance_status ./test/e2e/performanceprofile/functests/6_mustgather_testing" -p "-vv --label-filter="!openshift" -r --fail-fast --flake-attempts=2 --timeout=2h --junit-report=report.xml" -m "Running Functional Tests over Hypershift"
+	hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/1_performance ./test/e2e/performanceprofile/functests/3_performance_status ./test/e2e/performanceprofile/functests/6_mustgather_testing ./test/e2e/performanceprofile/functests/12_hypershift" -p "-vv --label-filter="!openshift" -r --fail-fast --flake-attempts=2 --timeout=2h --junit-report=report.xml" -m "Running Functional Tests over Hypershift"

.PHONY: cluster-clean-pao
cluster-clean-pao:
51 changes: 51 additions & 0 deletions (new suite file under test/e2e/performanceprofile/functests/12_hypershift)
@@ -0,0 +1,51 @@
//go:build !unittests
// +build !unittests

package __hypershift_test

import (
"context"
"log"
"os"
"testing"
"time"

"github.com/go-logr/stdr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"k8s.io/apimachinery/pkg/api/errors"

ctrllog "sigs.k8s.io/controller-runtime/pkg/log"

testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/namespaces"
nodeinspector "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/node_inspector"
)

var _ = BeforeSuite(func() {
// create test namespace
err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace)
if errors.IsAlreadyExists(err) {
testlog.Warning("test namespace already exists, which is unexpected")
return
}
Expect(err).ToNot(HaveOccurred())
})

var _ = AfterSuite(func() {
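// clean up after the suite: delete the test namespace, wait for it to disappear, then remove the node inspector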
err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace)
Expect(err).ToNot(HaveOccurred())
Expect(namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute)).To(Succeed())
Expect(nodeinspector.Delete(context.TODO())).To(Succeed())
})

func TestHypershift(t *testing.T) {
ctrllog.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))

RegisterFailHandler(Fail)

RunSpecs(t, "PAO Hypershift tests")
}
200 changes: 200 additions & 0 deletions test/e2e/performanceprofile/functests/12_hypershift/hypershift.go
@@ -0,0 +1,200 @@
package __hypershift

import (
"context"
"fmt"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"

performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
manifestsutil "github.com/openshift/cluster-node-tuning-operator/pkg/util"
testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils"
testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/discovery"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/hypershift"
testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/log"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodepools"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes"
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles"
hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

var _ = Describe("Multiple performance profiles in hypershift", Ordered, func() {
var profile, secondProfile *performancev2.PerformanceProfile
var nodePools []*hypershiftv1beta1.NodePool
var np *hypershiftv1beta1.NodePool
var workerRTNodes []corev1.Node
var err error

nodeLabel := testutils.NodeSelectorLabels
chkKubeletConfig := []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"}
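// chkKubeletConfigFn runs the command above on a node and returns the rendered kubelet configuration as a string.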
chkKubeletConfigFn := func(ctx context.Context, node *corev1.Node) (string, error) {
out, err := nodes.ExecCommand(ctx, node, chkKubeletConfig)
if err != nil {
return "", err
}
output := testutils.ToString(out)
return output, nil
}

BeforeAll(func() {
By("Checking if discovery mode is enabled and performance profile is not found")
if discovery.Enabled() && testutils.ProfileNotFound {
Skip("Discovery mode enabled, performance profile not found")
}
workerRTNodes, err = nodes.GetByLabels(nodeLabel)
Expect(err).ToNot(HaveOccurred())
profile, err = profiles.GetByNodeLabels(nodeLabel)
Expect(err).ToNot(HaveOccurred())
hostedClusterName, err := hypershift.GetHostedClusterName()
Expect(err).ToNot(HaveOccurred())
nodePools, err = ListNodePools(context.TODO(), testclient.ControlPlaneClient, hostedClusterName)
Expect(err).ToNot(HaveOccurred())
Expect(nodePools).ToNot(BeEmpty(), "no node pools found")
Expect(len(nodePools)).To(BeNumerically(">=", 2), "the multiple node pool tests require at least two node pools")
PrintNodePoolProfiles(nodePools)
})

Context("Multiple NodePools", Ordered, func() {
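// The CPU sets and topology policy below are applied to the second profile in the update test.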
isolated := performancev2.CPUSet("1-2")
reserved := performancev2.CPUSet("0,3")
policy := "best-effort"

It("should verify support for different performance profiles on hosted cluster via multiple node pools", func() {
By("Creating a deep copy of the performance profile for the second node pool")
secondProfile = profile.DeepCopy()
secondProfile.Name = "second-profile"
np = nodePools[1]
By("Creating the second performance profile in the control plane")
Expect(testclient.ControlPlaneClient.Create(context.TODO(), secondProfile)).To(Succeed(), "Failed to create the performance profile")

By("Attaching the tuning object to the second node pool")
Expect(nodepools.AttachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, nodePools[1])).To(Succeed())

By("Waiting for the second node pool configuration to start updating")
err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Waiting for the second node pool configuration to be ready")
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Checking if reserved CPUs are correctly set on the worker nodes")
for _, node := range workerRTNodes {
By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

result, err := chkKubeletConfigFn(context.TODO(), &node)
Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

Expect(kc.ReservedSystemCPUs).To(Equal("0"), "ReservedSystemCPUs is not correctly set")
Expect(kc.TopologyManagerPolicy).To(Equal("single-numa-node"), "TopologyManagerPolicy is not correctly set")
}
})

It("should verify that a performance profile update re-creates only the target node pool's nodes", func() {
By("Printing node pool profiles before updating the performance profile")
PrintNodePoolProfiles(nodePools)

By("Modifying the second profile CPU and NUMA configurations")
secondProfile.Spec.CPU = &performancev2.CPU{
BalanceIsolated: pointer.Bool(false),
Reserved: &reserved,
Isolated: &isolated,
}
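// Switching the topology policy from single-numa-node to best-effort makes the update observable in the rendered kubelet config.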
secondProfile.Spec.NUMA = &performancev2.NUMA{
TopologyPolicy: &policy,
}

By("Updating the second node pool performance profile")
profiles.UpdateWithRetry(secondProfile)

By("Waiting for the second node pool configuration to start updating")
err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Waiting for the second node pool configuration to be ready")
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

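// HyperShift labels hosted-cluster nodes with "hypershift.openshift.io/nodePool", so this selects only the nodes of the updated pool.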
nodePoolLabel := map[string]string{
"hypershift.openshift.io/nodePool": nodePools[1].Name,
}
workerRTNodes, err = nodes.GetByLabels(nodePoolLabel)
Expect(err).ToNot(HaveOccurred())

By("Verifying the kubelet parameters were correctly updated for the worker nodes")
for _, node := range workerRTNodes {
By(fmt.Sprintf("Checking kubelet configuration for node %s", node.Name))

result, err := chkKubeletConfigFn(context.TODO(), &node)
Expect(err).ToNot(HaveOccurred(), "Failed to fetch kubelet configuration")

obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme)
Expect(err).ToNot(HaveOccurred(), "Failed to deserialize kubelet configuration")

kc, ok := obj.(*kubeletconfigv1beta1.KubeletConfiguration)
Expect(ok).To(BeTrue(), "Deserialized object is not of type KubeletConfiguration")

Expect(kc.ReservedSystemCPUs).To(Equal("0,3"), "ReservedSystemCPUs is not correctly set")
Expect(kc.TopologyManagerPolicy).To(Equal("best-effort"), "TopologyManagerPolicy is not correctly set")
}
})

AfterAll(func() {
By("Deleting the second profile and detaching it from the second node pool")
Expect(nodepools.DeattachTuningObject(context.TODO(), testclient.ControlPlaneClient, secondProfile, nodePools[1])).To(Succeed())
Expect(testclient.ControlPlaneClient.Delete(context.TODO(), secondProfile)).To(Succeed(), "Failed to delete the second performance profile")

By("Waiting for the second node pool configuration to start updating after profile deletion")
err = nodepools.WaitForUpdatingConfig(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

By("Waiting for the second node pool configuration to be ready after profile deletion")
err = nodepools.WaitForConfigToBeReady(context.TODO(), testclient.ControlPlaneClient, np.Name, np.Namespace)
Expect(err).ToNot(HaveOccurred())

PrintNodePoolProfiles(nodePools)
})
})
})

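// ListNodePools returns all node pools that belong to the given hosted cluster.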
func ListNodePools(ctx context.Context, c client.Client, hostedClusterName string) ([]*hypershiftv1beta1.NodePool, error) {
npList := &hypershiftv1beta1.NodePoolList{}
if err := c.List(ctx, npList); err != nil {
return nil, err
}

var nodePools []*hypershiftv1beta1.NodePool
for i := range npList.Items {
np := &npList.Items[i]
if np.Spec.ClusterName == hostedClusterName {
nodePools = append(nodePools, np)
}
}

if len(nodePools) == 0 {
return nil, fmt.Errorf("no nodePools found for hosted cluster %q", hostedClusterName)
}

return nodePools, nil
}

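// PrintNodePoolProfiles logs the tuning config referenced by each node pool.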
func PrintNodePoolProfiles(nodePools []*hypershiftv1beta1.NodePool) {
for _, np := range nodePools {
for _, tuningConfig := range np.Spec.TuningConfig {
testlog.Infof("NodePool %q is using profile: %q", np.Name, tuningConfig.Name)
}
if len(np.Spec.TuningConfig) == 0 {
testlog.Infof("NodePool %q does not have a tuningConfig profile", np.Name)
}
}
}
48 changes: 32 additions & 16 deletions test/e2e/performanceprofile/functests/utils/nodepools/nodepools.go
@@ -67,14 +67,16 @@ func GetByClusterName(ctx context.Context, c client.Client, hostedClusterName string

// AttachTuningObject attaches a tuning object to the node pool associated with the hosted cluster
// The function is idempotent
-func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {
-	hostedClusterName, err := hypershift.GetHostedClusterName()
-	if err != nil {
-		return err
-	}
-	np, err := GetByClusterName(ctx, cli, hostedClusterName)
-	if err != nil {
-		return err
+func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
+	var np *hypershiftv1beta1.NodePool
+	var err error
+	if len(nodePools) > 0 && nodePools[0] != nil {
+		np = nodePools[0]
+	} else {
+		np, err = GetNodePool(ctx, cli)
+		if err != nil {
+			return err
+		}
	}

updatedTuningConfig := []corev1.LocalObjectReference{{Name: object.GetName()}}
@@ -91,14 +93,16 @@ func AttachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
return nil
}

-func DeattachTuningObject(ctx context.Context, cli client.Client, object client.Object) error {
-	hostedClusterName, err := hypershift.GetHostedClusterName()
-	if err != nil {
-		return err
-	}
-	np, err := GetByClusterName(ctx, cli, hostedClusterName)
-	if err != nil {
-		return err
+func DeattachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
+	var np *hypershiftv1beta1.NodePool
+	var err error
+	if len(nodePools) > 0 && nodePools[0] != nil {
+		np = nodePools[0]
+	} else {
+		np, err = GetNodePool(ctx, cli)
+		if err != nil {
+			return err
+		}
	}
for i := range np.Spec.TuningConfig {
if np.Spec.TuningConfig[i].Name == object.GetName() {
@@ -111,3 +115,15 @@ func DeattachTuningObject(ctx context.Context, cli client.Client, object client.Object, nodePools ...*hypershiftv1beta1.NodePool) error {
}
return nil
}

+func GetNodePool(ctx context.Context, cli client.Client) (*hypershiftv1beta1.NodePool, error) {
+	hostedClusterName, err := hypershift.GetHostedClusterName()
+	if err != nil {
+		return nil, err
+	}
+	np, err := GetByClusterName(ctx, cli, hostedClusterName)
+	if err != nil {
+		return nil, err
+	}
+	return np, nil
+}
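For reference, here are the two calling conventions the variadic signature now supports, as a sketch (imports as in the files above; attachBoth is a hypothetical helper, and cli, profile, secondProfile, and nodePools are assumed to be supplied by the caller):

// attachBoth shows the two call shapes side by side.
func attachBoth(ctx context.Context, cli client.Client, profile, secondProfile *performancev2.PerformanceProfile, nodePools []*hypershiftv1beta1.NodePool) error {
	// No explicit pool: the helper falls back to the hosted cluster's node pool
	// via GetNodePool, preserving the pre-commit behavior.
	if err := nodepools.AttachTuningObject(ctx, cli, profile); err != nil {
		return err
	}
	// Explicit pool: only nodePools[1] is reconfigured, which is what lets the
	// new test target the second pool without touching the first.
	return nodepools.AttachTuningObject(ctx, cli, secondProfile, nodePools[1])
}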
