diff --git a/tests/cnf/ran/containernshide/tests/containernshide.go b/tests/cnf/ran/containernshide/tests/containernshide.go index d4a50154f..747cd9d7d 100644 --- a/tests/cnf/ran/containernshide/tests/containernshide.go +++ b/tests/cnf/ran/containernshide/tests/containernshide.go @@ -15,19 +15,19 @@ import ( ) var _ = Describe("Container Namespace Hiding", Label(tsparams.LabelContainerNSHideTestCases), func() { - It("should not have kubelet and crio using the same inode as systemd", reportxml.ID("53681"), func() { - By("Getting systemd inodes on cluster nodes") + It("verifies kubelet and crio are not using the same inode as systemd", reportxml.ID("53681"), func() { + By("getting systemd inodes on cluster nodes") systemdInodes, err := cluster.ExecCmdWithStdoutWithRetries(Spoke1APIClient, ranparam.RetryCount, ranparam.RetryInterval, "readlink /proc/1/ns/mnt") Expect(err).ToNot(HaveOccurred(), "Failed to check systemd inodes") - By("Getting kubelet inodes on cluster nodes") + By("getting kubelet inodes on cluster nodes") kubeletInodes, err := cluster.ExecCmdWithStdoutWithRetries( Spoke1APIClient, ranparam.RetryCount, ranparam.RetryInterval, "readlink /proc/$(pidof kubelet)/ns/mnt") Expect(err).ToNot(HaveOccurred(), "Failed to check kubelet inodes") - By("Getting crio inodes on cluster nodes") + By("getting crio inodes on cluster nodes") crioInodes, err := cluster.ExecCmdWithStdoutWithRetries(Spoke1APIClient, ranparam.RetryCount, ranparam.RetryInterval, "readlink /proc/$(pidof crio)/ns/mnt") Expect(err).ToNot(HaveOccurred(), "Failed to check crio inodes") @@ -38,7 +38,7 @@ var _ = Describe("Container Namespace Hiding", Label(tsparams.LabelContainerNSHi "Collected systemd inodes from different number of nodes than crio inodes") for host, systemdInode := range systemdInodes { - By(fmt.Sprintf("Checking inodes on host %s", host)) + By(fmt.Sprintf("checking inodes on host %s", host)) kubeletInode, ok := kubeletInodes[host] Expect(ok).To(BeTrue(), "Found systemd inode but not kubelet inode on node %s", host) diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-acm-crs.go b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-acm-crs.go index 2e8f53d11..f13fb31a3 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-acm-crs.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-acm-crs.go @@ -18,10 +18,7 @@ import ( ) var _ = Describe("ZTP Argo CD ACM CR Tests", Label(tsparams.LabelArgoCdAcmCrsTestCases), func() { - var ( - acmPolicyGeneratorImage string - // oldAcmPolicyGeneratorContainer corev1.Container - ) + var acmPolicyGeneratorImage string BeforeEach(func() { By("verifying that ZTP meets the minimum version") @@ -53,7 +50,7 @@ var _ = Describe("ZTP Argo CD ACM CR Tests", Label(tsparams.LabelArgoCdAcmCrsTes // 54236 - Evaluating use of ACM's version of PolicyGenTemplates with our ZTP flow. This enables user created // content that does not depend on our ZTP container but works "seamlessly" with it. 
- It("should use ACM CRs to template a policy, deploy it, and validate it succeeded", reportxml.ID("54236"), func() { + It("uses ACM CRs to template a policy, deploy it, and validate it succeeded", reportxml.ID("54236"), func() { exists, err := gitdetails.UpdateArgoCdAppGitPath(tsparams.ArgoCdPoliciesAppName, tsparams.ZtpTestPathAcmCrs, true) if !exists { Skip(err.Error()) diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-clusters-app.go b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-clusters-app.go index 3500ae9ea..e0f17d104 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-clusters-app.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-clusters-app.go @@ -23,7 +23,6 @@ var _ = Describe("ZTP Argo CD Clusters Tests", Label(tsparams.LabelArgoCdCluster if !versionInRange { Skip("ZTP Argo CD clusters app tests require ZTP 4.11 or later") } - }) AfterEach(func() { @@ -34,7 +33,7 @@ var _ = Describe("ZTP Argo CD Clusters Tests", Label(tsparams.LabelArgoCdCluster }) // 54238 - User modification of klustletaddonconfig via gitops - It("should override the KlusterletAddonConfiguration and verify the change", reportxml.ID("54238"), func() { + It("overrides the KlusterletAddonConfiguration and verifies the change", reportxml.ID("54238"), func() { exists, err := gitdetails.UpdateArgoCdAppGitPath( tsparams.ArgoCdClustersAppName, tsparams.ZtpTestPathClustersApp, true) if !exists { @@ -52,7 +51,7 @@ var _ = Describe("ZTP Argo CD Clusters Tests", Label(tsparams.LabelArgoCdCluster }) // 60619 - Image creation fails when NMstateConfig CR is empty - It("should not have NMStateConfig CR when nodeNetwork section not in siteConfig", reportxml.ID("60619"), func() { + It("does not have NMStateConfig CR when nodeNetwork section not in siteconfig", reportxml.ID("60619"), func() { // Update the git path manually so we can potentially skip the test before checking if the NM State // Config exists. 
gitDetails := tsparams.ArgoCdAppDetails[tsparams.ArgoCdClustersAppName] @@ -81,7 +80,7 @@ var _ = Describe("ZTP Argo CD Clusters Tests", Label(tsparams.LabelArgoCdCluster true) Expect(err).ToNot(HaveOccurred(), "Failed to update the Argo CD app with new git details") - By("validate the NM state config is gone on hub") + By("validating the NM state config is gone on hub") nmStateConfigList, err = assisted.ListNmStateConfigsInAllNamespaces(HubAPIClient) Expect(err).ToNot(HaveOccurred(), "Failed to list NM state configs") Expect(nmStateConfigList).To(BeEmpty(), "Found NM state config when it should be gone") diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-hub-templating.go b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-hub-templating.go index 00a34d6f4..d23ff5940 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-hub-templating.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-hub-templating.go @@ -63,7 +63,7 @@ var _ = Describe("ZTP Argo CD Hub Templating Tests", Label(tsparams.LabelArgoCdH }) // 54240 - Hub-side ACM templating with TALM - It("should report an error for using autoindent function where not allowed", reportxml.ID("54240"), func() { + It("reports an error for using autoindent function where not allowed", reportxml.ID("54240"), func() { setupHubTemplateTest(tsparams.ZtpTestPathTemplatingAutoIndent) By("validating TALM reported a policy error") @@ -81,7 +81,7 @@ var _ = Describe("ZTP Argo CD Hub Templating Tests", Label(tsparams.LabelArgoCdH When("supported ACM hub side templating is used", func() { // 54240 - Hub-side ACM templating with TALM - It("should create the policy successfully with a valid template", reportxml.ID("54240"), func() { + It("creates the policy successfully with a valid template", reportxml.ID("54240"), func() { By("checking the ZTP version") versionInRange, err := version.IsVersionStringInRange(RANConfig.ZTPVersion, "4.16", "") Expect(err).ToNot(HaveOccurred(), "Failed to check if ZTP version is in range") @@ -167,6 +167,8 @@ func assertTalmPodLog(client *clients.Settings, expectedSubstring string) { } return podLog - }, tsparams.ArgoCdChangeTimeout, tsparams.ArgoCdChangeInterval). + }). + WithTimeout(tsparams.ArgoCdChangeTimeout). + WithPolling(tsparams.ArgoCdChangeInterval). 
Should(ContainSubstring(expectedSubstring), "Failed to assert TALM pod log contains %s", expectedSubstring) } diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-node-delete.go b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-node-delete.go index 977ee4476..842afea7f 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-node-delete.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-node-delete.go @@ -75,7 +75,7 @@ var _ = Describe("ZTP Argo CD Node Deletion Tests", Label(tsparams.LabelArgoCdNo }) // 72463 - Delete and re-add a worker node from cluster - It("should delete a worker node from the cluster", reportxml.ID("72463"), func() { + It("deletes a worker node from the cluster", reportxml.ID("72463"), func() { By("updating the Argo CD git path to apply crAnnotation") exists, err := gitdetails.UpdateArgoCdAppGitPath( tsparams.ArgoCdClustersAppName, tsparams.ZtpTestPathNodeDeleteAddAnnotation, true) diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-policies-app.go b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-policies-app.go index 973983186..ceb8e6f9a 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-argocd-policies-app.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-argocd-policies-app.go @@ -45,7 +45,7 @@ var _ = Describe("ZTP Argo CD Policies Tests", Label(tsparams.LabelArgoCdPolicie When("overriding the PGT policy's compliance and non-compliance intervals", func() { // 54241 - User override of policy intervals - It("should specify new intervals and verify they were applied", reportxml.ID("54241"), func() { + It("specifies new intervals and verifies they were applied", reportxml.ID("54241"), func() { By("updating Argo CD policies app") exists, err := gitdetails.UpdateArgoCdAppGitPath( tsparams.ArgoCdPoliciesAppName, tsparams.ZtpTestPathCustomInterval, true) @@ -86,7 +86,7 @@ var _ = Describe("ZTP Argo CD Policies Tests", Label(tsparams.LabelArgoCdPolicie }) // 54242 - Invalid time duration string for user override of policy intervals - It("should specify an invalid interval format and verify the app error", reportxml.ID("54242"), func() { + It("specifies an invalid interval format and verifies the app error", reportxml.ID("54242"), func() { By("updating Argo CD policies app") exists, err := gitdetails.UpdateArgoCdAppGitPath( tsparams.ArgoCdPoliciesAppName, tsparams.ZtpTestPathInvalidInterval, false) @@ -279,7 +279,7 @@ var _ = Describe("ZTP Argo CD Policies Tests", Label(tsparams.LabelArgoCdPolicie }) // 63516 - Reference non-existence source CR yaml file - It("verifies a proper error is returned in ArgoCD app when a non-existent "+ + It("verifies a proper error is returned in Argo CD app when a non-existent "+ "source-cr is used in PGT", reportxml.ID("63516"), func() { By("checking the ZTP version") versionInRange, err := version.IsVersionStringInRange(RANConfig.ZTPVersion, "4.14", "") diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-bios-day-zero.go b/tests/cnf/ran/gitopsztp/tests/ztp-bios-day-zero.go index 0ba496579..8f0688827 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-bios-day-zero.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-bios-day-zero.go @@ -18,13 +18,8 @@ import ( ) var _ = Describe("ZTP BIOS Configuration Tests", Label(tsparams.LabelBiosDayZeroTests), func() { - var ( - spokeClusterName string - nodeNames []string - ) - // 75196 - Check if spoke has required BIOS setting values applied - It("Verifies SNO spoke has required BIOS setting values applied", reportxml.ID("75196"), func() { + It("verifies SNO spoke has required BIOS setting values applied", reportxml.ID("75196"), func() { 
versionInRange, err := version.IsVersionStringInRange(RANConfig.ZTPVersion, "4.17", "") Expect(err).ToNot(HaveOccurred(), "Failed to check if ZTP version is in range") @@ -32,30 +27,20 @@ var _ = Describe("ZTP BIOS Configuration Tests", Label(tsparams.LabelBiosDayZero Skip("ZTP BIOS configuration tests require ZTP version of least 4.17") } - spokeClusterName, err = GetSpokeClusterName(HubAPIClient, Spoke1APIClient) + spokeClusterName, err := GetSpokeClusterName(HubAPIClient, Spoke1APIClient) Expect(err).ToNot(HaveOccurred(), "Failed to get SNO cluster name") - glog.V(tsparams.LogLevel).Infof("cluster name: %s", spokeClusterName) + glog.V(tsparams.LogLevel).Infof("Cluster name: %s", spokeClusterName) - nodeNames, err = GetNodeNames(Spoke1APIClient) + nodeNames, err := GetNodeNames(Spoke1APIClient) Expect(err).ToNot(HaveOccurred(), "Failed to get node names") glog.V(tsparams.LogLevel).Infof("Node names: %v", nodeNames) By("getting HFS for spoke") hfs, err := bmh.PullHFS(HubAPIClient, nodeNames[0], spokeClusterName) - Expect(err).ToNot( - HaveOccurred(), - "Failed to get HFS for spoke %s in cluster %s", - nodeNames[0], - spokeClusterName, - ) + Expect(err).ToNot(HaveOccurred(), "Failed to get HFS for spoke %s in cluster %s", nodeNames[0], spokeClusterName) hfsObject, err := hfs.Get() - Expect(err).ToNot( - HaveOccurred(), - "Failed to get HFS Obj for spoke %s in cluster %s", - nodeNames[0], - spokeClusterName, - ) + Expect(err).ToNot(HaveOccurred(), "Failed to get HFS Obj for spoke %s in cluster %s", nodeNames[0], spokeClusterName) By("comparing requsted BIOS settings to actual BIOS settings") hfsRequestedSettings := hfsObject.Spec.Settings @@ -65,10 +50,7 @@ var _ = Describe("ZTP BIOS Configuration Tests", Label(tsparams.LabelBiosDayZero Skip("hfs.spec.settings map is empty") } - Expect(hfsCurrentSettings).ToNot( - BeEmpty(), - "hfs.spec.settings map is not empty, but hfs.status.settings map is empty", - ) + Expect(hfsCurrentSettings).ToNot(BeEmpty(), "hfs.spec.settings map is not empty but hfs.status.settings map is empty") allSettingsMatch := true for param, value := range hfsRequestedSettings { @@ -84,16 +66,13 @@ var _ = Describe("ZTP BIOS Configuration Tests", Label(tsparams.LabelBiosDayZero glog.V(tsparams.LogLevel).Infof("Requested setting matches current: %s=%s", param, setting) } else { glog.V(tsparams.LogLevel).Infof( - "Requested setting %s value %s does not match current value %s", - param, - requestedSetting, - setting) + "Requested setting %s value %s does not match current value %s", param, requestedSetting, setting) allSettingsMatch = false } } - Expect(allSettingsMatch).To(BeTrueBecause("One or more requested settings does not match current settings")) + Expect(allSettingsMatch).To(BeTrue(), "One or more requested settings does not match current settings") }) }) @@ -124,10 +103,7 @@ func GetSpokeClusterName(hubAPIClient, spokeAPIClient *clients.Settings) (string // GetNodeNames gets node names in cluster. 
func GetNodeNames(spokeAPIClient *clients.Settings) ([]string, error) { - nodeList, err := nodes.List( - spokeAPIClient, - ) - + nodeList, err := nodes.List(spokeAPIClient) if err != nil { return nil, err } diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-cluster-instance-delete.go b/tests/cnf/ran/gitopsztp/tests/ztp-cluster-instance-delete.go index a1dbd3dd1..eef694cbc 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-cluster-instance-delete.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-cluster-instance-delete.go @@ -36,7 +36,6 @@ var _ = Describe("ZTP Siteconfig Operator's Cluster Instance Delete Tests", if !versionInRange { Skip("ZTP Siteconfig operator tests require ZTP 4.17 or later") } - }) AfterEach(func() { @@ -75,7 +74,7 @@ var _ = Describe("ZTP Siteconfig Operator's Cluster Instance Delete Tests", }) // 75374 - Detaching the AI multi-node openshift (MNO) spoke cluster. - It("Validate detaching the AI multi-node openshift spoke cluster", reportxml.ID("75374"), func() { + It("validates detaching the AI multi-node openshift spoke cluster", reportxml.ID("75374"), func() { By("checking spoke cluster type") spokeClusterType, err := rancluster.CheckSpokeClusterType(RANConfig.Spoke1APIClient) Expect(err).ToNot(HaveOccurred(), "Failed to fetch spoke cluster type") @@ -169,7 +168,7 @@ var _ = Describe("ZTP Siteconfig Operator's Cluster Instance Delete Tests", }) // 75376 - Detaching the AI single-node openshift (SNO) spoke cluster. - It("Validate detaching the AI single-node openshift spoke cluster", reportxml.ID("75376"), func() { + It("validates detaching the AI single-node openshift spoke cluster", reportxml.ID("75376"), func() { By("checking spoke cluster type") spokeClusterType, err := rancluster.CheckSpokeClusterType(RANConfig.Spoke1APIClient) Expect(err).ToNot(HaveOccurred(), "Failed to fetch spoke cluster type") diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-generator.go b/tests/cnf/ran/gitopsztp/tests/ztp-generator.go index fe881e9c9..207da4609 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-generator.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-generator.go @@ -43,109 +43,109 @@ var _ = Describe("ZTP Generator Tests", Label(tsparams.LabelGeneratorTestCases, }) // 54355 - Generation of CRs for a single site from ztp container - It("generates and installs time crs, manifests, and policies, and verifies they are present", - reportxml.ID("54355"), func() { - By("validating the image version for the site generator") - var ztpImageTag string - - // Since brew is a lot faster than skopeo, we want to use it if its available - brew, err := ranhelper.ExecLocalCommand(time.Minute, "which", "brew") - - if err != nil || brew == "" { - By("using skopeo to find the image tag") - cmd := fmt.Sprintf( - "skopeo list-tags docker://%s | grep %s", RANConfig.ZtpSiteGenerateImage, RANConfig.ZTPVersion) + - " | sort -V | tail -1 | tr -d '\"' | tr -d ','" - - output, err := ranhelper.ExecLocalCommand(time.Minute, "bash", "-c", cmd) - Expect(err).ToNot(HaveOccurred(), "Failed to get output from skopeo") - - ztpImageTag = strings.TrimSpace(output) - } else { - By("using brew to find the image tag") - cmd := "brew list-builds --package=ztp-site-generate-container --state=COMPLETE --quiet" + - fmt.Sprintf(" | grep %s", RANConfig.ZTPVersion) + - " | sort -V | tail -1 | awk '{ print $1 }' | sed -nr 's/.*-(v.*)$/\\1/p'" - - output, err := ranhelper.ExecLocalCommand(time.Minute, "bash", "-c", cmd) - Expect(err).ToNot(HaveOccurred(), "Failed to get output from brew") - - ztpImageTag = strings.TrimSpace(output) + 
It("generates and installs time crs, manifests, and policies, and "+ + "verifies they are present", reportxml.ID("54355"), func() { + By("validating the image version for the site generator") + var ztpImageTag string + + // Since brew is a lot faster than skopeo, we want to use it if its available + brew, err := ranhelper.ExecLocalCommand(time.Minute, "which", "brew") + + if err != nil || brew == "" { + By("using skopeo to find the image tag") + cmd := fmt.Sprintf( + "skopeo list-tags docker://%s | grep %s", RANConfig.ZtpSiteGenerateImage, RANConfig.ZTPVersion) + + " | sort -V | tail -1 | tr -d '\"' | tr -d ','" + + output, err := ranhelper.ExecLocalCommand(time.Minute, "bash", "-c", cmd) + Expect(err).ToNot(HaveOccurred(), "Failed to get output from skopeo") + + ztpImageTag = strings.TrimSpace(output) + } else { + By("using brew to find the image tag") + cmd := "brew list-builds --package=ztp-site-generate-container --state=COMPLETE --quiet" + + fmt.Sprintf(" | grep %s", RANConfig.ZTPVersion) + + " | sort -V | tail -1 | awk '{ print $1 }' | sed -nr 's/.*-(v.*)$/\\1/p'" + + output, err := ranhelper.ExecLocalCommand(time.Minute, "bash", "-c", cmd) + Expect(err).ToNot(HaveOccurred(), "Failed to get output from brew") + + ztpImageTag = strings.TrimSpace(output) + } + + glog.V(tsparams.LogLevel).Infof("Detected ZTP image tag '%s'", ztpImageTag) + + By("generating the install time CRs and manifests") + _, err = ranhelper.ExecLocalCommand( + time.Minute, + "podman", + "run", + "--rm", + "-v", + fmt.Sprintf("%s/siteconfig/:/resources:Z", siteConfigPath), + fmt.Sprintf("%s:%s", RANConfig.ZtpSiteGenerateImage, ztpImageTag), + "generator", + "install", + "-E", + "/resources/") + Expect(err).ToNot(HaveOccurred(), "Failed to generate the install time CRs and manifests") + + By("validating CRs and manifests were created") + installCRsDir := fmt.Sprintf("%s/siteconfig/out/generated_installCRs/", siteConfigPath) + siteDirs, err := os.ReadDir(installCRsDir) + Expect(err).ToNot(HaveOccurred(), "Failed to read installed CRs directory: %s", installCRsDir) + + for _, dir := range siteDirs { + siteDirPath := installCRsDir + dir.Name() + files, err := os.ReadDir(siteDirPath) + + Expect(err).ToNot(HaveOccurred(), "Failed to read files in site directory %s", siteDirPath) + Expect(len(files)).To( + BeNumerically(">", 9), "Failed to generate at least 9 files in site directory %s", siteDirPath) + } + + By("generating the policies") + _, err = ranhelper.ExecLocalCommand( + time.Minute, + "podman", + "run", + "--rm", + "-v", + fmt.Sprintf("%s/policygentemplates/:/resources:Z", siteConfigPath), + fmt.Sprintf("%s:%s", RANConfig.ZtpSiteGenerateImage, ztpImageTag), + "generator", + "config", + ".") + Expect(err).ToNot(HaveOccurred(), "Failed to generate policies") + + By("validating the policies were created") + expectedKind := []string{"Policy", "PlacementRule", "PlacementBinding"} + + // Expect to have at least 3 subdirs - common, group DU, site + policyCRsDir := fmt.Sprintf("%s/policygentemplates/out/generated_configCRs/", siteConfigPath) + configDirs, err := os.ReadDir(policyCRsDir) + Expect(err).ToNot(HaveOccurred(), "Failed to list generated CRs directory") + Expect(len(configDirs)).To(BeNumerically(">=", 3), "Not enough entries in generated CRs directory") + + for _, dir := range configDirs { + dirPath := policyCRsDir + dir.Name() + files, err := os.ReadDir(dirPath) + Expect(err).ToNot(HaveOccurred(), "Failed to list files in %s", dirPath) + Expect(len(files)).To(BeNumerically(">=", 3), "Not enough files in 
directory %s", dirPath) + + for _, file := range files { + filePath := dirPath + "/" + file.Name() + fileBytes, err := os.ReadFile(filePath) + Expect(err).ToNot(HaveOccurred(), "Failed to read file %s", filePath) + + fileContent := make(map[string]interface{}) + err = yaml.Unmarshal(fileBytes, &fileContent) + Expect(err).ToNot(HaveOccurred(), "Failed to unmarshal file %s as yaml", filePath) + + kind, ok := fileContent["kind"].(string) + Expect(ok).To(BeTrue(), "Failed to cast file %s kind to string", filePath) + Expect(kind).To(BeElementOf(expectedKind), "File %s is not one of the expected kinds", filePath) } - - glog.V(tsparams.LogLevel).Infof("Detected ZTP image tag '%s'", ztpImageTag) - - By("generating the install time CRs and manifests") - _, err = ranhelper.ExecLocalCommand( - time.Minute, - "podman", - "run", - "--rm", - "-v", - fmt.Sprintf("%s/siteconfig/:/resources:Z", siteConfigPath), - fmt.Sprintf("%s:%s", RANConfig.ZtpSiteGenerateImage, ztpImageTag), - "generator", - "install", - "-E", - "/resources/") - Expect(err).ToNot(HaveOccurred(), "Failed to generate the install time CRs and manifests") - - By("validating CRs and manifests were created") - installCRsDir := fmt.Sprintf("%s/siteconfig/out/generated_installCRs/", siteConfigPath) - siteDirs, err := os.ReadDir(installCRsDir) - Expect(err).ToNot(HaveOccurred(), "Failed to read installed CRs directory: %s", installCRsDir) - - for _, dir := range siteDirs { - siteDirPath := installCRsDir + dir.Name() - files, err := os.ReadDir(siteDirPath) - - Expect(err).ToNot(HaveOccurred(), "Failed to read files in site directory %s", siteDirPath) - Expect(len(files)).To( - BeNumerically(">", 9), "Failed to generate at least 9 files in site directory %s", siteDirPath) - } - - By("generating the policies") - _, err = ranhelper.ExecLocalCommand( - time.Minute, - "podman", - "run", - "--rm", - "-v", - fmt.Sprintf("%s/policygentemplates/:/resources:Z", siteConfigPath), - fmt.Sprintf("%s:%s", RANConfig.ZtpSiteGenerateImage, ztpImageTag), - "generator", - "config", - ".") - Expect(err).ToNot(HaveOccurred(), "Failed to generate policies") - - By("validating the policies were created") - expectedKind := []string{"Policy", "PlacementRule", "PlacementBinding"} - - // Expect to have at least 3 subdirs - common, group DU, site - policyCRsDir := fmt.Sprintf("%s/policygentemplates/out/generated_configCRs/", siteConfigPath) - configDirs, err := os.ReadDir(policyCRsDir) - Expect(err).ToNot(HaveOccurred(), "Failed to list generated CRs directory") - Expect(len(configDirs)).To(BeNumerically(">=", 3), "Not enough entries in generated CRs directory") - - for _, dir := range configDirs { - dirPath := policyCRsDir + dir.Name() - files, err := os.ReadDir(dirPath) - Expect(err).ToNot(HaveOccurred(), "Failed to list files in %s", dirPath) - Expect(len(files)).To(BeNumerically(">=", 3), "Not enough files in directory %s", dirPath) - - for _, file := range files { - filePath := dirPath + "/" + file.Name() - fileBytes, err := os.ReadFile(filePath) - Expect(err).ToNot(HaveOccurred(), "Failed to read file %s", filePath) - - fileContent := make(map[string]interface{}) - err = yaml.Unmarshal(fileBytes, &fileContent) - Expect(err).ToNot(HaveOccurred(), "Failed to unmarshal file %s as yaml", filePath) - - kind, ok := fileContent["kind"].(string) - Expect(ok).To(BeTrue(), "Failed to cast file %s kind to string", filePath) - Expect(kind).To(BeElementOf(expectedKind), "File %s is not one of the expected kinds", filePath) - } - } - }) + } + }) }) diff --git 
a/tests/cnf/ran/gitopsztp/tests/ztp-machine-config.go b/tests/cnf/ran/gitopsztp/tests/ztp-machine-config.go index 7a1e97626..9d1b5cef1 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-machine-config.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-machine-config.go @@ -14,7 +14,7 @@ import ( var _ = Describe("ZTP Machine Config Tests", Label(tsparams.LabelMachineConfigTestCases), func() { // 54239 - Annotation on generated CRs for traceability - It("should find the ztp annotation present in the machine configs", reportxml.ID("54239"), func() { + It("finds the ztp annotation present in the machine configs", reportxml.ID("54239"), func() { machineConfigsToCheck := []string{ "container-mount-namespace-and-kubelet-conf-master", "container-mount-namespace-and-kubelet-conf-worker", diff --git a/tests/cnf/ran/gitopsztp/tests/ztp-spoke-checker.go b/tests/cnf/ran/gitopsztp/tests/ztp-spoke-checker.go index 2cc64695d..d040f6249 100644 --- a/tests/cnf/ran/gitopsztp/tests/ztp-spoke-checker.go +++ b/tests/cnf/ran/gitopsztp/tests/ztp-spoke-checker.go @@ -17,7 +17,7 @@ import ( var _ = Describe("ZTP Spoke Checker Tests", Label(tsparams.LabelSpokeCheckerTests), func() { When("a TunedPerformancePatch.yaml PGT disables the chronyd service", func() { // 54237 - Moving chronyd disable to tuned patch - It("should disable and inactivate the chronyd service on spoke", reportxml.ID("54237"), func() { + It("disables and inactivates the chronyd service on spoke", reportxml.ID("54237"), func() { By("verifying chronyd is inactive") // Use `| cat -` to explicitly ignore the return code since it will be nonzero when the service // is not active, which is expected here. @@ -53,14 +53,14 @@ var _ = Describe("ZTP Spoke Checker Tests", Label(tsparams.LabelSpokeCheckerTest }) // 60904 - Verifies list of pods in openshift-network-diagnostics namespace on spoke - It("should not have pods in the network diagnostics namespace", reportxml.ID("60904"), func() { + It("does not have pods in the network diagnostics namespace", reportxml.ID("60904"), func() { pods, err := pod.List(Spoke1APIClient, tsparams.NetworkDiagnosticsNamespace) Expect(err).ToNot(HaveOccurred(), "Failed to list pods in the network diagnostics namespace") Expect(pods).To(BeEmpty(), "Found pods in the network diagnostics namespace") }) // 60905 - Verifies list of pods in openshift-console namespace on spoke - It("should not have pods in the console namespace", reportxml.ID("60905"), func() { + It("does not have pods in the console namespace", reportxml.ID("60905"), func() { pods, err := pod.List(Spoke1APIClient, tsparams.ConsoleNamespace) Expect(err).ToNot(HaveOccurred(), "Failed to list pods in the console namespace") Expect(pods).To(BeEmpty(), "Found pods in the console namespace") diff --git a/tests/cnf/ran/gitopsztp/ztp_suite_test.go b/tests/cnf/ran/gitopsztp/ztp_suite_test.go index f48cd0175..96734b30e 100644 --- a/tests/cnf/ran/gitopsztp/ztp_suite_test.go +++ b/tests/cnf/ran/gitopsztp/ztp_suite_test.go @@ -48,6 +48,7 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { + By("deleting ZTP test namespace to clean up test suite") err := namespace.NewBuilder(HubAPIClient, tsparams.TestNamespace).DeleteAndWait(5 * time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to delete ZTP test namespace") }) diff --git a/tests/cnf/ran/powermanagement/powermanagement_suite_test.go b/tests/cnf/ran/powermanagement/powermanagement_suite_test.go index 20ce29cb3..7d9e1cc1c 100644 --- a/tests/cnf/ran/powermanagement/powermanagement_suite_test.go +++ 
b/tests/cnf/ran/powermanagement/powermanagement_suite_test.go @@ -4,13 +4,11 @@ import ( "runtime" "testing" - "github.com/golang/glog" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/openshift-kni/eco-goinfra/pkg/namespace" "github.com/openshift-kni/eco-goinfra/pkg/reportxml" . "github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/raninittools" - "github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/ranparam" "github.com/openshift-kni/eco-gotests/tests/cnf/ran/powermanagement/internal/tsparams" _ "github.com/openshift-kni/eco-gotests/tests/cnf/ran/powermanagement/tests" "github.com/openshift-kni/eco-gotests/tests/internal/reporter" @@ -31,19 +29,17 @@ var _ = BeforeSuite(func() { testNamespace := namespace.NewBuilder(Spoke1APIClient, tsparams.TestingNamespace). WithLabel("pod-security.kubernetes.io/enforce", "baseline") - glog.V(ranparam.LogLevel).Infof("Deleting test namespace ", tsparams.TestingNamespace) + By("deleting and recreating test namespace to ensure a blank slate") err := testNamespace.DeleteAndWait(tsparams.PowerSaveTimeout) Expect(err).ToNot(HaveOccurred(), "Failed to delete namespace ", tsparams.TestingNamespace) - glog.V(ranparam.LogLevel).Infof("Creating test namespace ", tsparams.TestingNamespace) _, err = testNamespace.Create() Expect(err).ToNot(HaveOccurred(), "Failed to create namespace ", tsparams.TestingNamespace) }) var _ = AfterSuite(func() { + By("deleting test namespace to clean up test suite") testNamespace := namespace.NewBuilder(Spoke1APIClient, tsparams.TestingNamespace) - - glog.V(ranparam.LogLevel).Infof("Deleting test namespace", tsparams.TestingNamespace) err := testNamespace.DeleteAndWait(tsparams.PowerSaveTimeout) Expect(err).ToNot(HaveOccurred(), "Failed to delete namespace ", tsparams.TestingNamespace) diff --git a/tests/cnf/ran/powermanagement/tests/cpufreq.go b/tests/cnf/ran/powermanagement/tests/cpufreq.go index 1f4962b61..8c1a7f726 100644 --- a/tests/cnf/ran/powermanagement/tests/cpufreq.go +++ b/tests/cnf/ran/powermanagement/tests/cpufreq.go @@ -63,7 +63,6 @@ var _ = Describe("CPU frequency tuning tests change the core frequencies of isol When("reserved and isolated core frequency is configured via PerformanceProfile", func() { It("sets the reserved and isolated core frequency correctly on the DUT", func() { - versionInRange, err := version.IsVersionStringInRange(RANConfig.Spoke1OCPVersion, "4.16", "") Expect(err).ToNot(HaveOccurred(), "Failed to compare OCP version string") @@ -73,7 +72,6 @@ var _ = Describe("CPU frequency tuning tests change the core frequencies of isol err = helper.SetCPUFreq(perfProfile, &desiredIsolatedCoreFreq, &desiredReservedCoreFreq) Expect(err).ToNot(HaveOccurred(), "Failed to set CPU Freq") - }) }) }) diff --git a/tests/cnf/ran/powermanagement/tests/powersave.go b/tests/cnf/ran/powermanagement/tests/powersave.go index 87a5ff3d8..e88bef84b 100644 --- a/tests/cnf/ran/powermanagement/tests/powersave.go +++ b/tests/cnf/ran/powermanagement/tests/powersave.go @@ -59,7 +59,7 @@ var _ = Describe("Per-core runtime power states tuning", Label(tsparams.LabelPow return } - By("Restoring performance profile to original specs") + By("restoring performance profile to original specs") perfProfile.Definition.Spec = originalPerfProfileSpec _, err = perfProfile.Update(true) @@ -75,39 +75,39 @@ var _ = Describe("Per-core runtime power states tuning", Label(tsparams.LabelPow }) // 54571 - Install SNO node with standard DU profile that does not include WorkloadHints - It("Verifies expected kernel parameters
with no workload hints specified in PerformanceProfile", - reportxml.ID("54571"), func() { - workloadHints := perfProfile.Definition.Spec.WorkloadHints - if workloadHints != nil { - Skip("WorkloadHints already present in perfProfile.Spec") - } + It("verifies expected kernel parameters with no workload hints "+ + "specified in PerformanceProfile", reportxml.ID("54571"), func() { + workloadHints := perfProfile.Definition.Spec.WorkloadHints + if workloadHints != nil { + Skip("WorkloadHints already present in perfProfile.Spec") + } - By("Checking for expected kernel parameters") - cmdline, err := cluster.ExecCommandOnSNOWithRetries(Spoke1APIClient, - ranparam.RetryCount, ranparam.RetryInterval, "cat /proc/cmdline") - Expect(err).ToNot(HaveOccurred(), "Failed to cat /proc/cmdline") - - // Expected default set of kernel parameters when no WorkloadHints are specified in PerformanceProfile - requiredKernelParms := []string{ - "nohz_full=[0-9,-]+", - "tsc=nowatchdog", - "nosoftlockup", - "nmi_watchdog=0", - "mce=off", - "skew_tick=1", - "intel_pstate=disable", - } - for _, parameter := range requiredKernelParms { - By(fmt.Sprintf("Checking /proc/cmdline for %s", parameter)) - rePattern := regexp.MustCompile(parameter) - Expect(rePattern.FindStringIndex(cmdline)). - ToNot(BeNil(), "Kernel parameter %s is missing from cmdline", parameter) - } - }) + By("checking for expected kernel parameters") + cmdline, err := cluster.ExecCommandOnSNOWithRetries(Spoke1APIClient, + ranparam.RetryCount, ranparam.RetryInterval, "cat /proc/cmdline") + Expect(err).ToNot(HaveOccurred(), "Failed to cat /proc/cmdline") + + // Expected default set of kernel parameters when no WorkloadHints are specified in PerformanceProfile + requiredKernelParms := []string{ + "nohz_full=[0-9,-]+", + "tsc=nowatchdog", + "nosoftlockup", + "nmi_watchdog=0", + "mce=off", + "skew_tick=1", + "intel_pstate=disable", + } + for _, parameter := range requiredKernelParms { + By(fmt.Sprintf("checking /proc/cmdline for %s", parameter)) + rePattern := regexp.MustCompile(parameter) + Expect(rePattern.FindStringIndex(cmdline)). + ToNot(BeNil(), "Kernel parameter %s is missing from cmdline", parameter) + } + }) // 54572 - Enable powersave at node level and then enable performance at node level - It("Enables powersave at node level and then enable performance at node level", reportxml.ID("54572"), func() { - By("Patching the performance profile with the workload hints") + It("enables powersave at node level and then enables performance at node level", reportxml.ID("54572"), func() { + By("patching the performance profile with the workload hints") err := helper.SetPowerModeAndWaitForMcpUpdate(perfProfile, *nodeList[0], true, false, true) Expect(err).ToNot(HaveOccurred(), "Failed to set power mode") @@ -122,72 +122,72 @@ var _ = Describe("Per-core runtime power states tuning", Label(tsparams.LabelPow // 54574 - Enable powersave at node level and then enable high performance at node level, check power // consumption with no workload pods. 
- It("Enable powersave, and then enable high performance at node level, check power consumption with no workload pods.", - reportxml.ID("54574"), func() { - testPodAnnotations := map[string]string{ - "cpu-load-balancing.crio.io": "disable", - "cpu-quota.crio.io": "disable", - "irq-load-balancing.crio.io": "disable", - "cpu-c-states.crio.io": "disable", - "cpu-freq-governor.crio.io": "performance", - } + It("enables powersave, enables high performance at node level, "+ + "and checks power consumption with no workload pods", reportxml.ID("54574"), func() { + testPodAnnotations := map[string]string{ + "cpu-load-balancing.crio.io": "disable", + "cpu-quota.crio.io": "disable", + "irq-load-balancing.crio.io": "disable", + "cpu-c-states.crio.io": "disable", + "cpu-freq-governor.crio.io": "performance", + } - cpuLimit := resource.MustParse("2") - memLimit := resource.MustParse("100Mi") - - By("Define test pod") - testpod, err := helper.DefineQoSTestPod( - tsparams.TestingNamespace, nodeName, cpuLimit.String(), cpuLimit.String(), memLimit.String(), memLimit.String()) - Expect(err).ToNot(HaveOccurred(), "Failed to define test pod") - - testpod.Definition.Annotations = testPodAnnotations - runtimeClass := fmt.Sprintf("%s-%s", components.ComponentNamePrefix, perfProfile.Definition.Name) - testpod.Definition.Spec.RuntimeClassName = &runtimeClass - - DeferCleanup(func() { - // Delete the test pod if it's still around when the function returns, like in a test case failure. - if testpod.Exists() { - By("Delete pod in case of a failure") - _, err = testpod.DeleteAndWait(tsparams.PowerSaveTimeout) - Expect(err).ToNot(HaveOccurred(), "Failed to delete test pod in case of failure") - } - }) - - By("Create test pod") - testpod, err = testpod.CreateAndWaitUntilRunning(tsparams.PowerSaveTimeout) - Expect(err).ToNot(HaveOccurred(), "Failed to create pod") - Expect(testpod.Object.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), - "Test pod does not have QoS class of Guaranteed") - - cpusetOutput, err := testpod.ExecCommand([]string{"sh", `-c`, "taskset -c -p $$ | cut -d: -f2"}) - Expect(err).ToNot(HaveOccurred(), "Failed to get cpuset") - - By("Verify powersetting of cpus used by the pod") - trimmedOutput := strings.TrimSpace(cpusetOutput.String()) - cpusUsed, err := cpuset.Parse(trimmedOutput) - Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset output") - - targetCpus := cpusUsed.List() - checkCPUGovernorsAndResumeLatency(targetCpus, "n/a", "performance") - - By("Verify the rest of cpus have default power setting") - allCpus := nodeList[0].Object.Status.Capacity.Cpu() - cpus, err := cpuset.Parse(fmt.Sprintf("0-%d", allCpus.Value()-1)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset") - - otherCPUs := cpus.Difference(cpusUsed) - // Verify cpus not assigned to the pod have default power settings. 
- checkCPUGovernorsAndResumeLatency(otherCPUs.List(), "0", "performance") - - By("Delete the pod") - _, err = testpod.DeleteAndWait(tsparams.PowerSaveTimeout) - Expect(err).ToNot(HaveOccurred(), "Failed to delete test pod") - - By("Verify after pod was deleted cpus assigned to container have default powersave settings") - checkCPUGovernorsAndResumeLatency(targetCpus, "0", "performance") + cpuLimit := resource.MustParse("2") + memLimit := resource.MustParse("100Mi") + + By("defining the test pod") + testpod, err := helper.DefineQoSTestPod( + tsparams.TestingNamespace, nodeName, cpuLimit.String(), cpuLimit.String(), memLimit.String(), memLimit.String()) + Expect(err).ToNot(HaveOccurred(), "Failed to define test pod") + + testpod.Definition.Annotations = testPodAnnotations + runtimeClass := fmt.Sprintf("%s-%s", components.ComponentNamePrefix, perfProfile.Definition.Name) + testpod.Definition.Spec.RuntimeClassName = &runtimeClass + + DeferCleanup(func() { + // Delete the test pod if it's still around when the function returns, like in a test case failure. + if testpod.Exists() { + By("deleting the test pod in case of a failure") + _, err = testpod.DeleteAndWait(tsparams.PowerSaveTimeout) + Expect(err).ToNot(HaveOccurred(), "Failed to delete test pod in case of failure") + } }) - Context("Collect power usage metrics", Ordered, func() { + By("creating the test pod") + testpod, err = testpod.CreateAndWaitUntilRunning(tsparams.PowerSaveTimeout) + Expect(err).ToNot(HaveOccurred(), "Failed to create pod") + Expect(testpod.Object.Status.QOSClass).To(Equal(corev1.PodQOSGuaranteed), + "Test pod does not have QoS class of Guaranteed") + + cpusetOutput, err := testpod.ExecCommand([]string{"sh", `-c`, "taskset -c -p $$ | cut -d: -f2"}) + Expect(err).ToNot(HaveOccurred(), "Failed to get cpuset") + + By("verifying powersetting of cpus used by the test pod") + trimmedOutput := strings.TrimSpace(cpusetOutput.String()) + cpusUsed, err := cpuset.Parse(trimmedOutput) + Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset output") + + targetCpus := cpusUsed.List() + checkCPUGovernorsAndResumeLatency(targetCpus, "n/a", "performance") + + By("verifying the rest of cpus have default power setting") + allCpus := nodeList[0].Object.Status.Capacity.Cpu() + cpus, err := cpuset.Parse(fmt.Sprintf("0-%d", allCpus.Value()-1)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset") + + otherCPUs := cpus.Difference(cpusUsed) + // Verify cpus not assigned to the pod have default power settings. 
+ checkCPUGovernorsAndResumeLatency(otherCPUs.List(), "0", "performance") + + By("deleting the test pod") + _, err = testpod.DeleteAndWait(tsparams.PowerSaveTimeout) + Expect(err).ToNot(HaveOccurred(), "Failed to delete test pod") + + By("verifying cpus assigned to the container have default powersave settings after the test pod was deleted") + checkCPUGovernorsAndResumeLatency(targetCpus, "0", "performance") + }) + + When("collecting power usage metrics", Ordered, func() { var ( samplingInterval time.Duration powerState string @@ -206,7 +206,7 @@ var _ = Describe("Per-core runtime power states tuning", Label(tsparams.LabelPow Expect(err).ToNot(HaveOccurred(), "Failed to get power state for the performance profile") }) - It("Checks power usage for 'noworkload' scenario", func() { + It("checks power usage for 'noworkload' scenario", func() { duration, err := time.ParseDuration(RANConfig.NoWorkloadDuration) Expect(err).ToNot(HaveOccurred(), "Failed to parse no workload duration") @@ -219,7 +219,7 @@ var _ = Describe("Per-core runtime power states tuning", Label(tsparams.LabelPow } }) - It("Checks power usage for 'steadyworkload' scenario", func() { + It("checks power usage for 'steadyworkload' scenario", func() { duration, err := time.ParseDuration(RANConfig.WorkloadDuration) Expect(err).ToNot(HaveOccurred(), "Failed to parse steady workload duration") diff --git a/tests/cnf/ran/talm/talm_suite_test.go b/tests/cnf/ran/talm/talm_suite_test.go index c1f0fcecd..c685c2b53 100644 --- a/tests/cnf/ran/talm/talm_suite_test.go +++ b/tests/cnf/ran/talm/talm_suite_test.go @@ -39,6 +39,7 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { // Deleting the namespace after the suite finishes ensures all the CGUs created are deleted + By("deleting TALM test namespace to ensure test suite is cleaned up") err := setup.DeleteTalmTestNamespace() Expect(err).ToNot(HaveOccurred(), "Failed to delete TALM test namespace") }) diff --git a/tests/cnf/ran/talm/tests/talm-backup.go b/tests/cnf/ran/talm/tests/talm-backup.go index 60c8e7cd3..07a5285f4 100644 --- a/tests/cnf/ran/talm/tests/talm-backup.go +++ b/tests/cnf/ran/talm/tests/talm-backup.go @@ -27,20 +27,20 @@ var _ = Describe("TALM backup tests", Label(tsparams.LabelBackupTestCases), func ) BeforeEach(func() { - By("checking that the talm version is at least 4.11") + By("checking that the TALM version is at least 4.11") versionInRange, err := version.IsVersionStringInRange(RANConfig.HubOperatorVersions[ranparam.TALM], "4.11", "") - Expect(err).ToNot(HaveOccurred(), "Failed to compared talm version string") + Expect(err).ToNot(HaveOccurred(), "Failed to compare TALM version string") if !versionInRange { - Skip("backup tests require talm 4.11 or higher") + Skip("backup tests require TALM 4.11 or higher") } - By("checking that the talm version is at most 4.15") + By("checking that the TALM version is at most 4.15") versionInRange, err = version.IsVersionStringInRange(RANConfig.HubOperatorVersions[ranparam.TALM], "", "4.15") - Expect(err).ToNot(HaveOccurred(), "Failed to compare talm version string") + Expect(err).ToNot(HaveOccurred(), "Failed to compare TALM version string") if !versionInRange { - Skip("backup tests are deprecated for talm 4.16 and higher") + Skip("backup tests are deprecated for TALM 4.16 and higher") } }) @@ -61,7 +61,7 @@ var _ = Describe("TALM backup tests", Label(tsparams.LabelBackupTestCases), func Expect(errorList).To(BeEmpty(), "Failed to clean up test resources on spoke 1") }) - Context("with full disk for spoke1", func() { +
When("spoke 1 has a full disk", func() { BeforeEach(func() { By("setting up filesystem to simulate low space") loopbackDevicePath, err = mount.PrepareEnvWithSmallMountPoint(Spoke1APIClient) @@ -75,7 +75,7 @@ var _ = Describe("TALM backup tests", Label(tsparams.LabelBackupTestCases), func }) // 50835 - Insufficient Backup Partition Size - It("should have a failed cgu for single spoke", reportxml.ID("50835"), func() { + It("has a failed cgu for spoke 1", reportxml.ID("50835"), func() { By("applying all the required CRs for backup") cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). WithCluster(RANConfig.Spoke1Name). @@ -90,14 +90,14 @@ var _ = Describe("TALM backup tests", Label(tsparams.LabelBackupTestCases), func }) }) - Context("with CGU disabled", func() { + When("the CGU is disabled", func() { BeforeEach(func() { - By("checking that the talm version is at least 4.12") + By("checking that the TALM version is at least 4.12") versionInRange, err := version.IsVersionStringInRange(RANConfig.HubOperatorVersions[ranparam.TALM], "4.12", "") - Expect(err).ToNot(HaveOccurred(), "Failed to compare talm version string") + Expect(err).ToNot(HaveOccurred(), "Failed to compare TALM version string") if !versionInRange { - Skip("CGU disabled requires talm 4.12 or higher") + Skip("CGU disabled requires TALM 4.12 or higher") } }) @@ -162,7 +162,7 @@ var _ = Describe("TALM backup tests", Label(tsparams.LabelBackupTestCases), func }) // 74752 Unblock Backup in Batch OCP Upgrade - It("should not affect backup on second spoke in same batch", reportxml.ID("74752"), func() { + It("successfully backs up on second spoke in same batch", reportxml.ID("74752"), func() { By("applying all the required CRs for backup") // max concurrency of 2 so both spokes are in the same batch cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 2). @@ -192,19 +192,19 @@ func assertBackupStatus(spokeName, expected string) { "Failed to pull cgu %s in namespace %s", tsparams.CguName, tsparams.TestNamespace) if cguBuilder.Object.Status.Backup == nil { - glog.V(tsparams.LogLevel).Info("backup struct not ready yet") + glog.V(tsparams.LogLevel).Info("Backup struct not ready yet") return "" } _, ok := cguBuilder.Object.Status.Backup.Status[spokeName] if !ok { - glog.V(tsparams.LogLevel).Info("cluster name as key did not appear yet") + glog.V(tsparams.LogLevel).Info("Cluster name as key did not appear yet") return "" } - glog.V(tsparams.LogLevel).Infof("[%s] %s backup status: %s\n", cguBuilder.Object.Name, spokeName, + glog.V(tsparams.LogLevel).Infof("[%s] %s backup status: %s", cguBuilder.Object.Name, spokeName, cguBuilder.Object.Status.Backup.Status[spokeName]) return cguBuilder.Object.Status.Backup.Status[spokeName] diff --git a/tests/cnf/ran/talm/tests/talm-batching.go b/tests/cnf/ran/talm/tests/talm-batching.go index 9686f4ccf..c7917d8b3 100644 --- a/tests/cnf/ran/talm/tests/talm-batching.go +++ b/tests/cnf/ran/talm/tests/talm-batching.go @@ -54,7 +54,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), When("a single spoke is missing", Label(tsparams.LabelMissingSpokeTestCases), func() { // 47949 - Tests selected clusters must be non-compliant AND included in CGU. 
- It("should report a missing spoke", reportxml.ID("47949"), func() { + It("reports a missing spoke", reportxml.ID("47949"), func() { By("creating the CGU with non-existent cluster and policy") cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). WithCluster(tsparams.NonExistentClusterName). @@ -72,7 +72,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), When("a policy is missing", Label(tsparams.LabelMissingPolicyTestCases), func() { // 47955 - Tests upgrade rejected due to specified managed policies missing - It("should report the missing policy", reportxml.ID("47955"), func() { + It("reports the missing policy", reportxml.ID("47955"), func() { By("create and enable a CGU with a managed policy that does not exist") cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). WithCluster(RANConfig.Spoke1Name). @@ -91,7 +91,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), When("there is a catalog source", Label(tsparams.LabelCatalogSourceTestCases), func() { // 47952 - Tests upgrade failure of one cluster would not affect other clusters - It("should abort CGU when the first batch fails with the Abort batch timeout action", reportxml.ID("47952"), func() { + It("aborts CGU when the first batch fails with the Abort batch timeout action", reportxml.ID("47952"), func() { By("verifying the temporary namespace does not exist on spoke1") tempExistsOnSpoke1 := namespace.NewBuilder(Spoke1APIClient, tsparams.TemporaryNamespace).Exists() Expect(tempExistsOnSpoke1).To(BeFalse(), "Temporary namespace already exists on spoke 1") @@ -153,7 +153,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), }) // 47952 - Tests upgrade failure of one cluster would not affect other clusters - It("should report the failed spoke when one spoke in a batch times out", reportxml.ID("47952"), func() { + It("reports the failed spoke when one spoke in a batch times out", reportxml.ID("47952"), func() { By("verifying the temporary namespace does not exist on spoke2") tempExistsOnSpoke2 := namespace.NewBuilder(Spoke2APIClient, tsparams.TemporaryNamespace).Exists() Expect(tempExistsOnSpoke2).To(BeFalse(), "Temporary namespace already exists on spoke 2") @@ -194,7 +194,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), }) // 74753 upgrade failure of first batch would not affect second batch - It("should continue the CGU when the first batch fails with the Continue batch timeout"+ + It("continues the CGU when the first batch fails with the Continue batch timeout "+ "action", reportxml.ID("74753"), func() { By("verifying the temporary namespace does not exist on spoke1") tempExistsOnSpoke1 := namespace.NewBuilder(Spoke1APIClient, tsparams.TemporaryNamespace).Exists() @@ -236,70 +236,70 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), }) // 54296 - Batch Timeout Calculation - It("should continue the CGU when the second batch fails with the Continue batch timeout action", - reportxml.ID("54296"), func() { - By("verifying the temporary namespace does not exist on spoke2") - tempExistsOnSpoke2 := namespace.NewBuilder(Spoke2APIClient, tsparams.TemporaryNamespace).Exists() - Expect(tempExistsOnSpoke2).To(BeFalse(), "Temporary namespace already exists on spoke 2") - - By("creating the temporary namespace on spoke1 only") - _, err = namespace.NewBuilder(Spoke1APIClient, 
tsparams.TemporaryNamespace).Create() - Expect(err).ToNot(HaveOccurred(), "Failed to create temporary namespace on spoke 1") - - expectedTimeout := 16 - - By("creating the CGU and associated resources") - // Max concurrency of one to ensure two batches are used. - cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). - WithCluster(RANConfig.Spoke1Name). - WithCluster(RANConfig.Spoke2Name). - WithManagedPolicy(tsparams.PolicyName) - cguBuilder.Definition.Spec.RemediationStrategy.Timeout = expectedTimeout - cguBuilder.Definition.Spec.Enable = ptr.To(false) - - cguBuilder, err = helper.SetupCguWithCatSrc(cguBuilder) - Expect(err).ToNot(HaveOccurred(), "Failed to setup CGU") - - By("waiting to enable the CGU") - cguBuilder, err = helper.WaitToEnableCgu(cguBuilder) - Expect(err).ToNot(HaveOccurred(), "Failed to wait and enable the CGU") - - By("waiting for the CGU to timeout") - cguBuilder, err = cguBuilder.WaitForCondition(tsparams.CguTimeoutReasonCondition, 21*time.Minute) - Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU to timeout") - - By("validating that the policy succeeded on spoke1") - catSrcExistsOnSpoke1 := olm.NewCatalogSourceBuilder( - Spoke1APIClient, tsparams.CatalogSourceName, tsparams.TemporaryNamespace).Exists() - Expect(catSrcExistsOnSpoke1).To(BeTrue(), "Catalog source doesn't exist on spoke 1") - - By("validating that the policy failed on spoke2") - catSrcExistsOnSpoke2 := olm.NewCatalogSourceBuilder( - Spoke2APIClient, tsparams.CatalogSourceName, tsparams.TemporaryNamespace).Exists() - Expect(catSrcExistsOnSpoke2).To(BeFalse(), "Catalog source exists on spoke 2") - - By("validating that CGU timeout is recalculated for later batches after earlier batches complete") - startTime := cguBuilder.Object.Status.Status.StartedAt.Time - - // endTime may be zero even after timeout so just use now instead. - endTime := cguBuilder.Object.Status.Status.CompletedAt.Time - if endTime.IsZero() { - endTime = time.Now() - } + It("continues the CGU when the second batch fails with the Continue batch timeout "+ + "action", reportxml.ID("54296"), func() { + By("verifying the temporary namespace does not exist on spoke2") + tempExistsOnSpoke2 := namespace.NewBuilder(Spoke2APIClient, tsparams.TemporaryNamespace).Exists() + Expect(tempExistsOnSpoke2).To(BeFalse(), "Temporary namespace already exists on spoke 2") + + By("creating the temporary namespace on spoke1 only") + _, err = namespace.NewBuilder(Spoke1APIClient, tsparams.TemporaryNamespace).Create() + Expect(err).ToNot(HaveOccurred(), "Failed to create temporary namespace on spoke 1") + + expectedTimeout := 16 + + By("creating the CGU and associated resources") + // Max concurrency of one to ensure two batches are used. + cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). + WithCluster(RANConfig.Spoke1Name). + WithCluster(RANConfig.Spoke2Name). 
+ WithManagedPolicy(tsparams.PolicyName) + cguBuilder.Definition.Spec.RemediationStrategy.Timeout = expectedTimeout + cguBuilder.Definition.Spec.Enable = ptr.To(false) + + cguBuilder, err = helper.SetupCguWithCatSrc(cguBuilder) + Expect(err).ToNot(HaveOccurred(), "Failed to setup CGU") + + By("waiting to enable the CGU") + cguBuilder, err = helper.WaitToEnableCgu(cguBuilder) + Expect(err).ToNot(HaveOccurred(), "Failed to wait and enable the CGU") - elapsed := endTime.Sub(startTime) - glog.V(tsparams.LogLevel).Infof("start time: %v, end time: %v, elapsed: %v", startTime, endTime, elapsed) - // We expect that the total runtime should be about equal to the expected timeout. In - // particular, we expect it to be +/- one reconcile loop time (5 minutes). The first - // batch will complete successfully, so the second should use the entire remaining - // expected timout. - Expect(elapsed).To(BeNumerically("~", expectedTimeout*int(time.Minute), tsparams.TalmDefaultReconcileTime)) - }) + By("waiting for the CGU to timeout") + cguBuilder, err = cguBuilder.WaitForCondition(tsparams.CguTimeoutReasonCondition, 21*time.Minute) + Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU to timeout") + + By("validating that the policy succeeded on spoke1") + catSrcExistsOnSpoke1 := olm.NewCatalogSourceBuilder( + Spoke1APIClient, tsparams.CatalogSourceName, tsparams.TemporaryNamespace).Exists() + Expect(catSrcExistsOnSpoke1).To(BeTrue(), "Catalog source doesn't exist on spoke 1") + + By("validating that the policy failed on spoke2") + catSrcExistsOnSpoke2 := olm.NewCatalogSourceBuilder( + Spoke2APIClient, tsparams.CatalogSourceName, tsparams.TemporaryNamespace).Exists() + Expect(catSrcExistsOnSpoke2).To(BeFalse(), "Catalog source exists on spoke 2") + + By("validating that CGU timeout is recalculated for later batches after earlier batches complete") + startTime := cguBuilder.Object.Status.Status.StartedAt.Time + + // endTime may be zero even after timeout so just use now instead. + endTime := cguBuilder.Object.Status.Status.CompletedAt.Time + if endTime.IsZero() { + endTime = time.Now() + } + + elapsed := endTime.Sub(startTime) + glog.V(tsparams.LogLevel).Infof("Start time: %v, end time: %v, elapsed: %v", startTime, endTime, elapsed) + // We expect that the total runtime should be about equal to the expected timeout. In + // particular, we expect it to be +/- one reconcile loop time (5 minutes). The first + // batch will complete successfully, so the second should use the entire remaining + // expected timeout. + Expect(elapsed).To(BeNumerically("~", expectedTimeout*int(time.Minute), tsparams.TalmDefaultReconcileTime)) + }) }) When("there is a temporary namespace", Label(tsparams.LabelTempNamespaceTestCases), func() { // 47954 - Tests upgrade aborted due to short timeout. - It("should report the timeout value when one cluster is in a batch and it times out", reportxml.ID("47954"), func() { + It("reports the timeout value when one cluster is in a batch and it times out", reportxml.ID("47954"), func() { By("verifying the temporary namespace does not exist on spoke1") tempExistsOnSpoke1 := namespace.NewBuilder(Spoke1APIClient, tsparams.TemporaryNamespace).Exists() Expect(tempExistsOnSpoke1).To(BeFalse(), "Temporary namespace already exists on spoke 1") @@ -356,7 +356,7 @@ var _ = Describe("TALM Batching Tests", Label(tsparams.LabelBatchingTestCases), }) // 47947 - Tests successful ocp and operator upgrade with canaries and multiple batches.
- It("should complete the CGU when two clusters are successful in a single batch", reportxml.ID("47947"), func() { + It("completes the CGU when two clusters are successful in a single batch", reportxml.ID("47947"), func() { By("creating the CGU and associated resources") cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). WithManagedPolicy(tsparams.PolicyName) diff --git a/tests/cnf/ran/talm/tests/talm-blockingcr.go b/tests/cnf/ran/talm/tests/talm-blockingcr.go index f3c72876d..3a8bb0eae 100644 --- a/tests/cnf/ran/talm/tests/talm-blockingcr.go +++ b/tests/cnf/ran/talm/tests/talm-blockingcr.go @@ -39,14 +39,14 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa }) AfterEach(func() { - By("Cleaning up test resources on hub") + By("cleaning up test resources on hub") errList := setup.CleanupTestResourcesOnHub(HubAPIClient, tsparams.TestNamespace, blockingA) Expect(errList).To(BeEmpty(), "Failed to cleanup resources for blocking A on hub") errList = setup.CleanupTestResourcesOnHub(HubAPIClient, tsparams.TestNamespace, blockingB) Expect(errList).To(BeEmpty(), "Failed to cleanup resources for blocking B on hub") - By("Deleting test namespaces on spoke 1") + By("deleting test namespaces on spoke 1") err := namespace.NewBuilder(Spoke1APIClient, tsparams.TemporaryNamespace+blockingA). DeleteAndWait(5 * time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to delete namespace for blocking A on spoke 1") @@ -75,16 +75,16 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa cguA, cguB = waitToEnableCgus(cguA, cguB) - By("Waiting to verify if CGU B is blocked by A") + By("waiting to verify if CGU B is blocked by A") blockedMessage := fmt.Sprintf(tsparams.TalmBlockedMessage, tsparams.CguName+blockingA) err = helper.WaitForCguBlocked(cguB, blockedMessage) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU B to be blocked") - By("Waiting for CGU A to succeed") + By("waiting for CGU A to succeed") _, err = cguA.WaitForCondition(tsparams.CguSuccessfulFinishCondition, 12*time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU A to succeed") - By("Waiting for CGU B to succeed") + By("waiting for CGU B to succeed") _, err = cguB.WaitForCondition(tsparams.CguSuccessfulFinishCondition, 17*time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU B to succeed") }) @@ -101,7 +101,7 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa Namespace: tsparams.TestNamespace, }} - By("Setting up CGU A with a faulty namespace") + By("setting up CGU A with a faulty namespace") tempNs := namespace.NewBuilder(HubAPIClient, tsparams.TemporaryNamespace+blockingA) tempNs.Definition.Kind = "faulty namespace" @@ -115,7 +115,7 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa cguA, err = cguA.Create() Expect(err).ToNot(HaveOccurred(), "Failed to create CGU") - By("Setting up CGU B correctly") + By("setting up CGU B correctly") cguB, err = helper.SetupCguWithNamespace(cguB, blockingB) Expect(err).ToNot(HaveOccurred(), "Failed to setup CGU B") @@ -123,15 +123,15 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa blockedMessage := fmt.Sprintf(tsparams.TalmBlockedMessage, tsparams.CguName+blockingA) - By("Waiting to verify if CGU B is blocked by A") + By("waiting to verify if CGU B is blocked by A") err = helper.WaitForCguBlocked(cguB, blockedMessage) Expect(err).ToNot(HaveOccurred(), "Failed to wait 
for CGU B to be blocked") - By("Waiting for CGU A to fail because of timeout") + By("waiting for CGU A to fail because of timeout") _, err = cguA.WaitForCondition(tsparams.CguTimeoutMessageCondition, 7*time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU A to fail") - By("Verifiying that CGU B is still blocked") + By("verifiying that CGU B is still blocked") err = helper.WaitForCguBlocked(cguB, blockedMessage) Expect(err).ToNot(HaveOccurred(), "Failed to verify that CGU B is still blocked") }) @@ -148,37 +148,37 @@ var _ = Describe("TALM Blocking CRs Tests", Label(tsparams.LabelBlockingCRTestCa Namespace: tsparams.TestNamespace, }} - By("Setting up CGU B") + By("setting up CGU B") cguB, err = helper.SetupCguWithNamespace(cguB, blockingB) Expect(err).ToNot(HaveOccurred(), "Failed to setup CGU B") - By("Waiting for the system to settle") + By("waiting for the system to settle") time.Sleep(tsparams.TalmSystemStablizationTime) - By("Enabling CGU B") + By("enabling CGU B") cguB.Definition.Spec.Enable = ptr.To(true) cguB, err = cguB.Update(true) Expect(err).ToNot(HaveOccurred(), "Failed to enable CGU B") - By("Waiting to verify if CGU B is blocked by A because it's missing") + By("waiting to verify if CGU B is blocked by A because it's missing") blockedMessage := fmt.Sprintf(tsparams.TalmMissingCRMessage, tsparams.CguName+blockingA) err = helper.WaitForCguBlocked(cguB, blockedMessage) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU B to be blocked") - By("Setting up CGU A") + By("setting up CGU A") cguA, err = helper.SetupCguWithNamespace(cguA, blockingA) Expect(err).ToNot(HaveOccurred(), "Failed to setup CGU A") - By("Enabling CGU A") + By("enabling CGU A") cguA.Definition.Spec.Enable = ptr.To(true) cguA, err = cguA.Update(true) Expect(err).ToNot(HaveOccurred(), "Failed to enable CGU A") - By("Waiting for CGU A to succeed") + By("waiting for CGU A to succeed") _, err = cguA.WaitForCondition(tsparams.CguSucceededCondition, 12*time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU A to succeed") - By("Waiting for CGU B to succeed") + By("waiting for CGU B to succeed") _, err = cguB.WaitForCondition(tsparams.CguSucceededCondition, 17*time.Minute) Expect(err).ToNot(HaveOccurred(), "Failed to wait for CGU B to succeed") }) @@ -196,15 +196,13 @@ func getBlockingCGU(suffix string, timeout int) *cgu.CguBuilder { } func waitToEnableCgus(cguA *cgu.CguBuilder, cguB *cgu.CguBuilder) (*cgu.CguBuilder, *cgu.CguBuilder) { - var err error - - By("Waiting for the system to settle") + By("waiting for the system to settle") time.Sleep(tsparams.TalmSystemStablizationTime) - By("Enabling CGU A and CGU B") + By("enabling CGU A and CGU B") cguA.Definition.Spec.Enable = ptr.To(true) - cguA, err = cguA.Update(true) + cguA, err := cguA.Update(true) Expect(err).ToNot(HaveOccurred(), "Failed to enable CGU A") cguB.Definition.Spec.Enable = ptr.To(true) diff --git a/tests/cnf/ran/talm/tests/talm-canary.go b/tests/cnf/ran/talm/tests/talm-canary.go index 0958ab87c..32c8edd02 100644 --- a/tests/cnf/ran/talm/tests/talm-canary.go +++ b/tests/cnf/ran/talm/tests/talm-canary.go @@ -18,8 +18,6 @@ import ( ) var _ = Describe("TALM Canary Tests", Label(tsparams.LabelCanaryTestCases), func() { - var err error - BeforeEach(func() { By("checking that hub and two spokes are present") Expect(rancluster.AreClustersPresent([]*clients.Settings{HubAPIClient, Spoke1APIClient, Spoke2APIClient})). 
@@ -38,7 +36,7 @@ var _ = Describe("TALM Canary Tests", Label(tsparams.LabelCanaryTestCases), func }) // 47954 - Tests upgrade aborted due to short timeout. - It("should stop the CGU where first canary fails", reportxml.ID("47954"), func() { + It("stops the CGU when first canary fails", reportxml.ID("47954"), func() { var err error By("verifying the temporary namespace does not exist on spoke 1 and 2") @@ -84,7 +82,9 @@ var _ = Describe("TALM Canary Tests", Label(tsparams.LabelCanaryTestCases), func }) // 47947 - Tests successful ocp and operator upgrade with canaries and multiple batches. - It("should complete the CGU where all canaries are successful", reportxml.ID("47947"), func() { + It("completes the CGU when all canaries are successful", reportxml.ID("47947"), func() { + var err error + By("creating the CGU and associated resources") cguBuilder := cgu.NewCguBuilder(HubAPIClient, tsparams.CguName, tsparams.TestNamespace, 1). WithCluster(RANConfig.Spoke1Name). @@ -99,7 +99,7 @@ var _ = Describe("TALM Canary Tests", Label(tsparams.LabelCanaryTestCases), func cguBuilder, err = cguBuilder.WaitUntilClusterInProgress(RANConfig.Spoke2Name, 3*tsparams.TalmDefaultReconcileTime) Expect(err).ToNot(HaveOccurred(), "Failed to wait for batch remediation for spoke 2 to be in progress") - By("Making sure the non-canary cluster (spoke 1) has not started yet") + By("making sure the non-canary cluster (spoke 1) has not started yet") progress, ok := cguBuilder.Object.Status.Status.CurrentBatchRemediationProgress[RANConfig.Spoke1Name] if ok { Expect(progress.State).ToNot(Equal("InProgress"), "Batch remediation for non-canary cluster has already started") diff --git a/tests/cnf/ran/talm/tests/talm-precache.go b/tests/cnf/ran/talm/tests/talm-precache.go index ff5c7bcfb..8d528ba34 100644 --- a/tests/cnf/ran/talm/tests/talm-precache.go +++ b/tests/cnf/ran/talm/tests/talm-precache.go @@ -420,7 +420,7 @@ var _ = Describe("TALM precache", Label(tsparams.LabelPreCacheTestCases), func() Expect(err).ToNot(HaveOccurred(), "Failed to wait for all spoke 1 pods to be ready") }) - Context("precaching with one managed cluster powered off and unavailable", func() { + When("precaching with one managed cluster powered off and unavailable", func() { AfterEach(func() { By("cleaning up resources on hub") errorList := setup.CleanupTestResourcesOnHub(HubAPIClient, tsparams.TestNamespace, "") @@ -459,7 +459,7 @@ var _ = Describe("TALM precache", Label(tsparams.LabelPreCacheTestCases), func() }) }) - Context("batching with one managed cluster powered off and unavailable", Ordered, func() { + When("batching with one managed cluster powered off and unavailable", Ordered, func() { var cguBuilder *cgu.CguBuilder BeforeAll(func() { @@ -568,7 +568,7 @@ func assertPrecacheStatus(spokeName, expected string) { cguBuilder.Object.Name, spokeName, cguBuilder.Object.Status.Precaching.Status[spokeName]) return cguBuilder.Object.Status.Precaching.Status[spokeName] - }, 20*time.Minute, 15*time.Second).Should(Equal(expected)) + }).WithTimeout(20 * time.Minute).WithPolling(15 * time.Second).Should(Equal(expected)) } // checkPrecachePodLog checks that the pre cache pod has a log that says the pre cache is done. 
@@ -585,7 +585,7 @@ func checkPrecachePodLog(client *clients.Settings) error { } if len(podList) == 0 { - glog.V(tsparams.LogLevel).Info("precache pod does not exist on spoke - skip pod log check.") + glog.V(tsparams.LogLevel).Info("Precache pod does not exist on spoke - skip pod log check.") return true, nil } @@ -603,7 +603,7 @@ func checkPrecachePodLog(client *clients.Settings) error { }) if err != nil && plog != "" { - glog.V(tsparams.LogLevel).Infof("generated pod logs: ", plog) + glog.V(tsparams.LogLevel).Infof("Generated pod logs: %s", plog) } return err @@ -651,7 +651,7 @@ func copyPoliciesWithSubscription(policies []*ocm.PolicyBuilder) ([]string, []st for index, policy := range policies { glog.V(tsparams.LogLevel).Infof( - "checking for subscriptions on policy %s in namespace %s", policy.Definition.Name, policy.Definition.Namespace) + "Checking for subscriptions on policy %s in namespace %s", policy.Definition.Name, policy.Definition.Namespace) template := policy.Object.Spec.PolicyTemplates[0] configPolicy, err := ranhelper.UnmarshalRaw[configurationPolicyv1.ConfigurationPolicy](template.ObjectDefinition.Raw)
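For readers unfamiliar with the generic helper in the final hunk: ranhelper.UnmarshalRaw decodes the raw bytes of a policy template's ObjectDefinition into a typed object. A minimal sketch of how such a helper could be implemented, assuming plain JSON decoding (the repository's actual implementation may differ):

package ranhelper

import "encoding/json"

// UnmarshalRaw decodes raw JSON bytes, such as a runtime.RawExtension's
// Raw field, into a freshly allocated value of type T and returns a
// pointer to it.
func UnmarshalRaw[T any](raw []byte) (*T, error) {
	out := new(T)
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}

At the call site above, the explicit type argument selects T, so UnmarshalRaw[configurationPolicyv1.ConfigurationPolicy](template.ObjectDefinition.Raw) hands back a typed *ConfigurationPolicy without any manual casting.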