diff --git a/Makefile b/Makefile index 2f49798fe6a..65423a0d506 100644 --- a/Makefile +++ b/Makefile @@ -371,7 +371,7 @@ deploy-olm: operator-sdk ## Build current branch operator image, bundle image, p IMG=$(THIS_OPERATOR_IMAGE) BUNDLE_IMG=$(THIS_BUNDLE_IMAGE) \ make docker-build docker-push bundle bundle-build bundle-push; \ rm -rf $(DEPLOY_TMP) - $(OPERATOR_SDK) run bundle $(THIS_BUNDLE_IMAGE) --namespace $(OADP_TEST_NAMESPACE) + operator-sdk run bundle $(THIS_BUNDLE_IMAGE) --namespace $(OADP_TEST_NAMESPACE) --index-image=quay.io/operator-framework/opm:v1.23.0 .PHONY: opm OPM = ./bin/opm @@ -425,6 +425,7 @@ sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//")) || $(CLUSTER_TY #TEST_FILTER := $(shell echo '! aws && ! gcp && ! azure' | sed -r "s/[&]* [!] $(CLUSTER_TYPE)|[!] $(CLUSTER_TYPE) [&]*//") SETTINGS_TMP=/tmp/test-settings +.PHONY: test-e2e-setup test-e2e-setup: mkdir -p $(SETTINGS_TMP) TARGET_CI_CRED_FILE="$(CI_CRED_FILE)" AZURE_RESOURCE_FILE="$(AZURE_RESOURCE_FILE)" CI_JSON_CRED_FILE="$(AZURE_CI_JSON_CRED_FILE)" \ @@ -432,7 +433,8 @@ test-e2e-setup: PROVIDER="$(VELERO_PLUGIN)" BUCKET="$(OADP_BUCKET)" BSL_REGION="$(BSL_REGION)" SECRET="$(CREDS_SECRET_REF)" TMP_DIR=$(SETTINGS_TMP) \ VSL_REGION="$(VSL_REGION)" BSL_AWS_PROFILE="$(BSL_AWS_PROFILE)" /bin/bash "tests/e2e/scripts/$(CLUSTER_TYPE)_settings.sh" -test-e2e: test-e2e-setup ## execute the oadp integration tests +.PHONY: test-e2e-ginkgo +test-e2e-ginkgo: test-e2e-setup ginkgo run -mod=mod tests/e2e/ -- -credentials=$(OADP_CRED_FILE) \ -velero_namespace=$(OADP_TEST_NAMESPACE) \ -settings=$(SETTINGS_TMP)/oadpcreds \ @@ -445,5 +447,31 @@ test-e2e: test-e2e-setup ## execute the oadp integration tests -artifact_dir=$(ARTIFACT_DIR) \ -oc_cli=$(OC_CLI) -test-e2e-cleanup: +.PHONY: test-e2e +test-e2e: volsync-install test-e2e-ginkgo + +.PHONY: test-e2e-cleanup +test-e2e-cleanup: volsync-uninstall rm -rf $(SETTINGS_TMP) + +.PHONY: volsync-install +volsync-install: + $(eval VS_CURRENT_CSV:=$(shell oc get subscription volsync-product -n openshift-operators -ojsonpath='{.status.currentCSV}')) + # OperatorGroup not required, volsync is global operator which has operatorgroup already. + # Create subscription for operator if not installed. 
+ @if [ "$(VS_CURRENT_CSV)" == "" ]; then \ + $(OC_CLI) replace --force -f tests/e2e/volsync/volsync-sub.yaml; \ + else \ + echo $(VS_CURRENT_CSV) already installed; \ + fi + +.PHONY: volsync-uninstall +volsync-uninstall: + $(eval VS_CURRENT_CSV:=$(shell oc get subscription volsync-product -n openshift-operators -ojsonpath='{.status.currentCSV}')) + @if [ "$(VS_CURRENT_CSV)" != "" ]; then \ + echo "Uninstalling $(VS_CURRENT_CSV)"; \ + $(OC_CLI) delete subscription volsync-product -n openshift-operators && \ + $(OC_CLI) delete csv $(VS_CURRENT_CSV) -n openshift-operators; \ + else \ + echo No subscription found, skipping uninstall; \ + fi diff --git a/go.mod b/go.mod index 52244cf9a4a..d3196c4c4c8 100644 --- a/go.mod +++ b/go.mod @@ -24,13 +24,20 @@ require ( sigs.k8s.io/controller-runtime v0.12.1 ) -require github.com/google/go-cmp v0.5.9 +require ( + github.com/backube/volsync v0.4.0 + github.com/google/go-cmp v0.5.9 +) require ( cloud.google.com/go v0.100.2 // indirect cloud.google.com/go/compute v1.5.0 // indirect cloud.google.com/go/iam v0.1.1 // indirect cloud.google.com/go/storage v1.21.0 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect +) + +require ( github.com/Azure/azure-sdk-for-go v61.4.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect @@ -70,7 +77,7 @@ require ( github.com/google/gnostic v0.6.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect - github.com/hashicorp/go-hclog v0.14.1 // indirect + github.com/hashicorp/go-hclog v1.0.0 // indirect github.com/hashicorp/go-plugin v1.4.3 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -83,6 +90,7 @@ require ( github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/cpuid/v2 v2.0.12 // indirect github.com/klauspost/pgzip v1.2.5 // indirect + github.com/konveyor/volume-snapshot-mover v0.0.0-20220826143717-ce62cb34467a github.com/kopia/kopia v0.10.7 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -133,7 +141,6 @@ require ( google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 4f4bb79a1ea..73c4d10e5c6 100644 --- a/go.sum +++ b/go.sum @@ -141,6 +141,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.43.31 h1:yJZIr8nMV1hXjAvvOLUFqZRJcHV7udPQBfhJqawDzI0= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/backube/volsync v0.4.0 h1:hyZgzCrDm0O40CeyU7cCZ3nzTvPZvN1lAyuHyUHRNNw= +github.com/backube/volsync v0.4.0/go.mod h1:F5RD2AxYwORN2TdgylgDL2gOE4ra6vESWA30LTPHO+Q= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -484,8 +486,9 @@ github.com/hashicorp/consul/api 
v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= @@ -557,6 +560,8 @@ github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konveyor/volume-snapshot-mover v0.0.0-20220826143717-ce62cb34467a h1:9I1TmLcXEjU66u8IrdOSiExlXgQscCnRkYkKBQauqao= +github.com/konveyor/volume-snapshot-mover v0.0.0-20220826143717-ce62cb34467a/go.mod h1:ykj3RTgJyA5tsL2n4fRlk1LS2ln7wluHsryE+lMussY= github.com/kopia/kopia v0.10.7 h1:6s0ZIZW3Ge2ozzefddASy7CIUadp/5tF9yCDKQfAKKI= github.com/kopia/kopia v0.10.7/go.mod h1:0d9THPD+jwomPcXvPbCdmLyX6phQVP7AqcCcDEajfNA= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= diff --git a/tests/e2e/backup_restore_suite_test.go b/tests/e2e/backup_restore_suite_test.go index 472ea921180..491fe7b9ba5 100755 --- a/tests/e2e/backup_restore_suite_test.go +++ b/tests/e2e/backup_restore_suite_test.go @@ -7,10 +7,14 @@ import ( "log" "time" + volsync "github.com/backube/volsync/api/v1alpha1" "github.com/google/uuid" + vsmv1alpha1 "github.com/konveyor/volume-snapshot-mover/api/v1alpha1" . "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/types" . "github.com/onsi/gomega" + "github.com/openshift/oadp-operator/controllers" + "github.com/openshift/oadp-operator/pkg/common" . 
"github.com/openshift/oadp-operator/tests/e2e/lib" corev1 "k8s.io/api/core/v1" k8serror "k8s.io/apimachinery/pkg/api/errors" @@ -20,6 +24,20 @@ import ( type VerificationFunction func(client.Client, string) error +type appVerificationFunction func(bool, BackupRestoreType) VerificationFunction + +func dataMoverReady(preBackupState bool, appVerificationFunction appVerificationFunction) VerificationFunction { + return VerificationFunction(func(ocClient client.Client, appNamespace string) error { + // check volsync subscription exists + Eventually(InstalledSubscriptionCSV(ocClient, "openshift-operators", "volsync-product"), timeoutMultiplier*time.Minute*10, time.Second*10).ShouldNot(Equal("")) + // check volsync controller is ready + fmt.Printf("waiting for volsync controller readiness") + Eventually(IsDeploymentReady(ocClient, common.VolSyncDeploymentNamespace, common.VolSyncDeploymentName), timeoutMultiplier*time.Minute*10, time.Second*10).Should(BeTrue()) + Eventually(IsDeploymentReady(ocClient, namespace, common.DataMover), timeoutMultiplier*time.Minute*10, time.Second*10).Should(BeTrue()) + return appVerificationFunction(preBackupState, CSIDataMover)(ocClient, appNamespace) + }) +} + func mongoready(preBackupState bool, backupRestoreType BackupRestoreType) VerificationFunction { return VerificationFunction(func(ocClient client.Client, namespace string) error { Eventually(IsDCReady(ocClient, namespace, "todolist"), timeoutMultiplier*time.Minute*10, time.Second*10).Should(BeTrue()) @@ -59,29 +77,85 @@ var _ = Describe("AWS backup restore tests", func() { dpaCR.Name = testSuiteInstanceName }) - var lastInstallingApplicationNamespace string + type BackupRestoreCase struct { + ApplicationTemplate string + ApplicationNamespace string + Name string + BackupRestoreType BackupRestoreType + PreBackupVerify VerificationFunction + PostRestoreVerify VerificationFunction + MaxK8SVersion *K8sVersion + MinK8SVersion *K8sVersion + } + + var lastBRCase BackupRestoreCase var lastInstallTime time.Time var _ = ReportAfterEach(func(report SpecReport) { - if report.State == types.SpecStateSkipped { + if report.State == types.SpecStateSkipped || report.State == types.SpecStatePending { // do not run if the test is skipped return } GinkgoWriter.Println("Report after each: state: ", report.State.String()) if report.Failed() { // print namespace error events for app namespace - if lastInstallingApplicationNamespace != "" { - PrintNamespaceEventsAfterTime(lastInstallingApplicationNamespace, lastInstallTime) + if lastBRCase.ApplicationNamespace != "" { + GinkgoWriter.Println("Printing app namespace events") + PrintNamespaceEventsAfterTime(lastBRCase.ApplicationNamespace, lastInstallTime) } - GinkgoWriter.Println("Printing velero deployment pod logs") + GinkgoWriter.Println("Printing oadp namespace events") + PrintNamespaceEventsAfterTime(namespace, lastInstallTime) + if lastBRCase.BackupRestoreType == CSIDataMover { + GinkgoWriter.Println("Printing volsync namespace events") + PrintNamespaceEventsAfterTime(common.VolSyncDeploymentNamespace, lastInstallTime) + + pvcList := vsmv1alpha1.VolumeSnapshotBackupList{} + err := dpaCR.Client.List(context.Background(), &pvcList, &client.ListOptions{Namespace: lastBRCase.ApplicationNamespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("PVC app ns list %v\n", pvcList) + err = dpaCR.Client.List(context.Background(), &pvcList, &client.ListOptions{Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("PVC oadp ns list %v\n", pvcList) + + 
vsbList := vsmv1alpha1.VolumeSnapshotBackupList{} + err = dpaCR.Client.List(context.Background(), &vsbList, &client.ListOptions{Namespace: lastBRCase.ApplicationNamespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("VSB list %v\n", vsbList) + + vsrList := vsmv1alpha1.VolumeSnapshotRestoreList{} + err = dpaCR.Client.List(context.Background(), &vsrList, &client.ListOptions{Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("VSR list %v\n", vsrList) + + replicationSource := volsync.ReplicationSourceList{} + err = dpaCR.Client.List(context.Background(), &replicationSource, &client.ListOptions{Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("ReplicationSource list %v", replicationSource) + + replicationDestination := volsync.ReplicationDestinationList{} + err = dpaCR.Client.List(context.Background(), &replicationDestination, &client.ListOptions{Namespace: namespace}) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("ReplicationDestination list %v", replicationDestination) + + volsyncIsReady, _ := IsDeploymentReady(dpaCR.Client, common.VolSyncDeploymentNamespace, common.VolSyncDeploymentName)() + fmt.Printf("volsync controller is ready: %v", volsyncIsReady) + + vsmIsReady, _ := IsDeploymentReady(dpaCR.Client, namespace, common.DataMover)() + fmt.Printf("volume-snapshot-mover is ready: %v", vsmIsReady) + + GinkgoWriter.Println("Printing volume-snapshot-mover deployment pod logs") + GinkgoWriter.Print(GetDeploymentPodContainerLogs(namespace, common.DataMover, common.DataMoverControllerContainer)) + } + logs, err := GetVeleroContainerLogs(namespace) Expect(err).NotTo(HaveOccurred()) GinkgoWriter.Println(logs) GinkgoWriter.Println("End of velero deployment pod logs") } // remove app namespace if leftover (likely previously failed before reaching uninstall applications) to clear items such as PVCs which are immutable so that next test can create new ones + err := dpaCR.Client.Delete(context.Background(), &corev1.Namespace{ObjectMeta: v1.ObjectMeta{ - Name: lastInstallingApplicationNamespace, - Namespace: lastInstallingApplicationNamespace, + Name: lastBRCase.ApplicationNamespace, + Namespace: lastBRCase.ApplicationNamespace, }}, &client.DeleteOptions{}) if k8serror.IsNotFound(err) { err = nil @@ -91,29 +165,17 @@ var _ = Describe("AWS backup restore tests", func() { Expect(err).ToNot(HaveOccurred()) }) - type BackupRestoreCase struct { - ApplicationTemplate string - ApplicationNamespace string - Name string - BackupRestoreType BackupRestoreType - PreBackupVerify VerificationFunction - PostRestoreVerify VerificationFunction - MaxK8SVersion *K8sVersion - MinK8SVersion *K8sVersion - } - - updateLastInstallingNamespace := func(namespace string) { - lastInstallingApplicationNamespace = namespace + updateLastInstallTime := func() { lastInstallTime = time.Now() } DescribeTable("backup and restore applications", func(brCase BackupRestoreCase, expectedErr error) { - if notVersionTarget, reason := NotServerVersionTarget(brCase.MinK8SVersion, brCase.MaxK8SVersion); notVersionTarget { - Skip(reason) + // Data Mover is only supported on aws and azure. 
+ if brCase.BackupRestoreType == CSIDataMover && provider != "aws" && provider != "azure" { + Skip(provider + " unsupported data mover provider") } - - if provider == "azure" && brCase.BackupRestoreType == CSI { + if provider == "azure" && (brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover) { if brCase.MinK8SVersion == nil { brCase.MinK8SVersion = &K8sVersion{Major: "1", Minor: "23"} } @@ -122,10 +184,12 @@ var _ = Describe("AWS backup restore tests", func() { Skip(reason) } + lastBRCase = brCase + err := dpaCR.Build(brCase.BackupRestoreType) Expect(err).NotTo(HaveOccurred()) - updateLastInstallingNamespace(dpaCR.Namespace) + updateLastInstallTime() err = dpaCR.CreateOrUpdate(&dpaCR.CustomResource.Spec) Expect(err).NotTo(HaveOccurred()) @@ -139,14 +203,24 @@ var _ = Describe("AWS backup restore tests", func() { log.Printf("Waiting for restic pods to be running") Eventually(AreResticPodsRunning(namespace), timeoutMultiplier*time.Minute*3, time.Second*5).Should(BeTrue()) } - if brCase.BackupRestoreType == CSI { + if brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover { if provider == "aws" || provider == "ibmcloud" || provider == "gcp" || provider == "azure" { log.Printf("Creating VolumeSnapshotClass for CSI backuprestore of %s", brCase.Name) snapshotClassPath := fmt.Sprintf("./sample-applications/snapclass-csi/%s.yaml", provider) err = InstallApplication(dpaCR.Client, snapshotClassPath) Expect(err).ToNot(HaveOccurred()) } - + if brCase.BackupRestoreType == CSIDataMover { + dpaCR.Client.Create(context.Background(), &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: controllers.ResticsecretName, + Namespace: dpaCR.Namespace, + }, + StringData: map[string]string{ + controllers.ResticPassword: "e2e-restic-password", + }, + }, &client.CreateOptions{}) + } } // TODO: check registry deployments are deleted @@ -158,16 +232,17 @@ var _ = Describe("AWS backup restore tests", func() { restoreName := fmt.Sprintf("%s-%s", brCase.Name, restoreUid.String()) // install app - updateLastInstallingNamespace(brCase.ApplicationNamespace) + updateLastInstallTime() log.Printf("Installing application for case %s", brCase.Name) - err = InstallApplication(dpaCR.Client, brCase.ApplicationTemplate) - Expect(err).ToNot(HaveOccurred()) - if brCase.BackupRestoreType == CSI { - log.Printf("Creating pvc for case %s", brCase.Name) + dpaCR.Client.Create(context.Background(), &corev1.Namespace{ObjectMeta: v1.ObjectMeta{Name: brCase.ApplicationNamespace}}, &client.CreateOptions{}) + if brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover { + log.Printf("Creating csi pvc for case %s", brCase.Name) pvcPath := fmt.Sprintf("./sample-applications/%s/pvc/%s.yaml", brCase.ApplicationNamespace, provider) err = InstallApplication(dpaCR.Client, pvcPath) Expect(err).ToNot(HaveOccurred()) } + err = InstallApplication(dpaCR.Client, brCase.ApplicationTemplate) + Expect(err).ToNot(HaveOccurred()) // wait for pods to be running Eventually(AreAppBuildsReady(dpaCR.Client, brCase.ApplicationNamespace), timeoutMultiplier*time.Minute*5, time.Second*5).Should(BeTrue()) @@ -186,7 +261,7 @@ var _ = Describe("AWS backup restore tests", func() { Expect(err).ToNot(HaveOccurred()) // wait for backup to not be running - Eventually(IsBackupDone(dpaCR.Client, namespace, backupName), timeoutMultiplier*time.Minute*12, time.Second*10).Should(BeTrue()) + Eventually(IsBackupDone(dpaCR.Client, namespace, backupName), timeoutMultiplier*time.Minute*20, time.Second*10).Should(BeTrue()) 
GinkgoWriter.Println(DescribeBackup(dpaCR.Client, backup)) Expect(BackupErrorLogs(dpaCR.Client, backup)).To(Equal([]string{})) @@ -209,12 +284,12 @@ var _ = Describe("AWS backup restore tests", func() { // Wait for namespace to be deleted Eventually(IsNamespaceDeleted(brCase.ApplicationNamespace), timeoutMultiplier*time.Minute*2, time.Second*5).Should(BeTrue()) - updateLastInstallingNamespace(brCase.ApplicationNamespace) + updateLastInstallTime() // run restore log.Printf("Creating restore %s for case %s", restoreName, brCase.Name) restore, err := CreateRestoreFromBackup(dpaCR.Client, namespace, backupName, restoreName) Expect(err).ToNot(HaveOccurred()) - Eventually(IsRestoreDone(dpaCR.Client, namespace, restoreName), timeoutMultiplier*time.Minute*4, time.Second*10).Should(BeTrue()) + Eventually(IsRestoreDone(dpaCR.Client, namespace, restoreName), timeoutMultiplier*time.Minute*20, time.Second*10).Should(BeTrue()) GinkgoWriter.Println(DescribeRestore(dpaCR.Client, restore)) Expect(RestoreErrorLogs(dpaCR.Client, restore)).To(Equal([]string{})) @@ -256,7 +331,7 @@ var _ = Describe("AWS backup restore tests", func() { }, Entry("MySQL application CSI", Label("ibmcloud", "aws", "gcp", "azure"), BackupRestoreCase{ - ApplicationTemplate: fmt.Sprintf("./sample-applications/mysql-persistent/mysql-persistent-csi.yaml"), + ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent-csi.yaml", ApplicationNamespace: "mysql-persistent", Name: "mysql-csi-e2e", BackupRestoreType: CSI, @@ -264,7 +339,7 @@ var _ = Describe("AWS backup restore tests", func() { PostRestoreVerify: mysqlReady(false, CSI), }, nil), Entry("Mongo application CSI", Label("ibmcloud", "aws", "gcp", "azure"), BackupRestoreCase{ - ApplicationTemplate: fmt.Sprintf("./sample-applications/mongo-persistent/mongo-persistent-csi.yaml"), + ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent-csi.yaml", ApplicationNamespace: "mongo-persistent", Name: "mongo-csi-e2e", BackupRestoreType: CSI, @@ -287,5 +362,21 @@ var _ = Describe("AWS backup restore tests", func() { PreBackupVerify: mysqlReady(true, RESTIC), PostRestoreVerify: mysqlReady(false, RESTIC), }, nil), + Entry("Mongo application DATAMOVER", BackupRestoreCase{ + ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent-csi.yaml", + ApplicationNamespace: "mongo-persistent", + Name: "mongo-datamover-e2e", + BackupRestoreType: CSIDataMover, + PreBackupVerify: dataMoverReady(true, mongoready), + PostRestoreVerify: dataMoverReady(false, mongoready), + }, nil), + Entry("MySQL application DATAMOVER", BackupRestoreCase{ + ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent-csi.yaml", + ApplicationNamespace: "mysql-persistent", + Name: "mysql-datamover-e2e", + BackupRestoreType: CSIDataMover, + PreBackupVerify: dataMoverReady(true, mysqlReady), + PostRestoreVerify: dataMoverReady(false, mysqlReady), + }, nil), ) }) diff --git a/tests/e2e/lib/apps.go b/tests/e2e/lib/apps.go index d488f98d5d3..b9151fcb549 100755 --- a/tests/e2e/lib/apps.go +++ b/tests/e2e/lib/apps.go @@ -24,6 +24,7 @@ import ( routev1 "github.com/openshift/api/route/v1" security "github.com/openshift/api/security/v1" templatev1 "github.com/openshift/api/template/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/vmware-tanzu/velero/pkg/label" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -48,6 +49,10 @@ var ( ) func InstallApplication(ocClient client.Client, file string) error { + return 
InstallApplicationWithRetries(ocClient, file, 3) +} + +func InstallApplicationWithRetries(ocClient client.Client, file string, retries int) error { template, err := os.ReadFile(file) if err != nil { return err @@ -66,47 +71,69 @@ func InstallApplication(ocClient client.Client, file string) error { } labels[e2eAppLabelKey] = "true" resource.SetLabels(labels) - err = ocClient.Create(context.Background(), &resource) - if apierrors.IsAlreadyExists(err) { - // if spec has changed for following kinds, update the resource - clusterResource := unstructured.Unstructured{ - Object: resource.Object, - } - err = ocClient.Get(context.Background(), types.NamespacedName{Name: resource.GetName(), Namespace: resource.GetNamespace()}, &clusterResource) - if err != nil { - return err - } - if _, metadataExists := clusterResource.Object["metadata"]; metadataExists { - // copy generation, resourceVersion, and annotations from the existing resource - resource.SetGeneration(clusterResource.GetGeneration()) - resource.SetResourceVersion(clusterResource.GetResourceVersion()) - resource.SetUID(clusterResource.GetUID()) - resource.SetManagedFields(clusterResource.GetManagedFields()) - resource.SetCreationTimestamp(clusterResource.GetCreationTimestamp()) - resource.SetDeletionTimestamp(clusterResource.GetDeletionTimestamp()) - } - needsUpdate := false - for key := range clusterResource.Object { - if key == "status" { - continue + resourceCreate := resource.DeepCopy() + for i := 0; i < retries; i++ { + err = ocClient.Create(context.Background(), resourceCreate) + if apierrors.IsAlreadyExists(err) { + // if spec has changed for following kinds, update the resource + clusterResource := unstructured.Unstructured{ + Object: resource.Object, } - if !reflect.DeepEqual(clusterResource.Object[key], resource.Object[key]) { - fmt.Println("diff found for key:", key) - ginkgo.GinkgoWriter.Println(cmp.Diff(clusterResource.Object[key], resource.Object[key])) - needsUpdate = true - clusterResource.Object[key] = resource.Object[key] - } - } - if needsUpdate { - fmt.Printf("updating resource: %s; name: %s\n", resource.GetKind(), resource.GetName()) - err = ocClient.Update(context.Background(), &clusterResource) + err = ocClient.Get(context.Background(), types.NamespacedName{Name: resource.GetName(), Namespace: resource.GetNamespace()}, &clusterResource) if err != nil { return err } + if _, metadataExists := clusterResource.Object["metadata"]; metadataExists { + // copy generation, resourceVersion, and annotations from the existing resource + resource.SetGeneration(clusterResource.GetGeneration()) + resource.SetResourceVersion(clusterResource.GetResourceVersion()) + resource.SetUID(clusterResource.GetUID()) + resource.SetManagedFields(clusterResource.GetManagedFields()) + resource.SetCreationTimestamp(clusterResource.GetCreationTimestamp()) + resource.SetDeletionTimestamp(clusterResource.GetDeletionTimestamp()) + resource.SetFinalizers(clusterResource.GetFinalizers()) + // append cluster labels to existing labels if they don't already exist + labels := resource.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + for k, v := range clusterResource.GetLabels() { + if _, exists := labels[k]; !exists { + labels[k] = v + } + } + } + needsUpdate := false + for key := range clusterResource.Object { + if key == "status" { + // check we aren't hitting pending deletion finalizers + ginkgo.GinkgoWriter.Printf("%s has status %v", clusterResource.GroupVersionKind(), clusterResource.Object[key]) + continue + } + if 
!reflect.DeepEqual(clusterResource.Object[key], resource.Object[key]) { + fmt.Println("diff found for key:", key) + ginkgo.GinkgoWriter.Println(cmp.Diff(clusterResource.Object[key], resource.Object[key])) + needsUpdate = true + clusterResource.Object[key] = resource.Object[key] + } + } + if needsUpdate { + fmt.Printf("updating resource: %s; name: %s\n", resource.GroupVersionKind(), resource.GetName()) + err = ocClient.Update(context.Background(), &clusterResource) + } } - } else if err != nil { + // if no error, stop retrying + if err == nil { + break + } + // if error, retry + fmt.Printf("error creating or updating resource: %s; name: %s; error: %s; retrying for %d more times\n", resource.GroupVersionKind(), resource.GetName(), err, retries-i) + } + // if still error on this resource, return error + if err != nil { return err } + // next resource } return nil } @@ -365,6 +392,25 @@ func AreApplicationPodsRunning(namespace string) wait.ConditionFunc { } } +func InstalledSubscriptionCSV(ocClient client.Client, namespace, subscriptionName string) func() (string, error) { + return func() (string, error) { + // get operator-sdk subscription + subscription := &operatorsv1alpha1.Subscription{} + err := ocClient.Get(context.Background(), client.ObjectKey{ + Namespace: namespace, + Name: subscriptionName, + }, subscription) + if err != nil { + if apierrors.IsNotFound(err) { + return "", nil + } + ginkgo.GinkgoWriter.Write([]byte(fmt.Sprintf("Error getting subscription: %v\n", err))) + return "", err + } + return subscription.Status.InstalledCSV, nil + } +} + func PrintNamespaceEventsAfterTime(namespace string, startTime time.Time) { log.Println("Printing events for namespace: ", namespace) clientset, err := setUpClient() @@ -472,7 +518,7 @@ func VerifyBackupRestoreData(artifact_dir string, namespace string, routeName st return err } //Verifying backup-restore data only for CSI as of now. - if backupRestoretype == CSI || backupRestoretype == RESTIC { + if backupRestoretype == CSI || backupRestoretype == CSIDataMover || backupRestoretype == RESTIC { //check if backupfile exists. If true { compare data response with data from file} (post restore step) //else write data to backup-data.txt (prebackup step) if _, err := os.Stat(backupFile); err == nil { diff --git a/tests/e2e/lib/dpa_helpers.go b/tests/e2e/lib/dpa_helpers.go index c7f03e7b988..368f01b9c38 100755 --- a/tests/e2e/lib/dpa_helpers.go +++ b/tests/e2e/lib/dpa_helpers.go @@ -13,8 +13,10 @@ import ( "github.com/google/go-cmp/cmp" . 
"github.com/onsi/ginkgo/v2" buildv1 "github.com/openshift/api/build/v1" + "github.com/openshift/oadp-operator/controllers" "github.com/openshift/oadp-operator/pkg/common" + volsync "github.com/backube/volsync/api/v1alpha1" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" appsv1 "github.com/openshift/api/apps/v1" security "github.com/openshift/api/security/v1" @@ -22,6 +24,7 @@ import ( oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" utils "github.com/openshift/oadp-operator/tests/e2e/utils" operators "github.com/operator-framework/api/pkg/operators/v1alpha1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -36,8 +39,9 @@ import ( type BackupRestoreType string const ( - CSI BackupRestoreType = "csi" - RESTIC BackupRestoreType = "restic" + CSI BackupRestoreType = "csi" + CSIDataMover BackupRestoreType = "csi-datamover" + RESTIC BackupRestoreType = "restic" ) type DpaCustomResource struct { @@ -55,6 +59,10 @@ var Dpa *oadpv1alpha1.DataProtectionApplication func (v *DpaCustomResource) Build(backupRestoreType BackupRestoreType) error { // Velero Instance creation spec with backupstorage location default to AWS. Would need to parameterize this later on to support multiple plugins. dpaInstance := oadpv1alpha1.DataProtectionApplication{ + TypeMeta: metav1.TypeMeta{ + Kind: "DataProtectionApplication", + APIVersion: "oadp.openshift.io/v1alpha1", + }, ObjectMeta: metav1.ObjectMeta{ Name: v.Name, Namespace: v.Namespace, @@ -100,26 +108,34 @@ func (v *DpaCustomResource) Build(backupRestoreType BackupRestoreType) error { for _, plugin := range dpaInstance.Spec.Configuration.Velero.DefaultPlugins { defaultPlugins[plugin] = emptyStruct{} } - featureFlags := make(map[string]emptyStruct) + veleroFeatureFlags := make(map[string]emptyStruct) for _, flag := range dpaInstance.Spec.Configuration.Velero.FeatureFlags { - featureFlags[flag] = emptyStruct{} + veleroFeatureFlags[flag] = emptyStruct{} } + dpaInstance.Spec.Features = &oadpv1alpha1.Features{DataMover: &oadpv1alpha1.DataMover{Enable: false}} switch backupRestoreType { case RESTIC: dpaInstance.Spec.Configuration.Restic.Enable = pointer.Bool(true) delete(defaultPlugins, oadpv1alpha1.DefaultPluginCSI) - delete(featureFlags, "EnableCSI") + delete(veleroFeatureFlags, "EnableCSI") case CSI: dpaInstance.Spec.Configuration.Restic.Enable = pointer.Bool(false) defaultPlugins[oadpv1alpha1.DefaultPluginCSI] = emptyStruct{} - featureFlags["EnableCSI"] = emptyStruct{} + veleroFeatureFlags["EnableCSI"] = emptyStruct{} + case CSIDataMover: + dpaInstance.Spec.Configuration.Restic.Enable = pointer.Bool(false) + defaultPlugins[oadpv1alpha1.DefaultPluginCSI] = emptyStruct{} + veleroFeatureFlags["EnableCSI"] = emptyStruct{} + dpaInstance.Spec.Features.DataMover.Enable = true + dpaInstance.Spec.Features.DataMover.CredentialName = controllers.ResticsecretName + dpaInstance.Spec.Features.DataMover.Timeout = "20m" } dpaInstance.Spec.Configuration.Velero.DefaultPlugins = make([]oadpv1alpha1.DefaultPlugin, 0) for k := range defaultPlugins { dpaInstance.Spec.Configuration.Velero.DefaultPlugins = append(dpaInstance.Spec.Configuration.Velero.DefaultPlugins, k) } dpaInstance.Spec.Configuration.Velero.FeatureFlags = make([]string, 0) - for k := range featureFlags { + for k := range veleroFeatureFlags { dpaInstance.Spec.Configuration.Velero.FeatureFlags = 
append(dpaInstance.Spec.Configuration.Velero.FeatureFlags, k) } v.CustomResource = &dpaInstance @@ -171,14 +187,26 @@ func (v *DpaCustomResource) CreateOrUpdateWithRetries(spec *oadpv1alpha1.DataPro ) for i := 0; i < retries; i++ { if cr, err = v.Get(); apierrors.IsNotFound(err) { - v.Build(v.backupRestoreType) - v.CustomResource.Spec = *spec + v.CustomResource = &oadpv1alpha1.DataProtectionApplication{ + TypeMeta: metav1.TypeMeta{ + Kind: "DataProtectionApplication", + APIVersion: "oadp.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: v.Name, + Namespace: v.Namespace, + }, + Spec: *spec.DeepCopy(), + } return v.Create() } else if err != nil { return err } - cr.Spec = *spec - if err = v.Client.Update(context.Background(), cr); err != nil { + crPatch := cr.DeepCopy() + spec.DeepCopyInto(&crPatch.Spec) + crPatch.ObjectMeta.ManagedFields = nil + if err = v.Client.Patch(context.Background(), crPatch, client.MergeFrom(cr), &client.PatchOptions{}); err != nil { + log.Println("error patching velero cr", err) if apierrors.IsConflict(err) && i < retries-1 { log.Println("conflict detected during DPA CreateOrUpdate, retrying for ", retries-i-1, " more times") time.Sleep(time.Second * 2) @@ -217,6 +245,8 @@ func (v *DpaCustomResource) SetClient() error { operators.AddToScheme(client.Scheme()) volumesnapshotv1.AddToScheme(client.Scheme()) buildv1.AddToScheme(client.Scheme()) + operatorsv1alpha1.AddToScheme(client.Scheme()) + volsync.AddToScheme(client.Scheme()) v.Client = client return nil diff --git a/tests/e2e/lib/kube_helpers.go b/tests/e2e/lib/kube_helpers.go index 5d7e7d58a45..fe50dbb6b50 100755 --- a/tests/e2e/lib/kube_helpers.go +++ b/tests/e2e/lib/kube_helpers.go @@ -247,12 +247,43 @@ func GetPodContainerLogs(namespace, podname, container string) (string, error) { return "", err } defer podLogs.Close() - buf := new(bytes.Buffer) _, err = io.Copy(buf, podLogs) if err != nil { return "", err } - str := buf.String() - return str, nil + return buf.String(), nil +} + +func GetDeploymentPodContainerLogs(namespace, deploymentName, containerName string) (string, error) { + clientset, err := setUpClient() + if err != nil { + return "", err + } + // get replicasets owned by deployment + replicasets, err := clientset.AppsV1().ReplicaSets(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + var podLogs string + for _, r := range replicasets.Items { + if r.OwnerReferences[0].Name == deploymentName { + // get pods owned by replicasets + for _, p := range pods.Items { + if p.OwnerReferences[0].Name == r.Name { + podLogs += "pod logs for " + p.Name + ":" + thisPodLogs, err := GetPodContainerLogs(namespace, p.Name, containerName) + if err != nil { + return podLogs, err + } + podLogs += thisPodLogs + } + } + } + } + return podLogs, nil } diff --git a/tests/e2e/volsync/volsync-sub.yaml b/tests/e2e/volsync/volsync-sub.yaml new file mode 100644 index 00000000000..3f5f0eb8469 --- /dev/null +++ b/tests/e2e/volsync/volsync-sub.yaml @@ -0,0 +1,16 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + labels: + operators.coreos.com/volsync-product.openshift-operators: "" + name: volsync-product + namespace: openshift-operators +spec: + channel: stable + installPlanApproval: Automatic + name: volsync-product + source: redhat-operators + sourceNamespace: openshift-marketplace + # v0.4.1 was latest version 
of the operator. We comment this out so it always installs the latest version. + # If it breaks in the future, we can re-specify startingCSV to install a specific version. + # startingCSV: volsync-product.v0.4.1
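For reference, a minimal sketch of how the new Makefile targets chain together, assuming the usual e2e variables (OADP_CRED_FILE, OADP_TEST_NAMESPACE, CI_CRED_FILE, OADP_BUCKET, and the cloud-specific settings consumed by test-e2e-setup) are already exported for your cluster:

  make test-e2e          # volsync-install -> test-e2e-setup -> test-e2e-ginkgo
  make test-e2e-cleanup  # volsync-uninstall, then removes /tmp/test-settings

volsync-install only applies tests/e2e/volsync/volsync-sub.yaml when the volsync-product subscription in openshift-operators reports no currentCSV, so rerunning it against a cluster that already has VolSync installed is a no-op; volsync-uninstall deletes the subscription and its CSV only when one is found.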