e2e: add framework for volumegroupsnapshot
Add a framework to test VolumeGroupSnapshot for both
CephFS and RBD, and add a test case for CephFS.

Signed-off-by: Madhu Rajanna <[email protected]>
Madhu-1 authored and mergify[bot] committed Sep 12, 2024
1 parent 744b8e1 commit d2ddd52
Showing 4 changed files with 588 additions and 10 deletions.
13 changes: 13 additions & 0 deletions e2e/cephfs.go
@@ -2478,6 +2478,19 @@ var _ = Describe(cephfsType, func() {
}
})

By("test volumeGroupSnapshot", func() {
scName := "csi-cephfs-sc"
snapshotter, err := newCephFSVolumeGroupSnapshot(f, f.UniqueName, scName, false, deployTimeout, 3)
if err != nil {
framework.Failf("failed to create volumeGroupSnapshot Base: %v", err)
}

err = snapshotter.TestVolumeGroupSnapshot()
if err != nil {
framework.Failf("failed to test volumeGroupSnapshot: %v", err)
}
})

// FIXME: in case NFS testing is done, prevent deletion
// of the CephFS filesystem and related pool. This can
// probably be addressed in a nicer way, making sure
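The By block added to e2e/cephfs.go above is the only test case wired up in this commit. Since the framework is meant to cover RBD as well, an RBD suite could presumably be wired the same way. The sketch below is hypothetical: the newRBDVolumeGroupSnapshot constructor and the "csi-rbd-sc" storage class name are assumptions, not part of this diff.

// Hypothetical sketch only: how an RBD group-snapshot test might be wired,
// mirroring the CephFS By block above. The newRBDVolumeGroupSnapshot
// constructor and the "csi-rbd-sc" storage class name are assumed here and
// are not part of this commit.
By("test volumeGroupSnapshot", func() {
    scName := "csi-rbd-sc"
    snapshotter, err := newRBDVolumeGroupSnapshot(f, f.UniqueName, scName, false, deployTimeout, 3)
    if err != nil {
        framework.Failf("failed to create volumeGroupSnapshot Base: %v", err)
    }

    err = snapshotter.TestVolumeGroupSnapshot()
    if err != nil {
        framework.Failf("failed to test volumeGroupSnapshot: %v", err)
    }
})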
34 changes: 24 additions & 10 deletions e2e/utils.go
@@ -52,8 +52,9 @@ const (
rbdType = "rbd"
cephfsType = "cephfs"

volumesType = "volumes"
snapsType = "snaps"
volumesType = "volumes"
snapsType = "snaps"
groupSnapsType = "groupsnaps"

rookToolBoxPodLabel = "app=rook-ceph-tools"
rbdMountOptions = "mountOptions"
@@ -174,17 +175,20 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
volumeMode: volumesType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.",
radosLsCmdFilter: fmt.Sprintf(
"rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi|wc -l",
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l",
pool),
},
{
volumeMode: volumesType,
driverType: rbdType,
radosLsCmd: "rados ls " + rbdOptions(pool),
radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.", rbdOptions(pool)),
volumeMode: volumesType,
driverType: rbdType,
radosLsCmd: "rados ls " + rbdOptions(pool),
radosLsCmdFilter: fmt.Sprintf(
"rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
rbdOptions(pool)),
radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + rbdOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
},
@@ -195,7 +199,7 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi|wc -l",
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l",
pool),
},
{
@@ -206,6 +210,16 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + rbdOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
},
{
volumeMode: groupSnapsType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l",
pool),
},
}

for _, cmds := range radosListCommands {
@@ -228,7 +242,7 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
if err == nil {
continue
}
saveErr := err
saveErr := fmt.Errorf("failed to validate omap count for %s: %w", cmd, err)
if strings.Contains(err.Error(), "expected omap object count") {
stdOut, stdErr, err = execCommandInToolBoxPod(f, filterLessCmds[i], rookNamespace)
if err == nil {
119 changes: 119 additions & 0 deletions e2e/volumegroupsnapshot.go
@@ -0,0 +1,119 @@
/*
Copyright 2024 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"

groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
)

type cephFSVolumeGroupSnapshot struct {
*volumeGroupSnapshotterBase
}

var _ VolumeGroupSnapshotter = &cephFSVolumeGroupSnapshot{}

func newCephFSVolumeGroupSnapshot(f *framework.Framework, namespace,
storageClass string,
blockPVC bool,
timeout, totalPVCCount int,
) (VolumeGroupSnapshotter, error) {
base, err := newVolumeGroupSnapshotBase(f, namespace, storageClass, blockPVC, timeout, totalPVCCount)
if err != nil {
return nil, fmt.Errorf("failed to create volumeGroupSnapshotterBase: %w", err)
}

return &cephFSVolumeGroupSnapshot{
volumeGroupSnapshotterBase: base,
}, nil
}

func (c *cephFSVolumeGroupSnapshot) TestVolumeGroupSnapshot() error {
return c.volumeGroupSnapshotterBase.testVolumeGroupSnapshot(c)
}

func (c *cephFSVolumeGroupSnapshot) GetVolumeGroupSnapshotClass() (*groupsnapapi.VolumeGroupSnapshotClass, error) {
vgscPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "groupsnapshotclass.yaml")
vgsc := &groupsnapapi.VolumeGroupSnapshotClass{}
err := unmarshal(vgscPath, vgsc)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal VolumeGroupSnapshotClass: %w", err)
}

vgsc.Parameters["csi.storage.k8s.io/group-snapshotter-secret-namespace"] = cephCSINamespace
vgsc.Parameters["csi.storage.k8s.io/group-snapshotter-secret-name"] = cephFSProvisionerSecretName
vgsc.Parameters["fsName"] = fileSystemName

fsID, err := getClusterID(c.framework)
if err != nil {
return nil, fmt.Errorf("failed to get clusterID: %w", err)
}
vgsc.Parameters["clusterID"] = fsID

return vgsc, nil
}

func (c *cephFSVolumeGroupSnapshot) ValidateResourcesForCreate(vgs *groupsnapapi.VolumeGroupSnapshot) error {
ctx := context.TODO()
metadataPool, err := getCephFSMetadataPoolName(c.framework, fileSystemName)
if err != nil {
return fmt.Errorf("failed getting cephFS metadata pool name: %w", err)
}

sourcePVCCount := len(vgs.Status.PVCVolumeSnapshotRefList)
// we are creating clones for each source PVC
clonePVCCount := len(vgs.Status.PVCVolumeSnapshotRefList)
totalPVCCount := sourcePVCCount + clonePVCCount
validateSubvolumeCount(c.framework, totalPVCCount, fileSystemName, subvolumegroup)

// we are creating 1 snapshot for each source PVC, validate the snapshot count
for _, pvcSnap := range vgs.Status.PVCVolumeSnapshotRefList {
pvc, err := c.framework.ClientSet.CoreV1().PersistentVolumeClaims(vgs.Namespace).Get(ctx,
pvcSnap.PersistentVolumeClaimRef.Name,
metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get PVC: %w", err)
}
pv := pvc.Spec.VolumeName
pvObj, err := c.framework.ClientSet.CoreV1().PersistentVolumes().Get(ctx, pv, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get PV: %w", err)
}
validateCephFSSnapshotCount(c.framework, 1, subvolumegroup, pvObj)
}
validateOmapCount(c.framework, totalPVCCount, cephfsType, metadataPool, volumesType)
validateOmapCount(c.framework, sourcePVCCount, cephfsType, metadataPool, snapsType)
validateOmapCount(c.framework, 1, cephfsType, metadataPool, groupSnapsType)

return nil
}

func (c *cephFSVolumeGroupSnapshot) ValidateResourcesForDelete() error {
metadataPool, err := getCephFSMetadataPoolName(c.framework, fileSystemName)
if err != nil {
return fmt.Errorf("failed getting cephFS metadata pool name: %w", err)
}
validateOmapCount(c.framework, 0, cephfsType, metadataPool, volumesType)
validateOmapCount(c.framework, 0, cephfsType, metadataPool, snapsType)
validateOmapCount(c.framework, 0, cephfsType, metadataPool, groupSnapsType)

return nil
}
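The volumeGroupSnapshotterBase helper and the VolumeGroupSnapshotter interface used above are defined in the group-snapshot framework file, one of the four changed files but not rendered on this page. Inferred from the methods cephFSVolumeGroupSnapshot implements, the interface plausibly looks like the sketch below; the actual definition may differ.

// Plausible shape of the VolumeGroupSnapshotter interface, inferred from the
// methods implemented by cephFSVolumeGroupSnapshot above; the real definition
// lives in the framework file not shown here and may differ.
type VolumeGroupSnapshotter interface {
    // TestVolumeGroupSnapshot drives the full flow: create PVCs, take a
    // VolumeGroupSnapshot, create clones from the per-PVC snapshots,
    // validate the backend resources, and clean everything up.
    TestVolumeGroupSnapshot() error

    // GetVolumeGroupSnapshotClass returns the driver-specific class,
    // including secrets, clusterID and pool/filesystem parameters.
    GetVolumeGroupSnapshotClass() (*groupsnapapi.VolumeGroupSnapshotClass, error)

    // ValidateResourcesForCreate verifies subvolume, snapshot and omap
    // counts after the group snapshot and clones have been created.
    ValidateResourcesForCreate(vgs *groupsnapapi.VolumeGroupSnapshot) error

    // ValidateResourcesForDelete verifies that all omap entries are gone
    // after the PVCs and the group snapshot are deleted.
    ValidateResourcesForDelete() error
}

Keeping the driver-specific pieces behind an interface like this is presumably what lets the same base test flow back both the CephFS case added here and a future RBD case.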
