e2e: cephfs rados namespace test
Signed-off-by: Praveen M <[email protected]>
iPraveenParihar committed Oct 1, 2024
1 parent 995ef49 commit 459bca1
Showing 4 changed files with 76 additions and 58 deletions.
79 changes: 46 additions & 33 deletions e2e/cephfs.go
@@ -298,55 +298,68 @@ var _ = Describe(cephfsType, func() {
 				framework.Failf("failed getting cephFS metadata pool name: %v", getErr)
 			}
 
-			By("checking provisioner deployment is running", func() {
-				err := waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
+			By("verify rados objects are within a namespace", func() {
+				radosNamespace = "cephfs-ns"
+				framework.Logf("updating configmap with rados namespace %s", radosNamespace)
+				err := createRadosNamespace(f)
 				if err != nil {
-					framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
+					framework.Failf("failed to create rados namespace: %v", err)
 				}
-			})
-
-			By("checking nodeplugin daemonset pods are running", func() {
-				err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+				err = createConfigMap(cephFSDirPath, f.ClientSet, f)
 				if err != nil {
-					framework.Failf("timeout waiting for daemonset %s: %v", cephFSDeamonSetName, err)
+					framework.Failf("failed to create configmap: %v", err)
 				}
-			})
 
-			// test only if ceph-csi is deployed via helm
-			if helmTest {
-				By("verify PVC and app binding on helm installation", func() {
-					err := validatePVCAndAppBinding(pvcPath, appPath, f)
-					if err != nil {
-						framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
-					}
-					// Deleting the storageclass and secret created by helm
-					err = deleteResource(cephFSExamplePath + "storageclass.yaml")
-					if err != nil {
-						framework.Failf("failed to delete CephFS storageclass: %v", err)
-					}
-					err = deleteResource(cephFSExamplePath + "secret.yaml")
-					if err != nil {
-						framework.Failf("failed to delete CephFS storageclass: %v", err)
-					}
-				})
-			}
+				// delete csi pods
+				err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
+					cephCSINamespace, false)
+				if err != nil {
+					framework.Failf("failed to delete pods with labels: %v", err)
+				}
+				// wait for csi pods to come up
+				err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+				if err != nil {
+					framework.Failf("timeout waiting for daemonset pods: %v", err)
+				}
+				err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
+				if err != nil {
+					framework.Failf("timeout waiting for deployment %s: %v", cephFSDeploymentName, err)
+				}
 
-			By("verify mountOptions support", func() {
-				err := createCephfsStorageClass(f.ClientSet, f, true, nil)
+				// create a PVC and bind it to an app
+				err = createCephfsStorageClass(f.ClientSet, f, true, nil)
 				if err != nil {
 					framework.Failf("failed to create CephFS storageclass: %v", err)
 				}
-
-				err = verifySeLinuxMountOption(f, pvcPath, appPath,
-					cephFSDeamonSetName, cephFSContainerName, cephCSINamespace)
+				pvc, pod, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
 				if err != nil {
-					framework.Failf("failed to verify mount options: %v", err)
+					framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
 				}
 
+				validateOmapCount(f, 1, cephfsType, metadataPool, volumesType)
+
+				// delete resources
+				err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout)
+				if err != nil {
+					framework.Failf("failed to delete application: %v", err)
+				}
+				err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+				if err != nil {
+					framework.Failf("failed to delete PVC: %v", err)
+				}
 				err = deleteResource(cephFSExamplePath + "storageclass.yaml")
 				if err != nil {
 					framework.Failf("failed to delete CephFS storageclass: %v", err)
 				}
+
+				validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
+
+				// reset radosNamespace
+				radosNamespace = ""
+				err = createConfigMap(cephFSDirPath, f.ClientSet, f)
+				if err != nil {
+					framework.Failf("failed to create configmap: %v", err)
+				}
 			})
 
 			By("validate fuseMountOptions", func() {
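
The test above switches the suite-wide radosNamespace to "cephfs-ns", regenerates the CSI configmap, and restarts the provisioner and nodeplugin pods so the driver reloads that configuration before a PVC is provisioned; validateOmapCount must then find the csi.volume. journal objects inside that namespace and, after cleanup, find none. The standalone sketch below is not part of the commit; the pool name and a reachable cluster with the rados CLI in PATH are assumptions. It only illustrates the isolation property being checked, using the same --pool/--namespace flags the test builds:

// Illustrative only: shows the isolation property the e2e test relies on,
// using the same rados CLI flags that appear in validateOmapCount.
// Assumes a reachable Ceph cluster, the rados binary in PATH, and a
// metadata pool named "myfs-metadata" (hypothetical).
package main

import (
	"fmt"
	"os/exec"
)

func radosLs(pool, namespace string) (string, error) {
	// Mirrors the fragment built by cephfsOptions: --pool=<pool> --namespace=<ns>.
	out, err := exec.Command("rados", "ls", "--pool="+pool, "--namespace="+namespace).CombinedOutput()

	return string(out), err
}

func main() {
	const pool = "myfs-metadata" // hypothetical pool name

	// Objects written with --namespace=cephfs-ns are only listed when the
	// same namespace is passed; a listing of the default "csi" namespace
	// will not show them.
	for _, ns := range []string{"csi", "cephfs-ns"} {
		out, err := radosLs(pool, ns)
		if err != nil {
			fmt.Printf("rados ls failed for namespace %q: %v\n", ns, err)

			continue
		}
		fmt.Printf("objects in namespace %q:\n%s\n", ns, out)
	}
}
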
9 changes: 9 additions & 0 deletions e2e/cephfs_helper.go
@@ -187,6 +187,15 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
 	return nil
 }
 
+func cephfsOptions(pool string) string {
+	if radosNamespace != "" {
+		return "--pool=" + pool + " --namespace=" + radosNamespace
+	}
+
+	// default namespace is csi
+	return "--pool=" + pool + " --namespace=csi"
+}
+
 type cephfsSubVolume struct {
 	Name string `json:"name"`
 }
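
cephfsOptions keeps the --pool/--namespace flag pair in one place so every rados invocation in the suite agrees on the namespace. A minimal sketch of the two fragments it produces; the pool name is a placeholder and the radosNamespace variable below stands in for the suite's package-level variable:

// Prints the flag fragments cephfsOptions yields with and without a
// configured rados namespace.
package main

import "fmt"

var radosNamespace string

func cephfsOptions(pool string) string {
	if radosNamespace != "" {
		return "--pool=" + pool + " --namespace=" + radosNamespace
	}

	// default namespace is csi
	return "--pool=" + pool + " --namespace=csi"
}

func main() {
	fmt.Println("rados ls " + cephfsOptions("myfs-metadata")) // default: --namespace=csi

	radosNamespace = "cephfs-ns"
	fmt.Println("rados ls " + cephfsOptions("myfs-metadata")) // namespaced: --namespace=cephfs-ns
}
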
3 changes: 3 additions & 0 deletions e2e/configmap.go
@@ -60,6 +60,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
 		RBD: cephcsi.RBD{
 			RadosNamespace: radosNamespace,
 		},
+		CephFS: cephcsi.CephFS{
+			RadosNamespace: radosNamespace,
+		},
 		ReadAffinity: cephcsi.ReadAffinity{
 			Enabled: true,
 			CrushLocationLabels: []string{
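
The configmap change mirrors the existing RBD field: the generated cluster entry now carries a CephFS rados namespace taken from the same radosNamespace variable. A rough sketch of that entry's shape; the struct and JSON keys below are local approximations for illustration, not the cephcsi ClusterInfo API, and the cluster ID and monitor address are hypothetical:

// Illustrative sketch of a cluster entry carrying both RBD and CephFS
// rados namespaces; the type is defined locally and only approximates
// the cephcsi type used by createConfigMap.
package main

import (
	"encoding/json"
	"fmt"
)

type clusterInfo struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
	RBD       struct {
		RadosNamespace string `json:"radosNamespace"`
	} `json:"rbd"`
	CephFS struct {
		RadosNamespace string `json:"radosNamespace"`
	} `json:"cephFS"`
}

func main() {
	// Both drivers read the same suite-level radosNamespace value in the e2e code.
	radosNamespace := "cephfs-ns"

	entry := clusterInfo{
		ClusterID: "rook-ceph",                 // hypothetical cluster ID
		Monitors:  []string{"10.96.0.10:6789"}, // hypothetical monitor address
	}
	entry.RBD.RadosNamespace = radosNamespace
	entry.CephFS.RadosNamespace = radosNamespace

	out, err := json.MarshalIndent([]clusterInfo{entry}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
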
43 changes: 18 additions & 25 deletions e2e/utils.go
@@ -172,15 +172,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 
 	radosListCommands := []radosListCommand{
 		{
-			volumeMode: volumesType,
-			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf(
-				"rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l",
-				pool),
+			volumeMode: volumesType,
+			driverType: cephfsType,
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.", cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", cephfsOptions(pool)),
 		},
 		{
 			volumeMode: volumesType,
@@ -193,14 +190,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
 		},
 		{
-			volumeMode: snapsType,
-			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l",
-				pool),
+			volumeMode: snapsType,
+			driverType: cephfsType,
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", cephfsOptions(pool)),
 		},
 		{
 			volumeMode: snapsType,
@@ -211,14 +206,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
 		},
 		{
-			volumeMode: groupSnapsType,
-			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l",
-				pool),
+			volumeMode: groupSnapsType,
+			driverType: cephfsType,
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.group.", cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.groups.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default %s | wc -l", cephfsOptions(pool)),
 		},
 		{
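
The grep pipelines built above do the actual counting: list the objects in the metadata pool's namespace, drop the csi.volumes.default / csi.snaps.default / csi.groups.default tracker objects via grep -v default, and count the entries with the relevant prefix. A pure-Go stand-in for the volume filter, fed made-up rados ls output, behaves like this:

// A stand-in for "| grep -v default | grep -c ^csi.volume.": given rados ls
// output, count the per-volume journal objects while skipping the tracker
// object. The sample output and UUID are made up.
package main

import (
	"fmt"
	"strings"
)

func countVolumeObjects(radosLsOutput string) int {
	count := 0
	for _, line := range strings.Split(strings.TrimSpace(radosLsOutput), "\n") {
		if strings.Contains(line, "default") {
			continue // mirrors `grep -v default`
		}
		if strings.HasPrefix(line, "csi.volume.") {
			count++ // mirrors `grep -c ^csi.volume.`
		}
	}

	return count
}

func main() {
	sample := `csi.volumes.default
csi.volume.6b99c017-5f47-4c57-9897-23b78dd0a871`

	// Expect 1: one provisioned volume, tracker object excluded.
	fmt.Println(countVolumeObjects(sample))
}
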
