Syncing latest changes from upstream devel for ceph-csi #403

Merged · 5 commits · Oct 22, 2024
1 change: 1 addition & 0 deletions PendingReleaseNotes.md
@@ -9,6 +9,7 @@
- CSI metrics for sidecars are now exposed at `POD_IP`:`SIDECAR_ENDPOINT`/`metrics`
path. Check sidecar container spec for `SIDECAR_ENDPOINT`
value [PR](https://github.com/ceph/ceph-csi/pull/4887)
- cephfs: support omap data store in radosnamespace [PR](https://github.com/ceph/ceph-csi/pull/4661)
- helm: Support setting nodeplugin and provisioner annotations

## NOTE
2 changes: 2 additions & 0 deletions api/deploy/kubernetes/csi-config-map.go
@@ -36,6 +36,8 @@ type CephFS struct {
NetNamespaceFilePath string `json:"netNamespaceFilePath"`
// SubvolumeGroup contains the name of the SubvolumeGroup for CSI volumes
SubvolumeGroup string `json:"subvolumeGroup"`
// RadosNamespace is a rados namespace in the filesystem metadata pool
RadosNamespace string `json:"radosNamespace"`
// KernelMountOptions contains the kernel mount options for CephFS volumes
KernelMountOptions string `json:"kernelMountOptions"`
// FuseMountOptions contains the fuse mount options for CephFS volumes
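
To see the new field in action, here is a minimal standalone sketch (the wrapper program and the sample JSON values are assumptions, not part of this PR) showing that radosNamespace round-trips through the JSON tags exactly like the existing CephFS fields:

package main

import (
    "encoding/json"
    "fmt"
)

// CephFS mirrors the fields shown in the diff above.
type CephFS struct {
    NetNamespaceFilePath string `json:"netNamespaceFilePath"`
    SubvolumeGroup       string `json:"subvolumeGroup"`
    RadosNamespace       string `json:"radosNamespace"`
    KernelMountOptions   string `json:"kernelMountOptions"`
    FuseMountOptions     string `json:"fuseMountOptions"`
}

func main() {
    // Hypothetical config-map entry; keys match the JSON tags above.
    raw := []byte(`{"subvolumeGroup": "csi", "radosNamespace": "cephfs-ns"}`)

    var fs CephFS
    if err := json.Unmarshal(raw, &fs); err != nil {
        panic(err)
    }
    fmt.Println(fs.RadosNamespace) // prints: cephfs-ns
}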
1 change: 1 addition & 0 deletions charts/ceph-csi-cephfs/values.yaml
@@ -28,6 +28,7 @@ serviceAccounts:
# cephFS:
# subvolumeGroup: "csi"
# netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net"
# radosNamespace: "csi"
csiConfig: []

# Labels to apply to all resources
8 changes: 7 additions & 1 deletion deploy/csi-config-map-sample.yaml
@@ -32,6 +32,11 @@ kind: ConfigMap
# The "cephFS.fuseMountOptions" fields are common separated mount options
# for `Ceph FUSE driver`. Setting this will override the fusemountoptions
# command line flag.
# The "cephFS.radosNamespace" is optional and represents a radosNamespace in the
# metadata pool. If any given, the omap data of cephFS will be stored within
# this radosNamespace.
# NOTE: Make sure you don't add radosNamespace option to a currently in use
# configuration as it will cause issues.
# network namespace specified by the "cephFS.netNamespaceFilePath".
# The "nfs.netNamespaceFilePath" fields are the various network namespace
# path for the Ceph cluster identified by the <cluster-id>, This will be used
@@ -78,7 +83,8 @@ data:
"subvolumeGroup": "<subvolumegroup for cephFS volumes>"
"netNamespaceFilePath": "<kubeletRootPath>/plugins/cephfs.csi.ceph.com/net",
"kernelMountOptions": "<kernelMountOptions for cephFS volumes>",
"fuseMountOptions": "<fuseMountOptions for cephFS volumes>"
"fuseMountOptions": "<fuseMountOptions for cephFS volumes>",
"radosNamespace": "<rados-namespace>"
}
"nfs": {
"netNamespaceFilePath": "<kubeletRootPath>/plugins/nfs.csi.ceph.com/net",
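
As a hedged verification sketch (the pool name "cephfs_metadata", the namespace value "csi", and use of the go-ceph bindings are assumptions, not part of this PR), the omap objects that ceph-csi stores can be listed from the configured rados namespace:

package main

import (
    "fmt"

    "github.com/ceph/go-ceph/rados"
)

func main() {
    conn, err := rados.NewConn()
    if err != nil {
        panic(err)
    }
    // Read the default ceph.conf and connect to the cluster.
    if err := conn.ReadDefaultConfigFile(); err != nil {
        panic(err)
    }
    if err := conn.Connect(); err != nil {
        panic(err)
    }
    defer conn.Shutdown()

    ioctx, err := conn.OpenIOContext("cephfs_metadata") // assumed metadata pool
    if err != nil {
        panic(err)
    }
    defer ioctx.Destroy()

    // Scope the listing to the namespace set via "cephFS.radosNamespace".
    ioctx.SetNamespace("csi")

    iter, err := ioctx.Iter()
    if err != nil {
        panic(err)
    }
    defer iter.Close()
    for iter.Next() {
        // Expect objects such as csi.volumes.default and csi.volume.<uuid>.
        fmt.Println(iter.Value())
    }
}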
116 changes: 116 additions & 0 deletions e2e/cephfs.go
@@ -2491,6 +2491,122 @@ var _ = Describe(cephfsType, func() {
}
})

By("verify rados objects are within a namespace", func() {
updateRadosNamespace := func(radosNamespaceName string) {
framework.Logf("updating configmap with rados namespace %s", radosNamespace)
radosNamespace = radosNamespaceName
err := deleteConfigMap(cephFSDirPath)
if err != nil {
framework.Failf("failed to delete configmap:: %v", err)
}
err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
framework.Failf("failed to create configmap: %v", err)
}

// delete csi pods
err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
cephCSINamespace, false)
if err != nil {
framework.Failf("failed to delete pods with labels: %v", err)
}
// wait for csi pods to come up
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
framework.Failf("timeout waiting for daemonset pods: %v", err)
}
err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
if err != nil {
framework.Failf("timeout waiting for deployment pods: %v", err)
}
}

// radosNamespace is a global variable, so we need to save the old value
// and restore it after the test.
oldRadosNamespace := radosNamespace
newRadosNamespace := "cephfs-ns"

updateRadosNamespace(newRadosNamespace)
defer func() {
updateRadosNamespace(oldRadosNamespace)
}()

err := deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}
err = createCephfsStorageClass(f.ClientSet, f, true, nil)
if err != nil {
framework.Failf("failed to create CephFS storageclass: %v", err)
}
// create a PVC and bind it to an app
pvc, pod, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
if err != nil {
framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
}

// snapshot test
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS snapshotclass: %v", err)
}
err = createCephFSSnapshotClass(f)
if err != nil {
framework.Failf("failed to create CephFS snapshot class: %v", err)
}
snap := getSnapshot(snapshotPath)
snap.Namespace = f.UniqueName
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
snap.Name = f.UniqueName
err = createSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to create snapshot (%s): %v", snap.Name, err)
}

// restore pvc test
pvcClone, err := loadPVC(pvcClonePath)
if err != nil {
framework.Failf("failed to load PVC: %v", err)
}
pvcClone.Namespace = f.UniqueName
pvcClone.Spec.DataSource.Name = snap.Name
// create PVC from the snapshot
err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout)
if err != nil {
framework.Failf("failed to create pvc clone: %v", err)
}

// validate OMAP count
validateOmapCount(f, 2, cephfsType, metadataPool, volumesType)
validateOmapCount(f, 1, cephfsType, metadataPool, snapsType)

// delete resources
err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout)
if err != nil {
framework.Failf("failed to delete application: %v", err)
}
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
if err != nil {
framework.Failf("failed to delete PVC: %v", err)
}
err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout)
if err != nil {
framework.Failf("failed to delete pvc clone: %v", err)
}
err = deleteSnapshot(&snap, deployTimeout)
if err != nil {
framework.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
}
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
framework.Failf("failed to delete CephFS storageclass: %v", err)
}

// validate OMAP count
validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
validateOmapCount(f, 0, cephfsType, metadataPool, snapsType)
})

// FIXME: in case NFS testing is done, prevent deletion
// of the CephFS filesystem and related pool. This can
// probably be addressed in a nicer way, making sure
9 changes: 9 additions & 0 deletions e2e/cephfs_helper.go
@@ -187,6 +187,15 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
return nil
}

func cephfsOptions(pool string) string {
if radosNamespace != "" {
return "--pool=" + pool + " --namespace=" + radosNamespace
}

// default namespace is csi
return "--pool=" + pool + " --namespace=csi"
}

type cephfsSubVolume struct {
Name string `json:"name"`
}
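
A minimal standalone sketch of how cephfsOptions behaves (the pool name and the main wrapper are assumptions; in the suite, radosNamespace is the shared e2e global):

package main

import "fmt"

// radosNamespace stands in for the e2e suite's shared global.
var radosNamespace string

func cephfsOptions(pool string) string {
    if radosNamespace != "" {
        return "--pool=" + pool + " --namespace=" + radosNamespace
    }

    // default namespace is csi
    return "--pool=" + pool + " --namespace=csi"
}

func main() {
    fmt.Println(cephfsOptions("cephfs_metadata"))
    // --pool=cephfs_metadata --namespace=csi

    radosNamespace = "cephfs-ns"
    fmt.Println(cephfsOptions("cephfs_metadata"))
    // --pool=cephfs_metadata --namespace=cephfs-ns
}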
3 changes: 3 additions & 0 deletions e2e/configmap.go
@@ -60,6 +60,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
RBD: cephcsi.RBD{
RadosNamespace: radosNamespace,
},
CephFS: cephcsi.CephFS{
RadosNamespace: radosNamespace,
},
ReadAffinity: cephcsi.ReadAffinity{
Enabled: true,
CrushLocationLabels: []string{
40 changes: 17 additions & 23 deletions e2e/utils.go
@@ -174,13 +174,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
{
volumeMode: volumesType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf(
"rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l",
pool),
radosLsCmd: "rados ls " + cephfsOptions(pool),
radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
cephfsOptions(pool)),
radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + cephfsOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", cephfsOptions(pool)),
},
{
volumeMode: volumesType,
@@ -193,14 +191,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
},
{
volumeMode: snapsType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l",
pool),
volumeMode: snapsType,
driverType: cephfsType,
radosLsCmd: "rados ls " + cephfsOptions(pool),
radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", cephfsOptions(pool)),
radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + cephfsOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", cephfsOptions(pool)),
},
{
volumeMode: snapsType,
@@ -211,14 +207,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
},
{
volumeMode: groupSnapsType,
driverType: cephfsType,
radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.",
pool),
radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l",
pool),
volumeMode: groupSnapsType,
driverType: cephfsType,
radosLsCmd: "rados ls " + cephfsOptions(pool),
radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.group.", cephfsOptions(pool)),
radosLsKeysCmd: "rados listomapkeys csi.groups.default " + cephfsOptions(pool),
radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default %s | wc -l", cephfsOptions(pool)),
},
}

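
For clarity, a small sketch (the pool and namespace values are assumptions) of the shell pipeline one of these entries expands to once cephfsOptions is interpolated:

package main

import "fmt"

func main() {
    // Example output of cephfsOptions("cephfs_metadata") with a namespace set.
    opts := "--pool=cephfs_metadata --namespace=cephfs-ns"

    cmd := fmt.Sprintf(
        "rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.", opts)
    fmt.Println(cmd)
    // rados ls --pool=cephfs_metadata --namespace=cephfs-ns | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.
}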
1 change: 1 addition & 0 deletions internal/cephfs/core/volume.go
@@ -101,6 +101,7 @@ type SubVolume struct {
VolID string // subvolume id.
FsName string // filesystem name.
SubvolumeGroup string // subvolume group name where subvolume will be created.
RadosNamespace string // rados namespace where omap data will be stored.
Pool string // pool name where subvolume will be created.
Features []string // subvolume features.
Size int64 // subvolume size.
5 changes: 2 additions & 3 deletions internal/cephfs/groupcontrollerserver.go
@@ -27,7 +27,6 @@ import (
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"

@@ -455,7 +454,7 @@ func (cs *ControllerServer) createSnapshotAndAddMapping(

return nil, err
}
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr)
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -637,7 +636,7 @@ func (cs *ControllerServer) deleteSnapshotsAndUndoReservation(ctx context.Contex
return err
}

j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr)
j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr)
if err != nil {
return err
}
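
The journal connection now takes vgo.RadosNamespace instead of the hard-coded fsutil.RadosNamespace. A hypothetical sketch of the fallback this change relies on (the helper name is made up; "csi" matches the default used by the e2e helper above):

package main

import "fmt"

// radosNamespaceOrDefault illustrates the expected behavior: use the
// configured namespace when set, otherwise fall back to the previous
// hard-coded default.
func radosNamespaceOrDefault(configured string) string {
    if configured != "" {
        return configured
    }
    return "csi"
}

func main() {
    fmt.Println(radosNamespaceOrDefault(""))          // csi
    fmt.Println(radosNamespaceOrDefault("cephfs-ns")) // cephfs-ns
}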
9 changes: 4 additions & 5 deletions internal/cephfs/store/backingsnapshot.go
@@ -19,7 +19,6 @@ package store
import (
"context"

fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/ceph-csi/internal/util/reftracker"
"github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper"
@@ -45,7 +44,7 @@ func AddSnapshotBackedVolumeRef(
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(volOptions.RadosNamespace)

var (
backingSnapID = volOptions.BackingSnapshotID
@@ -90,7 +89,7 @@

if created && !deleted {
log.ErrorLog(ctx, "orphaned reftracker object %s (pool %s, namespace %s)",
backingSnapID, volOptions.MetadataPool, fsutil.RadosNamespace)
backingSnapID, volOptions.MetadataPool, volOptions.RadosNamespace)
}
}()

@@ -118,7 +117,7 @@ func UnrefSnapshotBackedVolume(
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(volOptions.RadosNamespace)

var (
backingSnapID = volOptions.BackingSnapshotID
@@ -159,7 +158,7 @@ func UnrefSelfInSnapshotBackedVolumes(
}
defer ioctx.Destroy()

ioctx.SetNamespace(fsutil.RadosNamespace)
ioctx.SetNamespace(snapParentVolOptions.RadosNamespace)

return reftracker.Remove(
radoswrapper.NewIOContext(ioctx),