diff --git a/PendingReleaseNotes.md b/PendingReleaseNotes.md index 368257832f1..76de0dfb873 100644 --- a/PendingReleaseNotes.md +++ b/PendingReleaseNotes.md @@ -9,6 +9,7 @@ - CSI metrics for sidecars are now exposed at `POD_IP`:`SIDECAR_ENDPOINT`/`metrics` path. Check sidecar container spec for `SIDECAR_ENDPOINT` value [PR](https://github.com/ceph/ceph-csi/pull/4887) +- cephfs: support storing omap data in a radosNamespace [PR](https://github.com/ceph/ceph-csi/pull/4661) - helm: Support setting nodeplugin and provisioner annotations ## NOTE diff --git a/api/deploy/kubernetes/csi-config-map.go b/api/deploy/kubernetes/csi-config-map.go index 0c418e23d11..fb88be6b151 100644 --- a/api/deploy/kubernetes/csi-config-map.go +++ b/api/deploy/kubernetes/csi-config-map.go @@ -36,6 +36,8 @@ type CephFS struct { NetNamespaceFilePath string `json:"netNamespaceFilePath"` // SubvolumeGroup contains the name of the SubvolumeGroup for CSI volumes SubvolumeGroup string `json:"subvolumeGroup"` + // RadosNamespace is a rados namespace in the filesystem metadata pool + RadosNamespace string `json:"radosNamespace"` // KernelMountOptions contains the kernel mount options for CephFS volumes KernelMountOptions string `json:"kernelMountOptions"` // FuseMountOptions contains the fuse mount options for CephFS volumes diff --git a/charts/ceph-csi-cephfs/values.yaml b/charts/ceph-csi-cephfs/values.yaml index 18a9895a8a2..814b6a41830 100644 --- a/charts/ceph-csi-cephfs/values.yaml +++ b/charts/ceph-csi-cephfs/values.yaml @@ -28,6 +28,7 @@ serviceAccounts: # cephFS: # subvolumeGroup: "csi" # netNamespaceFilePath: "{{ .kubeletDir }}/plugins/{{ .driverName }}/net" +# radosNamespace: "csi" csiConfig: [] # Labels to apply to all resources diff --git a/deploy/csi-config-map-sample.yaml b/deploy/csi-config-map-sample.yaml index e0263a0d8c9..7413d5e4552 100644 --- a/deploy/csi-config-map-sample.yaml +++ b/deploy/csi-config-map-sample.yaml @@ -32,6 +32,11 @@ kind: ConfigMap # The "cephFS.fuseMountOptions" fields are comma separated mount options # for `Ceph FUSE driver`. Setting this will override the fusemountoptions # command line flag. +# The "cephFS.radosNamespace" is optional and represents a radosNamespace in the +# metadata pool. If set, the CephFS omap data will be stored within +# this radosNamespace. +# NOTE: Do not add the radosNamespace option to a configuration that is already +# in use; the existing omap data would not be found in the new namespace. # network namespace specified by the "cephFS.netNamespaceFilePath". 
# The "nfs.netNamespaceFilePath" fields are the various network namespace # path for the Ceph cluster identified by the , This will be used @@ -78,7 +83,8 @@ data: "subvolumeGroup": "" "netNamespaceFilePath": "/plugins/cephfs.csi.ceph.com/net", "kernelMountOptions": "", - "fuseMountOptions": "" + "fuseMountOptions": "", + "radosNamespace": "" } "nfs": { "netNamespaceFilePath": "/plugins/nfs.csi.ceph.com/net", diff --git a/e2e/cephfs.go b/e2e/cephfs.go index c6050441b93..a239b600aae 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -2491,6 +2491,122 @@ var _ = Describe(cephfsType, func() { } }) + By("verify rados objects are within a namespace", func() { + updateRadosNamespace := func(radosNamespaceName string) { + framework.Logf("updating configmap with rados namespace %s", radosNamespace) + radosNamespace = radosNamespaceName + err := deleteConfigMap(cephFSDirPath) + if err != nil { + framework.Failf("failed to delete configmap:: %v", err) + } + err = createConfigMap(cephFSDirPath, f.ClientSet, f) + if err != nil { + framework.Failf("failed to create configmap: %v", err) + } + + // delete csi pods + err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)", + cephCSINamespace, false) + if err != nil { + framework.Failf("failed to delete pods with labels: %v", err) + } + // wait for csi pods to come up + err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + framework.Failf("timeout waiting for daemonset pods: %v", err) + } + err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout) + if err != nil { + framework.Failf("timeout waiting for deployment pods: %v", err) + } + } + + // radosNamespace is a global variable, so we need to save the old value + // and restore it after the test. 
+ oldRadosNamespace := radosNamespace + newRadosNamespace := "cephfs-ns" + + updateRadosNamespace(newRadosNamespace) + defer func() { + updateRadosNamespace(oldRadosNamespace) + }() + + err := deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + // create a PVC and bind it to an app + pvc, pod, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + framework.Failf("failed to validate CephFS pvc and application binding: %v", err) + } + + // snapshot test + err = deleteResource(cephFSExamplePath + "snapshotclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS snapshotclass: %v", err) + } + err = createCephFSSnapshotClass(f) + if err != nil { + framework.Failf("failed to create CephFS snapshot class: %v", err) + } + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + snap.Name = f.UniqueName + err = createSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to create snapshot (%s): %v", snap.Name, err) + } + + // restore pvc test + pvcClone, err := loadPVC(pvcClonePath) + if err != nil { + framework.Failf("failed to load PVC: %v", err) + } + pvcClone.Namespace = f.UniqueName + pvcClone.Spec.DataSource.Name = snap.Name + // create PVC from the snapshot + err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to create pvc clone: %v", err) + } + + // validate OMAP count + validateOmapCount(f, 2, cephfsType, metadataPool, volumesType) + validateOmapCount(f, 1, cephfsType, metadataPool, snapsType) + + // delete resources + err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout) + if err != nil { + framework.Failf("failed to delete application: %v", err) + } + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + framework.Failf("failed to delete PVC: %v", err) + } + err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to delete pvc clone: %v", err) + } + err = deleteSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err) + } + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + + // validate OMAP count + validateOmapCount(f, 0, cephfsType, metadataPool, volumesType) + validateOmapCount(f, 0, cephfsType, metadataPool, snapsType) + }) + // FIXME: in case NFS testing is done, prevent deletion // of the CephFS filesystem and related pool. 
This can // probably be addressed in a nicer way, making sure diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index ae6fb9f3901..6deb976d10a 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -187,6 +187,15 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC return nil } +func cephfsOptions(pool string) string { + if radosNamespace != "" { + return "--pool=" + pool + " --namespace=" + radosNamespace + } + + // default namespace is csi + return "--pool=" + pool + " --namespace=csi" +} + type cephfsSubVolume struct { Name string `json:"name"` } diff --git a/e2e/configmap.go b/e2e/configmap.go index 6ad3978c510..b230210503b 100644 --- a/e2e/configmap.go +++ b/e2e/configmap.go @@ -60,6 +60,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra RBD: cephcsi.RBD{ RadosNamespace: radosNamespace, }, + CephFS: cephcsi.CephFS{ + RadosNamespace: radosNamespace, + }, ReadAffinity: cephcsi.ReadAffinity{ Enabled: true, CrushLocationLabels: []string{ diff --git a/e2e/utils.go b/e2e/utils.go index 0e29f53c168..7e0f94d6128 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -174,13 +174,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str { volumeMode: volumesType, driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf( - "rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l", - pool), + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -v csi.volume.group. 
| grep -c ^csi.volume.", + cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", cephfsOptions(pool)), }, { volumeMode: volumesType, @@ -193,14 +191,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)), }, { - volumeMode: snapsType, - driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l", - pool), + volumeMode: snapsType, + driverType: cephfsType, + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", cephfsOptions(pool)), }, { volumeMode: snapsType, @@ -211,14 +207,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)), }, { - volumeMode: groupSnapsType, - driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l", - pool), + volumeMode: groupSnapsType, + driverType: cephfsType, + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.group.", cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.groups.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default %s | wc -l", cephfsOptions(pool)), }, } diff --git a/internal/cephfs/core/volume.go b/internal/cephfs/core/volume.go index 42aa6b01fbd..25b281ee325 100644 --- a/internal/cephfs/core/volume.go +++ b/internal/cephfs/core/volume.go @@ -101,6 +101,7 @@ type SubVolume struct { VolID string // subvolume id. FsName string // filesystem name. SubvolumeGroup string // subvolume group name where subvolume will be created. + RadosNamespace string // rados namespace where omap data will be stored. Pool string // pool name where subvolume will be created. Features []string // subvolume features. Size int64 // subvolume size. 
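The e2e table above verifies the change from the outside by shelling out to the `rados` CLI, while the store changes that follow scope the driver's own journal I/O by calling `SetNamespace` on the rados ioctx. The sketch below shows that same pattern with go-ceph, which can be handy for manually auditing which omap objects landed in the configured namespace. It is illustrative only: the pool name "myfs-metadata" and namespace "cephfs-ns" are placeholders, not values from this PR.

```go
package main

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// listCSIObjects lists objects in the given metadata pool, scoped to a
// RADOS namespace, mirroring how the cephfs store code confines its I/O.
func listCSIObjects(conn *rados.Conn, pool, namespace string) ([]string, error) {
	ioctx, err := conn.OpenIOContext(pool)
	if err != nil {
		return nil, fmt.Errorf("failed to open ioctx for pool %q: %w", pool, err)
	}
	defer ioctx.Destroy()

	// Same call the store changes below make: every subsequent operation
	// on this ioctx is confined to the given namespace.
	ioctx.SetNamespace(namespace)

	var names []string
	err = ioctx.ListObjects(func(oid string) {
		names = append(names, oid)
	})

	return names, err
}

func main() {
	conn, err := rados.NewConn()
	if err != nil {
		panic(err)
	}
	// Assumes a reachable cluster configuration in /etc/ceph.
	if err = conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err = conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()

	// "myfs-metadata" and "cephfs-ns" are illustrative placeholders.
	names, err := listCSIObjects(conn, "myfs-metadata", "cephfs-ns")
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
```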
diff --git a/internal/cephfs/groupcontrollerserver.go b/internal/cephfs/groupcontrollerserver.go index e808a055b3f..0433aa0fbaf 100644 --- a/internal/cephfs/groupcontrollerserver.go +++ b/internal/cephfs/groupcontrollerserver.go @@ -27,7 +27,6 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/core" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" "github.com/ceph/ceph-csi/internal/cephfs/store" - fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log" @@ -455,7 +454,7 @@ func (cs *ControllerServer) createSnapshotAndAddMapping( return nil, err } - j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr) + j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr) if err != nil { return nil, err } @@ -637,7 +636,7 @@ func (cs *ControllerServer) deleteSnapshotsAndUndoReservation(ctx context.Contex return err } - j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, fsutil.RadosNamespace, cr) + j, err := store.VolumeGroupJournal.Connect(vgo.Monitors, vgo.RadosNamespace, cr) if err != nil { return err } diff --git a/internal/cephfs/store/backingsnapshot.go b/internal/cephfs/store/backingsnapshot.go index 4bd876dcb6a..43846a1445d 100644 --- a/internal/cephfs/store/backingsnapshot.go +++ b/internal/cephfs/store/backingsnapshot.go @@ -19,7 +19,6 @@ package store import ( "context" - fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" "github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/ceph-csi/internal/util/reftracker" "github.com/ceph/ceph-csi/internal/util/reftracker/radoswrapper" @@ -45,7 +44,7 @@ func AddSnapshotBackedVolumeRef( } defer ioctx.Destroy() - ioctx.SetNamespace(fsutil.RadosNamespace) + ioctx.SetNamespace(volOptions.RadosNamespace) var ( backingSnapID = volOptions.BackingSnapshotID @@ -90,7 +89,7 @@ func AddSnapshotBackedVolumeRef( if created && !deleted { log.ErrorLog(ctx, "orphaned reftracker object %s (pool %s, namespace %s)", - backingSnapID, volOptions.MetadataPool, fsutil.RadosNamespace) + backingSnapID, volOptions.MetadataPool, volOptions.RadosNamespace) } }() @@ -118,7 +117,7 @@ func UnrefSnapshotBackedVolume( } defer ioctx.Destroy() - ioctx.SetNamespace(fsutil.RadosNamespace) + ioctx.SetNamespace(volOptions.RadosNamespace) var ( backingSnapID = volOptions.BackingSnapshotID @@ -159,7 +158,7 @@ func UnrefSelfInSnapshotBackedVolumes( } defer ioctx.Destroy() - ioctx.SetNamespace(fsutil.RadosNamespace) + ioctx.SetNamespace(snapParentVolOptions.RadosNamespace) return reftracker.Remove( radoswrapper.NewIOContext(ioctx), diff --git a/internal/cephfs/store/fsjournal.go b/internal/cephfs/store/fsjournal.go index c9f9a16d7e9..3e3c676e5df 100644 --- a/internal/cephfs/store/fsjournal.go +++ b/internal/cephfs/store/fsjournal.go @@ -23,7 +23,6 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/core" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" - fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" "github.com/ceph/ceph-csi/internal/journal" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log" @@ -87,8 +86,7 @@ func CheckVolExists(ctx context.Context, setMetadata bool, ) (*VolumeIdentifier, error) { var vid VolumeIdentifier - // Connect to cephfs' default radosNamespace (csi) - j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, err } @@ -228,8 
+226,7 @@ func UndoVolReservation( } defer cr.DeleteCredentials() - // Connect to cephfs' default radosNamespace (csi) - j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return err } @@ -283,8 +280,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin return nil, err } - // Connect to cephfs' default radosNamespace (csi) - j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, err } @@ -329,8 +325,7 @@ func ReserveSnap( err error ) - // Connect to cephfs' default radosNamespace (csi) - j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, err } @@ -368,8 +363,7 @@ func UndoSnapReservation( snapName string, cr *util.Credentials, ) error { - // Connect to cephfs' default radosNamespace (csi) - j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return err } @@ -403,8 +397,7 @@ func CheckSnapExists( setMetadata bool, cr *util.Credentials, ) (*SnapshotIdentifier, error) { - // Connect to cephfs' default radosNamespace (csi) - j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, err } diff --git a/internal/cephfs/store/volumegroup.go b/internal/cephfs/store/volumegroup.go index ea1c66e4d75..1140dcf6cc2 100644 --- a/internal/cephfs/store/volumegroup.go +++ b/internal/cephfs/store/volumegroup.go @@ -22,7 +22,6 @@ import ( "github.com/ceph/ceph-csi/internal/cephfs/core" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" - fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" "github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util/log" @@ -130,6 +129,13 @@ func NewVolumeGroupOptionsFromID( err) } + if volOptions.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, vi.ClusterID); err != nil { + return nil, nil, fmt.Errorf( + "failed to fetch rados namespace using clusterID (%s): %w", + vi.ClusterID, + err) + } + err = volOptions.Connect(cr) if err != nil { return nil, nil, err @@ -154,7 +160,7 @@ func NewVolumeGroupOptionsFromID( return nil, nil, err } - j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, nil, err } @@ -189,8 +195,7 @@ func CheckVolumeGroupSnapExists( volOptions *VolumeGroupOptions, cr *util.Credentials, ) (*VolumeGroupSnapshotIdentifier, error) { - // Connect to cephfs' default radosNamespace (csi) - j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, err } @@ -237,8 +242,7 @@ func ReserveVolumeGroup( ) vgsi.RequestName = volOptions.RequestName - // Connect to cephfs' default radosNamespace (csi) - j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolumeGroupJournal.Connect(volOptions.Monitors, 
volOptions.RadosNamespace, cr) if err != nil { return nil, err } @@ -271,8 +275,7 @@ func UndoVolumeGroupReservation( vgsi *VolumeGroupSnapshotIdentifier, cr *util.Credentials, ) error { - // Connect to cephfs' default radosNamespace (csi) - j, err := VolumeGroupJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolumeGroupJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return err } diff --git a/internal/cephfs/store/volumeoptions.go b/internal/cephfs/store/volumeoptions.go index 396de7f5f7e..285b15ffcdb 100644 --- a/internal/cephfs/store/volumeoptions.go +++ b/internal/cephfs/store/volumeoptions.go @@ -188,6 +188,13 @@ func GetClusterInformation(options map[string]string) (*cephcsi.ClusterInfo, err return nil, err } + radosNamespace, err := util.GetCephFSRadosNamespace(util.CsiConfigFile, clusterID) + if err != nil { + err = fmt.Errorf("failed to fetch rados namespace using clusterID (%s): %w", clusterID, err) + + return nil, err + } + subvolumeGroup, err := util.CephFSSubvolumeGroup(util.CsiConfigFile, clusterID) if err != nil { err = fmt.Errorf("failed to fetch subvolumegroup using clusterID (%s): %w", clusterID, err) @@ -199,6 +206,7 @@ func GetClusterInformation(options map[string]string) (*cephcsi.ClusterInfo, err Monitors: strings.Split(monitors, ","), } clusterData.CephFS.SubvolumeGroup = subvolumeGroup + clusterData.CephFS.RadosNamespace = radosNamespace return clusterData, nil } @@ -229,6 +237,7 @@ func getVolumeOptions(vo map[string]string) (*VolumeOptions, error) { opts.ClusterID = clusterData.ClusterID opts.Monitors = strings.Join(clusterData.Monitors, ",") opts.SubvolumeGroup = clusterData.CephFS.SubvolumeGroup + opts.RadosNamespace = clusterData.CephFS.RadosNamespace if err = extractOption(&opts.FsName, "fsName", vo); err != nil { return nil, err @@ -405,6 +414,10 @@ func NewVolumeOptionsFromVolID( return nil, nil, fmt.Errorf("failed to fetch subvolumegroup list using clusterID (%s): %w", vi.ClusterID, err) } + if volOptions.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, vi.ClusterID); err != nil { + return nil, nil, fmt.Errorf("failed to fetch rados namespace using clusterID (%s): %w", vi.ClusterID, err) + } + cr, err := util.NewAdminCredentials(secrets) if err != nil { return nil, nil, err @@ -434,8 +447,7 @@ func NewVolumeOptionsFromVolID( return nil, nil, err } - // Connect to cephfs' default radosNamespace (csi) - j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := VolJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return nil, nil, err } @@ -788,6 +800,13 @@ func NewSnapshotOptionsFromID( err) } + if volOptions.RadosNamespace, err = util.GetCephFSRadosNamespace(util.CsiConfigFile, vi.ClusterID); err != nil { + return &volOptions, nil, &sid, fmt.Errorf( + "failed to fetch rados namespace using clusterID (%s): %w", + vi.ClusterID, + err) + } + err = volOptions.Connect(cr) if err != nil { return &volOptions, nil, &sid, err @@ -812,8 +831,7 @@ func NewSnapshotOptionsFromID( return &volOptions, nil, &sid, err } - // Connect to cephfs' default radosNamespace (csi) - j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr) + j, err := SnapJournal.Connect(volOptions.Monitors, volOptions.RadosNamespace, cr) if err != nil { return &volOptions, nil, &sid, err } diff --git a/internal/rbd/group/util.go b/internal/rbd/group/util.go index 0271cfc81c5..f32b24cd8ad 100644 --- a/internal/rbd/group/util.go +++ 
b/internal/rbd/group/util.go @@ -74,7 +74,7 @@ func (cvg *commonVolumeGroup) initCommonVolumeGroup( return fmt.Errorf("failed to get MONs for cluster id %q: %w", csiID.ClusterID, err) } - namespace, err := util.GetRadosNamespace(util.CsiConfigFile, csiID.ClusterID) + namespace, err := util.GetRBDRadosNamespace(util.CsiConfigFile, csiID.ClusterID) if err != nil { return fmt.Errorf("failed to get RADOS namespace for cluster id %q: %w", csiID.ClusterID, err) } diff --git a/internal/rbd/manager.go b/internal/rbd/manager.go index b8c9c2f7da9..325943e9e0b 100644 --- a/internal/rbd/manager.go +++ b/internal/rbd/manager.go @@ -97,7 +97,7 @@ func (mgr *rbdManager) getVolumeGroupJournal(clusterID string) (journal.VolumeGr return nil, fmt.Errorf("failed to find MONs for cluster %q: %w", clusterID, err) } - ns, err := util.GetRadosNamespace(util.CsiConfigFile, clusterID) + ns, err := util.GetRBDRadosNamespace(util.CsiConfigFile, clusterID) if err != nil { return nil, fmt.Errorf("failed to find the RADOS namespace for cluster %q: %w", clusterID, err) } diff --git a/internal/rbd/rbd_util.go b/internal/rbd/rbd_util.go index d623327a837..b76187aa0f1 100644 --- a/internal/rbd/rbd_util.go +++ b/internal/rbd/rbd_util.go @@ -1023,7 +1023,7 @@ func genSnapFromSnapID( } rbdSnap.JournalPool = rbdSnap.Pool - rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID) + rbdSnap.RadosNamespace, err = util.GetRBDRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID) if err != nil { return nil, err } @@ -1134,7 +1134,7 @@ func generateVolumeFromVolumeID( return rbdVol, err } - rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID) + rbdVol.RadosNamespace, err = util.GetRBDRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID) if err != nil { return rbdVol, err } @@ -1357,7 +1357,7 @@ func genVolFromVolumeOptions( return nil, err } - rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID) + rbdVol.RadosNamespace, err = util.GetRBDRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID) if err != nil { return nil, err } diff --git a/internal/util/csiconfig.go b/internal/util/csiconfig.go index 6d3e7109ae0..766b8145f68 100644 --- a/internal/util/csiconfig.go +++ b/internal/util/csiconfig.go @@ -31,6 +31,10 @@ const ( // This was hardcoded once and defaults to the old value to keep backward compatibility. defaultCsiSubvolumeGroup = "csi" + // defaultCsiCephFSRadosNamespace defines the default RADOS namespace used for storing + // CSI-specific objects and keys for CephFS volumes. + defaultCsiCephFSRadosNamespace = "csi" + // CsiConfigFile is the location of the CSI config file. CsiConfigFile = "/etc/ceph-csi-config/config.json" @@ -96,8 +100,8 @@ func Mons(pathToConfig, clusterID string) (string, error) { return strings.Join(cluster.Monitors, ","), nil } -// GetRadosNamespace returns the namespace for the given clusterID. -func GetRadosNamespace(pathToConfig, clusterID string) (string, error) { +// GetRBDRadosNamespace returns the RBD RADOS namespace for the given clusterID. +func GetRBDRadosNamespace(pathToConfig, clusterID string) (string, error) { cluster, err := readClusterInfo(pathToConfig, clusterID) if err != nil { return "", err @@ -106,6 +110,21 @@ func GetRadosNamespace(pathToConfig, clusterID string) (string, error) { return cluster.RBD.RadosNamespace, nil } +// GetCephFSRadosNamespace returns the CephFS RADOS namespace for the given clusterID. +// If not set, it returns the default value "csi". 
+func GetCephFSRadosNamespace(pathToConfig, clusterID string) (string, error) { + cluster, err := readClusterInfo(pathToConfig, clusterID) + if err != nil { + return "", err + } + + if cluster.CephFS.RadosNamespace == "" { + return defaultCsiCephFSRadosNamespace, nil + } + + return cluster.CephFS.RadosNamespace, nil +} + // GetRBDMirrorDaemonCount returns the number of mirror daemon count for the // given clusterID. func GetRBDMirrorDaemonCount(pathToConfig, clusterID string) (int, error) { diff --git a/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/csi-config-map.go b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/csi-config-map.go index 0c418e23d11..fb88be6b151 100644 --- a/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/csi-config-map.go +++ b/vendor/github.com/ceph/ceph-csi/api/deploy/kubernetes/csi-config-map.go @@ -36,6 +36,8 @@ type CephFS struct { NetNamespaceFilePath string `json:"netNamespaceFilePath"` // SubvolumeGroup contains the name of the SubvolumeGroup for CSI volumes SubvolumeGroup string `json:"subvolumeGroup"` + // RadosNamespace is a rados namespace in the filesystem metadata pool + RadosNamespace string `json:"radosNamespace"` // KernelMountOptions contains the kernel mount options for CephFS volumes KernelMountOptions string `json:"kernelMountOptions"` // FuseMountOptions contains the fuse mount options for CephFS volumes
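For reference, the defaulting behaviour of the new `GetCephFSRadosNamespace` helper could be exercised with a small unit test along the following lines. This is only a sketch under stated assumptions: the cluster IDs, monitor addresses, and temp-file handling are invented for illustration, the test is assumed to live next to the package (e.g. under `internal/util`), and the config follows the documented JSON array format of the CSI config file.

```go
package util_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/ceph/ceph-csi/internal/util"
)

func TestGetCephFSRadosNamespace(t *testing.T) {
	// "cluster-a" sets an explicit CephFS radosNamespace; "cluster-b"
	// omits it and should fall back to the default "csi".
	config := `[
		{"clusterID": "cluster-a", "monitors": ["10.0.0.1:6789"],
		 "cephFS": {"radosNamespace": "cephfs-ns"}},
		{"clusterID": "cluster-b", "monitors": ["10.0.0.2:6789"]}
	]`

	path := filepath.Join(t.TempDir(), "config.json")
	if err := os.WriteFile(path, []byte(config), 0o600); err != nil {
		t.Fatal(err)
	}

	ns, err := util.GetCephFSRadosNamespace(path, "cluster-a")
	if err != nil || ns != "cephfs-ns" {
		t.Fatalf("expected namespace %q, got %q (err: %v)", "cephfs-ns", ns, err)
	}

	ns, err = util.GetCephFSRadosNamespace(path, "cluster-b")
	if err != nil || ns != "csi" {
		t.Fatalf("expected default namespace %q, got %q (err: %v)", "csi", ns, err)
	}
}
```

Falling back to "csi" keeps existing deployments working: that was the hardcoded namespace (the removed "Connect to cephfs' default radosNamespace (csi)" comments) before this change.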