Expose ceph full threshold settings in StorageCluster CR
When a cluster reaches the full, nearfull, or backfillfull threshold, Ceph
raises health warnings and, depending on the threshold, stops allowing IO
or backfill. These settings require special ceph commands instead of
being set as generic ceph config.
The settings from the StorageCluster CR are passed to the CephCluster CR
in the spec.storage section.

Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Jun 7, 2024
1 parent 2290924 commit e23a2b6
Showing 2 changed files with 18 additions and 0 deletions.
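The commit message above describes the flow in prose; as a rough illustration, the Go sketch below (using simplified stand-in structs, not the real ocsv1/rookCephv1 types) shows how the three pointer-valued thresholds are copied through unchanged, and why leaving a field unset (nil) keeps Ceph's default in effect.

package main

import "fmt"

// Simplified stand-ins for the relevant fields only; the real types are
// ocsv1.ManageCephCluster and rookCephv1.StorageScopeSpec and carry many
// more fields.
type manageCephCluster struct {
	FullRatio         *float64
	NearFullRatio     *float64
	BackfillFullRatio *float64
}

type storageScopeSpec struct {
	FullRatio         *float64
	NearFullRatio     *float64
	BackfillFullRatio *float64
}

// buildStorageSpec mirrors the pass-through done in newCephCluster in the
// second diff below: pointers are copied as-is, so an unset (nil) threshold
// leaves Ceph's built-in default untouched.
func buildStorageSpec(cc manageCephCluster) storageScopeSpec {
	return storageScopeSpec{
		FullRatio:         cc.FullRatio,
		NearFullRatio:     cc.NearFullRatio,
		BackfillFullRatio: cc.BackfillFullRatio,
	}
}

func main() {
	nearFull := 0.80
	spec := buildStorageSpec(manageCephCluster{NearFullRatio: &nearFull})
	fmt.Println(spec.FullRatio == nil) // true: full ratio stays at Ceph's default (0.95)
	fmt.Println(*spec.NearFullRatio)   // 0.8: overridden via the StorageCluster CR
}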
15 changes: 15 additions & 0 deletions api/v1/storagecluster_types.go
@@ -198,6 +198,21 @@ type ManageCephCluster struct {
// default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true` in cephCluster CR.
// The default value is `30` minutes.
OsdMaintenanceTimeout time.Duration `json:"osdMaintenanceTimeout,omitempty"`
// FullRatio is the ratio at which the cluster is considered full and ceph will stop accepting writes. Default is 0.95.
// +kubebuilder:validation:Minimum=0.0
// +kubebuilder:validation:Maximum=1.0
// +nullable
FullRatio *float64 `json:"fullRatio,omitempty"`
// NearFullRatio is the ratio at which the cluster is considered nearly full and will raise a ceph health warning. Default is 0.85.
// +kubebuilder:validation:Minimum=0.0
// +kubebuilder:validation:Maximum=1.0
// +nullable
NearFullRatio *float64 `json:"nearFullRatio,omitempty"`
// BackfillFullRatio is the ratio at which the cluster is too full for backfill. Backfill will be disabled if above this threshold. Default is 0.90.
// +kubebuilder:validation:Minimum=0.0
// +kubebuilder:validation:Maximum=1.0
// +nullable
BackfillFullRatio *float64 `json:"backfillFullRatio,omitempty"`
}

// ManageCephConfig defines how to reconcile the Ceph configuration
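Since the new fields are *float64 pointers tagged omitempty and marked nullable, an unset threshold never appears in the serialized spec at all. Below is a minimal sketch of that behaviour, using a local stand-in struct that mirrors only the JSON tags above rather than the real ManageCephCluster type.

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in mirroring only the JSON tags added in this commit, not the real
// ManageCephCluster API type.
type thresholds struct {
	FullRatio         *float64 `json:"fullRatio,omitempty"`
	NearFullRatio     *float64 `json:"nearFullRatio,omitempty"`
	BackfillFullRatio *float64 `json:"backfillFullRatio,omitempty"`
}

func main() {
	full := 0.92
	// Only FullRatio is set; the other two are nil and, thanks to omitempty,
	// never appear in the marshalled spec, so Ceph keeps its defaults
	// (0.85 nearfull, 0.90 backfillfull).
	out, _ := json.Marshal(thresholds{FullRatio: &full})
	fmt.Println(string(out)) // {"fullRatio":0.92}
}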
3 changes: 3 additions & 0 deletions controllers/storagecluster/cephcluster.go
@@ -459,6 +459,9 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, kmsConfigMap *co
StorageClassDeviceSets: newStorageClassDeviceSets(sc),
Store: osdStore,
FlappingRestartIntervalHours: 24,
FullRatio: sc.Spec.ManagedResources.CephCluster.FullRatio,
NearFullRatio: sc.Spec.ManagedResources.CephCluster.NearFullRatio,
BackfillFullRatio: sc.Spec.ManagedResources.CephCluster.BackfillFullRatio,
},
Placement: rookCephv1.PlacementSpec{
"all": getPlacement(sc, "all"),
