From dc64df06d39d63d1aa77b26fb7ae67835958c409 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Moritz=20Wanzenb=C3=B6ck?=
Date: Thu, 9 Nov 2023 12:34:37 +0100
Subject: [PATCH 1/2] add overprovision parameter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds a new overprovision parameter. When set, the reported capacity of
a storage pool is calculated from its total capacity, scaled by the
overprovision factor, minus the space already reserved by existing
volumes. We would prefer to get this number from LINSTOR directly, but
that does not seem to be implemented yet: the "query-size-info" call
seems to return inconsistent results.

Making this an explicit parameter lets users opt in to the new
calculation.

Signed-off-by: Moritz Wanzenböck
---
 CHANGELOG.md                                  |  2 +
 pkg/client/linstor.go                         | 31 ++++++++++++++--
 pkg/client/linstor_test.go                    |  2 +-
 pkg/client/mock.go                            |  2 +-
 pkg/driver/driver.go                          |  2 +-
 .../highlevelclient/high_level_client.go      | 37 +++++++++++++++++++
 pkg/volume/parameter.go                       | 13 +++++++
 pkg/volume/paramkey_enumer.go                 |  7 ++--
 pkg/volume/volume.go                          |  2 +-
 9 files changed, 88 insertions(+), 10 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6420ec5..86a9765 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added
 
 - Support to delete downloaded backups after restore operation (k8s only).
+- New parameter `overProvision`: when set, the available capacity on a node is calculated taking into
+  account the capacity already reserved in the pool by existing volumes.
 
 ## [1.2.3] - 2023-08-31

diff --git a/pkg/client/linstor.go b/pkg/client/linstor.go
index 198071b..288135f 100644
--- a/pkg/client/linstor.go
+++ b/pkg/client/linstor.go
@@ -635,7 +635,7 @@ func (s *Linstor) Detach(ctx context.Context, volId, node string) error {
 }
 
 // CapacityBytes returns the amount of free space in the storage pool specified by the params and topology.
-func (s *Linstor) CapacityBytes(ctx context.Context, storagePool string, segments map[string]string) (int64, error) { +func (s *Linstor) CapacityBytes(ctx context.Context, storagePool string, overProvision *float64, segments map[string]string) (int64, error) { log := s.log.WithField("storage-pool", storagePool).WithField("segments", segments) var requestedStoragePools []string @@ -703,8 +703,33 @@ func (s *Linstor) CapacityBytes(ctx context.Context, storagePool string, segment continue } - if storagePool == "" || storagePool == sp.StoragePoolName { - log.Trace("adding storage pool capacity") + if sp.ProviderKind == lapi.DISKLESS { + log.Trace("not adding diskless pool") + continue + } + + if storagePool != "" && storagePool != sp.StoragePoolName { + log.Trace("not a requested storage pool") + continue + } + + if overProvision != nil { + virtualCapacity := float64(sp.TotalCapacity) * *overProvision + + reservedCapacity, err := s.client.ReservedCapacity(ctx, sp.NodeName, sp.StoragePoolName) + if err != nil { + return 0, fmt.Errorf("failed to fetch reserved capacity: %w", err) + } + + if reservedCapacity > int64(virtualCapacity) { + log.Trace("ignoring pool with exhausted capacity") + continue + } + + log.WithField("add-capacity", int64(virtualCapacity)-reservedCapacity).Trace("adding storage pool capacity") + total += int64(virtualCapacity) - reservedCapacity + } else { + log.WithField("add-capacity", sp.FreeCapacity).Trace("adding storage pool capacity") total += sp.FreeCapacity } } diff --git a/pkg/client/linstor_test.go b/pkg/client/linstor_test.go index da813be..d739ee3 100644 --- a/pkg/client/linstor_test.go +++ b/pkg/client/linstor_test.go @@ -342,7 +342,7 @@ func TestLinstor_CapacityBytes(t *testing.T) { testcase := &testcases[i] t.Run(testcase.name, func(t *testing.T) { - cap, err := cl.CapacityBytes(context.Background(), testcase.storagePool, testcase.topology) + cap, err := cl.CapacityBytes(context.Background(), testcase.storagePool, nil, testcase.topology) assert.NoError(t, err) assert.Equal(t, testcase.expectedCapacity, cap) }) diff --git a/pkg/client/mock.go b/pkg/client/mock.go index 89f1926..e3aafa8 100644 --- a/pkg/client/mock.go +++ b/pkg/client/mock.go @@ -242,7 +242,7 @@ func (s *MockStorage) Status(ctx context.Context, volId string) ([]string, *csi. 
return nodes, &csi.VolumeCondition{Abnormal: false, Message: "All replicas normal"}, nil } -func (s *MockStorage) CapacityBytes(ctx context.Context, sp string, segments map[string]string) (int64, error) { +func (s *MockStorage) CapacityBytes(ctx context.Context, pool string, overProvision *float64, segments map[string]string) (int64, error) { return 50000000, nil } diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index 2691739..63a2e5d 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -829,7 +829,7 @@ func (d Driver) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (* for _, segment := range accessibleSegments { d.log.WithField("segment", segment).Debug("Checking capacity of segment") - bytes, err := d.Storage.CapacityBytes(ctx, params.StoragePool, segment) + bytes, err := d.Storage.CapacityBytes(ctx, params.StoragePool, params.OverProvision, segment) if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } diff --git a/pkg/linstor/highlevelclient/high_level_client.go b/pkg/linstor/highlevelclient/high_level_client.go index 2d15f05..ce9aae7 100644 --- a/pkg/linstor/highlevelclient/high_level_client.go +++ b/pkg/linstor/highlevelclient/high_level_client.go @@ -158,6 +158,43 @@ func (c *HighLevelClient) NodesForTopology(ctx context.Context, segments map[str return result, nil } +func (c *HighLevelClient) ReservedCapacity(ctx context.Context, node, pool string) (int64, error) { + ress, err := c.Resources.GetResourceView(ctx, &lapi.ListOpts{ + Node: []string{node}, + StoragePool: []string{pool}, + }) + if err != nil { + return 0, err + } + + var reserved int64 + for i := range ress { + res := &ress[i] + + // can never be too careful with LINSTOR filtering + if res.NodeName != node { + continue + } + + for j := range res.Volumes { + vol := &res.Volumes[j] + if vol.StoragePoolName != pool { + continue + } + + // Last layer is the storage layer + if len(vol.LayerDataList) > 0 { + storageVol, ok := vol.LayerDataList[len(vol.LayerDataList)-1].Data.(*lapi.StorageVolume) + if ok { + reserved += storageVol.UsableSizeKib + } + } + } + } + + return reserved, nil +} + type NodeCacheProvider struct { lapi.NodeProvider timeout time.Duration diff --git a/pkg/volume/parameter.go b/pkg/volume/parameter.go index 4b76075..a7933a5 100644 --- a/pkg/volume/parameter.go +++ b/pkg/volume/parameter.go @@ -40,6 +40,7 @@ const ( postmountxfsopts resourcegroup usepvcname + overprovision ) // Parameters configuration for linstor volumes. @@ -91,6 +92,10 @@ type Parameters struct { Properties map[string]string // UsePvcName derives the volume name from the PVC name+namespace, if that information is available. UsePvcName bool + // OverProvision determines how much free capacity is reported. + // If set, free capacity is calculated by (TotalCapacity * OverProvision) - ReservedCapacity. + // If not set, the free capacity is taken directly from LINSTOR. + OverProvision *float64 } const DefaultDisklessStoragePoolName = "DfltDisklessStorPool" @@ -229,6 +234,14 @@ func NewParameters(params map[string]string, topologyPrefix string) (Parameters, // This parameter was unused. It is just parsed to not break any old storage classes that might be using // it. Storage sizes are handled via CSI requests directly. 
log.Warnf("using useless parameter '%s'", rawkey) + + case overprovision: + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return p, err + } + + p.OverProvision = &f } } diff --git a/pkg/volume/paramkey_enumer.go b/pkg/volume/paramkey_enumer.go index 6e14da3..856eae7 100644 --- a/pkg/volume/paramkey_enumer.go +++ b/pkg/volume/paramkey_enumer.go @@ -6,9 +6,9 @@ import ( "fmt" ) -const _paramKeyName = "allowremotevolumeaccessautoplaceclientlistdisklessonremainingdisklessstoragepooldonotplacewithregexencryptionfsoptslayerlistmountoptsnodelistplacementcountplacementpolicyreplicasondifferentreplicasonsamesizekibstoragepoolpostmountxfsoptsresourcegroupusepvcname" +const _paramKeyName = "allowremotevolumeaccessautoplaceclientlistdisklessonremainingdisklessstoragepooldonotplacewithregexencryptionfsoptslayerlistmountoptsnodelistplacementcountplacementpolicyreplicasondifferentreplicasonsamesizekibstoragepoolpostmountxfsoptsresourcegroupusepvcnameoverprovision" -var _paramKeyIndex = [...]uint16{0, 23, 32, 42, 61, 80, 99, 109, 115, 124, 133, 141, 155, 170, 189, 203, 210, 221, 237, 250, 260} +var _paramKeyIndex = [...]uint16{0, 23, 32, 42, 61, 80, 99, 109, 115, 124, 133, 141, 155, 170, 189, 203, 210, 221, 237, 250, 260, 273} func (i paramKey) String() string { if i < 0 || i >= paramKey(len(_paramKeyIndex)-1) { @@ -17,7 +17,7 @@ func (i paramKey) String() string { return _paramKeyName[_paramKeyIndex[i]:_paramKeyIndex[i+1]] } -var _paramKeyValues = []paramKey{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} +var _paramKeyValues = []paramKey{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} var _paramKeyNameToValueMap = map[string]paramKey{ _paramKeyName[0:23]: 0, @@ -40,6 +40,7 @@ var _paramKeyNameToValueMap = map[string]paramKey{ _paramKeyName[221:237]: 17, _paramKeyName[237:250]: 18, _paramKeyName[250:260]: 19, + _paramKeyName[260:273]: 20, } // paramKeyString retrieves an enum value from the enum constants string name. diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index 39eec58..cd91080 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -103,7 +103,7 @@ type Querier interface { // AllocationSizeKiB returns the number of KiB required to provision required bytes. AllocationSizeKiB(requiredBytes, limitBytes int64) (int64, error) // CapacityBytes returns the amount of free space, in bytes, in the storage pool specified by the params and topology. - CapacityBytes(ctx context.Context, pool string, segments map[string]string) (int64, error) + CapacityBytes(ctx context.Context, pool string, overProvision *float64, segments map[string]string) (int64, error) } // Mounter handles the filesystems located on volumes. From add38c48fa79f15ac61ae82b0a8aec9190353cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20Wanzenb=C3=B6ck?= Date: Thu, 9 Nov 2023 14:20:47 +0100 Subject: [PATCH 2/2] add cache for resource view MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the addition of the overprovision parameter, we potentially start a lot of requests for the resource view. So we add a cache for this call, in the same way we have a cache for the node state and storage pools. 
Signed-off-by: Moritz Wanzenböck --- .../highlevelclient/high_level_client.go | 87 ++++++++++++++++--- 1 file changed, 73 insertions(+), 14 deletions(-) diff --git a/pkg/linstor/highlevelclient/high_level_client.go b/pkg/linstor/highlevelclient/high_level_client.go index ce9aae7..1580ed2 100644 --- a/pkg/linstor/highlevelclient/high_level_client.go +++ b/pkg/linstor/highlevelclient/high_level_client.go @@ -25,6 +25,7 @@ import ( "sync" "time" + lc "github.com/LINBIT/golinstor" lapi "github.com/LINBIT/golinstor/client" "github.com/container-storage-interface/spec/lib/go/csi" @@ -52,6 +53,11 @@ func NewHighLevelClient(options ...lapi.Option) (*HighLevelClient, error) { timeout: 1 * time.Minute, } + c.Resources = &ResourceCacheProvider{ + ResourceProvider: c.Resources, + timeout: 1 * time.Minute, + } + return &HighLevelClient{Client: c}, nil } @@ -168,6 +174,7 @@ func (c *HighLevelClient) ReservedCapacity(ctx context.Context, node, pool strin } var reserved int64 + for i := range ress { res := &ress[i] @@ -215,7 +222,7 @@ func (n *NodeCacheProvider) GetAll(ctx context.Context, opts ...*lapi.ListOpts) if n.nodesUpdated.Add(n.timeout).After(now) { return filter(n.nodes, func(node lapi.Node) string { return node.Name - }, opts...), nil + }, nil, opts...), nil } nodes, err := n.NodeProvider.GetAll(ctx) @@ -228,7 +235,7 @@ func (n *NodeCacheProvider) GetAll(ctx context.Context, opts ...*lapi.ListOpts) return filter(n.nodes, func(node lapi.Node) string { return node.Name - }, opts...), nil + }, nil, opts...), nil } func (n *NodeCacheProvider) GetStoragePoolView(ctx context.Context, opts ...*lapi.ListOpts) ([]lapi.StoragePool, error) { @@ -238,9 +245,11 @@ func (n *NodeCacheProvider) GetStoragePoolView(ctx context.Context, opts ...*lap now := time.Now() if n.poolsUpdated.Add(n.timeout).After(now) { - return filter(n.pools, func(pool lapi.StoragePool) string { - return pool.NodeName - }, opts...), nil + return filter(n.pools, + func(pool lapi.StoragePool) string { return pool.NodeName }, + func(pool lapi.StoragePool) string { return pool.StoragePoolName }, + opts..., + ), nil } pools, err := n.NodeProvider.GetStoragePoolView(ctx) @@ -251,30 +260,80 @@ func (n *NodeCacheProvider) GetStoragePoolView(ctx context.Context, opts ...*lap n.pools = pools n.poolsUpdated = now - return filter(n.pools, func(pool lapi.StoragePool) string { - return pool.NodeName - }, opts...), nil + return filter(n.pools, + func(pool lapi.StoragePool) string { return pool.NodeName }, + func(pool lapi.StoragePool) string { return pool.StoragePoolName }, + opts..., + ), nil +} + +type ResourceCacheProvider struct { + lapi.ResourceProvider + timeout time.Duration + resourceViewMu sync.Mutex + resourceViewUpdated time.Time + resourceView []lapi.ResourceWithVolumes +} + +func (r *ResourceCacheProvider) GetResourceView(ctx context.Context, opts ...*lapi.ListOpts) ([]lapi.ResourceWithVolumes, error) { + r.resourceViewMu.Lock() + defer r.resourceViewMu.Unlock() + + now := time.Now() + + if r.resourceViewUpdated.Add(r.timeout).After(now) { + return filter(r.resourceView, + func(res lapi.ResourceWithVolumes) string { return res.NodeName }, + func(res lapi.ResourceWithVolumes) string { return res.Props[lc.KeyStorPoolName] }, + opts..., + ), nil + } + + view, err := r.ResourceProvider.GetResourceView(ctx) + if err != nil { + return nil, err + } + + r.resourceView = view + r.resourceViewUpdated = now + + return filter(r.resourceView, + func(res lapi.ResourceWithVolumes) string { return res.NodeName }, + func(res lapi.ResourceWithVolumes) 
string { return res.Props[lc.KeyStorPoolName] }, + opts..., + ), nil } -func filter[T any](items []T, getNodeName func(T) string, opts ...*lapi.ListOpts) []T { +func filter[T any](items []T, getNodeName, getPoolName func(T) string, opts ...*lapi.ListOpts) []T { filterNames := make(map[string]struct{}) + filterPools := make(map[string]struct{}) for _, o := range opts { for _, n := range o.Node { filterNames[n] = struct{}{} } - } - if len(filterNames) == 0 { - return items + for _, sp := range o.StoragePool { + filterPools[sp] = struct{}{} + } } var result []T for _, item := range items { - if _, ok := filterNames[getNodeName(item)]; ok { - result = append(result, item) + if len(filterNames) > 0 { + if _, ok := filterNames[getNodeName(item)]; !ok { + continue + } + } + + if len(filterPools) > 0 { + if _, ok := filterPools[getPoolName(item)]; !ok { + continue + } } + + result = append(result, item) } return result
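
For reference, the capacity calculation that patch 1 enables, as a
standalone runnable sketch with hypothetical numbers (the real logic
lives in Linstor.CapacityBytes; all sizes in KiB, matching LINSTOR's
units):

    package main

    import "fmt"

    func main() {
        // Hypothetical pool: 100 GiB total, 150 GiB already reserved by
        // existing volumes, overProvision factor 2.0 from the parameters.
        totalCapacityKiB := int64(100 * 1024 * 1024)
        reservedKiB := int64(150 * 1024 * 1024)
        overProvision := 2.0

        // Virtual capacity: total scaled by the overprovision factor.
        virtualKiB := int64(float64(totalCapacityKiB) * overProvision)

        // Pools whose reservations exceed the virtual capacity are skipped.
        if reservedKiB > virtualKiB {
            fmt.Println("pool exhausted: contributes 0")
            return
        }

        // Otherwise the pool contributes virtual minus reserved capacity.
        fmt.Printf("pool contributes %d KiB\n", virtualKiB-reservedKiB)
    }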