Merge pull request #120 from jp39/hostpath-auto
Add "auto" provisioning type.
ccremer authored Aug 19, 2024
2 parents 829d4a5 + a234118 commit 5884d5b
Showing 7 changed files with 136 additions and 64 deletions.
31 changes: 31 additions & 0 deletions README.md
@@ -15,6 +15,16 @@ but the `PersistentVolume` objects will have a [NodeAffinity][node affinity] con

![architecture with Hostpath](architecture.hostpath.drawio.svg "Architecture with Hostpath provisioning")

As a third option, if the ZFS host is part of the cluster, you can let the provisioner choose
between [NFS][nfs] and [HostPath][hostpath] by using the `Auto` mode. If the requested
[AccessModes][access modes] in the Persistent Volume Claim contain `ReadWriteOnce` (the volume
can only be accessed by pods running on the same node) or `ReadWriteOncePod` (the volume can only
be accessed by a single pod at any time), then [HostPath][hostpath] is used and a
[NodeAffinity][node affinity] is configured on the `PersistentVolume` objects so that the
scheduler automatically places the corresponding pods onto the ZFS host. Otherwise,
[NFS][nfs] is used and no [NodeAffinity][node affinity] is set. If conflicting
[AccessModes][access modes] are requested (for example `ReadWriteOnce` together with
`ReadWriteMany`), [NFS][nfs] takes precedence.
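
For example, under the `Auto` mode a claim like the following (names and sizes are illustrative)
requests only `ReadWriteOnce` and is therefore provisioned as a HostPath volume pinned to the ZFS
host; requesting `ReadOnlyMany` or `ReadWriteMany` instead, or in addition, would make the
provisioner fall back to NFS:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data           # illustrative name
spec:
  storageClassName: zfs-auto   # illustrative; a StorageClass configured with `type: auto`
  accessModes:
    - ReadWriteOnce            # no ReadOnlyMany/ReadWriteMany -> HostPath with NodeAffinity
  resources:
    requests:
      storage: 5Gi
```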

Currently, all ZFS attributes are inherited from the parent dataset.

For more information about external storage in kubernetes, see
@@ -85,6 +95,26 @@ parameters:
```
For NFS, you can also specify other options, as described in [exports(5)][man exports].

The following example configures a storage class using the `Auto` type. The provisioner
decides whether [HostPath][hostpath] or [NFS][nfs] is used based on the
[AccessModes][access modes] requested by the persistent volume claim.

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: zfs-auto
provisioner: pv.kubernetes.io/zfs
reclaimPolicy: Retain
parameters:
  parentDataset: tank/kubernetes
  hostname: storage-1.domain.tld
  type: auto
  node: storage-1 # the name of the node where the ZFS datasets are located.
  shareProperties: rw,no_root_squash
  reserveSpace: "true"
```

## Notes

### Reclaim policy
@@ -189,3 +219,4 @@ I (@ccremer) have been allowed to take over maintenance for this repository.
[helm chart]: https://github.com/ccremer/kubernetes-zfs-provisioner/blob/master/charts/kubernetes-zfs-provisioner/README.md
[gentics]: https://www.gentics.com/genticscms/index.en.html
[gentics repo]: https://github.com/gentics/kubernetes-zfs-provisioner
[access modes]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
2 changes: 1 addition & 1 deletion charts/kubernetes-zfs-provisioner/values.yaml
@@ -37,7 +37,7 @@ storageClass:
# policy: "Delete"
# # -- NFS export properties (see `exports(5)`)
# shareProperties: ""
# # -- Provision type, one of [`nfs`, `hostpath`]
# # -- Provision type, one of [`nfs`, `hostpath`, `auto`]
# type: "nfs"
# # -- Override `kubernetes.io/hostname` from `hostName` parameter for
# # `HostPath` node affinity
51 changes: 32 additions & 19 deletions pkg/provisioner/parameters.go
@@ -19,31 +19,34 @@ const (
parameters:
parentDataset: tank/volumes
hostname: my-zfs-host.localdomain
type: nfs|hostpath
type: nfs|hostPath|auto
shareProperties: rw=10.0.0.0/8,no_root_squash
node: my-zfs-host
reserveSpace: true|false
*/

type ProvisioningType string

const (
Nfs ProvisioningType = "nfs"
HostPath ProvisioningType = "hostPath"
Auto ProvisioningType = "auto"
)

type (
// ZFSStorageClassParameters represents the parameters on the `StorageClass`
// object. It is used to ease access and validate those parameters at run time.
ZFSStorageClassParameters struct {
// ParentDataset of the zpool. Needs to be existing on the target ZFS host.
ParentDataset string
// Hostname of the target ZFS host. Will be used to connect over SSH.
Hostname string
NFS *NFSParameters
HostPath *HostPathParameters
ReserveSpace bool
}
NFSParameters struct {
// ShareProperties specifies additional properties to pass to 'zfs create sharenfs=%s'.
ShareProperties string
}
HostPathParameters struct {
// NodeName overrides the hostname if the Kubernetes node name is different than the ZFS target host. Used for Affinity
NodeName string
Hostname string
Type ProvisioningType
// NFSShareProperties specifies additional properties to pass to 'zfs create sharenfs=%s'.
NFSShareProperties string
// HostPathNodeName overrides the hostname if the Kubernetes node name is different than the ZFS target host. Used for Affinity
HostPathNodeName string
ReserveSpace bool
}
)

@@ -79,16 +82,26 @@ func NewStorageClassParameters(parameters map[string]string) (*ZFSStorageClassPa
typeParam := parameters[TypeParameter]
switch typeParam {
case "hostpath", "hostPath", "HostPath", "Hostpath", "HOSTPATH":
p.HostPath = &HostPathParameters{NodeName: parameters[NodeNameParameter]}
return p, nil
p.Type = HostPath
case "nfs", "Nfs", "NFS":
p.Type = Nfs
case "auto", "Auto", "AUTO":
p.Type = Auto
default:
return nil, fmt.Errorf("invalid '%s' parameter value: %s", TypeParameter, typeParam)
}

if p.Type == HostPath || p.Type == Auto {
p.HostPathNodeName = parameters[NodeNameParameter]
}

if p.Type == Nfs || p.Type == Auto {
shareProps := parameters[SharePropertiesParameter]
if shareProps == "" {
shareProps = "on"
}
p.NFS = &NFSParameters{ShareProperties: shareProps}
return p, nil
default:
return nil, fmt.Errorf("invalid '%s' parameter value: %s", TypeParameter, typeParam)
p.NFSShareProperties = shareProps
}

return p, nil
}
10 changes: 5 additions & 5 deletions pkg/provisioner/parameters_test.go
@@ -77,7 +77,7 @@ func TestNewStorageClassParameters(t *testing.T) {
SharePropertiesParameter: "rw",
},
},
want: &ZFSStorageClassParameters{NFS: &NFSParameters{ShareProperties: "rw"}},
want: &ZFSStorageClassParameters{NFSShareProperties: "rw"},
},
{
name: "GivenCorrectSpec_WhenTypeNfsWithoutProperties_ThenReturnNfsParametersWithDefault",
@@ -88,7 +88,7 @@ func TestNewStorageClassParameters(t *testing.T) {
TypeParameter: "nfs",
},
},
want: &ZFSStorageClassParameters{NFS: &NFSParameters{ShareProperties: "on"}},
want: &ZFSStorageClassParameters{NFSShareProperties: "on"},
},
{
name: "GivenCorrectSpec_WhenTypeHostPath_ThenReturnHostPathParameters",
@@ -100,7 +100,7 @@ func TestNewStorageClassParameters(t *testing.T) {
NodeNameParameter: "my-node",
},
},
want: &ZFSStorageClassParameters{HostPath: &HostPathParameters{NodeName: "my-node"}},
want: &ZFSStorageClassParameters{HostPathNodeName: "my-node"},
},
}
for _, tt := range tests {
@@ -112,8 +112,8 @@ func TestNewStorageClassParameters(t *testing.T) {
return
}
assert.NoError(t, err)
assert.Equal(t, tt.want.NFS, result.NFS)
assert.Equal(t, tt.want.HostPath, result.HostPath)
assert.Equal(t, tt.want.NFSShareProperties, result.NFSShareProperties)
assert.Equal(t, tt.want.HostPathNodeName, result.HostPathNodeName)
})
}
}
93 changes: 58 additions & 35 deletions pkg/provisioner/provision.go
@@ -3,10 +3,9 @@ package provisioner
import (
"context"
"fmt"
"slices"
"strconv"

"k8s.io/klog/v2"

"github.com/ccremer/kubernetes-zfs-provisioner/pkg/zfs"

v1 "k8s.io/api/core/v1"
@@ -24,8 +23,9 @@ func (p *ZFSProvisioner) Provision(ctx context.Context, options controller.Provi
datasetPath := fmt.Sprintf("%s/%s", parameters.ParentDataset, options.PVName)
properties := make(map[string]string)

if parameters.NFS != nil {
properties["sharenfs"] = parameters.NFS.ShareProperties
useHostPath := canUseHostPath(parameters, options)
if !useHostPath {
properties[ShareNfsProperty] = parameters.NFSShareProperties
}

var reclaimPolicy v1.PersistentVolumeReclaimPolicy
@@ -73,28 +73,44 @@ func (p *ZFSProvisioner) Provision(ctx context.Context, options controller.Provi
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: reclaimPolicy,
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany, v1.ReadWriteOnce},
AccessModes: createAccessModes(options, useHostPath),
Capacity: v1.ResourceList{
v1.ResourceStorage: options.PVC.Spec.Resources.Requests[v1.ResourceStorage],
},
PersistentVolumeSource: createVolumeSource(parameters, dataset),
NodeAffinity: createNodeAffinity(parameters),
PersistentVolumeSource: createVolumeSource(parameters, dataset, useHostPath),
NodeAffinity: createNodeAffinity(parameters, useHostPath),
},
}
return pv, controller.ProvisioningFinished, nil
}

func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Dataset) v1.PersistentVolumeSource {
if parameters.NFS != nil {
return v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: parameters.Hostname,
Path: dataset.Mountpoint,
ReadOnly: false,
},
func canUseHostPath(parameters *ZFSStorageClassParameters, options controller.ProvisionOptions) bool {
switch parameters.Type {
case Nfs:
return false
case HostPath:
return true
case Auto:
if !slices.Contains(options.PVC.Spec.AccessModes, v1.ReadOnlyMany) && !slices.Contains(options.PVC.Spec.AccessModes, v1.ReadWriteMany) {
return true
}
}
if parameters.HostPath != nil {
return false
}

func createAccessModes(options controller.ProvisionOptions, useHostPath bool) []v1.PersistentVolumeAccessMode {
if slices.Contains(options.PVC.Spec.AccessModes, v1.ReadWriteOncePod) {
return []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
}
accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
if !useHostPath {
accessModes = append(accessModes, v1.ReadOnlyMany, v1.ReadWriteMany)
}
return accessModes
}

func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Dataset, useHostPath bool) v1.PersistentVolumeSource {
if useHostPath {
hostPathType := v1.HostPathDirectory
return v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
@@ -103,27 +119,34 @@ func createVolumeSource(parameters *ZFSStorageClassParameters, dataset *zfs.Data
},
}
}
klog.Exitf("Programmer error: Missing implementation for volume source: %v", parameters)
return v1.PersistentVolumeSource{}

return v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: parameters.Hostname,
Path: dataset.Mountpoint,
ReadOnly: false,
},
}
}

func createNodeAffinity(parameters *ZFSStorageClassParameters) *v1.VolumeNodeAffinity {
if parameters.HostPath != nil {
node := parameters.HostPath.NodeName
if node == "" {
node = parameters.Hostname
}
return &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Values: []string{node},
Operator: v1.NodeSelectorOpIn,
Key: v1.LabelHostname,
},
func createNodeAffinity(parameters *ZFSStorageClassParameters, useHostPath bool) *v1.VolumeNodeAffinity {
if !useHostPath {
return nil
}

node := parameters.HostPathNodeName
if node == "" {
node = parameters.Hostname
}
return &v1.VolumeNodeAffinity{Required: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Values: []string{node},
Operator: v1.NodeSelectorOpIn,
Key: v1.LabelHostname,
},
},
}}}
}
return nil
},
}}}
}
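
For reference, the following standalone sketch (not part of this commit; function names are
illustrative) mirrors the selection logic introduced above: which backend `Auto` picks and which
access modes the resulting `PersistentVolume` advertises. The real implementation additionally
honours the explicit `nfs` and `hostPath` types.

```go
package main

import (
	"fmt"
	"slices"

	v1 "k8s.io/api/core/v1"
)

// autoUsesHostPath mirrors the Auto branch of canUseHostPath: HostPath is
// chosen only when no "many" access mode is requested by the claim.
func autoUsesHostPath(requested []v1.PersistentVolumeAccessMode) bool {
	return !slices.Contains(requested, v1.ReadOnlyMany) &&
		!slices.Contains(requested, v1.ReadWriteMany)
}

// pvAccessModes mirrors createAccessModes: ReadWriteOncePod stays exclusive,
// and the "many" modes are only advertised for NFS-backed volumes.
func pvAccessModes(requested []v1.PersistentVolumeAccessMode, useHostPath bool) []v1.PersistentVolumeAccessMode {
	if slices.Contains(requested, v1.ReadWriteOncePod) {
		return []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
	}
	modes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
	if !useHostPath {
		modes = append(modes, v1.ReadOnlyMany, v1.ReadWriteMany)
	}
	return modes
}

func main() {
	cases := [][]v1.PersistentVolumeAccessMode{
		{v1.ReadWriteOnce},
		{v1.ReadWriteOncePod},
		{v1.ReadWriteMany},
		{v1.ReadWriteOnce, v1.ReadWriteMany}, // conflicting modes fall back to NFS
	}
	for _, requested := range cases {
		hostPath := autoUsesHostPath(requested)
		fmt.Printf("requested=%v -> hostPath=%v, pvModes=%v\n",
			requested, hostPath, pvAccessModes(requested, hostPath))
	}
}
```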
12 changes: 8 additions & 4 deletions pkg/provisioner/provision_test.go
@@ -53,6 +53,10 @@ func TestProvisionNfs(t *testing.T) {
pv, _, err := p.Provision(context.Background(), options)
require.NoError(t, err)
assertBasics(t, stub, pv, expectedDatasetName, expectedHost)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
// Pods located on other nodes can mount this PV
assert.Contains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Equal(t, v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)

@@ -66,10 +70,6 @@ func TestProvisionNfs(t *testing.T) {
func assertBasics(t *testing.T, stub *zfsStub, pv *v1.PersistentVolume, expectedDataset string, expectedHost string) {
stub.AssertExpectations(t)

assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Contains(t, pv.Annotations, "my/annotation")
assert.Equal(t, expectedDataset, pv.Annotations[DatasetPathAnnotation])
assert.Equal(t, expectedHost, pv.Annotations[ZFSHostAnnotation])
@@ -111,6 +111,10 @@ func TestProvisionHostPath(t *testing.T) {
pv, _, err := p.Provision(context.Background(), options)
require.NoError(t, err)
assertBasics(t, stub, pv, expectedDatasetName, expectedHost)
assert.Contains(t, pv.Spec.AccessModes, v1.ReadWriteOnce)
// Pods located on other nodes cannot mount this PV
assert.NotContains(t, pv.Spec.AccessModes, v1.ReadOnlyMany)
assert.NotContains(t, pv.Spec.AccessModes, v1.ReadWriteMany)

assert.Equal(t, policy, pv.Spec.PersistentVolumeReclaimPolicy)

1 change: 1 addition & 0 deletions pkg/provisioner/provisioner.go
@@ -11,6 +11,7 @@ const (

RefQuotaProperty = "refquota"
RefReservationProperty = "refreservation"
ShareNfsProperty = "sharenfs"
ManagedByProperty = "io.kubernetes.pv.zfs:managed_by"
ReclaimPolicyProperty = "io.kubernetes.pv.zfs:reclaim_policy"
)