From f76284eb492ffad0152cea90b0d1b26e46f9aab4 Mon Sep 17 00:00:00 2001
From: sujeet01
Date: Thu, 17 Aug 2023 12:55:02 +0530
Subject: [PATCH 1/4] Add 'go mod tidy' to 'test' target

---
 Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Makefile b/Makefile
index 79251e5..5394218 100755
--- a/Makefile
+++ b/Makefile
@@ -136,6 +136,7 @@ vet: ## Run go vet against code.
 
 test: generate-mocks fmt vet envtest ## Run tests.
 	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
+	go mod tidy
 
 .PHONY: add-license
 add-license: addlicense ## Add license headers to all go files.

From aa9be38f7218bc1dd0f7aa9b51ec323f5badde96 Mon Sep 17 00:00:00 2001
From: sujeet01
Date: Thu, 17 Aug 2023 13:14:00 +0530
Subject: [PATCH 2/4] Add volume readiness check and corresponding tests for
 'CreateVolume'

This commit introduces a new helper function, 'waitForVolumeAvailability',
which is called by the 'CreateVolume' function of our CSI driver. This
function ensures that a volume reaches the 'Available' state before the
'CreateVolume' request is completed. To validate this functionality,
corresponding tests have been added. This enhancement improves reliability
and provides clearer error messages for debugging.
---
 pkg/driver/constants.go       |   8 +
 pkg/driver/controller.go      |  35 +++-
 pkg/driver/controller_test.go | 342 ++++++++++++++++++++++------------
 3 files changed, 261 insertions(+), 124 deletions(-)

diff --git a/pkg/driver/constants.go b/pkg/driver/constants.go
index ffdb1e5..21dafb3 100644
--- a/pkg/driver/constants.go
+++ b/pkg/driver/constants.go
@@ -15,6 +15,8 @@
 package driver
 
 import (
+	"time"
+
 	"github.com/onmetal/onmetal-csi-driver/pkg/utils"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -46,4 +48,10 @@ const (
 	CSIDriverName    = "csi.onmetal.de"
 	topologyKey      = "topology." + CSIDriverName + "/zone"
 	volumeFieldOwner = client.FieldOwner("csi.onmetal.de/volume")
+
+	// Constants for volume polling mechanism
+
+	waitVolumeInitDelay   = 1 * time.Second // Initial delay before starting to poll for volume status
+	waitVolumeFactor      = 1.1             // Factor by which the delay increases with each poll attempt
+	waitVolumeActiveSteps = 5               // Maximum number of polling attempts before giving up
 )
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index da11db5..7a3d343 100755
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -31,6 +31,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -129,7 +130,12 @@ func (d *driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
 	if err := d.onmetalClient.Patch(ctx, volume, client.Apply, volumeFieldOwner, client.ForceOwnership); err != nil {
 		return nil, status.Errorf(codes.Internal, "Failed to patch volume %s: %v", client.ObjectKeyFromObject(volume), err)
 	}
-	klog.InfoS("Applied volume", "Volume", client.ObjectKeyFromObject(volume))
+
+	if err := waitForVolumeAvailability(ctx, d.onmetalClient, volume); err != nil {
+		return nil, fmt.Errorf("failed to confirm availability of the volume: %w", err)
+	}
+
+	klog.InfoS("Volume applied and is now available", "Volume", client.ObjectKeyFromObject(volume))
 
 	return &csi.CreateVolumeResponse{
 		Volume: &csi.Volume{
@@ -148,6 +154,31 @@ func (d *driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
 	}, nil
 }
 
+// waitForVolumeAvailability is a helper function that waits for a volume to become available.
+// It uses an exponential backoff strategy to periodically check the status of the volume.
+// The function returns an error if the volume does not become available within the specified number of attempts.
+func waitForVolumeAvailability(ctx context.Context, onmetalClient client.Client, volume *storagev1alpha1.Volume) error { + backoff := wait.Backoff{ + Duration: waitVolumeInitDelay, + Factor: waitVolumeFactor, + Steps: waitVolumeActiveSteps, + } + + err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) { + err := onmetalClient.Get(ctx, client.ObjectKey{Namespace: volume.Namespace, Name: volume.Name}, volume) + if err == nil && volume.Status.State == storagev1alpha1.VolumeStateAvailable { + return true, nil + } + return false, err + }) + + if wait.Interrupted(err) { + return fmt.Errorf("volume %s did not reach 'Available' state within the defined timeout: %w", client.ObjectKeyFromObject(volume), err) + } + + return err +} + func (d *driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { klog.InfoS("Deleting volume", "Volume", req.GetVolumeId()) if req.GetVolumeId() == "" { @@ -162,7 +193,7 @@ func (d *driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) if err := d.onmetalClient.Delete(ctx, vol); err != nil { return nil, status.Errorf(codes.Internal, "Failed to delete volume %s: %v", client.ObjectKeyFromObject(vol), err) } - klog.InfoS("Deleted deleted volume", "Volume", req.GetVolumeId()) + klog.InfoS("Deleted volume", "Volume", req.GetVolumeId()) return &csi.DeleteVolumeResponse{}, nil } diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index 632970d..ede9542 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -75,53 +75,60 @@ var _ = Describe("Controller", func() { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - Parameters: map[string]string{ - "type": volumeClassExpandOnly.Name, - "fstype": "ext4", - }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ + errCh := make(chan error) + + go func() { + defer GinkgoRecover() + res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ { - Segments: map[string]string{ - topologyKey: "volumepool", + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", + Parameters: map[string]string{ + "type": volumeClassExpandOnly.Name, + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, }, }, }, - }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Volume).To(SatisfyAll( - HaveField("VolumeId", "volume"), - HaveField("CapacityBytes", volSize), - HaveField("AccessibleTopology", ContainElement( - HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "volumepool"))), - ), - HaveField("VolumeContext", SatisfyAll( - HaveKeyWithValue("volume_id", "volume"), - 
HaveKeyWithValue("volume_name", "volume"), - HaveKeyWithValue("volume_pool", "volumepool"), - HaveKeyWithValue("fstype", "ext4"), - HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), - )) - - By("patching the volume state to be available") + }) + errCh <- err + + Expect(err).NotTo(HaveOccurred()) + Expect(res.Volume).To(SatisfyAll( + HaveField("VolumeId", "volume"), + HaveField("CapacityBytes", volSize), + HaveField("AccessibleTopology", ContainElement( + HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "volumepool"))), + ), + HaveField("VolumeContext", SatisfyAll( + HaveKeyWithValue("volume_id", "volume"), + HaveKeyWithValue("volume_name", "volume"), + HaveKeyWithValue("volume_pool", "volumepool"), + HaveKeyWithValue("fstype", "ext4"), + HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), + )) + }() + + By("waiting for the volume to be created") volume = &storagev1alpha1.Volume{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns.Name, @@ -131,95 +138,158 @@ var _ = Describe("Controller", func() { Eventually(Object(volume)).Should(SatisfyAll( HaveField("Status.State", storagev1alpha1.VolumeStatePending), )) + + By("patching the volume state to make it available") volumeBase := volume.DeepCopy() volume.Status.State = storagev1alpha1.VolumeStateAvailable Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + + err := <-errCh + Expect(err).NotTo(HaveOccurred()) }) It("should not assign the volume to a volume pool if the pool is not available", func(ctx SpecContext) { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-wrong-pool", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - Parameters: map[string]string{ - "type": "slow", - "fstype": "ext4", - }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ + errCh := make(chan error) + go func() { + defer GinkgoRecover() + res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-wrong-pool", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ { - Segments: map[string]string{ - topologyKey: "foo", + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "foo", + Parameters: map[string]string{ + "type": "slow", + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "foo", + }, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "foo", + }, }, }, }, + }) + errCh <- err + + Expect(err).NotTo(HaveOccurred()) + Expect(res.Volume).To(SatisfyAll( + HaveField("VolumeId", "volume-wrong-pool"), + HaveField("CapacityBytes", volSize), + HaveField("AccessibleTopology", ContainElement( + HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "foo"))), + ), + HaveField("VolumeContext", SatisfyAll( + 
HaveKeyWithValue("volume_id", "volume-wrong-pool"), + HaveKeyWithValue("volume_name", "volume-wrong-pool"), + HaveKeyWithValue("volume_pool", ""), + HaveKeyWithValue("fstype", "ext4"), + HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), + )) + }() + + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-wrong-pool", }, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Volume).To(SatisfyAll( - HaveField("VolumeId", "volume-wrong-pool"), - HaveField("CapacityBytes", volSize), - HaveField("AccessibleTopology", ContainElement( - HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "foo"))), - ), - HaveField("VolumeContext", SatisfyAll( - HaveKeyWithValue("volume_id", "volume-wrong-pool"), - HaveKeyWithValue("volume_name", "volume-wrong-pool"), - HaveKeyWithValue("volume_pool", ""), - HaveKeyWithValue("fstype", "ext4"), - HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + err := <-errCh + Expect(err).NotTo(HaveOccurred()) }) It("should delete a volume", func(ctx SpecContext) { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-to-delete", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - Parameters: map[string]string{ - "type": "slow", - "fstype": "ext4", - }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ + errCh := make(chan error) + + go func() { + _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-to-delete", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ { - Segments: map[string]string{ - topologyKey: "volumepool", + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", + Parameters: map[string]string{ + "type": "slow", + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, }, }, }, + }) + errCh <- err + Expect(err).NotTo(HaveOccurred()) + + }() + + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-to-delete", }, - }) + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + 
volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + + err := <-errCh Expect(err).NotTo(HaveOccurred()) By("deleting the volume through the csi driver") @@ -294,37 +364,65 @@ var _ = Describe("Controller", func() { By("creating a volume with volume class other than expand only") volSize := int64(5 * 1024 * 1024 * 1024) - _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-not-expand", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, - }, - Parameters: map[string]string{ - "type": volumeClass.Name, - "fstype": "ext4", - }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ + errCh := make(chan error) + + go func() { + defer GinkgoRecover() + _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-not-expand", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ { - Segments: map[string]string{ - topologyKey: "volumepool", + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, }, }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", + Parameters: map[string]string{ + "type": volumeClass.Name, + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, + }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", + }, }, }, }, + }) + errCh <- err + Expect(err).NotTo(HaveOccurred()) + }() + + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-not-expand", }, - }) + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + + err := <-errCh Expect(err).NotTo(HaveOccurred()) By("resizing the volume") From d9807ad553ca29ca55de321b730970d076cc9351 Mon Sep 17 00:00:00 2001 From: Andreas Fritzler Date: Tue, 29 Aug 2023 11:31:32 +0200 Subject: [PATCH 3/4] Restructure go routines in CreateVolume call --- pkg/driver/controller_test.go | 427 ++++++++++++++++------------------ 1 file changed, 206 insertions(+), 221 deletions(-) diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index ede9542..babe31c 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -75,221 +75,210 @@ var _ = Describe("Controller", func() { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - errCh := make(chan error) + // Start a go routine to patch the created Volume to an available state in order to succeed + // the CreateVolume call as it waits for a 
Volume to reach an available state. go func() { defer GinkgoRecover() - res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, + By("waiting for the volume to be created") + volume = &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume", }, - Parameters: map[string]string{ - "type": volumeClassExpandOnly.Name, - "fstype": "ext4", + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + }() + + By("creating a Volume") + res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Parameters: map[string]string{ + "type": volumeClassExpandOnly.Name, + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, }, - }) - errCh <- err - - Expect(err).NotTo(HaveOccurred()) - Expect(res.Volume).To(SatisfyAll( - HaveField("VolumeId", "volume"), - HaveField("CapacityBytes", volSize), - HaveField("AccessibleTopology", ContainElement( - HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "volumepool"))), - ), - HaveField("VolumeContext", SatisfyAll( - HaveKeyWithValue("volume_id", "volume"), - HaveKeyWithValue("volume_name", "volume"), - HaveKeyWithValue("volume_pool", "volumepool"), - HaveKeyWithValue("fstype", "ext4"), - HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), - )) - }() - - By("waiting for the volume to be created") - volume = &storagev1alpha1.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.Name, - Name: "volume", }, - } - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStatePending), - )) - - By("patching the volume state to make it available") - volumeBase := volume.DeepCopy() - volume.Status.State = storagev1alpha1.VolumeStateAvailable - Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), - )) - - err := <-errCh + }) Expect(err).NotTo(HaveOccurred()) + Expect(res.Volume).To(SatisfyAll( + HaveField("VolumeId", "volume"), + HaveField("CapacityBytes", volSize), + HaveField("AccessibleTopology", 
ContainElement( + HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "volumepool"))), + ), + HaveField("VolumeContext", SatisfyAll( + HaveKeyWithValue("volume_id", "volume"), + HaveKeyWithValue("volume_name", "volume"), + HaveKeyWithValue("volume_pool", "volumepool"), + HaveKeyWithValue("fstype", "ext4"), + HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), + )) }) It("should not assign the volume to a volume pool if the pool is not available", func(ctx SpecContext) { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - errCh := make(chan error) + go func() { defer GinkgoRecover() - res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-wrong-pool", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-wrong-pool", }, - Parameters: map[string]string{ - "type": "slow", - "fstype": "ext4", + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + }() + + By("creating a Volume") + res, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-wrong-pool", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "foo", - }, + }, + Parameters: map[string]string{ + "type": "slow", + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "foo", }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "foo", - }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "foo", }, }, }, - }) - errCh <- err - - Expect(err).NotTo(HaveOccurred()) - Expect(res.Volume).To(SatisfyAll( - HaveField("VolumeId", "volume-wrong-pool"), - HaveField("CapacityBytes", volSize), - HaveField("AccessibleTopology", ContainElement( - HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "foo"))), - ), - HaveField("VolumeContext", SatisfyAll( - HaveKeyWithValue("volume_id", "volume-wrong-pool"), - HaveKeyWithValue("volume_name", "volume-wrong-pool"), - HaveKeyWithValue("volume_pool", ""), - HaveKeyWithValue("fstype", "ext4"), - HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), - )) - }() - - By("waiting for the volume to be created") - volume := &storagev1alpha1.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.Name, - Name: "volume-wrong-pool", }, - } - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", 
storagev1alpha1.VolumeStatePending), - )) - - By("patching the volume state to make it available") - volumeBase := volume.DeepCopy() - volume.Status.State = storagev1alpha1.VolumeStateAvailable - Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), - )) - err := <-errCh + }) Expect(err).NotTo(HaveOccurred()) + Expect(res.Volume).To(SatisfyAll( + HaveField("VolumeId", "volume-wrong-pool"), + HaveField("CapacityBytes", volSize), + HaveField("AccessibleTopology", ContainElement( + HaveField("Segments", HaveKeyWithValue("topology.csi.onmetal.de/zone", "foo"))), + ), + HaveField("VolumeContext", SatisfyAll( + HaveKeyWithValue("volume_id", "volume-wrong-pool"), + HaveKeyWithValue("volume_name", "volume-wrong-pool"), + HaveKeyWithValue("volume_pool", ""), + HaveKeyWithValue("fstype", "ext4"), + HaveKeyWithValue("creation_time", ContainSubstring(strconv.Itoa(time.Now().Year()))))), + )) }) It("should delete a volume", func(ctx SpecContext) { By("creating a volume through the csi driver") volSize := int64(5 * 1024 * 1024 * 1024) - errCh := make(chan error) go func() { - _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-to-delete", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-to-delete", }, - Parameters: map[string]string{ - "type": "slow", - "fstype": "ext4", + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + }() + + By("creating a Volume") + _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-to-delete", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Parameters: map[string]string{ + "type": "slow", + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, }, - }) - errCh <- err - Expect(err).NotTo(HaveOccurred()) - - }() - - By("waiting for the volume to be created") - volume := &storagev1alpha1.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.Name, - Name: "volume-to-delete", }, - } - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", 
storagev1alpha1.VolumeStatePending), - )) - - By("patching the volume state to make it available") - volumeBase := volume.DeepCopy() - volume.Status.State = storagev1alpha1.VolumeStateAvailable - Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), - )) - - err := <-errCh + }) Expect(err).NotTo(HaveOccurred()) By("deleting the volume through the csi driver") @@ -364,65 +353,61 @@ var _ = Describe("Controller", func() { By("creating a volume with volume class other than expand only") volSize := int64(5 * 1024 * 1024 * 1024) - errCh := make(chan error) go func() { defer GinkgoRecover() - _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ - Name: "volume-not-expand", - CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, - VolumeCapabilities: []*csi.VolumeCapability{ - { - AccessMode: &csi.VolumeCapability_AccessMode{ - Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, - }, - }, + By("waiting for the volume to be created") + volume := &storagev1alpha1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "volume-not-expand", }, - Parameters: map[string]string{ - "type": volumeClass.Name, - "fstype": "ext4", + } + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStatePending), + )) + + By("patching the volume state to make it available") + volumeBase := volume.DeepCopy() + volume.Status.State = storagev1alpha1.VolumeStateAvailable + Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) + Eventually(Object(volume)).Should(SatisfyAll( + HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), + )) + }() + + By("creating a Volume") + _, err := drv.CreateVolume(ctx, &csi.CreateVolumeRequest{ + Name: "volume-not-expand", + CapacityRange: &csi.CapacityRange{RequiredBytes: volSize}, + VolumeCapabilities: []*csi.VolumeCapability{ + { + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, }, - AccessibilityRequirements: &csi.TopologyRequirement{ - Requisite: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Parameters: map[string]string{ + "type": volumeClass.Name, + "fstype": "ext4", + }, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, - Preferred: []*csi.Topology{ - { - Segments: map[string]string{ - topologyKey: "volumepool", - }, + }, + Preferred: []*csi.Topology{ + { + Segments: map[string]string{ + topologyKey: "volumepool", }, }, }, - }) - errCh <- err - Expect(err).NotTo(HaveOccurred()) - }() - - By("waiting for the volume to be created") - volume := &storagev1alpha1.Volume{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.Name, - Name: "volume-not-expand", }, - } - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStatePending), - )) - - By("patching the volume state to make it available") - volumeBase := volume.DeepCopy() - volume.Status.State = storagev1alpha1.VolumeStateAvailable - Expect(k8sClient.Status().Patch(ctx, volume, client.MergeFrom(volumeBase))).To(Succeed()) - Eventually(Object(volume)).Should(SatisfyAll( - HaveField("Status.State", storagev1alpha1.VolumeStateAvailable), - )) - - err := <-errCh + }) Expect(err).NotTo(HaveOccurred()) By("resizing the 
volume") @@ -433,7 +418,7 @@ var _ = Describe("Controller", func() { RequiredBytes: newVolumeSize, }, }) - Expect((err)).Should(MatchError("volume class resize policy does not allow resizing")) + Expect(err).Should(MatchError("volume class resize policy does not allow resizing")) }) It("should publish/unpublish a volume on a node", func(ctx SpecContext) { From f84c009d179ff647cd9fce3855c60756e3980082 Mon Sep 17 00:00:00 2001 From: Andreas Fritzler Date: Tue, 29 Aug 2023 13:40:52 +0200 Subject: [PATCH 4/4] refactor: refactor log message for applied volume state - Change the log message from "Volume applied and is now available" to "Applied volume" with additional information about the volume state. Signed-off-by: Andreas Fritzler --- pkg/driver/controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 7a3d343..ba25bee 100755 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -135,7 +135,7 @@ func (d *driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, fmt.Errorf("failed to confirm availability of the volume: %w", err) } - klog.InfoS("Volume applied and is now available", "Volume", client.ObjectKeyFromObject(volume)) + klog.InfoS("Applied volume", "Volume", client.ObjectKeyFromObject(volume), "State", storagev1alpha1.VolumeStateAvailable) return &csi.CreateVolumeResponse{ Volume: &csi.Volume{ @@ -173,7 +173,7 @@ func waitForVolumeAvailability(ctx context.Context, onmetalClient client.Client, }) if wait.Interrupted(err) { - return fmt.Errorf("volume %s did not reach 'Available' state within the defined timeout: %w", client.ObjectKeyFromObject(volume), err) + return fmt.Errorf("volume %s did not reach '%s' state within the defined timeout: %w", client.ObjectKeyFromObject(volume), storagev1alpha1.VolumeStateAvailable, err) } return err