diff --git a/go.mod b/go.mod index 9c7cc5e0..80c03d55 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,9 @@ module github.com/dell/csi-vxflexos/v2 // In order to run unit tests on Windows, you need a stubbed Windows implementation // of the gofsutil package. Use the following replace statements if necessary. -go 1.22 +go 1.22.0 + +toolchain go1.22.5 require ( github.com/akutz/memconn v0.1.0 @@ -16,7 +18,7 @@ require ( github.com/dell/dell-csi-extensions/volumeGroupSnapshot v1.6.0 github.com/dell/gocsi v1.11.0 github.com/dell/gofsutil v1.16.1 - github.com/dell/goscaleio v1.15.0 + github.com/dell/goscaleio v1.16.1-0.20240923113203-f072f0cb0a88 github.com/fsnotify/fsnotify v1.5.1 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 @@ -73,7 +75,7 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.17.0 // indirect golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect diff --git a/go.sum b/go.sum index acf27b13..cc1b92b2 100644 --- a/go.sum +++ b/go.sum @@ -116,8 +116,10 @@ github.com/dell/gocsi v1.11.0 h1:P84VOPd1V55JQjx4tfd/6QOlVQRQkYUqmGqbzPKeyUQ= github.com/dell/gocsi v1.11.0/go.mod h1:LzGAsEIjBxVXJuabzsG3/MsdCOczxDE1IWOBxzXIUhw= github.com/dell/gofsutil v1.16.1 h1:BzdxMdIDgKzinlYyi5G3pi27Jw0cmtqRHM5UsIkoE+w= github.com/dell/gofsutil v1.16.1/go.mod h1:bZ43qAOqKzGJxCRvkTVD7GCFMNkK37ur84mmMuxQshE= -github.com/dell/goscaleio v1.15.0 h1:DzI1ZlQhdIR+V4AKGOMwz1Viu2bAtj3N6kTyixB0Qg8= -github.com/dell/goscaleio v1.15.0/go.mod h1:h7SCmReARG/szFWBMQGETGkZObknhS45lQipQbtdmJ8= +github.com/dell/goscaleio v1.16.1-0.20240912175217-52d904077dc7 h1:X3CuTHtIPo8+3JZOl9z+mmoym5TTP2mYAIWKtSX/hoc= +github.com/dell/goscaleio v1.16.1-0.20240912175217-52d904077dc7/go.mod h1:h7SCmReARG/szFWBMQGETGkZObknhS45lQipQbtdmJ8= +github.com/dell/goscaleio 
v1.16.1-0.20240923113203-f072f0cb0a88 h1:+JewlsDL4XH/lupX7r4Z8M3D19MF9K2U8y33SCs3LGY= +github.com/dell/goscaleio v1.16.1-0.20240923113203-f072f0cb0a88/go.mod h1:b2mbdhG3IHp8YfvDrj7jWHdJNkqyrUhWNDEvo4GVEUc= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -606,6 +608,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/service/controller.go b/service/controller.go index 7dcef099..723ec4b2 100644 --- a/service/controller.go +++ b/service/controller.go @@ -23,6 +23,11 @@ import ( "sync" "time" + "github.com/dell/csi-vxflexos/v2/k8sutils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/yaml" + "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -129,6 +134,8 @@ const ( sioReplicationGroupExists = "The Replication Consistency Group already exists" sioReplicationPairExists = "A Replication Pair for the specified local volume already exists" + + 
DriverConfigParamsYaml = "driver-config-params.yaml" ) // Extra metadata field names for propagating to goscaleio and beyond. @@ -322,6 +329,7 @@ func (s *service) CreateVolume( Log.Printf("Size %d is less than 3GB, rounding to 3GB", size/bytesInGiB) size = minNfsSize } + contentSource := req.GetVolumeContentSource() if contentSource != nil { snapshotSource := contentSource.GetSnapshot() @@ -1166,6 +1174,45 @@ func (s *service) DeleteVolume( return &csi.DeleteVolumeResponse{}, nil } +func (s *service) findNetworkInterfaceIPs() ([]string, error) { + if K8sClientset == nil { + err := k8sutils.CreateKubeClientSet() + if err != nil { + Log.Errorf("Failed to create Kubernetes clientset: %v", err) + return []string{}, err + } + K8sClientset = k8sutils.Clientset + } + + // Get the ConfigMap + configMap, err := K8sClientset.CoreV1().ConfigMaps(DriverNamespace).Get(context.TODO(), DriverConfigMap, metav1.GetOptions{}) + if err != nil { + Log.Errorf("Failed to get the ConfigMap: %v", err) + return []string{}, err + } + + var configData map[string]interface{} + var allNetworkInterfaceIPs []string + + if configParamsYaml, ok := configMap.Data[DriverConfigParamsYaml]; ok { + err := yaml.Unmarshal([]byte(configParamsYaml), &configData) + if err != nil { + Log.Errorf("Failed to unmarshal the ConfigMap params: %v", err) + return []string{}, err + } + + if interfaceNames, ok := configData["interfaceNames"].(map[string]interface{}); ok { + for _, ipAddressList := range interfaceNames { + + ipAddresses := strings.Split(ipAddressList.(string), ",") + allNetworkInterfaceIPs = append(allNetworkInterfaceIPs, ipAddresses...) 
+ } + return allNetworkInterfaceIPs, nil + } + } + return []string{}, fmt.Errorf("failed to get the Network Interface IPs") +} + func (s *service) ControllerPublishVolume( ctx context.Context, req *csi.ControllerPublishVolumeRequest) ( @@ -1241,15 +1288,24 @@ func (s *service) ControllerPublishVolume( err.Error()) } - sdcIPs, err := s.getSDCIPs(nodeID, systemID) - if err != nil { - return nil, status.Errorf(codes.NotFound, "%s", err.Error()) - } else if len(sdcIPs) == 0 { - return nil, status.Errorf(codes.NotFound, "%s", "received empty sdcIPs") + var ipAddresses []string + + ipAddresses, err = s.findNetworkInterfaceIPs() + Log.Printf("ControllerPublish - No network interfaces found, trying to get SDC IPs") + if err != nil || len(ipAddresses) == 0 { + + // get SDC IPs if Network Interface IPs not found + ipAddresses, err = s.getSDCIPs(nodeID, systemID) + if err != nil { + return nil, status.Errorf(codes.NotFound, "%s", err.Error()) + } else if len(ipAddresses) == 0 { + return nil, status.Errorf(codes.NotFound, "%s", "received empty sdcIPs") + } } + Log.Printf("ControllerPublish - ipAddresses %v", ipAddresses) externalAccess := s.opts.ExternalAccess - publishContext["host"] = sdcIPs[0] + publishContext["host"] = ipAddresses[0] fsc := req.GetVolumeCapability() if fsc == nil { @@ -1267,7 +1323,7 @@ func (s *service) ControllerPublishVolume( errUnknownAccessMode) } // Export for NFS - resp, err := s.exportFilesystem(ctx, req, adminClient, fs, sdcIPs, externalAccess, nodeID, publishContext, am) + resp, err := s.exportFilesystem(ctx, req, adminClient, fs, ipAddresses, externalAccess, nodeID, publishContext, am) return resp, err } volID := getVolumeIDFromCsiVolumeID(csiVolID) @@ -1564,15 +1620,20 @@ func (s *service) ControllerUnpublishVolume( err.Error()) } - sdcIPs, err := s.getSDCIPs(nodeID, systemID) - if err != nil { - return nil, status.Errorf(codes.NotFound, "%s", err.Error()) - } else if len(sdcIPs) == 0 { - return nil, status.Errorf(codes.NotFound, "%s", 
"received empty sdcIPs") + var ipAddresses []string + ipAddresses, err = s.findNetworkInterfaceIPs() + if err != nil || len(ipAddresses) == 0 { + + ipAddresses, err = s.getSDCIPs(nodeID, systemID) + if err != nil { + return nil, status.Errorf(codes.NotFound, "%s", err.Error()) + } else if len(ipAddresses) == 0 { + return nil, status.Errorf(codes.NotFound, "%s", "received empty sdcIPs") + } } // unexport for NFS - err = s.unexportFilesystem(ctx, req, adminClient, fs, req.GetVolumeId(), sdcIPs, nodeID) + err = s.unexportFilesystem(ctx, req, adminClient, fs, req.GetVolumeId(), ipAddresses, nodeID) if err != nil { return nil, err } diff --git a/service/features/array-config/config b/service/features/array-config/config index 9da7f6f7..9f23dc0e 100644 --- a/service/features/array-config/config +++ b/service/features/array-config/config @@ -6,7 +6,7 @@ "insecure": true, "isDefault": true, "systemID": "14dbbf5617523654", - "nasName": "dummy-name" + "nasName": "dummy-nas-server" }, { "endpoint": "http://127.0.0.2", @@ -16,6 +16,6 @@ "isDefault": false, "systemID": "15dbbf5617523655", "AllSystemNames" : "15dbbf5617523655-previous-name", - "nasName": "dummy-name" + "nasName": "dummy-nas-server" } ] diff --git a/service/features/array-config/config.2 b/service/features/array-config/config.2 index 2730c6cf..3418f020 100644 --- a/service/features/array-config/config.2 +++ b/service/features/array-config/config.2 @@ -6,7 +6,7 @@ "insecure": true, "isDefault": true, "systemID": "14dbbf5617523654" - "nasName": "dummy-name" + "nasName": "dummy-nas-server" }, { "endpoint": "http://127.0.0.2", @@ -15,7 +15,7 @@ "insecure": true, "isDefault": false, "systemID": "15dbbf5617523655" - "nasName": "dummy-name" + "nasName": "dummy-nas-server" }, { "username": "admin", @@ -24,6 +24,6 @@ "endpoint": "https://1.2.3.4", "insecure": true, "isDefault": false - "nasName": "dummy-name" + "nasName": "dummy-nas-server" } ] diff --git a/service/features/controller_publish_unpublish.feature 
b/service/features/controller_publish_unpublish.feature index c85515cb..1a8d23df 100644 --- a/service/features/controller_publish_unpublish.feature +++ b/service/features/controller_publish_unpublish.feature @@ -42,6 +42,18 @@ Feature: VxFlex OS CSI interface And I call UnpublishVolume nfs And no error was received Then a valid UnpublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish and unpublish no error with SDC dependency + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce SDC dependency + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call UnpublishVolume nfs + And no error was received + Then a valid UnpublishVolumeResponse is returned Scenario: a Basic NFS controller Publish and unpublish NFS export not found error Given a VxFlexOS service @@ -117,7 +129,7 @@ Feature: VxFlex OS CSI interface Then a valid PublishVolumeResponse is returned And I call NFS PublishVolume with "single-node-multi-writer" Then a valid PublishVolumeResponse is returned - + Scenario: a Basic NFS controller Publish incompatible access mode error Given a VxFlexOS service When I specify CreateVolumeMountRequest "nfs" @@ -259,7 +271,7 @@ Feature: VxFlex OS CSI interface And a valid volume When I call Probe And I call PublishVolume with - Then the error contains "Expecting this volume id only on default system. Aborting operation" + Then the error contains "expecting this volume id only on default system. 
aborting operation" Examples: | access | diff --git a/service/features/csi_extension.feature b/service/features/csi_extension.feature index d89e0f63..726eadd1 100644 --- a/service/features/csi_extension.feature +++ b/service/features/csi_extension.feature @@ -111,7 +111,7 @@ Examples: When I call Probe And I induce error "LegacyVolumeConflictError" And I call CreateVolumeSnapshotGroup - Then the error contains "Expecting this volume id only on default system" + Then the error contains "expecting this volume id only on default system" @vg Scenario: Snapshot a block volume consistency group with wrong system diff --git a/service/features/replication.feature b/service/features/replication.feature index 9bc9c510..a5c994f5 100644 --- a/service/features/replication.feature +++ b/service/features/replication.feature @@ -31,7 +31,7 @@ Scenario Outline: Test CreateRemoteVolume | "sourcevol" | "PeerMdmError" | "PeerMdmError" | | "sourcevol" | "CreateVolumeError" | "create volume induced error" | | "sourcevol" | "BadVolIDError" | "failed to provide" | - | "sourcevol" | "BadRemoteSystemIDError" | "System 15dbbf5617523655 not found" | + | "sourcevol" | "BadRemoteSystemIDError" | "system 15dbbf5617523655 not found" | | "sourcevol" | "ProbePrimaryError" | "PodmonControllerProbeError" | | "sourcevol" | "ProbeSecondaryError" | "PodmonControllerProbeError" | diff --git a/service/features/service.feature b/service/features/service.feature index 5545ba5f..ab1797d2 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -23,11 +23,28 @@ Feature: VxFlex OS CSI interface | "14dbbf5617523654" | "none" | | "15dbbf5617523655" | "none" | + Scenario: Identity GetPluginInfo good call Given a VxFlexOS service When I call GetPluginInfo When I call BeforeServe + Then configMap is updated + Then a valid GetPlugInfoResponse is returned + + Scenario Outline: Identity GetPluginInfo bad call + Given a VxFlexOS service + When I call GetPluginInfo + And I induce error + 
When I call BeforeServe + Then configMap is updated Then a valid GetPlugInfoResponse is returned + Examples: + | error | + | "UpdateConfigMapUnmarshalError" | + | "GetIPAddressByInterfaceError" | + | "UpdateConfigK8sClientError" | + | "UpdateConfigFormatError" | + | "ConfigMapNotFoundError" | Scenario Outline: Dynamic log config change Given a VxFlexOS service @@ -413,11 +430,6 @@ Feature: VxFlex OS CSI interface And I call CreateVolumeSize nfs "volume3" "16" Then the error contains "'Volume name' already exists and size is different" - Scenario: Call NodeGetInfo and validate NodeId - Given a VxFlexOS service - When I call NodeGetInfo - Then a valid NodeGetInfoResponse is returned - Scenario: Call NodeGetInfo with invalid MaxVolumesPerNode Given a VxFlexOS service And an invalid MaxVolumesPerNode @@ -434,6 +446,11 @@ Feature: VxFlex OS CSI interface When I call GetNodeLabels with invalid node Then the error contains "Unable to fetch the node labels" + Scenario: Call GetNodeUID with invalid node + Given a VxFlexOS service + When I call GetNodeUID with invalid node + Then the error contains "Unable to fetch the node details" + Scenario: Call NodeGetInfo with invalid volume limit node labels Given a VxFlexOS service When I call NodeGetInfo with invalid volume limit node labels @@ -444,6 +461,16 @@ Feature: VxFlex OS CSI interface When I call NodeGetInfo with valid volume limit node labels Then the Volume limit is set + Scenario: Call NodeGetInfo and validate Node UID + Given a VxFlexOS service + When I call NodeGetInfo with a valid Node UID + Then a valid NodeGetInfoResponse with node UID is returned + + Scenario: Call GetNodeUID + Given a VxFlexOS service + When I call GetNodeUID + Then a valid node uid is returned + Scenario: Call ParseInt64FromContext to validate EnvMaxVolumesPerNode Given a VxFlexOS service When I set invalid EnvMaxVolumesPerNode @@ -454,6 +481,11 @@ Feature: VxFlex OS CSI interface When I call GetNodeLabels with unset KubernetesClient Then the 
error contains "init client failed with error" + Scenario: Call GetNodeUID with invalid KubernetesClient + Given a VxFlexOS service + When I call GetNodeUID with unset KubernetesClient + Then the error contains "init client failed with error" + Scenario: Call GetCapacity without specifying Storage Pool Name (this returns overall capacity) Given a VxFlexOS service When I call Probe @@ -1227,7 +1259,7 @@ Feature: VxFlex OS CSI interface Examples: | configPath | errorMsg | | "features/array-config/DO_NOT_EXIST" | "does not exist" | - | "features/array-config/unable_to_parse" | "Unable to parse the credentials" | + | "features/array-config/unable_to_parse" | "unable to parse the credentials" | | "features/array-config/zero_length" | "no arrays are provided in vxflexos-creds secret" | | "features/array-config/duplicate_system_ID" | "duplicate system ID" | | "features/array-config/invalid_system_name" | "invalid value for system name" | @@ -1487,7 +1519,7 @@ Feature: VxFlex OS CSI interface Scenario: Parse IP with no Mask When I call parseMask with ip "192.168.1.34" - Then the error contains "Parse Mask: Error parsing mask" + Then the error contains "parse mask: error parsing mask" Scenario: External Access Already Added Given an NFSExport instance with nfsexporthost @@ -1496,4 +1528,28 @@ Feature: VxFlex OS CSI interface Examples: | nfsexporthost | externalAccess | errorMsg | | "127.0.0.1/255.255.255.255" | "127.0.0.1/255.255.255.255" | "external access exists" | - | "127.1.1.0/255.255.255.255" | "127.0.0.1/255.255.255.255" | "external access does not exist" | \ No newline at end of file + | "127.1.1.0/255.255.255.255" | "127.0.0.1/255.255.255.255" | "external access does not exist" | + + Scenario: Get NAS server id from name + Given a VxFlexOS service + And I call Probe + When I call Get NAS server from name + And I induce error + Then the error contains + Examples: + | systemid | nasservername | error | errorMsg | + | "15dbbf5617523655" | "dummy-nas-server" | "" | 
"none" | + | "15dbbf5617523655" | "invalid-nas-server-id" | "NasNotFoundError" | "could not find given NAS server by name" | + | "15dbbf5617523655" | "" | "" | "NAS server not provided" | + + Scenario: Ping a NAS server by name + Given a VxFlexOS service + And I call Probe + And I induce error + When I call ping NAS server + Then the error contains + Examples: + | systemid | nasserver | error | errorMsg | + | "15dbbf5617523655" | "63ec8e0d-4551-29a7-e79c-b202f2b914f3" | "" | "none" | + | "15dbbf5617523655" | "invalid-nas-server" | "NasNotFoundError" | "NAS server not found" | + \ No newline at end of file diff --git a/service/node.go b/service/node.go index 197f34c0..3f8b344c 100644 --- a/service/node.go +++ b/service/node.go @@ -1,4 +1,4 @@ -// Copyright © 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved. +// Copyright © 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -42,6 +42,7 @@ var ( // GetNodeLabels - Get the node labels GetNodeLabels = getNodelabels + GetNodeUID = getNodeUID ) const ( @@ -364,7 +365,6 @@ func (s *service) NodeUnpublishVolume( Log.Errorf("ephemeralNodeUnpublish returned error: %s", err.Error()) return nil, err } - } // Idempotent need to return ok if not published @@ -454,61 +454,59 @@ func (s *service) getSystemName(_ context.Context, systems []string) bool { // It also makes sure private directory(privDir) is created func (s *service) nodeProbe(ctx context.Context) error { // make sure the kernel module is loaded - if !kmodLoaded(s.opts) { - return status.Error(codes.FailedPrecondition, - "scini kernel module not loaded") - } + if kmodLoaded(s.opts) { + // fetch the SDC GUID + if s.opts.SdcGUID == "" { + // try to query the SDC GUID + guid, err := goscaleio.DrvCfgQueryGUID() + if err != nil { + return status.Errorf(codes.FailedPrecondition, + "unable to get SDC GUID via config or automatically, error: %s", err.Error()) + } - // fetch the SDC GUID - if s.opts.SdcGUID == "" { - // try to query the SDC GUID - guid, err := goscaleio.DrvCfgQueryGUID() - if err != nil { - return status.Errorf(codes.FailedPrecondition, - "unable to get SDC GUID via config or automatically, error: %s", err.Error()) + s.opts.SdcGUID = guid + Log.WithField("guid", s.opts.SdcGUID).Info("set SDC GUID") } - s.opts.SdcGUID = guid - Log.WithField("guid", s.opts.SdcGUID).Info("set SDC GUID") - } - - // fetch the systemIDs - var err error - if len(connectedSystemID) == 0 { - connectedSystemID, err = getSystemsKnownToSDC() - if err != nil { - return status.Errorf(codes.FailedPrecondition, "%s", err.Error()) + // fetch the systemIDs + var err error + if len(connectedSystemID) == 0 { + connectedSystemID, err = getSystemsKnownToSDC() + if err != nil { + return status.Errorf(codes.FailedPrecondition, "%s", err.Error()) + } } - } - // rename SDC - /* - case1: if IsSdcRenameEnabled=true and prefix given then set the prefix+worker_node_name 
for sdc name. - case2: if IsSdcRenameEnabled=true and prefix not given then set worker_node_name for sdc name. - */ - if s.opts.IsSdcRenameEnabled { - err = s.renameSDC(s.opts) - if err != nil { - return err + // rename SDC + // case1: if IsSdcRenameEnabled=true and prefix given then set the prefix+worker_node_name for sdc name. + // case2: if IsSdcRenameEnabled=true and prefix not given then set worker_node_name for sdc name. + // + if s.opts.IsSdcRenameEnabled { + err = s.renameSDC(s.opts) + if err != nil { + return err + } } - } - // support for pre-approved guid - if s.opts.IsApproveSDCEnabled { - Log.Infof("Approve SDC enabled") - if err := s.approveSDC(s.opts); err != nil { - return err + // support for pre-approved guid + if s.opts.IsApproveSDCEnabled { + Log.Infof("Approve SDC enabled") + if err := s.approveSDC(s.opts); err != nil { + return err + } } - } - // get all the system names and IDs. - s.getSystemName(ctx, connectedSystemID) + // get all the system names and IDs. + s.getSystemName(ctx, connectedSystemID) - // make sure privDir is pre-created - if _, err := mkdir(s.privDir); err != nil { - return status.Errorf(codes.Internal, - "plugin private dir: %s creation error: %s", - s.privDir, err.Error()) + // make sure privDir is pre-created + if _, err := mkdir(s.privDir); err != nil { + return status.Errorf(codes.Internal, + "plugin private dir: %s creation error: %s", + s.privDir, err.Error()) + } + } else { + Log.Infof("scini module not loaded, perhaps it was intentional") } return nil @@ -546,7 +544,6 @@ func (s *service) approveSDC(opts Opts) error { } Log.Warnf("Array RestrictedSdcMode is %s, driver only supports GUID RestrictedSdcMode If SDC becomes restricted again, driver will not be able to approve", system.System.RestrictedSdcMode) - } } @@ -709,7 +706,7 @@ func (s *service) NodeGetCapabilities( } // NodeGetInfo returns Node information -// NodeId is the identifier of the node and will match the SDC GUID +// NodeId is the identifier of the 
node. If SDC is installed, SDC GUID will be appended to NodeId // MaxVolumesPerNode (optional) is left as 0 which means unlimited // AccessibleTopology will be set with the VxFlex OS SystemID func (s *service) NodeGetInfo( @@ -720,31 +717,17 @@ func (s *service) NodeGetInfo( // Fetch SDC GUID if s.opts.SdcGUID == "" { if err := s.nodeProbe(ctx); err != nil { - return nil, err + Log.Infof("failed to probe node: %s", err) } } // Fetch Node ID if len(connectedSystemID) == 0 { if err := s.nodeProbe(ctx); err != nil { - return nil, err + Log.Infof("failed to probe node: %s", err) } } - // Create the topology keys - // csi-vxflexos.dellemc.com/: - topology := map[string]string{} - for _, sysID := range connectedSystemID { - isNFS, err := s.checkNFS(ctx, sysID) - if err != nil { - return nil, err - } - if isNFS { - topology[Name+"/"+sysID+"-nfs"] = "true" - } - topology[Name+"/"+sysID] = SystemTopologySystemValue - } - var maxVxflexosVolumesPerNode int64 if len(connectedSystemID) != 0 { // Check for node label 'max-vxflexos-volumes-per-node'. If present set 'MaxVolumesPerNode' to this value. 
@@ -772,8 +755,33 @@ func (s *service) NodeGetInfo( Log.Debugf("MaxVolumesPerNode: %v\n", maxVxflexosVolumesPerNode) + // Create the topology keys + // csi-vxflexos.dellemc.com/: + Log.Infof("Arrays: %+v", s.opts.arrays) + topology := map[string]string{} + for _, array := range s.opts.arrays { + isNFS, err := s.checkNFS(ctx, array.SystemID) + if err != nil { + return nil, err + } + if isNFS { + topology[Name+"/"+array.SystemID+"-nfs"] = "true" + } + topology[Name+"/"+array.SystemID] = SystemTopologySystemValue + } + + nodeID, err := GetNodeUID(ctx, s) + if err != nil { + return nil, status.Error(codes.InvalidArgument, GetMessage("Could not fetch node UID")) + } + + if s.opts.SdcGUID != "" { + nodeID = s.opts.SdcGUID + } + + Log.Debugf("NodeId: %v\n", nodeID) return &csi.NodeGetInfoResponse{ - NodeId: s.opts.SdcGUID, + NodeId: nodeID, AccessibleTopology: &csi.Topology{ Segments: topology, }, @@ -856,7 +864,7 @@ func (s *service) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolume } } } - if len(mounts) == 0 || mounted == false || err != nil { + if len(mounts) == 0 || !mounted || err != nil { healthy = false message = fmt.Sprintf("volPath: %s is not mounted: %v", volPath, err) } @@ -934,7 +942,6 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum err = s.nodeProbe(ctx) if err != nil { Log.Error("nodeProbe failed with error :" + err.Error()) - return nil, err } volumePath := req.GetVolumePath() @@ -1039,3 +1046,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum func getNodelabels(ctx context.Context, s *service) (map[string]string, error) { return s.GetNodeLabels(ctx) } + +func getNodeUID(ctx context.Context, s *service) (string, error) { + return s.GetNodeUID(ctx) +} diff --git a/service/service.go b/service/service.go index 64fa5536..cd385bb6 100644 --- a/service/service.go +++ b/service/service.go @@ -75,7 +75,10 @@ const ( DefaultLogLevel = logrus.DebugLevel // ParamCSILogLevel csi 
driver log level - ParamCSILogLevel = "CSI_LOG_LEVEL" + ParamCSILogLevel = "CSI_LOG_LEVEL" + DriverNamespace = "vxflexos" + DriverConfigMap = "vxflexos-config-params" + ConfigMapFilePath = "/vxflexos-config-params/driver-config-params.yaml" ) var ( @@ -133,6 +136,11 @@ type Service interface { ProcessMapSecretChange() error } +type NetworkInterface interface { + InterfaceByName(name string) (*net.Interface, error) + Addrs(interfaceObj *net.Interface) ([]net.Addr, error) +} + // Opts defines service configuration options. type Opts struct { // map from system name to ArrayConnectionData @@ -178,6 +186,20 @@ type service struct { connectedSystemNameToID map[string]string } +type Config struct { + InterfaceNames map[string]string `yaml:"interfaceNames"` +} + +type GetIPAddressByInterfacefunc func(string, NetworkInterface) (string, error) + +func (s *service) InterfaceByName(name string) (*net.Interface, error) { + return net.InterfaceByName(name) +} + +func (s *service) Addrs(interfaceObj *net.Interface) ([]net.Addr, error) { + return interfaceObj.Addrs() +} + // Process dynamic changes to configMap or Secret. 
func (s *service) ProcessMapSecretChange() error { // Update dynamic config params @@ -242,6 +264,10 @@ func (s *service) logCsiNodeTopologyKeys() error { } csiNodes, err := K8sClientset.StorageV1().CSINodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + Log.WithError(err).Error("unable to get node list") + return err + } node, err := s.NodeGetInfo(context.Background(), nil) if node != nil { Log.WithField("node info", node.NodeId).Info("NodeInfo ID") @@ -483,12 +509,132 @@ func (s *service) BeforeServe( s.adminClients = make(map[string]*sio.Client) s.systems = make(map[string]*sio.System) + // Update the ConfigMap with the Interface IPs + s.updateConfigMap(s.getIPAddressByInterface, ConfigMapFilePath) + if _, ok := csictx.LookupEnv(ctx, "X_CSI_VXFLEXOS_NO_PROBE_ON_START"); !ok { return s.doProbe(ctx) } return nil } +func (s *service) updateConfigMap(getIPAddressByInterfacefunc GetIPAddressByInterfacefunc, configFilePath string) { + configFileData, err := os.ReadFile(configFilePath) + if err != nil { + Log.Errorf("Failed to read ConfigMap file: %v", err) + return + } + + var config Config + err = yaml.Unmarshal(configFileData, &config) + if err != nil { + Log.Errorf("Failed to parse configMap data: %v", err) + return + } + + updateInterfaceNamesWithIPs := map[string]string{} + for node, interfaceList := range config.InterfaceNames { + + if !strings.EqualFold(node, s.opts.KubeNodeName) { + continue + } + interfaces := strings.Split(interfaceList, ",") + var ipAddresses []string + + for _, interfaceName := range interfaces { + interfaceName = strings.TrimSpace(interfaceName) + + // Find the IP of the Interfaces + ipAddress, err := getIPAddressByInterfacefunc(interfaceName, &service{}) + if err != nil { + Log.Printf("Error while getting IP address for interface %s: %v\n", interfaceName, err) + continue + } + ipAddresses = append(ipAddresses, ipAddress) + } + if len(ipAddresses) > 0 { + updateInterfaceNamesWithIPs[node] = strings.Join(ipAddresses, ",") 
+ } + } + + // Get the Kubernetes ClientSet + if K8sClientset == nil { + err = k8sutils.CreateKubeClientSet() + if err != nil { + Log.Errorf("Failed to create Kubernetes ClientSet: %v", err) + return + } + K8sClientset = k8sutils.Clientset + } + + // Get the vxflexos-config-params ConfigMap + cm, err := K8sClientset.CoreV1().ConfigMaps(DriverNamespace).Get(context.TODO(), DriverConfigMap, metav1.GetOptions{}) + if err != nil { + Log.Errorf("Failed to get ConfigMap: %v", err) + return + } + + var configData map[string]interface{} + if existingYaml, ok := cm.Data["driver-config-params.yaml"]; ok { + + err := yaml.Unmarshal([]byte(existingYaml), &configData) + if err != nil { + Log.Errorf("Failed to parse ConfigMap data: %v", err) + return + } + + // Check and update Interfaces with the IPs + if interfaceNames, ok := configData["interfaceNames"].(map[string]interface{}); ok { + for node, ipAddressList := range updateInterfaceNamesWithIPs { + interfaceNames[node] = ipAddressList + } + } else { + Log.Errorf("interfaceNames key missing or not in expected format") + return + } + + updatedYaml, err := yaml.Marshal(configData) + if err != nil { + Log.Errorf("Failed to marshal updated data: %v", err) + return + } + cm.Data["driver-config-params.yaml"] = string(updatedYaml) + } + + // Update the vxflexos-config-params ConfigMap + _, err = K8sClientset.CoreV1().ConfigMaps("vxflexos").Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + Log.Errorf("Failed to update ConfigMap: %v", err) + return + } + + Log.Infof("ConfigMap updated successfully") +} + +func (s *service) getIPAddressByInterface(interfaceName string, networkInterface NetworkInterface) (string, error) { + interfaceObj, err := networkInterface.InterfaceByName(interfaceName) + if err != nil { + return "", err + } + + addrs, err := networkInterface.Addrs(interfaceObj) + if err != nil { + return "", err + } + + for _, addr := range addrs { + ipNet, ok := addr.(*net.IPNet) + if !ok { + continue + } + 
if ipNet.IP.To4() != nil { + return ipNet.IP.String(), nil + } + } + + return "", fmt.Errorf("no IPv4 address found for interface %s", interfaceName) +} + func (s *service) checkNFS(ctx context.Context, systemID string) (bool, error) { err := s.systemProbeAll(ctx) if err != nil { @@ -515,6 +661,16 @@ func (s *service) checkNFS(ctx context.Context, systemID string) (bool, error) { array := arrayConData[systemID] if strings.TrimSpace(array.NasName) == "" { Log.Warnf("nasName value not found in secret, it is mandatory parameter for NFS volume operations") + } else { + nasserver, err := s.getNASServerIDFromName(systemID, array.NasName) + if err != nil { + return false, err + } + + err = s.pingNAS(systemID, nasserver) + if err != nil { + return false, err + } } // Even though NasName is not present in secret but PowerFlex version is >=4.0; we support NFS. return true, nil @@ -542,7 +698,7 @@ func (s *service) doProbe(ctx context.Context) error { } if err := s.nodeProbe(ctx); err != nil { - return err + Log.Infof("nodeProbe failed: %s", err.Error()) } } return nil @@ -800,13 +956,13 @@ func getArrayConfig(_ context.Context) (map[string]*ArrayConnectionData, error) if err != nil { Log.Errorf("Found error %v while checking stat of file %s ", err, ArrayConfigFile) if os.IsNotExist(err) { - return nil, fmt.Errorf("File %s does not exist", ArrayConfigFile) + return nil, fmt.Errorf("file %s does not exist", ArrayConfigFile) } } config, err := os.ReadFile(filepath.Clean(ArrayConfigFile)) if err != nil { - return nil, fmt.Errorf("File %s errors: %v", ArrayConfigFile, err) + return nil, fmt.Errorf("file %s errors: %v", ArrayConfigFile, err) } if string(config) != "" { @@ -815,7 +971,7 @@ func getArrayConfig(_ context.Context) (map[string]*ArrayConnectionData, error) config, _ = yaml.JSONToYAML(config) err = yaml.Unmarshal(config, &creds) if err != nil { - return nil, fmt.Errorf("Unable to parse the credentials: %v", err) + return nil, fmt.Errorf("unable to parse the credentials: 
%v", err) } if len(creds) == 0 { @@ -1010,15 +1166,15 @@ func Contains(slice []string, element string) bool { // parseMask converts the subnet mask from CIDR notation to the dotted-decimal format // An input of x.x.x.x/32 will return 255.255.255.255 func parseMask(ipaddr string) (mask string, err error) { - removeExtra := regexp.MustCompile("^(.*[\\/])") + removeExtra := regexp.MustCompile(`^(.*[\\/])`) asd := ipaddr[len(ipaddr)-3:] findSubnet := removeExtra.ReplaceAll([]byte(asd), []byte("")) subnet, err := strconv.ParseInt(string(findSubnet), 10, 64) if err != nil { - return "", errors.New("Parse Mask: Error parsing mask") + return "", errors.New("parse mask: error parsing mask") } if subnet < 0 || subnet > 32 { - return "", errors.New("Invalid subnet mask") + return "", errors.New("invalid subnet mask") } var buff bytes.Buffer for i := 0; i < int(subnet); i++ { @@ -1399,8 +1555,8 @@ func (s *service) checkVolumesMap(volumeID string) error { for _, vol := range vols { if vol.ID == volumeID { // legacy volume found on non-default system, this is an error - Log.WithError(err).Errorf("Found volume id %s on non-default system %s. Expecting this volume id only on default system. Aborting operation ", volumeID, systemID) - return fmt.Errorf("Found volume id %s on non-default system %s. Expecting this volume id only on default system. Aborting operation ", volumeID, systemID) + Log.WithError(err).Errorf("found volume id %s on non-default system %s. expecting this volume id only on default system. aborting operation ", volumeID, systemID) + return fmt.Errorf("found volume id %s on non-default system %s. expecting this volume id only on default system. 
aborting operation ", volumeID, systemID) } } } @@ -1455,7 +1611,7 @@ func (s *service) getSystem(systemID string) (*siotypes.System, error) { return system, nil } } - return nil, fmt.Errorf("System %s not found", systemID) + return nil, fmt.Errorf("system %s not found", systemID) } func (s *service) getPeerMdms(systemID string) ([]*siotypes.PeerMDM, error) { @@ -1597,7 +1753,7 @@ func (s *service) expandReplicationPair(ctx context.Context, req *csi.Controller func (s *service) getNASServerIDFromName(systemID, nasName string) (string, error) { if nasName == "" { Log.Printf("NAS server not provided.") - return "", nil + return "", errors.New("NAS server not provided") } system, err := s.adminClients[systemID].FindSystem(systemID, "", "") if err != nil { @@ -1610,6 +1766,30 @@ func (s *service) getNASServerIDFromName(systemID, nasName string) (string, erro return nas.ID, nil } +func (s *service) pingNAS(systemID string, nasID string) error { + system, err := s.adminClients[systemID].FindSystem(systemID, "", "") + if err != nil { + return errors.New("system not found: " + systemID) + } + + nas, err := system.GetNASByIDName(nasID, "") + if err != nil { + return errors.New("NAS server not found: " + nasID) + } + + fileInterface, err := system.GetFileInterface(nas.CurrentPreferredIPv4InterfaceID) + if fileInterface.IPAddress == "" || err != nil { + return errors.New("file interface not found for NAS server " + nasID) + } + + err = system.PingNAS(nas.ID, fileInterface.IPAddress) + if err != nil { + return errors.New("could not ping NAS server " + nas.ID) + } + + return nil +} + func (s *service) GetNfsTopology(systemID string) []*csi.Topology { nfsTopology := new(csi.Topology) nfsTopology.Segments = map[string]string{Name + "/" + systemID + "-nfs": "true"} @@ -1634,6 +1814,23 @@ func (s *service) GetNodeLabels(_ context.Context) (map[string]string, error) { return node.Labels, nil } +func (s *service) GetNodeUID(_ context.Context) (string, error) { + if K8sClientset == 
nil { + err := k8sutils.CreateKubeClientSet() + if err != nil { + return "", status.Error(codes.Internal, GetMessage("init client failed with error: %v", err)) + } + K8sClientset = k8sutils.Clientset + } + + // access the API to fetch node object + node, err := K8sClientset.CoreV1().Nodes().Get(context.TODO(), s.opts.KubeNodeName, v1.GetOptions{}) + if err != nil { + return "", status.Error(codes.Internal, GetMessage("Unable to fetch the node details. Error: %v", err)) + } + return string(node.UID), nil +} + // GetMessage - Get message func GetMessage(format string, args ...interface{}) string { str := fmt.Sprintf(format, args...) diff --git a/service/service_test.go b/service/service_test.go index a67aeae3..ee3809cb 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -35,7 +35,7 @@ func TestMain(m *testing.M) { opts := godog.Options{ Format: "pretty", Paths: []string{"features"}, - // Tags: "", + Tags: "", } status := godog.TestSuite{ diff --git a/service/service_unit_test.go b/service/service_unit_test.go index 3ca56230..c6e8b622 100644 --- a/service/service_unit_test.go +++ b/service/service_unit_test.go @@ -14,15 +14,48 @@ package service import ( + "context" "errors" "fmt" + "net" "testing" + v1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + csi "github.com/container-storage-interface/spec/lib/go/csi" siotypes "github.com/dell/goscaleio/types/v1" "github.com/stretchr/testify/assert" ) +type mockService struct{} + +func (s *mockService) InterfaceByName(interfaceName string) (*net.Interface, error) { + if interfaceName == "" { + return nil, fmt.Errorf("invalid interface name") + } else if interfaceName != "eth0" { + return nil, nil + } + return &net.Interface{ + Name: interfaceName, + }, + nil +} + +func (s *mockService) Addrs(interfaceObj *net.Interface) ([]net.Addr, error) { + if interfaceObj == nil { + 
return nil, fmt.Errorf("invalid interface object") + } + return []net.Addr{ + &net.IPNet{ + IP: net.IPv4(10, 0, 0, 1), + }, + }, nil +} + func TestGetVolSize(t *testing.T) { tests := []struct { cr *csi.CapacityRange @@ -446,3 +479,155 @@ func TestValidateQoSParameters(t *testing.T) { }) } } + +func TestGetIPAddressByInterface(t *testing.T) { + tests := []struct { + name string + interfaceName string + expectedIP string + expectedError error + }{ + { + name: "Valid Interface Name", + interfaceName: "eth0", + expectedIP: "10.0.0.1", + expectedError: nil, + }, + { + name: "Wrong Interface Name", + interfaceName: "eth1", + expectedIP: "", + expectedError: fmt.Errorf("invalid interface object"), + }, + { + name: "Empty Interface Name", + interfaceName: "", + expectedIP: "", + expectedError: fmt.Errorf("invalid interface name"), + }, + } + + for _, tt := range tests { + s := &service{} + t.Run(tt.name, func(t *testing.T) { + interfaceIP, err := s.getIPAddressByInterface(tt.interfaceName, &mockService{}) + assert.Equal(t, err, tt.expectedError) + assert.Equal(t, interfaceIP, tt.expectedIP) + }) + } +} + +func TestFindNetworkInterfaceIPs(t *testing.T) { + tests := []struct { + name string + expectedError error + client kubernetes.Interface + configMapData map[string]string + createConfigMap func(map[string]string, kubernetes.Interface) + }{ + { + name: "Error getting K8sClient", + expectedError: fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined"), + client: nil, + configMapData: nil, + createConfigMap: func(map[string]string, kubernetes.Interface) { + }, + }, + { + name: "Error getting ConfigMap", + expectedError: &k8serrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Message: "configmaps \"vxflexos-config-params\" not found", + Reason: metav1.StatusReasonNotFound, + Details: &metav1.StatusDetails{ + Name: "vxflexos-config-params", + Kind: "configmaps", + }, + Code: 404, + 
}, + }, + client: fake.NewSimpleClientset(), + configMapData: nil, + createConfigMap: func(map[string]string, kubernetes.Interface) { + }, + }, + { + name: "No Error", + expectedError: nil, + client: fake.NewSimpleClientset(), + configMapData: map[string]string{ + "driver-config-params.yaml": `interfaceNames: + worker1: 127.1.1.12`, + }, + createConfigMap: func(data map[string]string, clientSet kubernetes.Interface) { + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, + Data: data, + } + // Create a ConfigMap using fake ClientSet + _, err := clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) + if err != nil { + Log.Fatalf("failed to create configMaps: %v", err) + } + }, + }, + { + name: "Error unmarshalling ConfigMap params", + expectedError: errors.New("error converting YAML to JSON: yaml: line 1: did not find expected node content"), + client: fake.NewSimpleClientset(), + configMapData: map[string]string{ + "driver-config-params.yaml": `[interfaces:`, + }, + createConfigMap: func(data map[string]string, clientSet kubernetes.Interface) { + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, + Data: data, + } + // Create a ConfigMap using fake ClientSet + _, err := clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) + if err != nil { + Log.Fatalf("failed to create configMaps: %v", err) + } + }, + }, + { + name: "Error getting the Network Interface IPs", + expectedError: fmt.Errorf("failed to get the Network Interface IPs"), + client: fake.NewSimpleClientset(), + configMapData: map[string]string{ + "params-yaml": ``, + }, + createConfigMap: func(data map[string]string, clientSet kubernetes.Interface) { + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, 
+ Data: data, + } + // Create a ConfigMap using fake ClientSet + _, err := clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) + if err != nil { + Log.Fatalf("failed to create configMaps: %v", err) + } + }, + }, + } + + for _, tt := range tests { + s := &service{} + t.Run(tt.name, func(t *testing.T) { + K8sClientset = tt.client + tt.createConfigMap(tt.configMapData, tt.client) + _, err := s.findNetworkInterfaceIPs() + assert.Equal(t, err, tt.expectedError) + }) + } +} diff --git a/service/step_defs_test.go b/service/step_defs_test.go index d1944ee4..9203e416 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -147,6 +147,8 @@ type feature struct { nodeLabels map[string]string maxVolSize int64 nfsExport types.NFSExport + nodeUID string + nas types.NAS } func (f *feature) checkGoRoutines(tag string) { @@ -1340,12 +1342,27 @@ func (f *feature) iInduceError(errtype string) error { f.service.adminClients[arrayID2] = nil f.service.systems[arrayID2] = nil stepHandlersErrors.PodmonControllerProbeError = true + case "UpdateConfigMapUnmarshalError": + stepHandlersErrors.UpdateConfigMapUnmarshalError = true + case "GetIPAddressByInterfaceError": + stepHandlersErrors.GetIPAddressByInterfaceError = true + case "UpdateConfigK8sClientError": + stepHandlersErrors.UpdateConfigK8sClientError = true + case "UpdateConfigFormatError": + stepHandlersErrors.UpdateConfigFormatError = true + case "ConfigMapNotFoundError": + stepHandlersErrors.ConfigMapNotFoundError = true default: fmt.Println("Ensure that the error is handled in the handlers section.") } return nil } +func (f *feature) iInduceSDCDependency() error { + sdcDependencyOnNFS = true + return nil +} + func (f *feature) getControllerPublishVolumeRequest(accessType string) *csi.ControllerPublishVolumeRequest { capability := new(csi.VolumeCapability) block := new(csi.VolumeCapability_Block) @@ -1639,6 +1656,32 @@ func (f *feature) 
iCallPublishVolumeWithNFS(arg1 string) error { req.VolumeCapability.AccessMode = accessMode } + clientSet := fake.NewSimpleClientset() + K8sClientset = clientSet + + configMapData := map[string]string{ + "driver-config-params.yaml": `interfaceNames: + worker1: 127.1.1.11`, + } + + if sdcDependencyOnNFS { + configMapData = map[string]string{} + K8sClientset = nil + } + + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, + Data: configMapData, + } + // Create a ConfigMap using fake ClientSet + _, err := clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) + if err != nil { + log.Fatalf("failed to create configMap: %v", err) + } + log.Printf("Calling controllerPublishVolume") f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(*ctx, req) if f.err != nil { @@ -1784,6 +1827,33 @@ func (f *feature) iCallUnpublishVolumeNFS() error { req = f.getControllerUnpublishVolumeRequestNFS() f.unpublishVolumeRequest = req } + + clientSet := fake.NewSimpleClientset() + K8sClientset = clientSet + + configMapData := map[string]string{ + "driver-config-params.yaml": `interfaceNames: + worker1: 127.1.1.12`, + } + + if sdcDependencyOnNFS { + configMapData = map[string]string{} + K8sClientset = nil + } + + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, + Data: configMapData, + } + // Create a ConfigMap using fake ClientSet + _, err := clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) + if err != nil { + Log.Fatalf("failed to create configMaps: %v", err) + } + log.Printf("Calling controllerUnpublishVolume: %s", req.VolumeId) f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(*ctx, req) if f.err != nil { @@ -1816,6 +1886,7 @@ func (f *feature) iCallNodeGetInfo() error { } func (f *feature) 
iCallNodeGetInfoWithValidVolumeLimitNodeLabels() error { + f.setFakeNode() ctx := new(context.Context) req := new(csi.NodeGetInfoRequest) f.service.opts.SdcGUID = "9E56672F-2F4B-4A42-BFF4-88B6846FBFDA" @@ -1834,6 +1905,38 @@ func (f *feature) iCallNodeGetInfoWithInvalidVolumeLimitNodeLabels() error { return nil } +func (f *feature) iCallNodeGetInfoWithValidNodeUID() error { + ctx := new(context.Context) + req := new(csi.NodeGetInfoRequest) + GetNodeUID = mockGetNodeUID + f.service.opts.SdcGUID = "" + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + fmt.Printf("NodeGetInfoResponse: %v", f.nodeGetInfoResponse) + return nil +} + +func (f *feature) iCallGetNodeUID() error { + f.setFakeNode() + ctx := new(context.Context) + nodeUID := "" + nodeUID, err := f.service.GetNodeUID(*ctx) + + fmt.Printf("Node UID: %v", nodeUID) + if err != nil { + return err + } + f.nodeUID = nodeUID + return nil +} + +func (f *feature) aValidNodeUIDIsReturned() error { + if f.nodeUID == "" { + return errors.New("Unable to fetch the node UID") + } + fmt.Printf("Node UID: %v", f.nodeUID) + return nil +} + //nolint:revive func mockGetNodeLabels(ctx context.Context, s *service) (map[string]string, error) { labels := map[string]string{"csi-vxflexos.dellemc.com/05d539c3cdc5280f-nfs": "true", "csi-vxflexos.dellemc.com/0e7a082862fedf0f": "csi-vxflexos.dellemc.com"} @@ -1852,12 +1955,18 @@ func mockGetNodeLabelsWithInvalidVolumeLimits(ctx context.Context, s *service) ( return labels, nil } +//nolint:revive +func mockGetNodeUID(ctx context.Context, s *service) (string, error) { + return "1aa4c285-d41b-4911-bf3e-621253bfbade", nil +} + func (f *feature) setFakeNode() (*v1.Node, error) { f.service.opts.KubeNodeName = "node1" fakeNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", Labels: map[string]string{"label1": "value1", "label2": "value2"}, + UID: "1aa4c285-d41b-4911-bf3e-621253bfbade", }, } return K8sClientset.CoreV1().Nodes().Create(context.TODO(), fakeNode, 
metav1.CreateOptions{}) @@ -1907,6 +2016,19 @@ func (f *feature) iCallGetNodeLabelsWithUnsetKubernetesClient() error { return nil } +func (f *feature) iCallGetNodeUIDWithInvalidNode() error { + f.service.opts.KubeNodeName = "node2" + _, f.err = f.service.GetNodeUID(f.context) + return nil +} + +func (f *feature) iCallGetNodeUIDWithUnsetKubernetesClient() error { + K8sClientset = nil + ctx := new(context.Context) + f.nodeUID, f.err = f.service.GetNodeUID(*ctx) + return nil +} + func (f *feature) iCallNodeProbe() error { ctx := new(context.Context) req := new(csi.ProbeRequest) @@ -1932,6 +2054,17 @@ func (f *feature) aValidNodeGetInfoResponseIsReturned() error { return nil } +func (f *feature) aValidNodeGetInfoResponseWithNodeUIDIsReturned() error { + if f.err != nil { + return f.err + } + if f.nodeGetInfoResponse.NodeId == "" { + return errors.New("expected NodeGetInfoResponse to contain NodeID but it was null") + } + fmt.Printf("NodeID %s\n", f.nodeGetInfoResponse.NodeId) + return nil +} + func (f *feature) theVolumeLimitIsSet() error { if f.err != nil { return f.err @@ -3038,6 +3171,66 @@ func (f *feature) thereAreNoRemainingMounts() error { return nil } +func (f *feature) theConfigMapIsUpdated() error { + // Initializing a fake Kubernetes ClientSet + clientSet := fake.NewSimpleClientset() + K8sClientset = clientSet + if stepHandlersErrors.UpdateConfigK8sClientError { + K8sClientset = nil + } + + data := `interfaceNames: + worker1: "eth1" + worker2: "eth2"` + if stepHandlersErrors.UpdateConfigMapUnmarshalError { + data = `[interfaces:` + } else if stepHandlersErrors.UpdateConfigFormatError { + data = `interfaceName:` + } + + configMapData := map[string]string{ + "driver-config-params.yaml": data, + } + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DriverConfigMap, + Namespace: DriverNamespace, + }, + Data: configMapData, + } + + tmpFile, err := os.Create("driver-config-params.yaml") + if err != nil { + Log.Errorf("Error creating temp file: 
%v", err)
+	}
+	if _, err := tmpFile.Write([]byte(data)); err != nil {
+		Log.Errorf("Error writing to temp file: %v", err)
+	}
+
+	if !stepHandlersErrors.ConfigMapNotFoundError {
+		// Create a ConfigMap using fake ClientSet
+		_, err = clientSet.CoreV1().ConfigMaps(DriverNamespace).Create(context.TODO(), configMap, metav1.CreateOptions{})
+		if err != nil {
+			Log.Errorf("failed to create configmap: %v", err)
+		}
+	}
+
+	// Mocking the GetIPAddressByInterface function
+	GetIPAddressByInterface := func(string, NetworkInterface) (string, error) {
+		return "10.0.0.1", nil
+	}
+	if stepHandlersErrors.GetIPAddressByInterfaceError {
+		GetIPAddressByInterface = func(string, NetworkInterface) (string, error) {
+			return "", fmt.Errorf("error getting the IP address of the interface")
+		}
+	}
+
+	s := &service{}
+	s.opts.KubeNodeName = "worker1"
+	s.updateConfigMap(GetIPAddressByInterface, "driver-config-params.yaml")
+	return nil
+}
+
 func (f *feature) iCallBeforeServe() error {
 	ctxOSEnviron := interface{}("os.Environ")
 	stringSlice := make([]string, 0)
@@ -3068,6 +3261,9 @@ func (f *feature) iCallBeforeServe() error {
 	if perr != nil {
 		f.err = perr
 	}
+	if stepHandlersErrors.UpdateConfigK8sClientError {
+		K8sClientset = nil
+	}
 	f.err = f.service.BeforeServe(ctx, nil, listener)
 	listener.Close()
 	return nil
@@ -4466,6 +4662,18 @@ func (f *feature) iSpecifyExternalAccess(externalAccess string) error {
 	return nil
 }
 
+func (f *feature) iCallGetNASServerIDFromName(systemID string, name string) error {
+	id := ""
+	id, f.err = f.service.getNASServerIDFromName(systemID, name)
+	fmt.Printf("NAS server id for %s is : %s\n", name, id)
+	return nil
+}
+
+func (f *feature) iCallPingNASServer(systemID string, name string) error {
+	f.err = f.service.pingNAS(systemID, name)
+	return nil
+}
+
 func FeatureContext(s *godog.ScenarioContext) {
 	f := &feature{}
 	s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService)
@@ -4529,9 +4737,11 @@ func FeatureContext(s *godog.ScenarioContext) {
 	s.Step(`^I call 
NodeGetInfo$`, f.iCallNodeGetInfo) s.Step(`^I call Node Probe$`, f.iCallNodeProbe) s.Step(`^a valid NodeGetInfoResponse is returned$`, f.aValidNodeGetInfoResponseIsReturned) + s.Step(`^a valid NodeGetInfoResponse with node UID is returned$`, f.aValidNodeGetInfoResponseWithNodeUIDIsReturned) s.Step(`^the Volume limit is set$`, f.theVolumeLimitIsSet) s.Step(`^an invalid MaxVolumesPerNode$`, f.anInvalidMaxVolumesPerNode) s.Step(`^I call GetNodeLabels$`, f.iCallGetNodeLabels) + s.Step(`^I call NodeGetInfo with a valid Node UID$`, f.iCallNodeGetInfoWithValidNodeUID) s.Step(`^a valid label is returned$`, f.aValidLabelIsReturned) s.Step(`^I set invalid EnvMaxVolumesPerNode$`, f.iSetInvalidEnvMaxVolumesPerNode) s.Step(`^I call GetNodeLabels with invalid node$`, f.iCallGetNodeLabelsWithInvalidNode) @@ -4579,6 +4789,8 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call NodeUnpublishVolume "([^"]*)"$`, f.iCallNodeUnpublishVolume) s.Step(`^there are no remaining mounts$`, f.thereAreNoRemainingMounts) s.Step(`^I call BeforeServe$`, f.iCallBeforeServe) + s.Step(`^configMap is updated$`, f.theConfigMapIsUpdated) + s.Step(`^I induce SDC dependency$`, f.iInduceSDCDependency) s.Step(`^I call NodeStageVolume$`, f.iCallNodeStageVolume) s.Step(`^I call NodeUnstageVolume with "([^"]*)"$`, f.iCallNodeUnstageVolumeWith) s.Step(`^I call NodeGetCapabilities "([^"]*)"$`, f.iCallNodeGetCapabilities) @@ -4679,6 +4891,12 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call externalAccessAlreadyAdded with externalAccess "([^"]*)"`, f.iCallexternalAccessAlreadyAdded) s.Step(`^an NFSExport instance with nfsexporthost "([^"]*)"`, f.iCallGivenNFSExport) s.Step(`^I specify External Access "([^"]*)"`, f.iSpecifyExternalAccess) + s.Step(`^I call Get NAS server from name "([^"]*)" "([^"]*)"$`, f.iCallGetNASServerIDFromName) + s.Step(`^I call ping NAS server "([^"]*)" "([^"]*)"$`, f.iCallPingNASServer) + s.Step(`^I call GetNodeUID$`, f.iCallGetNodeUID) + s.Step(`^a valid node 
uid is returned$`, f.aValidNodeUIDIsReturned) + s.Step(`^I call GetNodeUID with invalid node$`, f.iCallGetNodeUIDWithInvalidNode) + s.Step(`^I call GetNodeUID with unset KubernetesClient$`, f.iCallGetNodeUIDWithUnsetKubernetesClient) s.After(func(ctx context.Context, _ *godog.Scenario, _ error) (context.Context, error) { if f.server != nil { diff --git a/service/step_handlers_test.go b/service/step_handlers_test.go index d34483f5..bc2cd680 100644 --- a/service/step_handlers_test.go +++ b/service/step_handlers_test.go @@ -32,12 +32,13 @@ import ( ) var ( - debug bool - sdcMappings []types.MappedSdcInfo - sdcMappingsID string - setSdcNameSuccess bool - sdcIDToName map[string]string - isQuotaEnabled bool + debug bool + sdcMappings []types.MappedSdcInfo + sdcMappingsID string + setSdcNameSuccess bool + sdcIDToName map[string]string + isQuotaEnabled bool + sdcDependencyOnNFS bool stepHandlersErrors struct { FindVolumeIDError bool @@ -105,6 +106,11 @@ var ( NoVolIDSDCError bool NoVolError bool SetSdcNameError bool + UpdateConfigMapUnmarshalError bool + GetIPAddressByInterfaceError bool + UpdateConfigK8sClientError bool + UpdateConfigFormatError bool + ConfigMapNotFoundError bool } ) @@ -159,6 +165,7 @@ func getHandler() http.Handler { treeQuotaIDToGracePeriod = make(map[string]string) treeQuotaIDToHardLimit = make(map[string]string) debug = false + sdcDependencyOnNFS = false stepHandlersErrors.FindVolumeIDError = false stepHandlersErrors.GetVolByIDError = false stepHandlersErrors.SIOGatewayVolumeNotFoundError = false @@ -224,6 +231,11 @@ func getHandler() http.Handler { stepHandlersErrors.NoVolError = false stepHandlersErrors.SetSdcNameError = false stepHandlersErrors.ApproveSdcError = false + stepHandlersErrors.UpdateConfigMapUnmarshalError = false + stepHandlersErrors.GetIPAddressByInterfaceError = false + stepHandlersErrors.UpdateConfigK8sClientError = false + stepHandlersErrors.UpdateConfigFormatError = false + stepHandlersErrors.ConfigMapNotFoundError = false 
sdcMappings = sdcMappings[:0] sdcMappingsID = "" return handler @@ -258,6 +270,7 @@ func getRouter() http.Handler { scaleioRouter.HandleFunc("/rest/v1/file-tree-quotas", handleFileTreeQuotas) scaleioRouter.HandleFunc("/rest/v1/file-tree-quotas/{id}", handleGetFileTreeQuotas) scaleioRouter.HandleFunc("/api/instances/System/action/querySystemLimits", handleGetSystemLimits) + scaleioRouter.HandleFunc("/rest/v1/nas-servers/{id}/ping", handleNasServerPing) return scaleioRouter } @@ -392,6 +405,16 @@ func handleNasInstances(w http.ResponseWriter, _ *http.Request) { returnJSONFile("features", "get_nas_servers.json", w, nil) } +// handleSystemInstances implements POST /rest/v1/nas-servers/{id}/ping +func handleNasServerPing(w http.ResponseWriter, r *http.Request) { + handleGetFileInterface(w, r) + + if inducedError.Error() == "NasNotFoundError" { + writeError(w, "nas server not found", http.StatusNotFound, codes.NotFound) + return + } +} + func handleGetNasInstances(w http.ResponseWriter, _ *http.Request) { if stepHandlersErrors.NasServerNotFoundError { writeError(w, "nas server not found", http.StatusNotFound, codes.NotFound)