diff --git a/Dockerfile b/Dockerfile index 8b11d9f..e170b9a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,55 +1,14 @@ -# some arguments that must be supplied -ARG GOPROXY -ARG GOVERSION -ARG BASEIMAGE - -# Stage to build the driver -FROM golang:${GOVERSION} as builder -ARG GOPROXY -RUN mkdir -p /go/src -COPY ./ /go/src/ -WORKDIR /go/src/ -RUN CGO_ENABLED=0 \ - make build - -# Stage to build the driver image -FROM $BASEIMAGE AS driver -# install necessary packages -# alphabetical order for easier maintenance -RUN microdnf install -y \ - e4fsprogs \ - libaio \ - libuuid \ - nfs-utils \ - numactl \ - xfsprogs && \ - microdnf clean all -# copy in the driver -COPY --from=builder /go/src/csi-isilon / -ENTRYPOINT ["/csi-isilon"] - -# Stage to check for critical and high CVE issues via Trivy (https://github.com/aquasecurity/trivy) -# will break image build if CRITICAL issues found -# will print out all HIGH issues found -FROM driver as cvescan -# run trivy and clean up all traces after -RUN microdnf install -y --enablerepo=ubi-8-baseos tar && \ - microdnf clean all && \ - curl https://raw.githubusercontent.com/aquasecurity/trivy/master/contrib/install.sh | sh && \ - trivy fs -s CRITICAL --exit-code 1 / && \ - trivy fs -s HIGH / && \ - trivy image --reset && \ - rm ./bin/trivy - -# final stage -# simple stage to use the driver image as the resultant image -FROM driver as final - -LABEL vendor="Dell Inc." \ - name="csi-isilon" \ - summary="CSI Driver for Dell EMC PowerScale" \ - description="CSI Driver for provisioning persistent storage from Dell EMC PowerScale" \ - version="1.4.0" \ - license="Apache-2.0" - -COPY ./licenses /licenses +ARG GOPROXY + +FROM centos:8 +ARG GOPROXY +RUN yum install -y libaio +RUN yum install -y libuuid +RUN yum install -y numactl +RUN yum install -y xfsprogs +RUN yum install -y e4fsprogs +RUN yum install -y nfs-utils +RUN yum --enablerepo=cr update -y +RUN yum clean all +COPY "csi-isilon" . +ENTRYPOINT ["/csi-isilon"] diff --git a/Dockerfile.podman b/Dockerfile.podman new file mode 100644 index 0000000..1c5a733 --- /dev/null +++ b/Dockerfile.podman @@ -0,0 +1,55 @@ +# some arguments that must be supplied +ARG GOPROXY +ARG GOVERSION +ARG BASEIMAGE + +# Stage to build the driver +FROM golang:${GOVERSION} as builder +ARG GOPROXY +RUN mkdir -p /go/src +COPY ./ /go/src/ +WORKDIR /go/src/ +RUN CGO_ENABLED=0 \ + make build + +# Stage to build the driver image +FROM $BASEIMAGE AS driver +# install necessary packages +# alphabetical order for easier maintenance +RUN microdnf install -y \ + e4fsprogs \ + libaio \ + libuuid \ + nfs-utils \ + numactl \ + xfsprogs && \ + microdnf clean all +# copy in the driver +COPY --from=builder /go/src/csi-isilon / +ENTRYPOINT ["/csi-isilon"] + +# Stage to check for critical and high CVE issues via Trivy (https://github.com/aquasecurity/trivy) +# will break image build if CRITICAL issues found +# will print out all HIGH issues found +FROM driver as cvescan +# run trivy and clean up all traces after +RUN microdnf install -y --enablerepo=ubi-8-baseos tar && \ + microdnf clean all && \ + curl https://raw.githubusercontent.com/aquasecurity/trivy/master/contrib/install.sh | sh && \ + trivy fs -s CRITICAL --exit-code 1 / && \ + trivy fs -s HIGH / && \ + trivy image --reset && \ + rm ./bin/trivy + +# final stage +# simple stage to use the driver image as the resultant image +FROM driver as final + +LABEL vendor="Dell Inc." 
\ + name="csi-isilon" \ + summary="CSI Driver for Dell EMC PowerScale" \ + description="CSI Driver for provisioning persistent storage from Dell EMC PowerScale" \ + version="1.5.0" \ + license="Apache-2.0" + +COPY ./licenses /licenses diff --git a/Gopkg.toml b/Gopkg.toml index 2d261bf..803960b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -9,5 +9,5 @@ unused-packages = true [[constraint]] - name = "github.com/rexray/gocsi" - version = "0.4.0" + name = "github.com/dell/gocsi" + version = "1.2.3" diff --git a/Makefile b/Makefile index 2ceaf59..6e1410c 100644 --- a/Makefile +++ b/Makefile @@ -41,12 +41,18 @@ build: dependencies check GOOS=linux CGO_ENABLED=0 go build # Generates the docker container (but does not push) -docker: dependencies - make -f docker.mk docker +podman-build: + make -f docker.mk podman-build +dev-build: build + make -f docker.mk docker-build + # Pushes container to the repository -push: docker - make -f docker.mk push +podman-build-image-push: podman-build + make -f docker.mk podman-build-image-push + +dev-build-image-push: dev-build + make -f docker.mk docker-build-image-push # Windows or Linux; requires no hardware unit-test: diff --git a/README.md b/README.md index df2d5bf..2fb79cb 100644 --- a/README.md +++ b/README.md @@ -1,455 +1,50 @@ -# CSI Driver for PowerScale +# CSI Driver for Dell EMC PowerScale -[![Go Report Card](https://goreportcard.com/badge/github.com/dell/csi-isilon)](https://goreportcard.com/report/github.com/dell/csi-isilon) -[![License](https://img.shields.io/github/license/dell/csi-isilon)](https://github.com/dell/csi-isilon/blob/master/LICENSE) -[![Docker](https://img.shields.io/docker/pulls/dellemc/csi-isilon.svg?logo=docker)](https://hub.docker.com/r/dellemc/csi-isilon) -[![Last Release](https://img.shields.io/github/v/release/dell/csi-isilon?label=latest&style=flat-square)](https://github.com/dell/csi-isilon/releases) +[![Go Report Card](https://goreportcard.com/badge/github.com/dell/csi-isilon?style=flat-square)](https://goreportcard.com/report/github.com/dell/csi-isilon) +[![License](https://img.shields.io/github/license/dell/csi-isilon?style=flat-square&color=blue&label=License)](https://github.com/dell/csi-isilon/blob/master/LICENSE) +[![Docker](https://img.shields.io/docker/pulls/dellemc/csi-isilon.svg?logo=docker&style=flat-square&label=Pulls)](https://hub.docker.com/r/dellemc/csi-isilon) +[![Last Release](https://img.shields.io/github/v/release/dell/csi-isilon?label=Latest&style=flat-square&logo=go)](https://github.com/dell/csi-isilon/releases) + +**Repository for CSI Driver for Dell EMC PowerScale** ## Description -CSI Driver for PowerScale is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) -driver for Dell EMC PowerScale. It supports CSI specification version 1.1. +CSI Driver for Dell EMC PowerScale is a Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) driver that provides support for provisioning persistent storage using Dell EMC PowerScale storage array. + +It supports CSI specification version 1.2. -This project may be compiled as a stand-alone binary using Golang that, when -run, provides a valid CSI endpoint. This project can also be built -as a Golang plug-in in order to extend the functionality of other programs. +This project may be compiled as a stand-alone binary using Golang that, when run, provides a valid CSI endpoint. It also can be used as a precompiled container image. 
## Support The CSI Driver for Dell EMC PowerScale image, which is the built driver code, is available on Dockerhub and is officially supported by Dell EMC. -The source code for CSI Driver for Dell EMC PowerScale available on Github is unsupported and provided solely under the terms of the license attached to the -source code. +The source code for CSI Driver for Dell EMC PowerScale available on Github is unsupported and provided solely under the terms of the license attached to the source code. For clarity, Dell EMC does not provide support for any source code modifications. For any CSI driver issues, questions or feedback, join the [Dell EMC Container community]() -## Overview - -PowerScale CSI plugins implement an interface between CSI enabled Container Orchestrator(CO) and PowerScale Storage Array. It allows dynamically provisioning PowerScale volumes -and attaching them to workloads. - -## Introduction -The CSI Driver For Dell EMC PowerScale conforms to CSI spec 1.1 - * Support for Kubernetes 1.17, 1.18 and 1.19 - * Will add support for other orchestrators over time - * The CSI specification is documented here: https://github.com/container-storage-interface/spec. The driver uses CSI v1.1. - -## CSI Driver For Dell EMC PowerScale Capabilities - -|Capability | Supported | Not supported | -|------------|-----------| --------------| -|Provisioning | Static and dynamic provisioning of volumes, Volume expansion, Create volume from snapshots, Volume Cloning, CSI Ephemeral Inline Volumes| Generic Ephemeral Volumes | -|Export, Mount | Mount volume as file system, Topology support for volumes, mount options | Raw volumes| -|Data protection | Creation of snapshots| | -|Installer | Helm3, Dell CSI Operator (for OpenShift platform) | | -|Access mode | SINGLE_NODE_WRITER , MULTI_NODE_READER_ONLY , MULTI_NODE_MULTI_WRITER| -|Kubernetes | v1.17, v1.18, v1.19 | v1.15 or previous versions, v1.20 or higher versions| -|OS | RHEL 7.x, CentOS 7.x, Ubuntu 20.0.4 | other Linux variants| -|PowerScale | OneFS 8.1, 8.2, 9.0 and 9.1 | Previous versions| -|Protocol | NFS | SMB, CIFS| -|OpenShift| 4.5, 4.6 | -|Docker EE| 3.1* | - -Note: CoreOS worker nodes are only supported with RedHat OpenShift platform - -## Installation overview - -Installation in a Kubernetes cluster should be done using the scripts within the `dell-csi-helm-installer` directory. - -For more information, consult the [README.md](dell-csi-helm-installer/README.md) - -The controller section of the Helm chart installs the following components in a Stateful Set: - -* CSI Driver for PowerScale -* Kubernetes Provisioner, which provisions the provisioning volumes -* Kubernetes Attacher, which attaches the volumes to the containers -* Kubernetes Snapshotter, which provides snapshot support -* Kubernetes Resizer, which provides resize support - -The node section of the Helm chart installs the following component in a Daemon Set: - -* CSI Driver for PowerScale -* Kubernetes Registrar, which handles the driver registration - -### Prerequisites - -Before you install CSI Driver for PowerScale, verify the requirements that are mentioned in this topic are installed and configured. - -#### Requirements - -* Install Kubernetes. -* Configure Docker service -* Install Helm v3 -* Install volume snapshot components -* Deploy PowerScale driver using Helm - -**Note:** There is no feature gate that needs to be set explicitly for csi drivers from 1.17 onwards. All the required feature gates are either beta/GA. 
- -## Configure Docker service - -The mount propagation in Docker must be configured on all Kubernetes nodes before installing CSI Driver for PowerScale. - -### Procedure - -1. Edit the service section of */etc/systemd/system/multi-user.target.wants/docker.service* file as follows: - - ``` - [Service] - ... - MountFlags=shared - ``` - -2. Restart the Docker service with systemctl daemon-reload and - - ``` - systemctl daemon-reload - systemctl restart docker - ``` - -## Install volume snapshot components - -### Install Snapshot Beta CRDs -To install snapshot crds specify `--snapshot-crd` flag to driver installation script `dell-csi-helm-installer/csi-install.sh` during driver installation - -### [Install Common Snapshot Controller](), if not already installed for the cluster - - ``` - kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v3.0.2/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml - kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v3.0.2/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml - ``` - -## Install CSI Driver for PowerScale - -Install CSI Driver for PowerScale using this procedure. - -*Before you begin* - * You must clone the source [git repository](https://github.com/dell/csi-isilon), ready for below procedure. - * In the `dell-csi-helm-installer` directory, there should be two shell scripts, *csi-install.sh* and *csi-uninstall.sh*. These scripts - handle some of the pre and post operations that cannot be performed in the helm chart. - -Procedure - -1. Collect information from the PowerScale Systems like IP address, username and password. Make a note of the value for these parameters as they must be entered in the secret.yaml and values file. - -2. Copy the helm/csi-isilon/values.yaml into a new location with name say *my-isilon-settings.yaml*, to customize settings for installation. - -3. Edit *my-isilon-settings.yaml* to set the following parameters for your installation: - - The following table lists the primary configurable parameters of the PowerScale driver helm chart and their default values. More detailed information can be - found in the [`values.yaml`](helm/csi-isilon/values.yaml) file in this repository. - - | Parameter | Description | Required | Default | - | --------- | ----------- | -------- |-------- | - | isiIP | "isiIP" defines the HTTPs endpoint of the PowerScale OneFS API server | true | - | - | isiPort | "isiPort" defines the HTTPs port number of the PowerScale OneFS API server | false | 8080 | - | isiInsecure | "isiInsecure" specifies whether the PowerScale OneFS API server's certificate chain and host name should be verified. | false | true | - | isiAccessZone | The name of the access zone a volume can be created in | false | System | - | volumeNamePrefix | "volumeNamePrefix" defines a string prepended to each volume created by the CSI driver. | false | k8s | - | controllerCount | "controllerCount" defines the number of csi-powerscale controller nodes to deploy to the Kubernetes release.| true | 2 | - | enableDebug | Indicates whether debug level logs should be logged | false | true | - | verbose | Indicates what content of the OneFS REST API message should be logged in debug level logs | false | 1 | - | enableQuota | Indicates whether the provisioner should attempt to set (later unset) quota on a newly provisioned volume. 
This requires SmartQuotas to be enabled.| false | true | - | noProbeOnStart | Indicates whether the controller/node should probe during initialization | false | false | - | isiPath | The default base path for the volumes to be created, this will be used if a storage class does not have the IsiPath parameter specified| false - | /ifs/data/csi | - | autoProbe | Enable auto probe. | false | true | - | nfsV3 | Specify whether to set the version to v3 when mounting an NFS export. If the value is "false", then the default version supported will be used (i.e. the mount command will not explicitly specify "-o vers=3" option). This flag has now been deprecated and will be removed in a future release. Please useStorageClass.mountOptions if you want to specify 'vers=3' as a mount option. | false | false | - | enableCustomTopology | Indicates PowerScale FQDN/IP which will be fetched from node label and the same will be used by controller and node pod to establish connection to Array. This requires enableCustomTopology to be enabled. | false | false | - | ***Storage Class parameters*** | Following parameters are related to Storage Class | - | name | "storageClass.name" defines the name of the storage class to be defined. | false | isilon | - | isDefault | "storageClass.isDefault" defines whether the primary storage class should be the default. | false | true | - | reclaimPolicy | "storageClass.reclaimPolicy" defines what will happen when a volume is removed from the Kubernetes API. Valid values are "Retain" and "Delete".| false | Delete | - | accessZone | The Access Zone where the Volume would be created | false | System | - | AzServiceIP | Access Zone service IP if different from isiIP, specify here and refer in storageClass | false | | - | rootClientEnabled | When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled"| false | false | - | ***Controller parameters*** | Set nodeSelector and tolerations for controller | - | nodeSelector | Define nodeSelector for the controllers, if required | false | | - | tolerations | Define tolerations for the controllers, if required | false | | - - Note: User should provide all boolean values with double quotes. This applicable only for my-isilon-settings.yaml. Ex: "true"/"false" - - Note: controllerCount parameter value should not exceed number of nodes in the kubernetes cluster. Otherwise some of the controller pods will be in "Pending" state till new nodes are available for scheduling. The installer will exit with a WARNING on the same. - -4. Create namespace - - Run `kubectl create namespace isilon` to create the isilon namespace. Specify the same namespace name while installing the driver. - Note: CSI PowerScale also supports installation of driver in custom namespace. - -5. Create a secret file for the OneFS credentials by editing the secret.yaml present under helm directory. Replace the values for the username and password parameters. - - Use the following command to convert username/password to base64 encoded string - ``` - echo -n 'admin' | base64 - echo -n 'password' | base64 - ``` - Run `kubectl create -f secret.yaml` to create the secret. - - Note: The username specified in secret.yaml must be from the authentication providers of PowerScale. The user must have enough privileges to perform the actions. The suggested privileges are as follows: - ``` - ISI_PRIV_LOGIN_PAPI - ISI_PRIV_NFS - ISI_PRIV_QUOTA - ISI_PRIV_SNAPSHOT - ISI_PRIV_IFS_RESTORE - ISI_PRIV_NS_IFS_ACCESS - ISI_PRIV_LOGIN_SSH - ``` - -6. 
Install OneFS CA certificates by following the instructions from next section, if you want to validate OneFS API server's certificates. If not, create an empty - secret using the following command and empty secret should be created for the successful CSI Driver for DELL EMC Powerscale installation. - ``` - kubectl create -f emptysecret.yaml - ``` - -7. Install CSI driver for PowerScale by following the instructions from [README](dell-csi-helm-installer/README.md) - - -## Certificate validation for OneFS REST API calls - -The CSI driver exposes an install parameter 'isiInsecure' which determines if the driver -performs client-side verification of the OneFS certificates. The 'isiInsecure' parameter is set to true by default and the driver does not verify the OneFS certificates. - -If the isiInsecure is set to false, then the secret isilon-certs must contain the CA certificate for OneFS. -If this secret is an empty secret, then the validation of the certificate fails, and the driver fails to start. - -If the isiInsecure parameter is set to false and a previous installation attempt to create the empty secret, then this secret must be deleted and re-created using the CA certs. If the OneFS certificate is self-signed, then perform the following steps: - -### Procedure - -1. To fetch the certificate, run `openssl s_client -showcerts -connect /dev/null | openssl x509 -outform PEM > ca_cert.pem` - -2. To create the secret, run `kubectl create secret generic isilon-certs --from-file=ca_cert.pem -n isilon` - -## Upgrade CSI Driver for DELL EMC PowerScale from version v1.3.0 to v1.4.0 - -* Verify that all pre-requisites to install CSI Driver for DELL EMC PowerScale v1.4.0 are fulfilled. - -* Clone the repository https://github.com/dell/csi-powerscale , Copy the helm/csi-isilon/values.yaml into a new location with name say my-isilon-settings.yaml, - to customize settings for installation. Edit my-isilon-settings.yaml as per the requirements. - -* Change to directory dell-csi-helm-installer to install the DELL EMC PowerScale - `cd dell-csi-helm-installer` - -* Upgrade the CSI Driver for DELL EMC PowerScale v1.4.0 using following command. - - ##### `./csi-install.sh --namespace isilon --values ./my-isilon-settings.yaml --upgrade` - -## Test deploying a simple pod with PowerScale storage - -Test the deployment workflow of a simple pod on PowerScale storage. - -1. **Creating a volume:** - - Create a file `pvc.yaml` using sample yaml files located at test/sample_files/ - - - Execute the following command to create volume - ``` - kubectl create -f $PWD/pvc.yaml - ``` - - Result: After executing the above command PVC will be created in the default namespace, and the user can see the pvc by executing `kubectl get pvc`. - Note: Verify system for the new volume - -3. **Attach the volume to Host** - - To attach a volume to a host, create a new application(Pod) and use the PVC created above in the Pod. This scenario is explained using the Nginx application. Create `nginx.yaml` - using sample yaml files located at test/sample_files/. - - Execute the following command to mount the volume to Kubernetes node - ``` - kubectl create -f $PWD/nginx.yaml - ``` - - Result: After executing the above command, new nginx pod will be successfully created and started in the default namespace. - Note: Verify PowerScale system for host to be part of clients/rootclients field of export created for volume and used by nginx application. - -4. 
**Create Snapshot** - - The following procedure will create a snapshot of the volume in the container using VolumeSnapshot objects defined in snap.yaml. The sample file for snapshot creation is located - at test/sample_files/ - - Execute the following command to create snapshot - ``` - kubectl create -f $PWD/snap.yaml - ``` - - The spec.source section contains the volume that will be snapped in the default namespace. For example, if the volume to be snapped is testvolclaim1, then the created snapshot is named testvolclaim1-snap1. Verify the PowerScale system for newly created snapshot. - - Note: - - * User can see the snapshots using `kubectl get volumesnapshot` - * Notice that this VolumeSnapshot class has a reference to a snapshotClassName:isilon-snapclass. The CSI Driver for PowerScale installation creates this class - as its default snapshot class. - * You can see its definition using `kubectl get volumesnapshotclasses isilon-snapclass -o yaml`. - -5. **Create Volume from Snapshot** - - The following procedure will create a new volume from a given snapshot which is specified in spec dataSource field. - - The sample file for volume creation from snapshot is located under test/sample_files/ - - Execute the following command to create snapshot - ``` - kubectl create -f $PWD/volume_from_snap.yaml - ``` - - Verify the PowerScale system for newly created volume from snapshot. - -6. **Delete Snapshot** - - Execute the following commands to delete the snapshot - - ``` - kubectl get volumesnapshot - kubectl delete volumesnapshot testvolclaim1-snap1 - ``` - -7. **Create new volume from existing volume(volume clone)** - - The following procedure will create a new volume from another existing volume which is specified in spec dataSource field. - - The sample file for volume creation from volume is located at test/sample_files/ - - Execute the following command to create snapshot - ``` - kubectl create -f $PWD/volume_from_volume.yaml - ``` - - Verify the PowerScale system for new created volume from volume. - -8. **To Unattach the volume from Host** - - Delete the nginx application to unattach the volume from host - - `kubectl delete -f nginx.yaml` - -9. **To delete the volume** - - ``` - kubectl get pvc - kubectl delete pvc testvolclaim1 - kubectl get pvc - ``` -## Topology Support - - From version 1.4.0, the CSI Powerscale driver supports Topology by default which forces volumes to be placed on worker nodes that have connectivity to the backend storage, as a result of which the nodes which have access to PowerScale Array are appropriately labelled. The driver leverages these labels to ensure that the driver components (controller, node) are spawned only on nodes wherein these labels exist. - - This covers use cases where: - - The csi-powerscale Driver may not be installed or running on some nodes where Users have chosen to restrict the nodes on accessing the powerscale storage array. - - We support CustomTopology which enables users to apply labels for nodes - "csi-isilon.dellemc.com/XX.XX.XX.XX=csi-isilon.dellemc.com" and expect the labels to be - honored by the driver. - - When “enableCustomTopology” is set to “true”, CSI driver fetches custom labels “csi-isilon.dellemc.com/XX.XX.XX.XX=csi-isilon.dellemc.com” applied on worker nodes, and use them to initialize node pod with custom PowerScale FQDN/IP. 
- - -## Topology Usage - - In order to utilize the Topology feature create a custom storage class with volumeBindingMode set to WaitForFirstConsumer and specify the desired topology labels within allowedTopologies field of this custom storage class. This ensures that pod scheduling takes advantage of the topology and the node selected has access to provisioned volumes. - - A sample manifest file is available at `helm/samples/storageclass/isilon.yaml` to create a storage class with Topology support. - -For additional information, see the [Kubernetes Topology documentation](https://kubernetes-csi.github.io/docs/topology.html). - -## Volume creation from datasource (i.e., from another volume or snapshot) - -Volumes can be created by pre-populating data in them from a data source. The data source can be another existing volume or a snapshot. - -For volume request from another volume, the PowerScale user must have SSH privilege(ISI_PRIV_LOGIN_SSH) assigned to him. - -For READ-WRITE volume request from snapshot, the PowerScale user must have SSH privilege(ISI_PRIV_LOGIN_SSH) assigned to him. - -Note: Only one READ-ONLY volume can be created from snapshot at any point in time. This operation is space efficient and fast compared to READ-WRITE volumes. - - -## Install CSI-PowerScale driver using dell-csi-operator in OpenShift - -CSI Driver for Dell EMC PowerScale can also be installed via the new Dell EMC Storage Operator. - -The Dell EMC Storage CSI Operator is a Kubernetes Operator, which can be used to install and manage the CSI Drivers provided by Dell EMC for various storage platforms. This operator is available as a community operator for upstream Kubernetes and can be deployed using OperatorHub.io. It is also available as a community operator for OpenShift clusters and can be deployed using OpenShift Container Platform. Both these methods of installation use OLM (Operator Lifecycle Manager). - -The operator can also be deployed directly by following the instructions available here - https://github.com/dell/dell-csi-operator. - -There are sample manifests provided which can be edited to do an easy installation of the driver. Please note that the deployment of the driver using the operator doesn’t use any Helm charts and the installation & configuration parameters will be slightly different from the ones specified via the Helm installer. - -Kubernetes Operators make it easy to deploy and manage entire lifecycle of complex Kubernetes applications. Operators use Custom Resource Definitions (CRD) which represents the application and use custom controllers to manage them. - -### Listing CSI-PowerScale drivers -User can query for csi-powerscale driver using the following command -`kubectl get csiisilon --all-namespaces` - -### Procedure to create new CSI-PowerScale driver +## Building +This project is a Go module (see golang.org Module information for explanation). +The dependencies for this project are in the go.mod file. -1. Create namespace +To build the source, execute `make clean build`. - Run `kubectl create namespace isilon` to create the isilon namespace. - -2. Create *isilon-creds* - - Create a file called isilon-creds.yaml with the following content - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: isilon-creds - namespace: isilon - type: Opaque - data: - # set username to the base64 encoded username - username: - # set password to the base64 encoded password - password: - ``` - - Replace the values for the username and password parameters. 
These values can be optioned using base64 encoding as described in the following example: - ``` - echo -n "myusername" | base64 - echo -n "mypassword" | base64 - ``` - - Run `kubectl create -f isilon-creds.yaml` command to create the secret. - -3. Create a CR (Custom Resource) for PowerScale using the sample files provided - [here](https://github.com/dell/dell-csi-operator/tree/master/samples) . +To run unit tests, execute `make unit-test`. -4. Execute the following command to create PowerScale custom resource - ```kubectl create -f ``` . - The above command will deploy the CSI-PowerScale driver. - -5. User can configure the following parameters in CR - - The following table lists the primary configurable parameters of the PowerScale driver and their default values. - - | Parameter | Description | Required | Default | - | --------- | ----------- | -------- |-------- | - | ***Common parameters for node and controller*** | - | CSI_ENDPOINT | The UNIX socket address for handling gRPC calls | No | /var/run/csi/csi.sock | - | X_CSI_DEBUG | To enable debug mode | No | false | - | X_CSI_ISI_ENDPOINT | HTTPs endpoint of the PowerScale OneFS API server | Yes | | - | X_CSI_ISI_INSECURE | Specifies whether SSL security needs to be enabled for communication between PowerScale and CSI Driver | No | true | - | X_CSI_ISI_PATH | Base path for the volumes to be created | Yes | | - | X_CSI_ISI_AUTOPROBE | To enable auto probing for driver | No | true | - | X_CSI_ISILON_NO_PROBE_ON_START | Indicates whether the controller/node should probe during initialization | Yes | | - | ***Controller parameters*** | - | X_CSI_MODE | Driver starting mode | No | controller | - | X_CSI_ISI_ACCESS_ZONE | Name of the access zone a volume can be created in | No | System | - | X_CSI_ISI_QUOTA_ENABLED | To enable SmartQuotas | Yes | | - | ***Node parameters*** | - | X_CSI_ISILON_NFS_V3 | Set the version to v3 when mounting an NFS export. If the value is "false", then the default version supported will be used | Yes | | - | X_CSI_MODE | Driver starting mode | No | node | +To build a podman based image, execute `make podman-build`. -## Support for Docker EE +You can run an integration test on a Linux system by populating the env files at `test/integration/` with values for your Dell EMC PowerScale systems and then run "`make integration-test`". -The CSI Driver for Dell EMC PowerScale supports Docker EE & deployment on clusters bootstrapped with UCP (Universal Control Plane). +## Runtime Dependencies +Both the Controller and the Node portions of the driver can only be run on nodes which have network connectivity to a “`PowerScale Cluster`” (which is used by the driver). -*UCP version 3.3.3 supports kubernetes 1.18 and CSI driver can be installed on UCP 3.1 with Helm. With Docker EE 3.1, we also supports those UCP versions which -leverage k8s 1.17. +## Driver Installation +Please consult the [Installation Guide](https://dell.github.io/storage-plugin-docs/docs/installation/) -The installation process for the driver on such clusters remains the same as the installation process on upstream clusters. +## Using Driver +A number of test helm charts and scripts are found in the directory test/helm. Please refer to the section `Testing Drivers` in the [Documentation](https://dell.github.io/storage-plugin-docs/docs/installation/test/) for more info. 
-On UCP based clusters, kubectl may not be installed by default, it is important that [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) is installed prior to the installation of the driver. +## Documentation +For more detailed information on the driver, please refer to [Dell Storage Documentation](https://dell.github.io/storage-plugin-docs/docs/) -The worker nodes in UCP backed clusters may run any of the OSs which we support with upstream clusters. +For a detailed set of information on supported platforms and driver capabilities, please refer to the [Features and Capabilities Documentation](https://dell.github.io/storage-plugin-docs/docs/dell-csi-driver/) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index a611635..d73e0eb 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -1,29 +1,23 @@ -# Release Notes - CSI Driver for PowerScale v1.4.0 +# Release Notes - CSI PowerScale v1.5.0 [![Go Report Card](https://goreportcard.com/badge/github.com/dell/csi-isilon)](https://goreportcard.com/report/github.com/dell/csi-isilon) [![License](https://img.shields.io/github/license/dell/csi-isilon)](https://github.com/dell/csi-isilon/blob/master/LICENSE) [![Docker](https://img.shields.io/docker/pulls/dellemc/csi-isilon.svg?logo=docker)](https://hub.docker.com/r/dellemc/csi-isilon) ## New Features/Changes -- Added support for OpenShift 4.6 with RHEL and CoreOS worker nodes -- Added support for Red Hat Enterprise Linux (RHEL) 7.9 -- Added support for Ubuntu 20.04 -- Added support for Controller high availability (multiple-controllers) -- Added Topology support -- Added support for CSI Ephemeral Inline Volumes -- Added support for mount options -- Enhancements to volume creation from data source -- Enhanced support for Docker EE 3.1 +- Added support for Kubernetes v1.20 +- Added support for OpenShift 4.7 with RHEL and CoreOS worker nodes +- Added support for Red Hat Enterprise Linux (RHEL) 8.x +- Added multi-cluster support through single instance of driver installation +- Added support for custom networks for NFS I/O traffic +- SSH permissions are no longer required. You can safely revoke the privilege ISI_PRIV_LOGIN_SSH for the CSI driver user -## Resolved Issues - | Problem summary | Found in version | Resolved in version | - | --------------- | ---------------- | ------------------- | - | POD creation fails in OpenShift and Kubernetes environments, if hostname is not an FQDN | v1.3.0 | v1.4.0 | - | When creating volume from a snapshot or volume from volume, the owner of the new files or folders that are copied from the source snapshot is the Isilon user who is specified in secret.yaml. So the original owner of a file or folder might not be the owner of the newly created file or folder. | | v1.4.0 | +## Fixed Issues +- There are no Fixed issues in this release. ## Known Issues | Issue | Resolution or workaround, if known | | ----- | ---------------------------------- | | Creating snapshot fails if the parameter IsiPath in volume snapshot class and related storage class are not the same. The driver uses the incorrect IsiPath parameter and tries to locate the source volume due to the inconsistency. | Ensure IsiPath in VolumeSnapshotClass yaml and related storageClass yaml are the same. | - | While deleting a volume, if there are files or folders created on the volume that are owned by different users. If the Isilon credentials used are for a nonprivileged Isilon user, the delete volume action fails. It is due to the limitation in Linux permission control. 
| To perform the delete volume action, the user account must be assigned a role that has the privilege ISI_PRIV_IFS_RESTORE. The user account must have the following set of privileges to ensure that all the CSI Isilon driver capabilities work properly: <br> * ISI_PRIV_LOGIN_PAPI <br> * ISI_PRIV_NFS <br> * ISI_PRIV_QUOTA <br> * ISI_PRIV_SNAPSHOT <br> * ISI_PRIV_IFS_RESTORE <br> * ISI_PRIV_NS_IFS_ACCESS <br> * ISI_PRIV_LOGIN_SSH <br> In some cases, ISI_PRIV_BACKUP is also required, for example, when files owned by other users have mode bits set to 700. |
 + | While deleting a volume, if there are files or folders created on the volume that are owned by different users. If the Isilon credentials used are for a nonprivileged Isilon user, the delete volume action fails. It is due to the limitation in Linux permission control. | To perform the delete volume action, the user account must be assigned a role that has the privilege ISI_PRIV_IFS_RESTORE. The user account must have the following set of privileges to ensure that all the CSI Isilon driver capabilities work properly: <br> * ISI_PRIV_LOGIN_PAPI <br> * ISI_PRIV_NFS <br> * ISI_PRIV_QUOTA <br> * ISI_PRIV_SNAPSHOT <br> * ISI_PRIV_IFS_RESTORE <br> * ISI_PRIV_NS_IFS_ACCESS <br>
In some cases, ISI_PRIV_BACKUP is also required, for example, when files owned by other users have mode bits set to 700. | | If hostname is mapped to loopback IP in /etc/hosts file, and pods are created using 1.3.0.1 release, after upgrade to 1.4.0 there is a possibility of "localhost" as stale entry in export | We recommend you not to map hostname to loopback IP in /etc/hosts file | | If the length of the nodeID exceeds 128 characters, driver fails to update CSINode object and installation fails. This is due to a limitation set by CSI spec which doesn't allow nodeID to be greater than 128 characters. | The CSI PowerScale driver uses the hostname for building the nodeID which is set in CSINode resource object, hence we recommend not having very long hostnames in order to avoid this issue. This current limitation of 128 characters is likely to be relaxed in future kubernetes versions as per this issue in the community: https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/issues/581 diff --git a/common/constants/consts.go b/common/constants/consts.go index 3129e5c..dbbb586 100644 --- a/common/constants/consts.go +++ b/common/constants/consts.go @@ -53,4 +53,7 @@ const ( // VolumeSnapshotsPath is the snapshot directory base path on PowerScale array VolumeSnapshotsPath = "/ifs/.snapshot" + + //IsilonConfigFile isilon-creds file with credential info of isilon clusters + IsilonConfigFile = "/isilon-configs/config" ) diff --git a/common/constants/envvars.go b/common/constants/envvars.go index 734d190..ab37310 100644 --- a/common/constants/envvars.go +++ b/common/constants/envvars.go @@ -21,22 +21,10 @@ const ( // EnvCSIEndpoint is the name of the unix domain socket that the csi driver is listening on EnvCSIEndpoint = "CSI_ENDPOINT" - // EnvEndpoint is the name of the enviroment variable used to set the - // HTTPS endpoint of the Isilon OneFS API server - EnvEndpoint = "X_CSI_ISI_ENDPOINT" - // EnvPort is the name of the enviroment variable used to set the // HTTPS port number of the Isilon OneFS API server EnvPort = "X_CSI_ISI_PORT" - // EnvUser is the name of the enviroment variable used to set the - // username when authenticating to the Isilon OneFS API server - EnvUser = "X_CSI_ISI_USER" - - // EnvPassword is the name of the enviroment variable used to set the - // user's password when authenticating to the Isilon OneFS API server - EnvPassword = "X_CSI_ISI_PASSWORD" - // EnvInsecure is the name of the enviroment variable used to specify // that the Isilon OneFS API server's certificate chain and host name should not // be verified @@ -80,4 +68,10 @@ const ( // EnvKubeConfigPath indicates kubernetes configuration that has to be used by CSI Driver EnvKubeConfigPath = "KUBECONFIG" + + // EnvAllowedNetworks indicates list of networks on which NFS traffic is allowed + EnvAllowedNetworks = "X_CSI_ALLOWED_NETWORKS" + + // EnvIsilonConfigFile specifies the filepath containing Isilon cluster's config details + EnvIsilonConfigFile = "X_CSI_ISILON_CONFIG_PATH" ) diff --git a/common/utils/logging.go b/common/utils/logging.go new file mode 100644 index 0000000..e5f5edd --- /dev/null +++ b/common/utils/logging.go @@ -0,0 +1,150 @@ +package utils + +import ( + "context" + "fmt" + "github.com/dell/gocsi" + "github.com/sirupsen/logrus" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var singletonLog *logrus.Logger +var once sync.Once + +// Constants used for logging +const ( + // Default log format will output [INFO]: 2006-01-02T15:04:05Z07:00 - Log message 
+ defaultLogFormat = "time=\"%time%\" level=%lvl% %clusterName% %runid% msg=\"%msg%\"" + defaultTimestampFormat = time.RFC3339 + ClusterName = "clusterName" + PowerScaleLogger = "powerscalelog" + LogFields = "fields" + RequestID = "requestid" + RunID = "runid" +) + +// Formatter implements logrus.Formatter interface. +type Formatter struct { + //logrus.TextFormatter + // Timestamp format + TimestampFormat string + // Available standard keys: time, msg, lvl + // Also can include custom fields but limited to strings. + // All of fields need to be wrapped inside %% i.e %time% %msg% + LogFormat string + + CallerPrettyfier func(*runtime.Frame) (function string, file string) +} + +// Format building log message. +func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) { + output := f.LogFormat + if output == "" { + output = defaultLogFormat + } + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + output = strings.Replace(output, "%time%", entry.Time.Format(timestampFormat), 1) + output = strings.Replace(output, "%msg%", entry.Message, 1) + level := strings.ToUpper(entry.Level.String()) + output = strings.Replace(output, "%lvl%", strings.ToLower(level), 1) + + fields := entry.Data + + runID, ok := fields[RunID] + if ok { + output = strings.Replace(output, "%runid%", fmt.Sprintf("runid=%v", runID), 1) + } else { + output = strings.Replace(output, "%runid%", "", 1) + } + + clusterName, ok := fields[ClusterName] + if ok { + output = strings.Replace(output, "%clusterName%", fmt.Sprintf("clusterName=%v", clusterName), 1) + } else { + output = strings.Replace(output, "%clusterName%", "", 1) + } + + for k, val := range entry.Data { + switch v := val.(type) { + case string: + output = strings.Replace(output, "%"+k+"%", v, 1) + case int: + s := strconv.Itoa(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + case bool: + s := strconv.FormatBool(v) + output = strings.Replace(output, "%"+k+"%", s, 1) + } + } + + var fileVal string + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + _, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if fileVal != "" { + output = fmt.Sprintf("%s file=\"%s\"", output, fileVal) + } + } + + output = fmt.Sprintf("%s\n", output) + + return []byte(output), nil +} + +// GetLogger function to get custom logging +func GetLogger() *logrus.Logger { + once.Do(func() { + singletonLog = logrus.New() + fmt.Println("csi-powerscale logger initiated. 
This should be called only once.") + var debug bool + debugStr := os.Getenv(gocsi.EnvVarDebug) + debug, _ = strconv.ParseBool(debugStr) + if debug { + singletonLog.Level = logrus.DebugLevel + singletonLog.SetReportCaller(true) + singletonLog.Formatter = &Formatter{ + CallerPrettyfier: func(f *runtime.Frame) (string, string) { + filename1 := strings.Split(f.File, "dell/csi-powerscale") + if len(filename1) > 1 { + return fmt.Sprintf("%s()", f.Function), fmt.Sprintf("dell/csi-powerscale%s:%d", filename1[1], f.Line) + } + + filename2 := strings.Split(f.File, "dell/goisilon") + if len(filename2) > 1 { + return fmt.Sprintf("%s()", f.Function), fmt.Sprintf("dell/goisilon%s:%d", filename2[1], f.Line) + } + + return fmt.Sprintf("%s()", f.Function), fmt.Sprintf("%s:%d", f.File, f.Line) + }, + } + } else { + singletonLog.Formatter = &Formatter{} + } + }) + + return singletonLog +} + +// GetRunIDLogger returns the current runID logger +func GetRunIDLogger(ctx context.Context) *logrus.Entry { + tempLog := ctx.Value(PowerScaleLogger) + if ctx.Value(PowerScaleLogger) != nil && reflect.TypeOf(tempLog) == reflect.TypeOf(&logrus.Entry{}) { + return ctx.Value(PowerScaleLogger).(*logrus.Entry) + } + return nil +} diff --git a/common/utils/utils.go b/common/utils/utils.go index 457e986..47fedd1 100644 --- a/common/utils/utils.go +++ b/common/utils/utils.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "gopkg.in/yaml.v2" "net" "os" "path" @@ -31,9 +32,9 @@ import ( gournal "github.com/akutz/gournal" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/csi-isilon/common/constants" + csictx "github.com/dell/gocsi/context" isi "github.com/dell/goisilon" "github.com/google/uuid" - csictx "github.com/rexray/gocsi/context" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -81,6 +82,7 @@ func ConfigureLogger(debugEnabled bool) context.Context { // ParseBooleanFromContext parses an environment variable into a boolean value. If an error is encountered, default is set to false, and error is logged func ParseBooleanFromContext(ctx context.Context, key string) bool { + log := GetRunIDLogger(ctx) if val, ok := csictx.LookupEnv(ctx, key); ok { b, err := strconv.ParseBool(val) if err != nil { @@ -93,8 +95,24 @@ func ParseBooleanFromContext(ctx context.Context, key string) bool { return false } +// ParseArrayFromContext parses an environment variable into an array of string +func ParseArrayFromContext(ctx context.Context, key string) []string { + log := GetRunIDLogger(ctx) + var values []string + + if val, ok := csictx.LookupEnv(ctx, key); ok { + err := yaml.Unmarshal([]byte(val), &values) + if err != nil { + log.Errorf("invalid array value for '%s'", key) + return values + } + } + return values +} + // ParseUintFromContext parses an environment variable into a uint value. 
If an error is encountered, default is set to 0, and error is logged func ParseUintFromContext(ctx context.Context, key string) uint { + log := GetRunIDLogger(ctx) if val, ok := csictx.LookupEnv(ctx, key); ok { i, err := strconv.ParseUint(val, 10, 0) if err != nil { @@ -152,8 +170,8 @@ func GetNewUUID() (string, error) { } // LogMap logs the key-value entries of a given map -func LogMap(mapName string, m map[string]string) { - +func LogMap(ctx context.Context, mapName string, m map[string]string) { + log := GetRunIDLogger(ctx) log.Debugf("map '%s':", mapName) for key, value := range m { log.Debugf(" [%s]='%s'", key, value) @@ -187,6 +205,9 @@ var QuotaIDPattern = regexp.MustCompile(fmt.Sprintf("^%s(.*)", CSIQuotaIDPrefix) // VolumeIDSeparator is the separator that separates volume name and export ID (two components that a normalized volume ID is comprised of) var VolumeIDSeparator = "=_=_=" +// SnapshotIDSeparator is the separator that separates snapshot id and cluster name (two components that a normalized snapshot ID is comprised of) +var SnapshotIDSeparator = "=_=_=" + // VolumeIDPattern is the regex pattern that identifies the quota id set in the export's description field set by csi driver var VolumeIDPattern = regexp.MustCompile(fmt.Sprintf("^(.+)%s(\\d+)%s(.+)$", VolumeIDSeparator, VolumeIDSeparator)) @@ -199,6 +220,9 @@ var NodeIDPattern = regexp.MustCompile(fmt.Sprintf("^(.+)%s(.+)%s(.+)$", NodeIDS // ExportConflictMessagePattern is the regex pattern that identifies the error message of export conflict var ExportConflictMessagePattern = regexp.MustCompile(fmt.Sprintf("^Export rules (\\d+) and (\\d+) conflict on '(.+)'$")) +// DummyHostNodeID is nodeID used for adding dummy client in client field of export +var DummyHostNodeID = "localhost=#=#=localhost=#=#=127.0.0.1" + // GetQuotaIDWithCSITag formats a given quota id with the CSI tag, e.g. AABpAQEAAAAAAAAAAAAAQA0AAAAAAAAA -> CSI_QUOTA_ID:AABpAQEAAAAAAAAAAAAAQA0AAAAAAAAA func GetQuotaIDWithCSITag(quotaID string) string { @@ -210,7 +234,8 @@ func GetQuotaIDWithCSITag(quotaID string) string { } // GetQuotaIDFromDescription extracts quota id from the description field of export -func GetQuotaIDFromDescription(export isi.Export) (string, error) { +func GetQuotaIDFromDescription(ctx context.Context, export isi.Export) (string, error) { + log := GetRunIDLogger(ctx) log.Debugf("try to extract quota id from the description field of export (id:'%d', path: '%s', description : '%s')", export.ID, export.Paths, export.Description) @@ -234,7 +259,8 @@ func GetQuotaIDFromDescription(export isi.Export) (string, error) { } // GetFQDNByIP returns the FQDN based on the parsed ip address -func GetFQDNByIP(ip string) (string, error) { +func GetFQDNByIP(ctx context.Context, ip string) (string, error) { + log := GetRunIDLogger(ctx) names, err := net.LookupAddr(ip) if err != nil { log.Errorf("error getting FQDN: '%s'", err) @@ -330,41 +356,83 @@ func GetAccessMode(req *csi.ControllerPublishVolumeRequest) (*csi.VolumeCapabili return &(am.Mode), nil } -// GetNormalizedVolumeID combines volume name (i.e. the directory name), export ID and access zone to form the normalized volume ID -// e.g. k8s-e89c9d089e + 19 + csi0zone => k8s-e89c9d089e=_=_=19=_=_=csi0zone -func GetNormalizedVolumeID(volName string, exportID int, accessZone string) string { +// GetNormalizedVolumeID combines volume name (i.e. the directory name), export ID, access zone and clusterName to form the normalized volume ID +// e.g. 
k8s-e89c9d089e + 19 + csi0zone + cluster1 => k8s-e89c9d089e=_=_=19=_=_=csi0zone=_=_=cluster1 +func GetNormalizedVolumeID(ctx context.Context, volName string, exportID int, accessZone, clusterName string) string { + log := GetRunIDLogger(ctx) - volID := fmt.Sprintf("%s%s%s%s%s", volName, VolumeIDSeparator, strconv.Itoa(exportID), VolumeIDSeparator, accessZone) + volID := fmt.Sprintf("%s%s%s%s%s%s%s", volName, VolumeIDSeparator, strconv.Itoa(exportID), VolumeIDSeparator, accessZone, VolumeIDSeparator, clusterName) - log.Debugf("combined volume name '%s' with export ID '%d' and access zone '%s' to form volume ID '%s'", - volName, exportID, accessZone, volID) + log.Debugf("combined volume name '%s' with export ID '%d', access zone '%s' and cluster name '%s' to form volume ID '%s'", + volName, exportID, accessZone, clusterName, volID) return volID } -// ParseNormalizedVolumeID parses the volume ID (following the pattern '^(.+)=_=_=(d+)=_=_=(.+)$') to extract the volume name, export ID and access zone that make up the volume ID -// e.g. k8s-e89c9d089e=_=_=19=_=_=csi0zone => k8s-e89c9d089e, 19, csi0zone -func ParseNormalizedVolumeID(volID string) (string, int, string, error) { +// ParseNormalizedVolumeID parses the volume ID(using VolumeIDSeparator) to extract the volume name, export ID, access zone and cluster name(optional) that make up the volume ID +// e.g. k8s-e89c9d089e=_=_=19=_=_=csi0zone => k8s-e89c9d089e, 19, csi0zone, "" +// e.g. k8s-e89c9d089e=_=_=19=_=_=csi0zone=_=_=cluster1 => k8s-e89c9d089e, 19, csi0zone, cluster1 +func ParseNormalizedVolumeID(ctx context.Context, volID string) (string, int, string, string, error) { + log := GetRunIDLogger(ctx) + tokens := strings.Split(volID, VolumeIDSeparator) + if len(tokens) < 3 { + return "", 0, "", "", fmt.Errorf("volume ID '%s' cannot be split into tokens", volID) + } - matches := VolumeIDPattern.FindStringSubmatch(volID) + var clusterName string + exportID, err := strconv.Atoi(tokens[1]) + if err != nil { + return "", 0, "", "", err + } - if len(matches) < 4 { - return "", 0, "", fmt.Errorf("volume ID '%s' cannot match the expected '^(.+)=_=_=(d+)=_=_=(.+)$' pattern", volID) + if len(tokens) > 3 { + clusterName = tokens[3] } - exportID, err := strconv.Atoi(matches[2]) - if err != nil { - return "", 0, "", err + log.Debugf("volume ID '%s' parsed into volume name '%s', export ID '%d', access zone '%s' and cluster name '%s'", + volID, tokens[0], exportID, tokens[2], clusterName) + + return tokens[0], exportID, tokens[2], clusterName, nil +} + +// GetNormalizedSnapshotID combines snapshotID ID and cluster name to form the normalized snapshot ID +// e.g. 12345 + cluster1 => 12345=_=_=cluster1 +func GetNormalizedSnapshotID(ctx context.Context, snapshotID, clusterName string) string { + log := GetRunIDLogger(ctx) + + snapID := fmt.Sprintf("%s%s%s", snapshotID, SnapshotIDSeparator, clusterName) + + log.Debugf("combined snapshot id '%s' and cluster name '%s' to form normalized snapshot ID '%s'", + snapshotID, clusterName, snapID) + + return snapID +} + +// ParseNormalizedSnapshotID parses the normalized snapshot ID(using SnapshotIDSeparator) to extract the snapshot ID and cluster name(optional) that make up the normalized snapshot ID +// e.g. 12345 => 12345, "" +// e.g. 
12345=_=_=cluster1 => 12345, cluster1 +func ParseNormalizedSnapshotID(ctx context.Context, snapID string) (string, string, error) { + log := GetRunIDLogger(ctx) + tokens := strings.Split(snapID, SnapshotIDSeparator) + if len(tokens) < 1 { + return "", "", fmt.Errorf("snapshot ID '%s' cannot be split into tokens", snapID) + } + + var clusterName string + if len(tokens) > 1 { + clusterName = tokens[1] } - log.Debugf("volume ID '%s' parsed into volume name '%s', export ID '%d' and access zone '%s'", - volID, matches[1], exportID, matches[3]) + log.Debugf("normalized snapshot ID '%s' parsed into snapshot ID '%s' and cluster name '%s'", + snapID, tokens[0], clusterName) - return matches[1], exportID, matches[3], nil + return tokens[0], clusterName, nil } //ParseNodeID parses NodeID to node name, node FQDN and IP address using pattern '^(.+)=#=#=(.+)=#=#=(.+)' -func ParseNodeID(nodeID string) (string, string, string, error) { +func ParseNodeID(ctx context.Context, nodeID string) (string, string, string, error) { + log := GetRunIDLogger(ctx) + matches := NodeIDPattern.FindStringSubmatch(nodeID) if len(matches) < 4 { @@ -411,3 +479,9 @@ func GetVolumeNameFromExportPath(exportPath string) string { } return fmt.Sprint(exportPath[strings.LastIndex(exportPath, "/")+1:]) } + +// GetMessageWithRunID returns message with runID information +func GetMessageWithRunID(runid string, format string, args ...interface{}) string { + str := fmt.Sprintf(format, args...) + return fmt.Sprintf(" runid=%s %s", runid, str) +} diff --git a/common/utils/utils_test.go b/common/utils/utils_test.go index 3f04fe0..8efb939 100644 --- a/common/utils/utils_test.go +++ b/common/utils/utils_test.go @@ -16,6 +16,7 @@ package utils import ( + "context" "fmt" "testing" @@ -57,25 +58,28 @@ func TestRemoveStringsFromSlice(t *testing.T) { } func TestGetNormalizedVolumeID(t *testing.T) { + ctx := context.Background() - volID := GetNormalizedVolumeID("k8s-e89c9d089e", 19, "csi0zone") + volID := GetNormalizedVolumeID(ctx, "k8s-e89c9d089e", 19, "csi0zone", "cluster1") - assert.Equal(t, "k8s-e89c9d089e=_=_=19=_=_=csi0zone", volID) + assert.Equal(t, "k8s-e89c9d089e=_=_=19=_=_=csi0zone=_=_=cluster1", volID) } func TestParseNormalizedVolumeID(t *testing.T) { + ctx := context.Background() - volName, exportID, accessZone, err := ParseNormalizedVolumeID("k8s-e89c9d089e=_=_=19=_=_=csi0zone") + volName, exportID, accessZone, clusterName, err := ParseNormalizedVolumeID(ctx, "k8s-e89c9d089e=_=_=19=_=_=csi0zone=_=_=cluster1") assert.Equal(t, "k8s-e89c9d089e", volName) assert.Equal(t, 19, exportID) assert.Equal(t, "csi0zone", accessZone) + assert.Equal(t, "cluster1", clusterName) assert.Nil(t, err) - _, _, _, err = ParseNormalizedVolumeID("totally bogus") + _, _, _, _, err = ParseNormalizedVolumeID(ctx, "totally bogus") assert.NotNil(t, err) - _, _, _, err = ParseNormalizedVolumeID("k8s-e89c9d089e=_=_=not_an_integer=_=_=csi0zone") + _, _, _, _, err = ParseNormalizedVolumeID(ctx, "k8s-e89c9d089e=_=_=not_an_integer=_=_=csi0zone") assert.NotNil(t, err) } @@ -106,7 +110,8 @@ func TestGetExportIDFromConflictMessage(t *testing.T) { } func TestGetFQDNByIP(t *testing.T) { - fqdn, _ := GetFQDNByIP("111.111.111.111") + ctx := context.Background() + fqdn, _ := GetFQDNByIP(ctx, "111.111.111.111") fmt.Println(fqdn) assert.Equal(t, fqdn, "") } diff --git a/csi-utils/csiutils.go b/csi-utils/csiutils.go new file mode 100644 index 0000000..1187051 --- /dev/null +++ b/csi-utils/csiutils.go @@ -0,0 +1,52 @@ +package csi_utils + +import ( + "errors" + "fmt" + log 
"github.com/sirupsen/logrus" + "net" +) + +// GetNFSClientIP is used to fetch IP address from networks on which NFS traffic is allowed +func GetNFSClientIP(allowedNetworks []string) (string, error) { + var nodeIP string + + addrs, err := net.InterfaceAddrs() + if err != nil { + log.Errorf("Encountered error while fetching system IP addresses: %+v\n", err.Error()) + return "", err + } + + // Populate map to optimize the algorithm for O(n) + networks := make(map[string]bool) + for _, cnet := range allowedNetworks { + networks[cnet] = false + } + + for _, a := range addrs { + switch v := a.(type) { + case *net.IPNet: + if v.IP.To4() != nil { + ip, cnet, err := net.ParseCIDR(a.String()) + log.Debugf("IP address: %s and Network: %s", ip, cnet) + if err != nil { + log.Errorf("Encountered error while parsing IP address %v", a) + continue + } + + if _, ok := networks[cnet.String()]; ok { + log.Infof("Found IP address: %s", ip) + nodeIP = ip.String() + return nodeIP, nil + } + } + } + } + + // If a valid IP address matching allowedNetworks is not found return error + if nodeIP == "" { + return "", errors.New(fmt.Sprintf("No valid IP address found matching against allowedNetworks %v", allowedNetworks)) + } + + return nodeIP, nil +} diff --git a/dell-csi-helm-installer/README.md b/dell-csi-helm-installer/README.md index dc89c58..f5721a2 100644 --- a/dell-csi-helm-installer/README.md +++ b/dell-csi-helm-installer/README.md @@ -77,7 +77,6 @@ Options: --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification --skip-verify-node Skip worker node verification checks - --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes. -h Help ``` @@ -105,7 +104,6 @@ Options: --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification --skip-verify-node Skip worker node verification checks - --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes. -h Help ``` diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml deleted file mode 100644 index 4aa980c..0000000 --- a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml +++ /dev/null @@ -1,85 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.5 - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" - creationTimestamp: null - name: volumesnapshotclasses.snapshot.storage.k8s.io -spec: - additionalPrinterColumns: - - JSONPath: .driver - name: Driver - type: string - - JSONPath: .deletionPolicy - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass - should be deleted when its bound VolumeSnapshot is deleted. 
- name: DeletionPolicy - type: string - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshotClass - listKind: VolumeSnapshotClassList - plural: volumesnapshotclasses - singular: volumesnapshotclass - preserveUnknownFields: false - scope: Cluster - subresources: {} - validation: - openAPIV3Schema: - description: VolumeSnapshotClass specifies parameters that a underlying storage - system uses when creating a volume snapshot. A specific VolumeSnapshotClass - is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses - are non-namespaced - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - deletionPolicy: - description: deletionPolicy determines whether a VolumeSnapshotContent created - through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot - is deleted. Supported values are "Retain" and "Delete". "Retain" means - that the VolumeSnapshotContent and its physical snapshot on underlying - storage system are kept. "Delete" means that the VolumeSnapshotContent - and its physical snapshot on underlying storage system are deleted. Required. - enum: - - Delete - - Retain - type: string - driver: - description: driver is the name of the storage driver that handles this - VolumeSnapshotClass. Required. - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - parameters: - additionalProperties: - type: string - description: parameters is a key-value map with storage driver specific - parameters for creating snapshots. These values are opaque to Kubernetes. - type: object - required: - - deletionPolicy - - driver - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml deleted file mode 100644 index 34c51ad..0000000 --- a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml +++ /dev/null @@ -1,233 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.5 - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" - creationTimestamp: null - name: volumesnapshotcontents.snapshot.storage.k8s.io -spec: - additionalPrinterColumns: - - JSONPath: .status.readyToUse - description: Indicates if a snapshot is ready to be used to restore a volume. 
- name: ReadyToUse - type: boolean - - JSONPath: .status.restoreSize - description: Represents the complete size of the snapshot in bytes - name: RestoreSize - type: integer - - JSONPath: .spec.deletionPolicy - description: Determines whether this VolumeSnapshotContent and its physical snapshot - on the underlying storage system should be deleted when its bound VolumeSnapshot - is deleted. - name: DeletionPolicy - type: string - - JSONPath: .spec.driver - description: Name of the CSI driver used to create the physical snapshot on the - underlying storage system. - name: Driver - type: string - - JSONPath: .spec.volumeSnapshotClassName - description: Name of the VolumeSnapshotClass to which this snapshot belongs. - name: VolumeSnapshotClass - type: string - - JSONPath: .spec.volumeSnapshotRef.name - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent - object is bound. - name: VolumeSnapshot - type: string - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshotContent - listKind: VolumeSnapshotContentList - plural: volumesnapshotcontents - singular: volumesnapshotcontent - preserveUnknownFields: false - scope: Cluster - subresources: - status: {} - validation: - openAPIV3Schema: - description: VolumeSnapshotContent represents the actual "on-disk" snapshot - object in the underlying storage system - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - spec: - description: spec defines properties of a VolumeSnapshotContent created - by the underlying storage system. Required. - properties: - deletionPolicy: - description: deletionPolicy determines whether this VolumeSnapshotContent - and its physical snapshot on the underlying storage system should - be deleted when its bound VolumeSnapshot is deleted. Supported values - are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent - and its physical snapshot on underlying storage system are kept. "Delete" - means that the VolumeSnapshotContent and its physical snapshot on - underlying storage system are deleted. In dynamic snapshot creation - case, this field will be filled in with the "DeletionPolicy" field - defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For - pre-existing snapshots, users MUST specify this field when creating - the VolumeSnapshotContent object. Required. - enum: - - Delete - - Retain - type: string - driver: - description: driver is the name of the CSI driver used to create the - physical snapshot on the underlying storage system. This MUST be the - same as the name returned by the CSI GetPluginName() call for that - driver. Required. - type: string - source: - description: source specifies from where a snapshot will be created. - This field is immutable after creation. Required. 
- properties: - snapshotHandle: - description: snapshotHandle specifies the CSI "snapshot_id" of a - pre-existing snapshot on the underlying storage system. This field - is immutable. - type: string - volumeHandle: - description: volumeHandle specifies the CSI "volume_id" of the volume - from which a snapshot should be dynamically taken from. This field - is immutable. - type: string - type: object - volumeSnapshotClassName: - description: name of the VolumeSnapshotClass to which this snapshot - belongs. - type: string - volumeSnapshotRef: - description: volumeSnapshotRef specifies the VolumeSnapshot object to - which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName - field must reference to this VolumeSnapshotContent's name for the - bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent - object, name and namespace of the VolumeSnapshot object MUST be provided - for binding to happen. This field is immutable after creation. Required. - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: 'If referring to a piece of an object instead of an - entire object, this string should contain a valid JSON/Go field - access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen only - to have some well-defined way of referencing a part of an object. - TODO: this design is not final and this field is subject to change - in the future.' - type: string - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' - type: string - resourceVersion: - description: 'Specific resourceVersion to which this reference is - made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' - type: string - uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' - type: string - type: object - required: - - deletionPolicy - - driver - - source - - volumeSnapshotRef - type: object - status: - description: status represents the current information of a snapshot. - properties: - creationTime: - description: creationTime is the timestamp when the point-in-time snapshot - is taken by the underlying storage system. In dynamic snapshot creation - case, this field will be filled in with the "creation_time" value - returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing - snapshot, this field will be filled with the "creation_time" value - returned from the CSI "ListSnapshots" gRPC call if the driver supports - it. If not specified, it indicates the creation time is unknown. The - format of this field is a Unix nanoseconds time encoded as an int64. 
- On Unix, the command `date +%s%N` returns the current time in nanoseconds - since 1970-01-01 00:00:00 UTC. - format: int64 - type: integer - error: - description: error is the latest observed error during snapshot creation, - if any. - properties: - message: - description: 'message is a string detailing the encountered error - during snapshot creation if specified. NOTE: message may be logged, - and it should not contain sensitive information.' - type: string - time: - description: time is the timestamp when the error was encountered. - format: date-time - type: string - type: object - readyToUse: - description: readyToUse indicates if a snapshot is ready to be used - to restore a volume. In dynamic snapshot creation case, this field - will be filled in with the "ready_to_use" value returned from CSI - "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this - field will be filled with the "ready_to_use" value returned from the - CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, - this field will be set to "True". If not specified, it means the readiness - of a snapshot is unknown. - type: boolean - restoreSize: - description: restoreSize represents the complete size of the snapshot - in bytes. In dynamic snapshot creation case, this field will be filled - in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" - gRPC call. For a pre-existing snapshot, this field will be filled - with the "size_bytes" value returned from the CSI "ListSnapshots" - gRPC call if the driver supports it. When restoring a volume from - this snapshot, the size of the volume MUST NOT be smaller than the - restoreSize if it is specified, otherwise the restoration will fail. - If not specified, it indicates that the size is unknown. - format: int64 - minimum: 0 - type: integer - snapshotHandle: - description: snapshotHandle is the CSI "snapshot_id" of a snapshot on - the underlying storage system. If not specified, it indicates that - dynamic snapshot creation has either failed or it is still in progress. - type: string - type: object - required: - - spec - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml b/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml deleted file mode 100644 index 483706f..0000000 --- a/dell-csi-helm-installer/beta-snapshot-crd/snapshot.storage.k8s.io_volumesnapshots.yaml +++ /dev/null @@ -1,188 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.2.5 - api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/260" - creationTimestamp: null - name: volumesnapshots.snapshot.storage.k8s.io -spec: - additionalPrinterColumns: - - JSONPath: .status.readyToUse - description: Indicates if a snapshot is ready to be used to restore a volume. - name: ReadyToUse - type: boolean - - JSONPath: .spec.source.persistentVolumeClaimName - description: Name of the source PVC from where a dynamically taken snapshot will - be created. - name: SourcePVC - type: string - - JSONPath: .spec.source.volumeSnapshotContentName - description: Name of the VolumeSnapshotContent which represents a pre-provisioned - snapshot. 
- name: SourceSnapshotContent - type: string - - JSONPath: .status.restoreSize - description: Represents the complete size of the snapshot. - name: RestoreSize - type: string - - JSONPath: .spec.volumeSnapshotClassName - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot. - name: SnapshotClass - type: string - - JSONPath: .status.boundVolumeSnapshotContentName - description: The name of the VolumeSnapshotContent to which this VolumeSnapshot - is bound. - name: SnapshotContent - type: string - - JSONPath: .status.creationTime - description: Timestamp when the point-in-time snapshot is taken by the underlying - storage system. - name: CreationTime - type: date - - JSONPath: .metadata.creationTimestamp - name: Age - type: date - group: snapshot.storage.k8s.io - names: - kind: VolumeSnapshot - listKind: VolumeSnapshotList - plural: volumesnapshots - singular: volumesnapshot - preserveUnknownFields: false - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: VolumeSnapshot is a user's request for either creating a point-in-time - snapshot of a persistent volume, or binding to a pre-existing snapshot. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - spec: - description: 'spec defines the desired characteristics of a snapshot requested - by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots - Required.' - properties: - source: - description: source specifies where a snapshot will be created from. - This field is immutable after creation. Required. - properties: - persistentVolumeClaimName: - description: persistentVolumeClaimName specifies the name of the - PersistentVolumeClaim object in the same namespace as the VolumeSnapshot - object where the snapshot should be dynamically taken from. This - field is immutable. - type: string - volumeSnapshotContentName: - description: volumeSnapshotContentName specifies the name of a pre-existing - VolumeSnapshotContent object. This field is immutable. - type: string - type: object - volumeSnapshotClassName: - description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass - requested by the VolumeSnapshot. If not specified, the default snapshot - class will be used if one exists. If not specified, and there is no - default snapshot class, dynamic snapshot creation will fail. Empty - string is not allowed for this field. TODO(xiangqian): a webhook validation - on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' - type: string - required: - - source - type: object - status: - description: 'status represents the current information of a snapshot. NOTE: - status can be modified by sources other than system controllers, and must - not be depended upon for accuracy. 
Controllers should only use information - from the VolumeSnapshotContent object after verifying that the binding - is accurate and complete.' - properties: - boundVolumeSnapshotContentName: - description: 'boundVolumeSnapshotContentName represents the name of - the VolumeSnapshotContent object to which the VolumeSnapshot object - is bound. If not specified, it indicates that the VolumeSnapshot object - has not been successfully bound to a VolumeSnapshotContent object - yet. NOTE: Specified boundVolumeSnapshotContentName alone does not - mean binding is valid. Controllers MUST always verify bidirectional - binding between VolumeSnapshot and VolumeSnapshotContent to - avoid possible security issues.' - type: string - creationTime: - description: creationTime is the timestamp when the point-in-time snapshot - is taken by the underlying storage system. In dynamic snapshot creation - case, this field will be filled in with the "creation_time" value - returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing - snapshot, this field will be filled with the "creation_time" value - returned from the CSI "ListSnapshots" gRPC call if the driver supports - it. If not specified, it indicates that the creation time of the snapshot - is unknown. - format: date-time - type: string - error: - description: error is the last observed error during snapshot creation, - if any. This field could be helpful to upper level controllers(i.e., - application controller) to decide whether they should continue on - waiting for the snapshot to be created based on the type of error - reported. - properties: - message: - description: 'message is a string detailing the encountered error - during snapshot creation if specified. NOTE: message may be logged, - and it should not contain sensitive information.' - type: string - time: - description: time is the timestamp when the error was encountered. - format: date-time - type: string - type: object - readyToUse: - description: readyToUse indicates if a snapshot is ready to be used - to restore a volume. In dynamic snapshot creation case, this field - will be filled in with the "ready_to_use" value returned from CSI - "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this - field will be filled with the "ready_to_use" value returned from the - CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, - this field will be set to "True". If not specified, it means the readiness - of a snapshot is unknown. - type: boolean - restoreSize: - anyOf: - - type: integer - - type: string - description: restoreSize represents the complete size of the snapshot - in bytes. In dynamic snapshot creation case, this field will be filled - in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" - gRPC call. For a pre-existing snapshot, this field will be filled - with the "size_bytes" value returned from the CSI "ListSnapshots" - gRPC call if the driver supports it. When restoring a volume from - this snapshot, the size of the volume MUST NOT be smaller than the - restoreSize if it is specified, otherwise the restoration will fail. - If not specified, it indicates that the size is unknown. 
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - required: - - spec - type: object - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/dell-csi-helm-installer/csi-install.sh b/dell-csi-helm-installer/csi-install.sh index 7c14fa0..3070646 100755 --- a/dell-csi-helm-installer/csi-install.sh +++ b/dell-csi-helm-installer/csi-install.sh @@ -11,14 +11,12 @@ SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" DRIVERDIR="${SCRIPTDIR}/../helm" VERIFYSCRIPT="${SCRIPTDIR}/verify.sh" -SNAPCLASSDIR="${SCRIPTDIR}/beta-snapshot-crd" PROG="${0}" NODE_VERIFY=1 VERIFY=1 MODE="install" WATCHLIST="" -# version of Snapshot CRD to install. Default is none ("") -INSTALL_CRD="" + # export the name of the debug log, so child processes will see it export DEBUGLOG="${SCRIPTDIR}/install-debug.log" declare -a VALIDDRIVERS @@ -47,7 +45,6 @@ function usage() { decho " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root" decho " --skip-verify Skip the kubernetes configuration verification to use the CSI driver, default will run verification" decho " --skip-verify-node Skip worker node verification checks" - decho " --snapshot-crd Install snapshot CRDs. Default will not install Snapshot classes." decho " -h Help" decho @@ -256,41 +253,6 @@ function kubectl_safe() { fi } -# -# install_snapshot_crds -# Downloads and installs snapshot CRDs -function install_snapshot_crd() { - if [ "${INSTALL_CRD}" == "" ]; then - return - fi - log step "Checking and installing snapshot crds" - - declare -A SNAPCLASSES=( - ["volumesnapshotclasses"]="snapshot.storage.k8s.io_volumesnapshotclasses.yaml" - ["volumesnapshotcontents"]="snapshot.storage.k8s.io_volumesnapshotcontents.yaml" - ["volumesnapshots"]="snapshot.storage.k8s.io_volumesnapshots.yaml" - ) - - for C in "${!SNAPCLASSES[@]}"; do - F="${SNAPCLASSES[$C]}" - # check if custom resource exists - kubectl_safe "get customresourcedefinitions" "Failed to get crds" | grep "${C}" --quiet - - if [[ $? -ne 0 ]]; then - # make sure CRD exists - if [ ! -f "${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" ]; then - decho "Unable to to find Snapshot Classes at ${SNAPCLASSDIR}" - exit 1 - fi - # create the custom resource - kubectl_safe "create -f ${SNAPCLASSDIR}/${SNAPCLASSES[$C]}" "Failed to create Volume Snapshot Beta CRD: ${C}" - fi - done - - sleep 10s - log step_success -} - # # verify_kubernetes # will run a driver specific function to verify environmental requirements @@ -302,9 +264,6 @@ function verify_kubernetes() { if [ $NODE_VERIFY -eq 0 ]; then EXTRA_OPTS="$EXTRA_OPTS --skip-verify-node" fi - if [ "${INSTALL_CRD}" == "yes" ]; then - EXTRA_OPTS="$EXTRA_OPTS --snapshot-crd" - fi "${VERIFYSCRIPT}" --namespace "${NS}" --release "${RELEASE}" --values "${VALUES}" --node-verify-user "${NODEUSER}" ${EXTRA_OPTS} VERIFYRC=$? 
case $VERIFYRC in @@ -344,10 +303,6 @@ while getopts ":h-:" optchar; do skip-verify-node) NODE_VERIFY=0 ;; - # SNAPSHOT_CRD - snapshot-crd) - INSTALL_CRD="yes" - ;; upgrade) MODE="upgrade" ;; @@ -435,11 +390,6 @@ header check_for_driver "${MODE}" verify_kubernetes -if [[ "${INSTALL_CRD}" != "" ]]; then - install_snapshot_crd -fi - - # all good, keep processing install_driver "${MODE}" diff --git a/dell-csi-helm-installer/csi-offline-bundle.sh b/dell-csi-helm-installer/csi-offline-bundle.sh index 997b4c0..8d02c55 100755 --- a/dell-csi-helm-installer/csi-offline-bundle.sh +++ b/dell-csi-helm-installer/csi-offline-bundle.sh @@ -243,7 +243,6 @@ if [ "${MODE}" == "helm" ]; then REQUIRED_FILES=( "${HELMDIR}" "${INSTALLERDIR}" - "${REPODIR}/*.pdf" "${REPODIR}/*.md" "${REPODIR}/LICENSE" ) diff --git a/dell-csi-helm-installer/verify-csi-isilon.sh b/dell-csi-helm-installer/verify-csi-isilon.sh new file mode 100644 index 0000000..e43aaf3 --- /dev/null +++ b/dell-csi-helm-installer/verify-csi-isilon.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# +# verify-csi-isilon method +function verify-csi-isilon() { + verify_k8s_versions "1.18" "1.20" + verify_openshift_versions "4.5" "4.6" + verify_namespace "${NS}" + verify_required_secrets "${RELEASE}-creds" + verify_optional_secrets "${RELEASE}-certs" + verify_alpha_snap_resources + verify_snap_requirements + verify_helm_3 +} diff --git a/dell-csi-helm-installer/verify.sh b/dell-csi-helm-installer/verify.sh index 0ebb497..bd808a1 100755 --- a/dell-csi-helm-installer/verify.sh +++ b/dell-csi-helm-installer/verify.sh @@ -21,75 +21,25 @@ fi declare -a VALIDDRIVERS -# verify-csi-powermax method -function verify-csi-powermax() { - verify_k8s_versions "1.17" "1.20" - verify_openshift_versions "4.5" "4.6" - verify_namespace "${NS}" - verify_required_secrets "${RELEASE}-creds" - verify_optional_secrets "${RELEASE}-certs" - verify_optional_secrets "csirevproxy-tls-secret" - verify_alpha_snap_resources - verify_beta_snap_requirements - verify_iscsi_installation - verify_helm_3 -} -# -# verify-csi-isilon method -function verify-csi-isilon() { - verify_k8s_versions "1.17" "1.20" - verify_openshift_versions "4.5" "4.6" - verify_namespace "${NS}" - verify_required_secrets "${RELEASE}-creds" - verify_optional_secrets "${RELEASE}-certs" - verify_alpha_snap_resources - verify_beta_snap_requirements - verify_helm_3 -} - -# -# verify-csi-vxflexos method -function verify-csi-vxflexos() { - verify_k8s_versions "1.17" "1.20" - verify_openshift_versions "4.5" "4.6" - verify_namespace "${NS}" - verify_required_secrets "${RELEASE}-creds" - verify_sdc_installation - verify_alpha_snap_resources - verify_beta_snap_requirements - verify_helm_3 -} - -# verify-csi-powerstore method -function verify-csi-powerstore() { - verify_k8s_versions "1.17" "1.20" - verify_openshift_versions "4.5" "4.6" - verify_namespace "${NS}" - verify_required_secrets "${RELEASE}-creds" - verify_alpha_snap_resources - verify_beta_snap_requirements - verify_powerstore_node_configuration - verify_helm_3 -} +# source-verify-driver will call the proper method to source verification method scripts +function source-verify-driver() { + if [ -z "${1}" ]; then + decho "Expected one argument, the driver name, to verify-driver. 
Received none." + exit $EXIT_ERROR + fi + local D="${1}" -# verify-csi-unity method -function verify-csi-unity() { - verify_k8s_versions "1.17" "1.20" - verify_openshift_versions "4.5" "4.6" - verify_namespace "${NS}" - verify_required_secrets "${RELEASE}-creds" - verify_required_secrets "${RELEASE}-certs-0" - verify_alpha_snap_resources - verify_unity_protocol_installation - verify_beta_snap_requirements - verify_helm_3 + # check if a script, matching a specific name, exists + local SCRIPTNAME="verify-$D.sh" + # check if the script exists, and source it + if [ -f "${SCRIPTDIR}/${SCRIPTNAME}" ]; then + source "${SCRIPTDIR}/${SCRIPTNAME}" + else + echo "Unable to find ${SCRIPTDIR}/${SCRIPTNAME}" + fi } -# if testing routines are found, source them for possible execution -if [ -f "${SCRIPTDIR}/test-functions.sh" ]; then - source "${SCRIPTDIR}/test-functions.sh" -fi # # verify-driver will call the proper method to verify a specific driver @@ -99,6 +49,8 @@ function verify-driver() { exit $EXIT_ERROR fi local D="${1}" + # source a script containing verification methods + source-verify-driver "${D}" # check if a verify-$DRIVER function exists # if not, error and exit # if yes, check to see if it should be run and run it @@ -130,7 +82,6 @@ function usage() { decho " --skip-verify-node Skip worker node verification checks" decho " --release[=] Name to register with helm, default value will match the driver name" decho " --node-verify-user[=] Username to SSH to worker nodes as, used to validate node requirements. Default is root" - decho " --snapshot-crd Signifies that the Snapshot CRDs will be installed as part of installation." decho " -h Help" decho @@ -143,63 +94,6 @@ function header() { echo "|- Kubernetes Version: ${kMajorVersion}.${kMinorVersion}" } -# Check if the SDC is installed and the kernel module loaded -function verify_sdc_installation() { - if [ ${NODE_VERIFY} -eq 0 ]; then - return - fi - log step "Verifying the SDC installation" - - local SDC_MINION_NODES=$(run_command kubectl get nodes -o wide | grep -v -e master -e INTERNAL -e infra | awk ' { print $6; }') - - error=0 - missing=() - for node in $SDC_MINION_NODES; do - # check is the scini kernel module is loaded - run_command ssh ${NODEUSER}@$node "/sbin/lsmod | grep scini" >/dev/null 2>&1 - rv=$? 
- if [ $rv -ne 0 ]; then - missing+=($node) - error=1 - found_warning "SDC was not found on node: $node" - fi - done - check_error error -} - -function verify_powerstore_node_configuration() { - if [ ${NODE_VERIFY} -eq 0 ]; then - return - fi - - log step "Verifying PowerStore node configuration" - decho - - if ls "${VALUES}" >/dev/null; then - if grep -c "scsiProtocol:[[:blank:]]\+FC" "${VALUES}" >/dev/null; then - log arrow - verify_fc_installation - elif grep -c "scsiProtocol:[[:blank:]]\+ISCSI" "${VALUES}" >/dev/null; then - log arrow - verify_iscsi_installation "small" - elif grep -c "scsiProtocol:[[:blank:]]\+auto" "${VALUES}" >/dev/null; then - log arrow - verify_iscsi_installation "small" - log arrow - verify_fc_installation "small" - elif grep -c "scsiProtocol:[[:blank:]]\+None" "${VALUES}" >/dev/null; then - log step_warning - found_warning "Neither FC nor iSCSI connection is activated, please be sure that NFS settings are correct" - else - log step_failure - found_error "Incorrect scsiProtocol value, must be 'FC', 'ISCSI', 'auto' or 'None'" - fi - else - log step_failure - found_error "${VALUES} doesn't exists" - fi -} - # Check if the iSCSI client is installed function verify_iscsi_installation() { if [ ${NODE_VERIFY} -eq 0 ]; then @@ -228,48 +122,6 @@ function verify_iscsi_installation() { check_error error } - -function verify_unity_protocol_installation() { -if [ ${NODE_VERIFY} -eq 0 ]; then - return - fi - - log smart_step "Verifying sshpass installation.." - SSHPASS=$(which sshpass) - if [ -z "$SSHPASS" ]; then - found_warning "sshpass is not installed. It is mandatory to have ssh pass software for multi node kubernetes setup." - fi - - - log smart_step "Verifying iSCSI installation" "$1" - - error=0 - for node in $MINION_NODES; do - # check if the iSCSI client is installed - echo - echo -n "Enter the ${NODEUSER} password of ${node}: " - read -s nodepassword - echo - echo "$nodepassword" > protocheckfile - chmod 0400 protocheckfile - unset nodepassword - run_command sshpass -f protocheckfile ssh -o StrictHostKeyChecking=no ${NODEUSER}@"${node}" "cat /etc/iscsi/initiatorname.iscsi" > /dev/null 2>&1 - rv=$? - if [ $rv -ne 0 ]; then - error=1 - found_warning "iSCSI client is either not found on node: $node or not able to verify" - fi - run_command sshpass -f protocheckfile ssh -o StrictHostKeyChecking=no ${NODEUSER}@"${node}" "pgrep iscsid" > /dev/null 2>&1 - rv1=$? - if [ $rv1 -ne 0 ]; then - error=1 - found_warning "iscsid service is either not running on node: $node or not able to verify" - fi - rm -f protocheckfile - done - check_error error -} - # Check if the fc is installed function verify_fc_installation() { if [ ${NODE_VERIFY} -eq 0 ]; then @@ -429,32 +281,28 @@ function verify_alpha_snap_resources() { check_error error } -# verify that the requirements for beta snapshot support exist -function verify_beta_snap_requirements() { - log step "Verifying beta snapshot support" +# verify that the requirements for snapshot support exist +function verify_snap_requirements() { + log step "Verifying snapshot support" decho log arrow - log smart_step "Verifying that beta snapshot CRDs are available" "small" + log smart_step "Verifying that snapshot CRDs are available" "small" error=0 # check for the CRDs. These are required for installation CRDS=("VolumeSnapshotClasses" "VolumeSnapshotContents" "VolumeSnapshots") for C in "${CRDS[@]}"; do # Verify if snapshot related CRDs are there on the system. If not install them. 
- run_command kubectl explain ${C} 2> /dev/null | grep "^VERSION.*v1beta1$" --quiet + run_command kubectl explain ${C} 2>&1 >/dev/null if [ $? -ne 0 ]; then error=1 - if [ "${INSTALL_CRD}" == "yes" ]; then - found_warning "The beta CRD for ${C} is not installed. They will be installed because --snapshot-crd was specified" - else - found_error "The beta CRD for ${C} is not installed. These can be installed by specifying --snapshot-crd during installation" - fi + found_error "The CRD for ${C} is not installed. These need to be installed by the Kubernetes administrator" fi done check_error error log arrow - log smart_step "Verifying that beta snapshot controller is available" "small" + log smart_step "Verifying that the snapshot controller is available" "small" error=0 # check for the snapshot-controller. These are strongly suggested but not required @@ -514,20 +362,12 @@ function summary() { decho log section "Verification Complete - ${VERSTATUS}" # print all the WARNINGS - NON_CRD_WARNINGS=0 if [ "${#WARNINGS[@]}" -ne 0 ]; then log warnings for E in "${WARNINGS[@]}"; do decho "- ${E}" - decho ${E} | grep --quiet "^The beta CRD for VolumeSnapshot" - if [ $? -ne 0 ]; then - NON_CRD_WARNINGS=1 - fi done RC=$EXIT_WARNING - if [ "${INSTALL_CRD}" == "yes" -a ${NON_CRD_WARNINGS} -eq 0 ]; then - RC=$EXIT_SUCCESS - fi fi # print all the ERRORS @@ -593,8 +433,6 @@ EXIT_ERROR=99 WARNINGS=() ERRORS=() -INSTALL_CRD="no" - # make sure kubectl is available kubectl --help >&/dev/null || { decho "kubectl required for verification... exiting" @@ -619,10 +457,6 @@ while getopts ":h-:" optchar; do case "${optchar}" in -) case "${OPTARG}" in - # INSTALL_CRD. Signifies that we were asked to install the CRDs - snapshot-crd) - INSTALL_CRD="yes" - ;; skip-verify-node) NODE_VERIFY=0 ;; diff --git a/docker.mk b/docker.mk index 71333e4..f254e6b 100644 --- a/docker.mk +++ b/docker.mk @@ -1,14 +1,19 @@ # docker makefile, included from Makefile, will build/push images with docker or podman -# Includes the following generated file to get semantic version information -include semver.mk +docker-build: + @echo "Building docker image: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" + docker build -t "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" --build-arg GOPROXY=$(GOPROXY) --build-arg GOVERSION=$(GOVERSION) . -docker: - @echo "Base Images is set to: $(BASEIMAGE)" +docker-build-image-push: + @echo "Pushing: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" + docker push "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" + +podman-build: + @echo "Base Image is set to: $(BASEIMAGE)" @echo "Building: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" - $(BUILDER) build -t "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" --target $(BUILDSTAGE) --build-arg GOPROXY=$(GOPROXY) --build-arg BASEIMAGE=$(BASEIMAGE) --build-arg GOVERSION=$(GOVERSION) . + $(BUILDER) build -t "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" -f Dockerfile.podman --target $(BUILDSTAGE) --build-arg GOPROXY=$(GOPROXY) --build-arg BASEIMAGE=$(BASEIMAGE) --build-arg GOVERSION=$(GOVERSION) . 
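
A minimal usage sketch of the new build targets, assuming REGISTRY, IMAGENAME, IMAGETAG, GOPROXY, GOVERSION and BASEIMAGE are supplied on the make command line and forwarded to docker.mk (all values below are illustrative placeholders, not values taken from this repository):

    make podman-build REGISTRY=localhost:5000 IMAGENAME=csi-isilon IMAGETAG=v1.5.0 \
        GOPROXY=https://proxy.golang.org GOVERSION=1.15 BASEIMAGE=registry.access.redhat.com/ubi8/ubi-minimal
    make podman-build-image-push REGISTRY=localhost:5000 IMAGENAME=csi-isilon IMAGETAG=v1.5.0

The dev-build and dev-build-image-push targets follow the same pattern but invoke docker against the plain Dockerfile instead of Dockerfile.podman, as shown in the push targets below.
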
-push: +podman-build-image-push: @echo "Pushing: $(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" $(BUILDER) push "$(REGISTRY)/$(IMAGENAME):$(IMAGETAG)" diff --git a/go.mod b/go.mod index 198b4a4..d1d67b0 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,27 @@ module github.com/dell/csi-isilon require ( - github.com/DATA-DOG/godog v0.7.13 - github.com/Showmax/go-fqdn v0.0.0-20180501083314-6f60894d629f + github.com/Showmax/go-fqdn v1.0.0 github.com/akutz/gournal v0.5.0 - github.com/container-storage-interface/spec v1.1.0 - github.com/dell/gofsutil v1.1.0 - github.com/dell/goisilon v1.3.0 - github.com/golang/protobuf v1.3.2 + github.com/container-storage-interface/spec v1.2.0 + github.com/cucumber/gherkin-go/v9 v9.2.0 + github.com/cucumber/godog v0.10.0 + github.com/cucumber/messages-go/v10 v10.0.3 + github.com/dell/gocsi v1.2.3 + github.com/dell/gofsutil v1.5.0 + github.com/dell/goisilon v1.4.0 + github.com/fsnotify/fsnotify v1.4.7 + github.com/golang/protobuf v1.4.2 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.3 github.com/kubernetes-csi/csi-lib-utils v0.7.0 - github.com/rexray/gocsi v1.1.0 - github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.4.0 + github.com/sirupsen/logrus v1.6.0 + github.com/stretchr/testify v1.6.1 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 - google.golang.org/grpc v1.26.0 + google.golang.org/grpc v1.27.0 + gopkg.in/yaml.v2 v2.2.8 k8s.io/apimachinery v0.18.6 k8s.io/client-go v0.18.6 ) -go 1.13 +go 1.15 diff --git a/go.sum b/go.sum index 7abbc94..9ec3cd3 100644 --- a/go.sum +++ b/go.sum @@ -16,6 +16,8 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Showmax/go-fqdn v0.0.0-20180501083314-6f60894d629f h1:JqQetNUOVIen9o9K9c+BHgYePFGXQmedq/A6F58Xu+w= github.com/Showmax/go-fqdn v0.0.0-20180501083314-6f60894d629f/go.mod h1:nxfWvpOWKx1oAU7G3U8UYWL/iY6EKdjjv1w/S8HDsvg= +github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= +github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84= github.com/akutz/gournal v0.5.0 h1:ELlKqTTp9dmaaadDvO19YxUmdMghYuSi23AxoSL/g98= @@ -23,6 +25,7 @@ github.com/akutz/gournal v0.5.0/go.mod h1:w7Ucz8IOvtgsEL1321IY8bIUoASU/khBjAy/L6 github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aslakhellesoy/gox v1.0.100/go.mod h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -31,6 +34,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/container-storage-interface/spec v1.1.0 h1:qPsTqtR1VUPvMPeK0UnCZMtXaKGyyLPG8gj/wG6VqMs= github.com/container-storage-interface/spec 
v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/container-storage-interface/spec v1.2.0 h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= @@ -41,14 +46,27 @@ github.com/coreos/go-systemd v0.0.0-20190612170431-362f06ec6bc1 h1:3zpwNeYYPklii github.com/coreos/go-systemd v0.0.0-20190612170431-362f06ec6bc1/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cucumber/gherkin-go/v11 v11.0.0/go.mod h1:CX33k2XU2qog4e+TFjOValoq6mIUq0DmVccZs238R9w= +github.com/cucumber/gherkin-go/v9 v9.2.0/go.mod h1:W/+Z5yOowYWXRMlC6lJvM9LFDAFfsicZ1sstjPKfWWQ= +github.com/cucumber/godog v0.10.0/go.mod h1:0Q+MOUg8Z9AhzLV+nNMbThQ2x1b17yYwGyahApTLjJA= +github.com/cucumber/messages-go/v10 v10.0.1/go.mod h1:kA5T38CBlBbYLU12TIrJ4fk4wSkVVOgyh7Enyy8WnSg= +github.com/cucumber/messages-go/v10 v10.0.3/go.mod h1:9jMZ2Y8ZxjLY6TG2+x344nt5rXstVVDYSdS5ySfI1WY= +github.com/cucumber/messages-go/v9 v9.0.3/go.mod h1:TICon2O2emBWMY1eeQvog6b+zK5c+puAFO6avjzC/JA= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dell/gocsi v1.2.3 h1:bm9/YHLW1XXTA4lwVeBnAo73tMTwIEXtBsKtGMiGM/k= +github.com/dell/gocsi v1.2.3/go.mod h1:XdKbyrU7s0l/TkTDFQyW4yY10pzQjGRmpiguk+WKL58= github.com/dell/gofsutil v1.1.0 h1:bIi/hCqdkWIL1owEKHOZ/+FF8DbDwTZ70OM4YMTR1/o= github.com/dell/gofsutil v1.1.0/go.mod h1:48eHpMRl0+07uGEnQ7/RE6pTOAVEl74utlGjd0QX/Os= +github.com/dell/gofsutil v1.5.0 h1:CY9rz8RsXREpXvoyektCd3GgOGmG+PRkwPWKec7td+k= +github.com/dell/gofsutil v1.5.0/go.mod h1:98Wpcg7emz4iGgY16fd4MKpnal2SX2hBiwP5ghHlvhg= github.com/dell/goisilon v1.3.0 h1:OV1vCpq4ILsheas2vRabp4u5AYV1/wAeWXknMB5UqIc= github.com/dell/goisilon v1.3.0/go.mod h1:FnAVgQNR6ijG888MC9TlWV2t8IazURs4WM73ZNBQaSI= +github.com/dell/goisilon v1.4.0 h1:1zktsa0RrUzXv98/DUnU1vU6f58M/xcMr4slx+V1bPY= +github.com/dell/goisilon v1.4.0/go.mod h1:FnAVgQNR6ijG888MC9TlWV2t8IazURs4WM73ZNBQaSI= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -73,6 +91,7 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1 github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -90,12 +109,21 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= @@ -114,6 +142,7 @@ github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -121,8 +150,13 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.1 
h1:LgwPEIdyJmF9Ug9nINVNspG6Z6P8/TM0yKdQ5h3VQaQ= github.com/grpc-ecosystem/grpc-gateway v1.9.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.2.1/go.mod h1:OSvLJ662Jim8hMM+gWGyhktyWk2xPCnWMc7DWIqtkGA= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= @@ -140,17 +174,22 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/csi-lib-utils v0.7.0 h1:t1cS7HTD7z5D7h9iAdjWuHtMxJPb9s1fIv34rxytzqs= github.com/kubernetes-csi/csi-lib-utils v0.7.0/go.mod h1:bze+2G9+cmoHxN6+WyG1qT4MDxgZJMLGwc7V4acPNm0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -161,6 +200,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -192,13 +232,13 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/rexray/gocsi v1.1.0 h1:MkstGTZ1x4uf9AtwhOwzovYYYkPM5ZCRFU8ek9+rAy0= -github.com/rexray/gocsi v1.1.0/go.mod h1:kr6L70GxUU6Gu8ehq2dWQmwdILR1tmE05c/OYaTvlx0= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/sirupsen/logrus v0.0.0-20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -214,6 +254,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/thecodeteam/gosync v0.1.0 h1:RcD9owCaiK0Jg1rIDPgirdcLCL1jCD6XlDVSg0MfHmE= github.com/thecodeteam/gosync v0.1.0/go.mod h1:43QHsngcnWc8GE1aCmi7PEypslflHjCzXFleuWKEb00= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -296,6 +338,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -312,10 +355,21 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -327,8 +381,10 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/helm/csi-isilon/Chart.yaml b/helm/csi-isilon/Chart.yaml index 72abe75..d44c948 100644 --- a/helm/csi-isilon/Chart.yaml +++ b/helm/csi-isilon/Chart.yaml 
@@ -1,6 +1,6 @@ name: csi-isilon -version: 1.4.0 -appVersion: 1.4.0 +version: 1.5.0 +appVersion: 1.5.0 description: | PowerScale CSI (Container Storage Interface) driver Kubernetes integration. This chart includes everything required to provision via CSI as diff --git a/helm/csi-isilon/driver-image.yaml b/helm/csi-isilon/driver-image.yaml index dd01007..3e8aff7 100644 --- a/helm/csi-isilon/driver-image.yaml +++ b/helm/csi-isilon/driver-image.yaml @@ -1,4 +1,4 @@ # IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. images: # "images.driver" defines the container images used for the driver container. - driver: dellemc/csi-isilon:v1.4.0.000R + driver: dellemc/csi-isilon:v1.5.0 diff --git a/helm/csi-isilon/k8s-1.17-values.yaml b/helm/csi-isilon/k8s-1.17-values.yaml deleted file mode 100644 index 123c118..0000000 --- a/helm/csi-isilon/k8s-1.17-values.yaml +++ /dev/null @@ -1,23 +0,0 @@ -kubeversion: "v1.17" - -# IT IS RECOMMENDED YOU DO NOT CHANGE THE IMAGES TO BE DOWNLOADED. -images: - # "images.attacher" defines the container images used for the csi attacher - # container. - attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 - - # "images.provisioner" defines the container images used for the csi provisioner - # container. - provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 - - # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2 - - # "images.registrar" defines the container images used for the csi registrar - # container. - registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1 - - # "images.resizer" defines the container images used for the csi resizer - # container. - resizer: quay.io/k8scsi/csi-resizer:v1.0.0 - diff --git a/helm/csi-isilon/k8s-1.18-values.yaml b/helm/csi-isilon/k8s-1.18-values.yaml index 5cd4d1f..e592943 100644 --- a/helm/csi-isilon/k8s-1.18-values.yaml +++ b/helm/csi-isilon/k8s-1.18-values.yaml @@ -4,20 +4,20 @@ kubeversion: "v1.18" images: # "images.attacher" defines the container images used for the csi attacher # container. - attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 + attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0 # "images.provisioner" defines the container images used for the csi provisioner # container. - provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 + provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2 + snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3 # "images.registrar" defines the container images used for the csi registrar # container. - registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1 + registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0 # "images.resizer" defines the container images used for the csi resizer # container. - resizer: quay.io/k8scsi/csi-resizer:v1.0.0 + resizer: quay.io/k8scsi/csi-resizer:v1.1.0 diff --git a/helm/csi-isilon/k8s-1.19-values.yaml b/helm/csi-isilon/k8s-1.19-values.yaml index 1bbbdf5..4c776f6 100644 --- a/helm/csi-isilon/k8s-1.19-values.yaml +++ b/helm/csi-isilon/k8s-1.19-values.yaml @@ -4,20 +4,20 @@ kubeversion: "v1.19" images: # "images.attacher" defines the container images used for the csi attacher # container. 
- attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 + attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0 # "images.provisioner" defines the container images used for the csi provisioner # container. - provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 + provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2 + snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3 # "images.registrar" defines the container images used for the csi registrar # container. - registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1 + registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0 # "images.resizer" defines the container images used for the csi resizer # container. - resizer: quay.io/k8scsi/csi-resizer:v1.0.0 + resizer: quay.io/k8scsi/csi-resizer:v1.1.0 diff --git a/helm/csi-isilon/k8s-1.20-values.yaml b/helm/csi-isilon/k8s-1.20-values.yaml index fdba4b0..580ef3a 100644 --- a/helm/csi-isilon/k8s-1.20-values.yaml +++ b/helm/csi-isilon/k8s-1.20-values.yaml @@ -4,20 +4,20 @@ kubeversion: "v1.20" images: # "images.attacher" defines the container images used for the csi attacher # container. - attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 + attacher: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0 # "images.provisioner" defines the container images used for the csi provisioner # container. - provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 + provisioner: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0 # "images.snapshotter" defines the container image used for the csi snapshotter - snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2 + snapshotter: k8s.gcr.io/sig-storage/csi-snapshotter:v4.0.0 # "images.registrar" defines the container images used for the csi registrar # container. - registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1 + registrar: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0 # "images.resizer" defines the container images used for the csi resizer # container. - resizer: quay.io/k8scsi/csi-resizer:v1.0.0 + resizer: quay.io/k8scsi/csi-resizer:v1.1.0 diff --git a/helm/csi-isilon/templates/controller.yaml b/helm/csi-isilon/templates/controller.yaml index d754a89..7922f57 100644 --- a/helm/csi-isilon/templates/controller.yaml +++ b/helm/csi-isilon/templates/controller.yaml @@ -99,6 +99,10 @@ spec: {{- else }} replicas: {{ required "Must provide the number of controller instances to create." .Values.controllerCount }} {{- end }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 template: metadata: labels: @@ -159,6 +163,7 @@ spec: - "--v=5" - "--feature-gates=Topology=true" - "--leader-election" + - "--extra-create-metadata" env: - name: ADDRESS value: /var/run/csi/csi.sock @@ -192,28 +197,16 @@ spec: value: controller - name: X_CSI_DEBUG value: "{{ .Values.enableDebug }}" + - name: X_CSI_ISI_INSECURE + value: "{{ .Values.isiInsecure }}" - name: X_CSI_VERBOSE value: "{{ .Values.verbose }}" - - name: X_CSI_ISI_ENDPOINT - value: {{ required "Must provide an Isilon REST API gateway HTTPS endpoint." 
.Values.isiIP }} - name: X_CSI_ISI_PORT value: "{{ .Values.isiPort }}" - - name: X_CSI_ISI_USER - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-creds - key: username - - name: X_CSI_ISI_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-creds - key: password - name: X_CSI_ISI_AUTOPROBE value: "{{ .Values.autoProbe }}" - name: X_CSI_ISI_QUOTA_ENABLED value: "{{ .Values.enableQuota }}" - - name: X_CSI_ISI_INSECURE - value: "{{ .Values.isiInsecure }}" - name: X_CSI_ISI_ACCESS_ZONE value: {{ .Values.isiAccessZone }} - name: X_CSI_CUSTOM_TOPOLOGY_ENABLED @@ -227,16 +220,30 @@ spec: fieldRef: fieldPath: spec.nodeName - name: SSL_CERT_DIR - value: /certs + value: /certs + - name: X_CSI_ISILON_CONFIG_PATH + value: /isilon-configs/config volumeMounts: - name: socket-dir mountPath: /var/run/csi - name: certs mountPath: /certs - readOnly: true + readOnly: true + - name: isilon-configs + mountPath: /isilon-configs volumes: - name: socket-dir emptyDir: - name: certs + projected: + sources: +{{- range $i, $e := until (int .Values.certSecretCount ) }} + - secret: + name: {{ print $.Release.Name "-certs-" $e }} + items: + - key: cert-{{ $e }} + path: cert-{{ $e }} +{{- end }} + - name: isilon-configs secret: - secretName: {{ .Release.Name }}-certs + secretName: {{ .Release.Name }}-creds diff --git a/helm/csi-isilon/templates/node.yaml b/helm/csi-isilon/templates/node.yaml index a66b166..c8c8d58 100644 --- a/helm/csi-isilon/templates/node.yaml +++ b/helm/csi-isilon/templates/node.yaml @@ -30,7 +30,6 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update"] - --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 @@ -61,8 +60,10 @@ spec: spec: serviceAccount: {{ .Release.Name }}-node hostNetwork: true - containers: + dnsPolicy: ClusterFirstWithHostNet + containers: - name: driver + command: ["/csi-isilon"] securityContext: privileged: true capabilities: @@ -70,7 +71,6 @@ spec: allowPrivilegeEscalation: true image: {{ required "Must provide the Isilon driver container image." .Values.images.driver }} imagePullPolicy: Always - command: [ "/csi-isilon" ] env: - name: CSI_ENDPOINT value: /var/lib/kubelet/plugins/csi-isilon/csi_sock @@ -79,47 +79,39 @@ spec: - name: X_CSI_DEBUG value: "{{ .Values.enableDebug }}" - name: X_CSI_ISI_INSECURE - value: "{{ .Values.isiInsecure }}" + value: "{{ .Values.isiInsecure }}" + - name: X_CSI_ALLOWED_NETWORKS + value: "{{ .Values.allowedNetworks }}" - name: X_CSI_VERBOSE - value: "{{ .Values.verbose }}" + value: "{{ .Values.verbose }}" - name: X_CSI_PRIVATE_MOUNT_DIR value: "/var/lib/kubelet/plugins/csi-isilon/disks" - - name: X_CSI_ISI_ENDPOINT - value: {{ required "Must provide an Isilon REST API gateway HTTPS endpoint." 
.Values.isiIP }} - name: X_CSI_ISI_PORT value: "{{ .Values.isiPort }}" - name: X_CSI_ISI_PATH - value: {{ .Values.isiPath }} + value: {{ .Values.isiPath }} - name: X_CSI_ISILON_NO_PROBE_ON_START - value: "{{ .Values.noProbeOnStart }}" + value: "{{ .Values.noProbeOnStart }}" - name: X_CSI_ISILON_NFS_V3 - value: "{{ .Values.nfsV3 }}" - - name: X_CSI_ISI_USER - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-creds - key: username - - name: X_CSI_ISI_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-creds - key: password + value: "{{ .Values.nfsV3 }}" - name: X_CSI_ISI_AUTOPROBE - value: "{{ .Values.autoProbe }}" + value: "{{ .Values.autoProbe }}" - name: X_CSI_NODE_NAME valueFrom: fieldRef: - fieldPath: spec.nodeName + fieldPath: spec.nodeName - name: X_CSI_NODE_IP valueFrom: fieldRef: - fieldPath: status.hostIP + fieldPath: status.hostIP - name: SSL_CERT_DIR - value: /certs + value: /certs - name: X_CSI_ISI_QUOTA_ENABLED value: "{{ .Values.enableQuota }}" - name: X_CSI_CUSTOM_TOPOLOGY_ENABLED value: "{{ .Values.enableCustomTopology }}" + - name: X_CSI_ISILON_CONFIG_PATH + value: /isilon-configs/config volumeMounts: - name: driver-path mountPath: /var/lib/kubelet/plugins/csi-isilon @@ -129,18 +121,17 @@ spec: mountPath: /var/lib/kubelet/pods mountPropagation: "Bidirectional" - name: dev - mountPath: /dev + mountPath: /dev - name: certs mountPath: /certs - readOnly: true + readOnly: true + - name: isilon-configs + mountPath: /isilon-configs - name: registrar image: {{ required "Must provide the CSI node registrar container image." .Values.images.registrar }} args: - "--v=5" - "--csi-address=$(ADDRESS)" - #- --mode=node-register - #- --driver-requires-attachment=true - #- --pod-info-mount-version=v1 - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-isilon/csi_sock env: - name: ADDRESS @@ -177,5 +168,15 @@ spec: path: /dev type: Directory - name: certs + projected: + sources: +{{- range $i, $e := until (int .Values.certSecretCount ) }} + - secret: + name: {{ print $.Release.Name "-certs-" $e }} + items: + - key: cert-{{ $e }} + path: cert-{{ $e }} +{{- end }} + - name: isilon-configs secret: - secretName: {{ .Release.Name }}-certs + secretName: {{ .Release.Name }}-creds diff --git a/helm/csi-isilon/templates/storageclass.yaml b/helm/csi-isilon/templates/storageclass.yaml deleted file mode 100644 index 38bb6ab..0000000 --- a/helm/csi-isilon/templates/storageclass.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ required "Must provide a storage class name." .Values.storageClass.name}} - annotations: - storageclass.beta.kubernetes.io/is-default-class: {{ .Values.storageClass.isDefault | quote }} - "helm.sh/resource-policy": keep -provisioner: csi-isilon.dellemc.com -reclaimPolicy: {{ required "Must provide a storage class reclaim policy." .Values.storageClass.reclaimPolicy }} -allowVolumeExpansion: true -parameters: - AccessZone: {{ required "Must provide an access zone." .Values.storageClass.accessZone }} - IsiPath: {{ required "Must provide an isiPath." .Values.storageClass.isiPath }} - # AccessZone groupnet service IP. Update AzServiceIP in values.yaml if different than isiIP. - # AzServiceIP: {{ .Values.storageClass.AzServiceIP }} - {{ if .Values.storageClass.AzServiceIP }} - AzServiceIP: {{ .Values.storageClass.AzServiceIP }} - {{ else }} - AzServiceIP: {{ required "A valid .Values.isiIP is required when .Values.storageClass.AzServiceIP is not defined." 
.Values.isiIP }} - {{ end }} - # When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled", - # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to - # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export - {{ if .Values.storageClass.rootClientEnabled }} - RootClientEnabled: {{ .Values.storageClass.rootClientEnabled | quote }} - {{ else }} - RootClientEnabled: "false" - {{ end }} diff --git a/helm/csi-isilon/templates/volumesnapshotclass.yaml b/helm/csi-isilon/templates/volumesnapshotclass.yaml index 2b5e8de..68c65fc 100644 --- a/helm/csi-isilon/templates/volumesnapshotclass.yaml +++ b/helm/csi-isilon/templates/volumesnapshotclass.yaml @@ -1,4 +1,8 @@ +{{- if eq .Values.kubeversion "v1.20" }} +apiVersion: snapshot.storage.k8s.io/v1 +{{- else }} apiVersion: snapshot.storage.k8s.io/v1beta1 +{{- end}} kind: VolumeSnapshotClass metadata: name: "isilon-snapclass" @@ -6,4 +10,4 @@ driver: csi-isilon.dellemc.com deletionPolicy: Delete parameters: #IsiPath should match with respective storageClass IsiPath - IsiPath: {{ required "Must provide an isiPath." .Values.storageClass.isiPath }} + IsiPath: {{ required "Must provide an isiPath." .Values.isiPath }} diff --git a/helm/csi-isilon/values.yaml b/helm/csi-isilon/values.yaml index a68abe1..e7587d9 100644 --- a/helm/csi-isilon/values.yaml +++ b/helm/csi-isilon/values.yaml @@ -1,5 +1,11 @@ -# "isiIP" defines the HTTPs endpoint of the PowerScale OneFS API server -isiIP: 1.1.1.1 +# Represents number of certificate secrets, which user is going to create for ssl authentication. (isilon-cert-0..isilon-cert-n) +# Minimum value should be 1 +certSecretCount: 1 + +# Custom networks for PowerScale export +# Please specify list of networks which can be used for NFS I/O traffic, CIDR format should be used +# ex: 192.168.1.0/24 +allowedNetworks: [] # "isiPort" defines the HTTPs port number of the PowerScale OneFS API server isiPort: "8080" @@ -50,30 +56,6 @@ nfsV3: "false" # If enableCustomTopology is set to true, then do not specify allowedTopologies in storage class enableCustomTopology: "false" -# The installation process will generate multiple storageclasses based on these parameters. -# Only the primary storageclass for the driver will be marked default if specified. -storageClass: - # "storageClass.name" defines the name of the storage class to be defined. - name: isilon - - # "storageClass.isDefault" defines whether the primary storage class should be the # default. - isDefault: "true" - - # "storageClass.reclaimPolicy" defines what will happen when a volume is - # removed from the Kubernetes API. Valid values are "Retain" and "Delete". - reclaimPolicy: Delete - accessZone: System - # The default base path for the volumes to be created, this will be used if a storage class does not have the IsiPath parameter specified - # Ensure that this path exists on Isilon. 
- isiPath: "/ifs/data/csi" - #Access Zone service IP if different from isiIP, specify here and refer in storageClass - #AzServiceIP : 192.168.2.1 - - # When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled", - # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to - # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export - rootClientEnabled: "false" - controller: # Define nodeSelector for the controllers, if required diff --git a/helm/emptysecret.yaml b/helm/emptysecret.yaml index cc04647..2462770 100644 --- a/helm/emptysecret.yaml +++ b/helm/emptysecret.yaml @@ -1,7 +1,8 @@ apiVersion: v1 kind: Secret metadata: - name: isilon-certs + name: isilon-certs-0 namespace: isilon type: Opaque -data: \ No newline at end of file +data: + cert-0: "" diff --git a/helm/samples/storageclass/isilon-mountoption.yaml b/helm/samples/storageclass/isilon-mountoption.yaml new file mode 100644 index 0000000..8cceb7e --- /dev/null +++ b/helm/samples/storageclass/isilon-mountoption.yaml @@ -0,0 +1,35 @@ +# This is a sample manifest for utilizing the mount options. +# PVCs created using this storage class will be scheduled +# only on the nodes with access to Isilon + +# Provide mount options through "mountOptions" attribute +# to create PVCs with mount options. + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: isilon-mountoption +provisioner: csi-isilon.dellemc.com +reclaimPolicy: Delete +allowVolumeExpansion: true +parameters: + AccessZone: System + IsiPath: "/ifs/data/csi" + # AccessZone groupnet service IP. Update AzServiceIP in values.yaml if different than isiIP. + #AzServiceIP : 192.168.2.1 + # When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled", + # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to + # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export + RootClientEnabled: "false" + # Name of PowerScale cluster where pv will be provisioned + # This name should match with name of one of the cluster configs in isilon-creds secret + # If this parameter is not specified, then default cluster config in isilon-creds secret will be considered if available + #ClusterName: "" + +# volumeBindingMode controls when volume binding and dynamic provisioning should occur. +# Immediate mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created +# WaitForFirstConsumer mode will delay the binding and provisioning of a PersistentVolume +# until a Pod using the PersistentVolumeClaim is created +volumeBindingMode: Immediate + +mountOptions: ["", "", ..., ""] diff --git a/helm/samples/storageclass/isilon-plain.yaml b/helm/samples/storageclass/isilon-plain.yaml new file mode 100644 index 0000000..b0efd87 --- /dev/null +++ b/helm/samples/storageclass/isilon-plain.yaml @@ -0,0 +1,28 @@ +# This is a sample manifest to create plain storageclass without any features like topology, mountOptions etc. + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: isilon-plain +provisioner: csi-isilon.dellemc.com +reclaimPolicy: Delete +allowVolumeExpansion: true +parameters: + AccessZone: System + IsiPath: "/ifs/data/csi" + # AccessZone groupnet service IP. Update AzServiceIP in values.yaml if different than isiIP. 
+ #AzServiceIP : 192.168.2.1 + # When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled", + # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to + # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export + RootClientEnabled: "false" + # Name of PowerScale cluster where pv will be provisioned + # This name should match with name of one of the cluster configs in isilon-creds secret + # If this parameter is not specified, then default cluster config in isilon-creds secret will be considered if available + #ClusterName: "" + +# volumeBindingMode controls when volume binding and dynamic provisioning should occur. +# Immediate mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created +# WaitForFirstConsumer mode will delay the binding and provisioning of a PersistentVolume +# until a Pod using the PersistentVolumeClaim is created +volumeBindingMode: Immediate diff --git a/helm/samples/storageclass/isilon-topology.yaml b/helm/samples/storageclass/isilon-topology.yaml new file mode 100644 index 0000000..f2e1d5d --- /dev/null +++ b/helm/samples/storageclass/isilon-topology.yaml @@ -0,0 +1,39 @@ +# This is a sample manifest for utilizing the topology feature. +# PVCs created using this storage class will be scheduled +# only on the nodes with access to Isilon + +# Change all instances of to the IP of the PowerScale OneFS API server + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: isilon-topology +provisioner: csi-isilon.dellemc.com +reclaimPolicy: Delete +allowVolumeExpansion: true +parameters: + AccessZone: System + IsiPath: "/ifs/data/csi" + # AccessZone groupnet service IP. Update AzServiceIP in values.yaml if different than isiIP. + #AzServiceIP : 192.168.2.1 + # When a PVC is being created, it takes the storage class' value of "storageclass.rootClientEnabled", + # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to + # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export + RootClientEnabled: "false" + # Name of PowerScale cluster where pv will be provisioned + # This name should match with name of one of the cluster configs in isilon-creds secret + # If this parameter is not specified, then default cluster config in isilon-creds secret will be considered if available + #ClusterName: "" + +# volumeBindingMode controls when volume binding and dynamic provisioning should occur. 
+# Immediate mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created +# WaitForFirstConsumer mode will delay the binding and provisioning of a PersistentVolume +# until a Pod using the PersistentVolumeClaim is created +volumeBindingMode: WaitForFirstConsumer +# allowedTopologies helps scheduling pod on worker nodes which matches all of below expressions +# If enableCustomTopology is set to true in helm values.yaml, then do not specify allowedTopologies +allowedTopologies: + - matchLabelExpressions: + - key: csi-isilon.dellemc.com/ + values: + - csi-isilon.dellemc.com diff --git a/helm/samples/storageclass/isilon.yaml b/helm/samples/storageclass/isilon.yaml index 6a2cb89..8eca78d 100644 --- a/helm/samples/storageclass/isilon.yaml +++ b/helm/samples/storageclass/isilon.yaml @@ -23,6 +23,10 @@ parameters: # which determines, when a node mounts the PVC, in NodeStageVolume, whether to add the k8s node to # the "Root clients" field (when true) or "Clients" field (when false) of the NFS export RootClientEnabled: "false" + # Name of PowerScale cluster where pv will be provisioned + # This name should match with name of one of the cluster configs in isilon-creds secret + # If this parameter is not specified, then default cluster config in isilon-creds secret will be considered if available + #ClusterName: "" # volumeBindingMode controls when volume binding and dynamic provisioning should occur. # Immediate mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created diff --git a/helm/samples/volumesnapshotclass/volsnapclass_beta.yaml b/helm/samples/volumesnapshotclass/volsnapclass_beta.yaml new file mode 100644 index 0000000..2089408 --- /dev/null +++ b/helm/samples/volumesnapshotclass/volsnapclass_beta.yaml @@ -0,0 +1,16 @@ +# For kubernetes version 18 and 19 (beta snaps) +# This is a sample manifest for creating snapshotclass with IsiPath other than default +# pvc is created with sc which has some different IsiPath e.g. /ifs/custom +# to create a snapshot for this pvc volumesnapshotclass must also be initialized with same IsiPath (i.e. /ifs/custom ) to work snapshot feature +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: "isilon-snapclass-custom" +driver: csi-isilon.dellemc.com +#The deletionPolicy of a volume snapshot class can either be Retain or Delete +#If the deletionPolicy is Delete, then the underlying storage snapshot will be deleted along with the VolumeSnapshotContent object. +#If the deletionPolicy is Retain, then both the underlying snapshot and VolumeSnapshotContent remain +deletionPolicy: Delete +parameters: +#IsiPath should match with respective storageClass IsiPath + IsiPath: "/ifs/custom" diff --git a/helm/samples/volumesnapshotclass/volsnapclass_v1.yaml b/helm/samples/volumesnapshotclass/volsnapclass_v1.yaml new file mode 100644 index 0000000..96652d0 --- /dev/null +++ b/helm/samples/volumesnapshotclass/volsnapclass_v1.yaml @@ -0,0 +1,16 @@ +# For kubernetes version 20 (v1 snaps) +# This is a sample manifest for creating snapshotclass with IsiPath other than default +# pvc is created with sc which has some different IsiPath e.g. /ifs/custom +# to create a snapshot for this pvc volumesnapshotclass must also be initialized with same IsiPath (i.e. 
/ifs/custom ) to work snapshot feature +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: "isilon-snapclass-custom" +driver: csi-isilon.dellemc.com +#The deletionPolicy of a volume snapshot class can either be Retain or Delete +#If the deletionPolicy is Delete, then the underlying storage snapshot will be deleted along with the VolumeSnapshotContent object. +#If the deletionPolicy is Retain, then both the underlying snapshot and VolumeSnapshotContent remain +deletionPolicy: Delete +parameters: +#IsiPath should match with respective storageClass IsiPath + IsiPath: "/ifs/custom" diff --git a/helm/secret.json b/helm/secret.json new file mode 100644 index 0000000..e4e4444 --- /dev/null +++ b/helm/secret.json @@ -0,0 +1,20 @@ +{ + "isilonClusters": [ + { + "clusterName": "cluster1", + "username": "user", + "password": "password", + "isiIP": "1.2.3.4", + "isDefaultCluster": true + }, + { + "clusterName": "cluster2", + "username": "user", + "password": "password", + "isiIP": "1.2.3.5", + "isiPort": "8080", + "isiInsecure": true, + "isiPath": "/ifs/data/csi" + } + ] +} diff --git a/helm/secret.yaml b/helm/secret.yaml deleted file mode 100644 index 7bc1d18..0000000 --- a/helm/secret.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: isilon-creds - namespace: isilon -type: Opaque -data: - # set username to the base64 encoded username - username: YWRtaW4= - # set password to the base64 encoded password - password: SXMxbDBu diff --git a/main.go b/main.go index 6baed4e..228dbad 100644 --- a/main.go +++ b/main.go @@ -27,7 +27,7 @@ import ( "strings" "github.com/dell/csi-isilon/provider" - "github.com/rexray/gocsi" + "github.com/dell/gocsi" ) // main is ignored when this package is built as a go plug-in @@ -94,4 +94,7 @@ const usage = ` X_CSI_ISI_ENDPOINT Specifies the name of the Isilon system to interact with. The default value is default. + + X_CSI_ISILON_CONFIG_PATH + Specifies the filepath containing Isilon cluster's config details. 
` diff --git a/overrides.mk b/overrides.mk index e93157a..94eaced 100644 --- a/overrides.mk +++ b/overrides.mk @@ -3,12 +3,15 @@ # # DEFAULT values -DEFAULT_BASEIMAGE="registry.access.redhat.com/ubi8/ubi-minimal:8.2-349" -DEFAULT_GOVERSION="1.13.12" +DEFAULT_BASEIMAGE="registry.access.redhat.com/ubi8/ubi-minimal:8.3-230" +DEFAULT_GOVERSION="1.15.6" DEFAULT_REGISTRY="" DEFAULT_IMAGENAME="isilon" DEFAULT_BUILDSTAGE="final" -DEFAULT_IMAGETAG=$(shell date +%Y%m%d%H%M%S) +ifeq ($(origin BUILD_TIMESTAMP), undefined) +BUILD_TIMESTAMP := $(shell date +%Y%m%d%H%M%S) +endif +DEFAULT_IMAGETAG=$(BUILD_TIMESTAMP) DEFAULT_GOPROXY="direct" # set the BASEIMAGE if needed diff --git a/provider/provider.go b/provider/provider.go index 5630878..7e54c56 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -19,7 +19,7 @@ package provider import ( "github.com/dell/csi-isilon/common/utils" "github.com/dell/csi-isilon/service" - "github.com/rexray/gocsi" + "github.com/dell/gocsi" log "github.com/sirupsen/logrus" "google.golang.org/grpc" ) diff --git a/service/controller.go b/service/controller.go index 94622ea..b2b4a4c 100644 --- a/service/controller.go +++ b/service/controller.go @@ -24,18 +24,17 @@ import ( "strings" "time" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" + fPath "path" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/csi-isilon/common/constants" "github.com/dell/csi-isilon/common/utils" isi "github.com/dell/goisilon" isiApi "github.com/dell/goisilon/api" - log "github.com/sirupsen/logrus" - fPath "path" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // constants @@ -54,6 +53,16 @@ const ( RootClientEnabledParamDefault = "false" DeleteSnapshotMarker = "DELETE_SNAPSHOT" IgnoreDotAndDotDotSubDirs = 2 + ClusterNameParam = "ClusterName" + + // These are available when enabling --extra-create-metadata for the external-provisioner. + csiPersistentVolumeName = "csi.storage.k8s.io/pv/name" + csiPersistentVolumeClaimName = "csi.storage.k8s.io/pvc/name" + csiPersistentVolumeClaimNamespace = "csi.storage.k8s.io/pvc/namespace" + // These map to the above fields in the form of HTTP header names. 
+ headerPersistentVolumeName = "x-csi-pv-name" + headerPersistentVolumeClaimName = "x-csi-pv-claimname" + headerPersistentVolumeClaimNamespace = "x-csi-pv-namespace" ) // validateVolSize uses the CapacityRange range params to determine what size @@ -96,9 +105,31 @@ func (s *service) CreateVolume( isROVolumeFromSnapshot bool snapshotTrackingDir string snapshotTrackingDirEntryForVolume string + clusterName string ) + + params := req.GetParameters() + + if _, ok := params[ClusterNameParam]; ok { + if params[ClusterNameParam] == "" { + clusterName = s.defaultIsiClusterName + } else { + clusterName = params[ClusterNameParam] + } + } + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + // Fetch log handler + ctx, _, runID := GetRunIDLog(ctx) + ctx, log := setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + // auto probe - if err := s.autoProbe(ctx); err != nil { + if err := s.autoProbe(ctx, isiConfig); err != nil { return nil, status.Error(codes.FailedPrecondition, err.Error()) } @@ -108,7 +139,6 @@ func (s *service) CreateVolume( return nil, status.Error(codes.InvalidArgument, err.Error()) } - params := req.GetParameters() if _, ok := params[AccessZoneParam]; ok { if params[AccessZoneParam] == "" { accessZone = s.opts.AccessZone @@ -121,28 +151,28 @@ func (s *service) CreateVolume( } if _, ok := params[IsiPathParam]; ok { if params[IsiPathParam] == "" { - isiPath = s.opts.Path + isiPath = isiConfig.IsiPath } else { isiPath = params[IsiPathParam] } } else { - // use the default isiPath if not setu in the storage class - isiPath = s.opts.Path + // use the default isiPath if not set in the storage class + isiPath = isiConfig.IsiPath } // When custom topology is enabled it takes precedence over the current default behavior // Set azServiceIP to updated endpoint when custom topology is enabled if s.opts.CustomTopologyEnabled { - azServiceIP = s.opts.Endpoint + azServiceIP = isiConfig.IsiIP } else if _, ok := params[AzServiceIPParam]; ok { azServiceIP = params[AzServiceIPParam] if azServiceIP == "" { // use the endpoint if empty in the storage class - azServiceIP = s.opts.Endpoint + azServiceIP = isiConfig.IsiIP } } else { // use the endpoint if not set in the storage class - azServiceIP = s.opts.Endpoint + azServiceIP = isiConfig.IsiIP } if val, ok := params[RootClientEnabledParam]; ok { @@ -160,34 +190,49 @@ func (s *service) CreateVolume( rootClientEnabled = RootClientEnabledParamDefault } + //CSI specific metada for authorization + var headerMetadata = addMetaData(params) + // check volume content source in the request isROVolumeFromSnapshot = false if contentSource = req.GetVolumeContentSource(); contentSource != nil { // Fetch source snapshot ID or volume ID from content source if snapshot := contentSource.GetSnapshot(); snapshot != nil { - sourceSnapshotID = snapshot.GetSnapshotId() + normalizedSnapshotID := snapshot.GetSnapshotId() + + // parse the input snapshot id and fetch it's components + var snapshotSrcClusterName string + sourceSnapshotID, snapshotSrcClusterName, err = utils.ParseNormalizedSnapshotID(ctx, normalizedSnapshotID) + if err != nil { + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "failed to parse snapshot ID '%s', error : '%v'", normalizedSnapshotID, err)) + } + + if snapshotSrcClusterName != "" && snapshotSrcClusterName != clusterName { + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "source snapshot's cluster name '%s' and 
new volume's cluster name '%s' doesn't match", snapshotSrcClusterName, clusterName)) + } + log.Infof("Creating volume from snapshot ID: '%s'", sourceSnapshotID) // Get snapshot path - if snapshotIsiPath, err = s.isiSvc.GetSnapshotIsiPath(isiPath, sourceSnapshotID); err != nil { + if snapshotIsiPath, err = isiConfig.isiSvc.GetSnapshotIsiPath(ctx, isiPath, sourceSnapshotID); err != nil { return nil, status.Error(codes.Internal, err.Error()) } log.Debugf("The Isilon directory path of snapshot is= '%s'", snapshotIsiPath) vcs := req.GetVolumeCapabilities() if len(vcs) == 0 { - return nil, status.Error(codes.InvalidArgument, "volume capabilty is required") + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "volume capabilty is required")) } for _, vc := range vcs { if vc == nil { - return nil, status.Error(codes.InvalidArgument, "volume capabilty is required") + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "volume capabilty is required")) } am := vc.GetAccessMode() if am == nil { - return nil, status.Error(codes.InvalidArgument, "access mode is required") + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "access mode is required")) } if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { @@ -205,23 +250,23 @@ func (s *service) CreateVolume( if isROVolumeFromSnapshot { path = snapshotIsiPath accessZone = constants.DefaultAccessZone - snapshotSrc, err := s.isiSvc.GetSnapshot(sourceSnapshotID) + snapshotSrc, err := isiConfig.isiSvc.GetSnapshot(ctx, sourceSnapshotID) if err != nil { return nil, fmt.Errorf("failed to get snapshot id '%s', error '%v'", sourceSnapshotID, err) } snapshotName := snapshotSrc.Name // Populate names for snapshot's tracking dir, snapshot tracking dir entry for this volume - snapshotTrackingDir = s.isiSvc.GetSnapshotTrackingDirName(snapshotName) + snapshotTrackingDir = isiConfig.isiSvc.GetSnapshotTrackingDirName(snapshotName) snapshotTrackingDirEntryForVolume = fPath.Join(snapshotTrackingDir, req.GetName()) // Check if entry for this volume is present in snapshot tracking dir - if s.isiSvc.IsVolumeExistent(isiPath, "", snapshotTrackingDirEntryForVolume) { + if isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", snapshotTrackingDirEntryForVolume) { log.Debugf("the path '%s' has already existed", path) foundVol = true } else { // Allow creation of only one active volume from a snapshot at any point in time - totalSubDirectories, _ := s.isiSvc.GetSubDirectoryCount(isiPath, snapshotTrackingDir) + totalSubDirectories, _ := isiConfig.isiSvc.GetSubDirectoryCount(ctx, isiPath, snapshotTrackingDir) if totalSubDirectories > 2 { return nil, fmt.Errorf("another RO volume from this snapshot is already present") } @@ -230,7 +275,7 @@ func (s *service) CreateVolume( path = utils.GetPathForVolume(isiPath, req.GetName()) // to ensure idempotency, check if the volume still exists. 
// k8s might have made the same CreateVolume call in quick succession and the volume was already created in the first run - if s.isiSvc.IsVolumeExistent(isiPath, "", req.GetName()) { + if isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", req.GetName()) { log.Debugf("the path '%s' has already existed", path) foundVol = true } @@ -238,19 +283,20 @@ func (s *service) CreateVolume( if !foundVol && isROVolumeFromSnapshot { // Create an entry for this volume in snapshot tracking dir - if err = s.isiSvc.CreateVolume(isiPath, snapshotTrackingDir); err != nil { + if err = isiConfig.isiSvc.CreateVolume(ctx, isiPath, snapshotTrackingDir); err != nil { return nil, err } - if err = s.isiSvc.CreateVolume(isiPath, snapshotTrackingDirEntryForVolume); err != nil { + if err = isiConfig.isiSvc.CreateVolume(ctx, isiPath, snapshotTrackingDirEntryForVolume); err != nil { return nil, err } } - if export, err = s.isiSvc.GetExportWithPathAndZone(path, accessZone); err != nil || export == nil { + if export, err = isiConfig.isiSvc.GetExportWithPathAndZone(ctx, path, accessZone); err != nil || export == nil { + var errMsg string if err == nil { if foundVol { - return nil, status.Error(codes.Internal, "the export may not be ready yet and the path is '"+path+"'") + return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(runID, "the export may not be ready yet and the path is '"+path+"'")) } } else { // internal error @@ -261,13 +307,13 @@ func (s *service) CreateVolume( exportID = 0 } else { exportID = export.ID - log.Debugf("id of the corresonding nfs export of existing volume '%s' has been resolved to '%d'", req.GetName(), exportID) + log.Debugf("id of the corresponding nfs export of existing volume '%s' has been resolved to '%d'", req.GetName(), exportID) if exportID != 0 { if foundVol || isROVolumeFromSnapshot { - return s.getCreateVolumeResponse(exportID, req.GetName(), path, export.Zone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID), nil + return s.getCreateVolumeResponse(ctx, exportID, req.GetName(), path, export.Zone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName), nil } // in case the export exists but no related volume (directory) - if err = s.isiSvc.UnexportByIDWithZone(exportID, accessZone); err != nil { + if err = isiConfig.isiSvc.UnexportByIDWithZone(ctx, exportID, accessZone); err != nil { return nil, status.Error(codes.Internal, err.Error()) } exportID = 0 @@ -276,18 +322,25 @@ func (s *service) CreateVolume( // create volume (directory) with ACL 0777 if !isROVolumeFromSnapshot { - if err = s.isiSvc.CreateVolume(isiPath, req.GetName()); err != nil { - return nil, err + if len(headerMetadata) == 0 { + if err = isiConfig.isiSvc.CreateVolume(ctx, isiPath, req.GetName()); err != nil { + return nil, err + } + } else { + log.Debugf("create volume with header metadata '%s' has been resolved to '%v'", req.GetName(), headerMetadata) + if err = isiConfig.isiSvc.CreateVolumeWithMetaData(ctx, isiPath, req.GetName(), headerMetadata); err != nil { + return nil, err + } } } // if volume content source is not null and new volume request is not for RO volume from snapshot, // copy content from the datasource if contentSource != nil && !isROVolumeFromSnapshot { - err = s.createVolumeFromSource(isiPath, contentSource, req, sizeInBytes) + err = s.createVolumeFromSource(ctx, isiConfig, isiPath, contentSource, req, sizeInBytes) if err != nil { // Clear volume since the volume creation is not successful - if err := 
s.isiSvc.DeleteVolume(isiPath, req.GetName()); err != nil { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, req.GetName()); err != nil { log.Infof("Delete volume in CreateVolume returned error '%s'", err) } return nil, err @@ -296,10 +349,10 @@ func (s *service) CreateVolume( if !foundVol && !isROVolumeFromSnapshot { // create quota - if quotaID, err = s.isiSvc.CreateQuota(path, req.GetName(), sizeInBytes, s.opts.QuotaEnabled); err != nil { + if quotaID, err = isiConfig.isiSvc.CreateQuota(ctx, path, req.GetName(), sizeInBytes, s.opts.QuotaEnabled); err != nil { log.Errorf("error creating quota ('%s', '%d' bytes), abort, also roll back by deleting the newly created volume: '%v'", req.GetName(), sizeInBytes, err) //roll back, delete the newly created volume - if err = s.isiSvc.DeleteVolume(isiPath, req.GetName()); err != nil { + if err = isiConfig.isiSvc.DeleteVolume(ctx, isiPath, req.GetName()); err != nil { return nil, fmt.Errorf("rollback (deleting volume '%s') failed with error : '%v'", req.GetName(), err) } return nil, fmt.Errorf("error creating quota ('%s', '%d' bytes), abort, also succesfully rolled back by deleting the newly created volume", req.GetName(), sizeInBytes) @@ -309,12 +362,19 @@ func (s *service) CreateVolume( // export volume in the given access zone, also add normalized quota id to the description field, in DeleteVolume, // the quota ID will be used for the quota to be directly deleted by ID if isROVolumeFromSnapshot { - if exportID, err = s.isiSvc.ExportVolumeWithZone(path, "", accessZone, ""); err == nil && exportID != 0 { + if exportID, err = isiConfig.isiSvc.ExportVolumeWithZone(ctx, path, "", accessZone, ""); err == nil && exportID != 0 { // get the export and retry if not found to ensure the export has been created for i := 0; i < MaxRetries; i++ { - if export, _ := s.isiSvc.GetExportByIDWithZone(exportID, accessZone); export != nil { + if export, _ := isiConfig.isiSvc.GetExportByIDWithZone(ctx, exportID, accessZone); export != nil { + // Add dummy localhost entry for pvc security + if !isiConfig.isiSvc.IsHostAlreadyAdded(ctx, exportID, accessZone, utils.DummyHostNodeID) { + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, utils.DummyHostNodeID, isiConfig.isiSvc.AddExportClientByIDWithZone) + if err != nil { + log.Debugf("Error while adding dummy localhost entry to export '%d'", exportID) + } + } // return the response - return s.getCreateVolumeResponse(exportID, req.GetName(), path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID), nil + return s.getCreateVolumeResponse(ctx, exportID, req.GetName(), path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName), nil } time.Sleep(RetrySleepTime) log.Printf("Begin to retry '%d' time(s), for export id '%d' and path '%s'\n", i+1, exportID, path) @@ -324,60 +384,75 @@ func (s *service) CreateVolume( } } else { - if exportID, err = s.isiSvc.ExportVolumeWithZone(isiPath, req.GetName(), accessZone, utils.GetQuotaIDWithCSITag(quotaID)); err == nil && exportID != 0 { + if exportID, err = isiConfig.isiSvc.ExportVolumeWithZone(ctx, isiPath, req.GetName(), accessZone, utils.GetQuotaIDWithCSITag(quotaID)); err == nil && exportID != 0 { // get the export and retry if not found to ensure the export has been created for i := 0; i < MaxRetries; i++ { - if export, _ := s.isiSvc.GetExportByIDWithZone(exportID, accessZone); export != nil { + if export, _ := 
isiConfig.isiSvc.GetExportByIDWithZone(ctx, exportID, accessZone); export != nil { + // Add dummy localhost entry for pvc security + if !isiConfig.isiSvc.IsHostAlreadyAdded(ctx, exportID, accessZone, utils.DummyHostNodeID) { + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, utils.DummyHostNodeID, isiConfig.isiSvc.AddExportClientByIDWithZone) + if err != nil { + log.Debugf("Error while adding dummy localhost entry to export '%d'", exportID) + } + } // return the response - return s.getCreateVolumeResponse(exportID, req.GetName(), path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID), nil + return s.getCreateVolumeResponse(ctx, exportID, req.GetName(), path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName), nil } time.Sleep(RetrySleepTime) log.Printf("Begin to retry '%d' time(s), for export id '%d' and path '%s'\n", i+1, exportID, path) } } else { // clear quota and delete volume since the export cannot be created - if error := s.isiSvc.ClearQuotaByID(quotaID); error != nil { + if error := isiConfig.isiSvc.ClearQuotaByID(ctx, quotaID); error != nil { log.Infof("Clear Quota returned error '%s'", error) } - if error := s.isiSvc.DeleteVolume(isiPath, req.GetName()); error != nil { + if error := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, req.GetName()); error != nil { log.Infof("Delete volume in CreateVolume returned error '%s'", error) } return nil, err } } - return nil, status.Error(codes.Internal, "the export id '"+strconv.Itoa(exportID)+"' and path '"+path+"' may not be ready yet after retrying") + return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(runID, "the export id '"+strconv.Itoa(exportID)+"' and path '"+path+"' may not be ready yet after retrying")) } -func (s *service) createVolumeFromSnapshot(isiPath, srcSnapshotID, dstVolumeName string, sizeInBytes int64) error { +func (s *service) createVolumeFromSnapshot(ctx context.Context, isiConfig *IsilonClusterConfig, + isiPath, normalizedSnapshotID, dstVolumeName string, sizeInBytes int64) error { var snapshotSrc isi.Snapshot var err error - if snapshotSrc, err = s.isiSvc.GetSnapshot(srcSnapshotID); err != nil { + + // parse the input snapshot id and fetch it's components + srcSnapshotID, _, err := utils.ParseNormalizedSnapshotID(ctx, normalizedSnapshotID) + if err != nil { + return err + } + + if snapshotSrc, err = isiConfig.isiSvc.GetSnapshot(ctx, srcSnapshotID); err != nil { return fmt.Errorf("failed to get snapshot id '%s', error '%v'", srcSnapshotID, err) } // check source snapshot size - size := s.isiSvc.GetSnapshotSize(isiPath, snapshotSrc.Name) + size := isiConfig.isiSvc.GetSnapshotSize(ctx, isiPath, snapshotSrc.Name) if size > sizeInBytes { - return fmt.Errorf("Specified size '%d' is smaller than source snapshot size '%d'", sizeInBytes, size) + return fmt.Errorf("specified size '%d' is smaller than source snapshot size '%d'", sizeInBytes, size) } - if _, err = s.isiSvc.CopySnapshot(isiPath, snapshotSrc.Id, dstVolumeName); err != nil { + if _, err = isiConfig.isiSvc.CopySnapshot(ctx, isiPath, snapshotSrc.Id, dstVolumeName); err != nil { return fmt.Errorf("failed to copy snapshot id '%s', error '%s'", srcSnapshotID, err.Error()) } return nil } -func (s *service) createVolumeFromVolume(isiPath, srcVolumeName, dstVolumeName string, sizeInBytes int64) error { +func (s *service) createVolumeFromVolume(ctx context.Context, isiConfig *IsilonClusterConfig, isiPath, srcVolumeName, 
dstVolumeName string, sizeInBytes int64) error { var err error - if s.isiSvc.IsVolumeExistent(isiPath, "", srcVolumeName) { + if isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", srcVolumeName) { // check source volume size - size := s.isiSvc.GetVolumeSize(isiPath, srcVolumeName) + size := isiConfig.isiSvc.GetVolumeSize(ctx, isiPath, srcVolumeName) if size > sizeInBytes { - return fmt.Errorf("Specified size '%d' is smaller than source volume size '%d'", sizeInBytes, size) + return fmt.Errorf("specified size '%d' is smaller than source volume size '%d'", sizeInBytes, size) } - if _, err = s.isiSvc.CopyVolume(isiPath, srcVolumeName, dstVolumeName); err != nil { + if _, err = isiConfig.isiSvc.CopyVolume(ctx, isiPath, srcVolumeName, dstVolumeName); err != nil { return fmt.Errorf("failed to copy volume name '%s', error '%v'", srcVolumeName, err) } } else { @@ -388,37 +463,39 @@ func (s *service) createVolumeFromVolume(isiPath, srcVolumeName, dstVolumeName s } func (s *service) createVolumeFromSource( + ctx context.Context, + isiConfig *IsilonClusterConfig, isiPath string, contentSource *csi.VolumeContentSource, req *csi.CreateVolumeRequest, sizeInBytes int64) error { if contentSnapshot := contentSource.GetSnapshot(); contentSnapshot != nil { // create volume from source snapshot - if err := s.createVolumeFromSnapshot(isiPath, contentSnapshot.GetSnapshotId(), req.GetName(), sizeInBytes); err != nil { + if err := s.createVolumeFromSnapshot(ctx, isiConfig, isiPath, contentSnapshot.GetSnapshotId(), req.GetName(), sizeInBytes); err != nil { return status.Error(codes.Internal, err.Error()) } } if contentVolume := contentSource.GetVolume(); contentVolume != nil { // create volume from source volume - srcVolumeName, _, _, err := utils.ParseNormalizedVolumeID(contentVolume.GetVolumeId()) + srcVolumeName, _, _, _, err := utils.ParseNormalizedVolumeID(ctx, contentVolume.GetVolumeId()) if err != nil { return status.Error(codes.Internal, err.Error()) } - if err := s.createVolumeFromVolume(isiPath, srcVolumeName, req.GetName(), sizeInBytes); err != nil { + if err := s.createVolumeFromVolume(ctx, isiConfig, isiPath, srcVolumeName, req.GetName(), sizeInBytes); err != nil { return status.Error(codes.Internal, err.Error()) } } return nil } -func (s *service) getCreateVolumeResponse(exportID int, volName, path, accessZone string, sizeInBytes int64, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID string) *csi.CreateVolumeResponse { +func (s *service) getCreateVolumeResponse(ctx context.Context, exportID int, volName, path, accessZone string, sizeInBytes int64, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName string) *csi.CreateVolumeResponse { return &csi.CreateVolumeResponse{ - Volume: s.getCSIVolume(exportID, volName, path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID), + Volume: s.getCSIVolume(ctx, exportID, volName, path, accessZone, sizeInBytes, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName), } } -func (s *service) getCSIVolume(exportID int, volName, path, accessZone string, sizeInBytes int64, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID string) *csi.Volume { +func (s *service) getCSIVolume(ctx context.Context, exportID int, volName, path, accessZone string, sizeInBytes int64, azServiceIP, rootClientEnabled, sourceSnapshotID, sourceVolumeID, clusterName string) *csi.Volume { // Make the additional volume attributes attributes := map[string]string{ "ID": 
strconv.Itoa(exportID), @@ -427,7 +504,12 @@ func (s *service) getCSIVolume(exportID int, volName, path, accessZone string, s "AccessZone": accessZone, "AzServiceIP": azServiceIP, "RootClientEnabled": rootClientEnabled, + "ClusterName": clusterName, } + + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + log.Debugf("Attributes '%v'", attributes) // Set content source as part of create volume response if volume is created from snapshot or existing volume @@ -453,7 +535,7 @@ func (s *service) getCSIVolume(exportID int, volName, path, accessZone string, s } vi := &csi.Volume{ - VolumeId: utils.GetNormalizedVolumeID(volName, exportID, accessZone), + VolumeId: utils.GetNormalizedVolumeID(ctx, volName, exportID, accessZone, clusterName), CapacityBytes: sizeInBytes, VolumeContext: attributes, ContentSource: contentSource, @@ -465,23 +547,37 @@ func (s *service) DeleteVolume( ctx context.Context, req *csi.DeleteVolumeRequest) ( *csi.DeleteVolumeResponse, error) { - // TODO more checks need to be done, e.g. if access mode is VolumeCapability_AccessMode_MULTI_NODE_XXX, then other nodes might still be using this volume, thus the delete should be skipped + // Fetch log handler + ctx, _, _ = GetRunIDLog(ctx) - // probe - if err := s.autoProbe(ctx); err != nil { - return nil, err - } - s.logStatistics() // validate request - if err := s.ValidateDeleteVolumeRequest(req); err != nil { + if err := s.ValidateDeleteVolumeRequest(ctx, req); err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } - volName, exportID, accessZone, _ := utils.ParseNormalizedVolumeID(req.GetVolumeId()) + // parse the input volume id and fetch it's components + volName, exportID, accessZone, clusterName, err := utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + ctx, log := setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + // probe + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, err + } + s.logStatistics() quotaEnabled := s.opts.QuotaEnabled - export, err := s.isiSvc.GetExportByIDWithZone(exportID, accessZone) + export, err := isiConfig.isiSvc.GetExportByIDWithZone(ctx, exportID, accessZone) if err != nil { if jsonError, ok := err.(*isiApi.JSONError); ok { if jsonError.StatusCode == 404 { @@ -498,10 +594,10 @@ func (s *service) DeleteVolume( exportPath := (*export.Paths)[0] - isROVolumeFromSnapshot := s.isiSvc.isROVolumeFromSnapshot(exportPath) + isROVolumeFromSnapshot := isiConfig.isiSvc.isROVolumeFromSnapshot(exportPath) // If it is a RO volume and dataSource is snapshot if isROVolumeFromSnapshot { - if err := s.processSnapshotTrackingDirectoryDuringDeleteVolume(volName, export); err != nil { + if err := s.processSnapshotTrackingDirectoryDuringDeleteVolume(ctx, volName, export, isiConfig); err != nil { return nil, err } return &csi.DeleteVolumeResponse{}, nil @@ -511,7 +607,7 @@ func (s *service) DeleteVolume( // to ensure idempotency, check if the volume and export still exists. 
// k8s might have made the same DeleteVolume call in quick succession and the volume was already deleted in the first run log.Debugf("controller begins to delete volume, name '%s', quotaEnabled '%t'", volName, quotaEnabled) - if err := s.isiSvc.DeleteQuotaByExportIDWithZone(volName, exportID, accessZone); err != nil { + if err := isiConfig.isiSvc.DeleteQuotaByExportIDWithZone(ctx, volName, exportID, accessZone); err != nil { jsonError, ok := err.(*isiApi.JSONError) if ok { if jsonError.StatusCode != 404 { @@ -522,7 +618,7 @@ func (s *service) DeleteVolume( } } - if !s.isiSvc.IsVolumeExistent(isiPath, "", volName) { + if !isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", volName) { log.Debugf("volume '%s' not found, skip calling delete directory.", volName) } else { // Before deleting the Volume, we would like to check if there are any @@ -533,7 +629,7 @@ func (s *service) DeleteVolume( {[]byte("path"), []byte(path)}, {[]byte("zone"), []byte(accessZone)}, } - exports, err := s.isiSvc.GetExportsWithParams(params) + exports, err := isiConfig.isiSvc.GetExportsWithParams(ctx, params) if err != nil { jsonError, ok := err.(*isiApi.JSONError) if ok { @@ -546,14 +642,14 @@ func (s *service) DeleteVolume( if exports != nil && exports.Total == 1 && exports.Exports[0].ID == exportID { log.Infof("controller begins to unexport id '%d', target path '%s', access zone '%s'", exportID, volName, accessZone) - if err := s.isiSvc.UnexportByIDWithZone(exportID, accessZone); err != nil { + if err := isiConfig.isiSvc.UnexportByIDWithZone(ctx, exportID, accessZone); err != nil { return nil, err } } else if exports != nil && exports.Total > 1 { return nil, fmt.Errorf("exports found for volume %s in AccessZone %s. It is not safe to delete the volume", volName, accessZone) } - if err := s.isiSvc.DeleteVolume(isiPath, volName); err != nil { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, volName); err != nil { return nil, err } } @@ -561,12 +657,17 @@ func (s *service) DeleteVolume( } func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume( + ctx context.Context, volName string, - export isi.Export) error { + export isi.Export, + isiConfig *IsilonClusterConfig) error { exportPath := (*export.Paths)[0] + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + // Get snapshot name - snapshotName, err := s.isiSvc.GetSnapshotNameFromIsiPath(exportPath) + snapshotName, err := isiConfig.isiSvc.GetSnapshotNameFromIsiPath(ctx, exportPath) if err != nil { return err } @@ -574,22 +675,22 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume( // Populate names for snapshot's tracking dir, snapshot tracking dir entry for this volume // and snapshot delete marker - snapshotTrackingDir := s.isiSvc.GetSnapshotTrackingDirName(snapshotName) + snapshotTrackingDir := isiConfig.isiSvc.GetSnapshotTrackingDirName(snapshotName) snapshotTrackingDirEntryForVolume := path.Join(snapshotTrackingDir, volName) snapshotTrackingDirDeleteMarker := path.Join(snapshotTrackingDir, DeleteSnapshotMarker) // Delete the snapshot tracking directory entry for this volume - isiPath, _, _ := s.isiSvc.GetSnapshotIsiPathComponents(exportPath) + isiPath, _, _ := isiConfig.isiSvc.GetSnapshotIsiPathComponents(exportPath) log.Debugf("Delete the snapshot tracking directory entry '%s' for volume '%s'", snapshotTrackingDirEntryForVolume, volName) - if s.isiSvc.IsVolumeExistent(isiPath, "", snapshotTrackingDirEntryForVolume) { - if err := s.isiSvc.DeleteVolume(isiPath, snapshotTrackingDirEntryForVolume); err != nil { + if 
isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", snapshotTrackingDirEntryForVolume) { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, snapshotTrackingDirEntryForVolume); err != nil { return err } } // Get subdirectories count of snapshot tracking dir. // Every directory will have two subdirectory entries . and .. - totalSubDirectories, err := s.isiSvc.GetSubDirectoryCount(isiPath, snapshotTrackingDir) + totalSubDirectories, err := isiConfig.isiSvc.GetSubDirectoryCount(ctx, isiPath, snapshotTrackingDir) if err != nil { log.Errorf("failed to get subdirectories count of snapshot tracking dir '%s'", snapshotTrackingDir) return nil @@ -597,22 +698,22 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume( // Delete snapshot tracking directory, if required (i.e., if there is a // snapshot delete marker as a result of snapshot deletion on k8s side) - if s.isiSvc.IsVolumeExistent(isiPath, "", snapshotTrackingDirDeleteMarker) { + if isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", snapshotTrackingDirDeleteMarker) { // There are no more volumes present which were created using this snapshot // This indicates that there are only three subdirectories ., .. and snapshot delete marker. if totalSubDirectories == 3 { - err = s.isiSvc.UnexportByIDWithZone(export.ID, "") + err = isiConfig.isiSvc.UnexportByIDWithZone(ctx, export.ID, "") if err != nil { log.Errorf("failed to delete snapshot directory export with id '%v'", export.ID) return nil } // Delete snapshot tracking directory - if err := s.isiSvc.DeleteVolume(isiPath, snapshotTrackingDir); err != nil { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, snapshotTrackingDir); err != nil { log.Errorf("error while deleting snapshot tracking directory '%s'", path.Join(isiPath, snapshotName)) return nil } // Delete snapshot - err = s.isiSvc.client.RemoveSnapshot(context.Background(), -1, snapshotName) + err = isiConfig.isiSvc.client.RemoveSnapshot(context.Background(), -1, snapshotName) if err != nil { log.Errorf("error deleting snapshot: '%s'", err.Error()) return nil @@ -622,7 +723,7 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume( if totalSubDirectories == 2 { // Delete snapshot tracking directory - if err := s.isiSvc.DeleteVolume(isiPath, snapshotTrackingDir); err != nil { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, snapshotTrackingDir); err != nil { log.Errorf("error while deleting snapshot tracking directory '%s'", path.Join(isiPath, snapshotName)) return nil } @@ -634,17 +735,33 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteVolume( func (s *service) ControllerExpandVolume( ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { + // Fetch log handler + ctx, _, _ = GetRunIDLog(ctx) - volName, exportID, accessZone, err := utils.ParseNormalizedVolumeID(req.GetVolumeId()) + volName, exportID, accessZone, clusterName, err := utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } + + ctx, log := setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + // auto probe + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, status.Error(codes.FailedPrecondition, err.Error()) + } + requiredBytes := req.GetCapacityRange().GetRequiredBytes() // when Quota is disabled, always return success // Otherwise, update the 
quota size as requested if s.opts.QuotaEnabled { - quota, err := s.isiSvc.GetVolumeQuota(volName, exportID, accessZone) + quota, err := isiConfig.isiSvc.GetVolumeQuota(ctx, volName, exportID, accessZone) if err != nil { return nil, status.Error(codes.NotFound, err.Error()) } @@ -655,7 +772,7 @@ func (s *service) ControllerExpandVolume( return &csi.ControllerExpandVolumeResponse{CapacityBytes: quotaSize, NodeExpansionRequired: false}, nil } - if err = s.isiSvc.UpdateQuotaSize(quota.Id, requiredBytes); err != nil { + if err = isiConfig.isiSvc.UpdateQuotaSize(ctx, quota.Id, requiredBytes); err != nil { return nil, status.Error(codes.Internal, err.Error()) } } @@ -663,12 +780,12 @@ func (s *service) ControllerExpandVolume( return &csi.ControllerExpandVolumeResponse{CapacityBytes: requiredBytes, NodeExpansionRequired: false}, nil } -func (s *service) getAddClientFunc(rootClientEnabled bool) (addClientFunc func(exportID int, accessZone, clientIP string) error) { +func (s *service) getAddClientFunc(rootClientEnabled bool, isiConfig *IsilonClusterConfig) (addClientFunc func(ctx context.Context, exportID int, accessZone, clientIP string) error) { if rootClientEnabled { - return s.isiSvc.AddExportRootClientByIDWithZone + return isiConfig.isiSvc.AddExportRootClientByIDWithZone } - return s.isiSvc.AddExportClientByIDWithZone + return isiConfig.isiSvc.AddExportClientByIDWithZone } /* @@ -685,6 +802,9 @@ func (s *service) ControllerPublishVolume( isROVolumeFromSnapshot bool ) + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + volumeContext := req.GetVolumeContext() if volumeContext != nil { log.Printf("VolumeContext:") @@ -693,19 +813,27 @@ func (s *service) ControllerPublishVolume( } } - if err := s.autoProbe(ctx); err != nil { - return nil, err - } - volID := req.GetVolumeId() if volID == "" { return nil, status.Error(codes.InvalidArgument, - "volume ID is required") + utils.GetMessageWithRunID(runID, "volume ID is required")) } - volName, exportID, accessZone, err := utils.ParseNormalizedVolumeID(volID) + volName, exportID, accessZone, clusterName, err := utils.ParseNormalizedVolumeID(ctx, volID) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse volume ID '%s', error : '%v'", volID, err)) + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "failed to parse volume ID '%s', error : '%v'", volID, err)) + } + + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, err } if exportID == 0 { @@ -713,53 +841,53 @@ func (s *service) ControllerPublishVolume( } if exportPath = volumeContext[ExportPathParam]; exportPath == "" { - exportPath = utils.GetPathForVolume(s.opts.Path, volName) + exportPath = utils.GetPathForVolume(isiConfig.IsiPath, volName) } - isROVolumeFromSnapshot = s.isiSvc.isROVolumeFromSnapshot(exportPath) + isROVolumeFromSnapshot = isiConfig.isiSvc.isROVolumeFromSnapshot(exportPath) if isROVolumeFromSnapshot { log.Info("Volume source is snapshot") accessZone = constants.DefaultAccessZone - if export, err := s.isiSvc.GetExportWithPathAndZone(exportPath, accessZone); err != nil || export == nil { - return nil, status.Errorf(codes.Internal, "error retrieving export for '%s'", exportPath) + if export, err := isiConfig.isiSvc.GetExportWithPathAndZone(ctx, exportPath, accessZone); err != nil || export == nil { 
+ return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "error retrieving export for '%s'", exportPath)) } } else { isiPath = utils.GetIsiPathFromExportPath(exportPath) - vol, err := s.isiSvc.GetVolume(isiPath, "", volName) + vol, err := isiConfig.isiSvc.GetVolume(ctx, isiPath, "", volName) if err != nil || vol.Name == "" { return nil, status.Errorf(codes.Internal, - "failure checking volume status before controller publish: '%s'", - err.Error()) + utils.GetMessageWithRunID(runID, "failure checking volume status before controller publish: '%s'", + err.Error())) } } nodeID := req.GetNodeId() if nodeID == "" { return nil, status.Error(codes.InvalidArgument, - "node ID is required") + utils.GetMessageWithRunID(runID, "node ID is required")) } vc := req.GetVolumeCapability() if vc == nil { return nil, status.Error(codes.InvalidArgument, - "volume capability is required") + utils.GetMessageWithRunID(runID, "volume capability is required")) } am := vc.GetAccessMode() if am == nil { return nil, status.Error(codes.InvalidArgument, - "access mode is required") + utils.GetMessageWithRunID(runID, "access mode is required")) } if am.Mode == csi.VolumeCapability_AccessMode_UNKNOWN { return nil, status.Error(codes.InvalidArgument, - errUnknownAccessMode) + utils.GetMessageWithRunID(runID, errUnknownAccessMode)) } vcs := []*csi.VolumeCapability{req.GetVolumeCapability()} if !checkValidAccessTypes(vcs) { return nil, status.Error(codes.InvalidArgument, - errUnknownAccessType) + utils.GetMessageWithRunID(runID, errUnknownAccessType)) } rootClientEnabled := false @@ -769,7 +897,7 @@ func (s *service) ControllerPublishVolume( rootClientEnabled = val } - addClientFunc := s.getAddClientFunc(rootClientEnabled) + addClientFunc := s.getAddClientFunc(rootClientEnabled, isiConfig) switch am.Mode { case csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER: @@ -777,30 +905,42 @@ func (s *service) ControllerPublishVolume( err = fmt.Errorf("unsupported access mode: '%s'", am.String()) break } - err = s.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(exportID, accessZone, nodeID, addClientFunc) + + if !isiConfig.isiSvc.IsHostAlreadyAdded(ctx, exportID, accessZone, utils.DummyHostNodeID) { + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, utils.DummyHostNodeID, isiConfig.isiSvc.AddExportClientByIDWithZone) + } + + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, nodeID, addClientFunc) if err == nil && rootClientEnabled { - err = s.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(exportID, accessZone, nodeID, s.isiSvc.AddExportClientByIDWithZone) + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, nodeID, isiConfig.isiSvc.AddExportClientByIDWithZone) } case csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY: - err = s.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(exportID, accessZone, nodeID, s.isiSvc.AddExportReadOnlyClientByIDWithZone) + err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, nodeID, isiConfig.isiSvc.AddExportReadOnlyClientByIDWithZone) case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER: if isROVolumeFromSnapshot { err = fmt.Errorf("unsupported access mode: '%s'", am.String()) break } - if s.isiSvc.OtherClientsAlreadyAdded(exportID, accessZone, nodeID) { - return nil, status.Errorf(codes.FailedPrecondition, "export '%d' in access zone '%s' already has other clients added to it, and the 
access mode is SINGLE_NODE_WRITER, thus the request fails", exportID, accessZone)
+ if isiConfig.isiSvc.OtherClientsAlreadyAdded(ctx, exportID, accessZone, nodeID) {
+ return nil, status.Errorf(codes.FailedPrecondition, utils.GetMessageWithRunID(runID,
+ "export '%d' in access zone '%s' already has other clients added to it, and the access mode is "+
+ "SINGLE_NODE_WRITER, thus the request fails", exportID, accessZone))
}
- err = s.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(exportID, accessZone, nodeID, addClientFunc)
+
+ if !isiConfig.isiSvc.IsHostAlreadyAdded(ctx, exportID, accessZone, utils.DummyHostNodeID) {
+ err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, utils.DummyHostNodeID, isiConfig.isiSvc.AddExportClientByIDWithZone)
+ }
+ err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, nodeID, addClientFunc)
if err == nil && rootClientEnabled {
- err = s.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(exportID, accessZone, nodeID, s.isiSvc.AddExportClientByIDWithZone)
+ err = isiConfig.isiSvc.AddExportClientNetworkIdentifierByIDWithZone(ctx, exportID, accessZone, nodeID, isiConfig.isiSvc.AddExportClientByIDWithZone)
}
default:
- return nil, status.Errorf(codes.InvalidArgument, "unsupported access mode: '%s'", am.String())
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "unsupported access mode: '%s'", am.String()))
}
if err != nil {
- return nil, status.Errorf(codes.Internal, "internal error occured when attempting to add client ip '%s' to export '%d', error : '%v'", nodeID, exportID, err)
+ return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID,
+ "internal error occurred when attempting to add client ip '%s' to export '%d', error : '%v'", nodeID, exportID, err))
}
return &csi.ControllerPublishVolumeResponse{}, nil
}
@@ -813,14 +953,27 @@ func (s *service) ValidateVolumeCapabilities(
exportPath string
isiPath string
)
- if err := s.autoProbe(ctx); err != nil {
- return nil, err
- }
+ // Fetch log handler
+ ctx, log, runID := GetRunIDLog(ctx)
+
+ // parse the input volume id and fetch its components
volID := req.GetVolumeId()
- volName, _, _, err := utils.ParseNormalizedVolumeID(volID)
+ volName, _, _, clusterName, err := utils.ParseNormalizedVolumeID(ctx, volID)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, err.Error())
+ }
+
+ ctx, log = setClusterContext(ctx, clusterName)
+ log.Debugf("Cluster Name: %v", clusterName)
+
+ isiConfig, err := s.getIsilonConfig(ctx, &clusterName)
if err != nil {
- return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse volume ID '%s', error : '%v'", volID, err))
+ return nil, err
+ }
+
+ if err := s.autoProbe(ctx, isiConfig); err != nil {
+ return nil, err
}
volumeContext := req.GetVolumeContext()
@@ -829,11 +982,11 @@ func (s *service) ValidateVolumeCapabilities(
}
isiPath = utils.GetIsiPathFromExportPath(exportPath)
- vol, err := s.getVolByName(isiPath, volName)
+ vol, err := s.getVolByName(ctx, isiPath, volName, isiConfig)
if err != nil {
return nil, status.Errorf(codes.Internal,
- "failure checking volume status for capabilities: '%s'",
- err.Error())
+ utils.GetMessageWithRunID(runID, "failure checking volume status for capabilities: '%s'",
+ err.Error()))
}
vcs := req.GetVolumeCapabilities()
@@ -854,65 +1007,73 @@ func (s *service) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest)
(*csi.ListVolumesResponse, error) { - var ( - exports isi.ExportList - resume string - err error - ) - resp := new(csi.ListVolumesResponse) - if req.MaxEntries == 0 && req.StartingToken == "" { - // The value of max_entries is zero and no starting token in the request means no restriction - exports, err = s.isiSvc.GetExports() - } else { - maxEntries := strconv.Itoa(int(req.MaxEntries)) - if req.StartingToken == "" { - // Get the first page if there's no starting token - if req.MaxEntries < 0 { - return nil, status.Error(codes.InvalidArgument, "Invalid max entries") - } - exports, resume, err = s.isiSvc.GetExportsWithLimit(maxEntries) - if err != nil { - return nil, status.Error(codes.Internal, "Cannot get exports with limit") - } + // TODO The below implementation(commented code) doesn't work for multi-cluster. + // Add multi-cluster support by considering both MaxEntries and StartingToken(if specified) attributes. + /* + var ( + exports isi.ExportList + resume string + err error + ) + resp := new(csi.ListVolumesResponse) + if req.MaxEntries == 0 && req.StartingToken == "" { + // The value of max_entries is zero and no starting token in the request means no restriction + exports, err = s.isiSvc.GetExports() } else { - // Continue to get exports based on the previous call - exports, resume, err = s.isiSvc.GetExportsWithResume(req.StartingToken) - if err != nil { - // The starting token is not valid, return the gRPC aborted code to indicate - return nil, status.Error(codes.Aborted, "The starting token is not valid") + maxEntries := strconv.Itoa(int(req.MaxEntries)) + if req.StartingToken == "" { + // Get the first page if there's no starting token + if req.MaxEntries < 0 { + return nil, status.Error(codes.InvalidArgument, "Invalid max entries") + } + exports, resume, err = s.isiSvc.GetExportsWithLimit(maxEntries) + if err != nil { + return nil, status.Error(codes.Internal, "Cannot get exports with limit") + } + } else { + // Continue to get exports based on the previous call + exports, resume, err = s.isiSvc.GetExportsWithResume(req.StartingToken) + if err != nil { + // The starting token is not valid, return the gRPC aborted code to indicate + return nil, status.Error(codes.Aborted, "The starting token is not valid") + } } + resp.NextToken = resume } - resp.NextToken = resume - } - - // Count the number of entries - num := 0 - for _, export := range exports { - paths := export.Paths - for range *paths { - num++ - } - } - // Convert exports to entries - entries := make([]*csi.ListVolumesResponse_Entry, num) - i := 0 - for _, export := range exports { - paths := export.Paths - for _, path := range *paths { - // TODO get the capacity range, not able to get now - volName := utils.GetVolumeNameFromExportPath(path) - // Not able to get "rootClientEnabled", it's read from the volume's storage class - // and added to "volumeContext" in CreateVolume, and read in NodeStageVolume. - // The value is not relevant here so just pass default value "false" here. 
- volume := s.getCSIVolume(export.ID, volName, path, export.Zone, 0, s.opts.Endpoint, "false", "", "") - entries[i] = &csi.ListVolumesResponse_Entry{ - Volume: volume, + + // Count the number of entries + num := 0 + for _, export := range exports { + paths := export.Paths + for range *paths { + num++ } - i++ } - } - resp.Entries = entries - return resp, nil + // Convert exports to entries + entries := make([]*csi.ListVolumesResponse_Entry, num) + i := 0 + for _, export := range exports { + paths := export.Paths + for _, path := range *paths { + // TODO get the capacity range, not able to get now + volName := utils.GetVolumeNameFromExportPath(path) + // Not able to get "rootClientEnabled", it's read from the volume's storage class + // and added to "volumeContext" in CreateVolume, and read in NodeStageVolume. + // The value is not relevant here so just pass default value "false" here. + // update with input cluster config + clusterConfig := IsilonClusterConfig{} + volume := s.getCSIVolume(export.ID, volName, path, export.Zone, 0, clusterConfig.IsiIP, "false", "", "", "") + entries[i] = &csi.ListVolumesResponse_Entry{ + Volume: volume, + } + i++ + } + } + resp.Entries = entries + return resp, nil + */ + + return nil, status.Error(codes.Unimplemented, "") } func (s *service) ListSnapshots(context.Context, @@ -925,23 +1086,40 @@ func (s *service) ControllerUnpublishVolume( req *csi.ControllerUnpublishVolumeRequest) ( *csi.ControllerUnpublishVolumeResponse, error) { + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + if req.VolumeId == "" { - return nil, status.Errorf(codes.InvalidArgument, "ControllerUnpublishVolumeRequest.VolumeId is empty") + return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "ControllerUnpublishVolumeRequest.VolumeId is empty")) } - _, exportID, accessZone, err := utils.ParseNormalizedVolumeID(req.VolumeId) + _, exportID, accessZone, clusterName, err := utils.ParseNormalizedVolumeID(ctx, req.VolumeId) if err != nil { - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse volume ID '%s', error : '%s'", req.VolumeId, err.Error())) + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "failed to parse volume ID '%s', error : '%s'", req.VolumeId, err.Error())) + } + + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + // auto probe + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, err.Error())) } nodeID := req.GetNodeId() if nodeID == "" { return nil, status.Error(codes.InvalidArgument, - "node ID is required") + utils.GetMessageWithRunID(runID, "node ID is required")) } - if err := s.isiSvc.RemoveExportClientByIDWithZone(exportID, accessZone, nodeID); err != nil { - return nil, status.Errorf(codes.Internal, "error encountered when trying to remove client '%s' from export '%d' with access zone '%s'", nodeID, exportID, accessZone) + if err := isiConfig.isiSvc.RemoveExportClientByIDWithZone(ctx, exportID, accessZone, nodeID); err != nil { + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "error encountered when"+ + " trying to remove client '%s' from export '%d' with access zone '%s' on cluster '%s'", nodeID, exportID, accessZone, clusterName)) } return &csi.ControllerUnpublishVolumeResponse{}, nil @@ -952,7 +1130,29 @@ func (s 
*service) GetCapacity(
req *csi.GetCapacityRequest) (
*csi.GetCapacityResponse, error) {
- if err := s.autoProbe(ctx); err != nil {
+ var clusterName string
+ params := req.GetParameters()
+
+ // Fetch log handler
+ ctx, log, runID := GetRunIDLog(ctx)
+
+ if _, ok := params[ClusterNameParam]; ok {
+ if params[ClusterNameParam] == "" {
+ clusterName = s.defaultIsiClusterName
+ } else {
+ clusterName = params[ClusterNameParam]
+ }
+ }
+
+ ctx, log = setClusterContext(ctx, clusterName)
+ log.Debugf("Cluster Name: %v", clusterName)
+
+ isiConfig, err := s.getIsilonConfig(ctx, &clusterName)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := s.autoProbe(ctx, isiConfig); err != nil {
log.Error("Failed to probe with error: " + err.Error())
return nil, err
}
@@ -963,19 +1163,19 @@ func (s *service) GetCapacity(
supported, reason := validateVolumeCaps(vcs, nil)
if !supported {
log.Errorf("GetVolumeCapabilities failed with error: '%s'", reason)
- return nil, status.Errorf(codes.InvalidArgument, reason)
+ return nil, status.Errorf(codes.InvalidArgument, utils.GetMessageWithRunID(runID, reason))
}
}
//pass the key(s) to rest api
keyArray := []string{"ifs.bytes.avail"}
- stat, err := s.isiSvc.GetStatistics(keyArray)
+ stat, err := isiConfig.isiSvc.GetStatistics(ctx, keyArray)
if err != nil || len(stat.StatsList) < 1 {
- return nil, status.Errorf(codes.Internal, "Could not retrieve capacity. Error '%s'", err.Error())
+ return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "Could not retrieve capacity. Error '%s'", err.Error()))
}
if stat.StatsList[0].Error != "" {
- return nil, status.Errorf(codes.Internal, "Could not retrieve capacity. Data returned error '%s'", stat.StatsList[0].Error)
+ return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "Could not retrieve capacity. 
Data returned error '%s'", stat.StatsList[0].Error)) } remainingCapInBytes := stat.StatsList[0].Value @@ -1051,19 +1251,19 @@ func (s *service) ControllerGetCapabilities( }, nil } -func (s *service) controllerProbe(ctx context.Context) error { +func (s *service) controllerProbe(ctx context.Context, clusterConfig *IsilonClusterConfig) error { + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) - if err := s.validateOptsParameters(); err != nil { + if err := s.validateOptsParameters(clusterConfig); err != nil { return fmt.Errorf("controller probe failed : '%v'", err) } - if s.isiSvc == nil { - - return errors.New("s.isiSvc (type isiService) is nil, probe failed") - + if clusterConfig.isiSvc == nil { + return errors.New("clusterConfig.isiSvc (type isiService) is nil, probe failed") } - if err := s.isiSvc.TestConnection(); err != nil { + if err := clusterConfig.isiSvc.TestConnection(ctx); err != nil { return fmt.Errorf("controller probe failed : '%v'", err) } @@ -1080,9 +1280,27 @@ func (s *service) CreateSnapshot( req *csi.CreateSnapshotRequest) ( *csi.CreateSnapshotResponse, error) { + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + + log.Infof("CreateSnapshot started") + // parse the input volume id and fetch it's components + _, _, _, clusterName, err := utils.ParseNormalizedVolumeID(ctx, req.GetSourceVolumeId()) + if err != nil { + return nil, status.Error(codes.NotFound, utils.GetMessageWithRunID(runID, err.Error())) + } + + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + // auto probe - if err := s.autoProbe(ctx); err != nil { - return nil, status.Error(codes.FailedPrecondition, err.Error()) + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, err.Error())) } // validate request and get details of the request @@ -1096,67 +1314,80 @@ func (s *service) CreateSnapshot( params = req.GetParameters() if _, ok := params[IsiPathParam]; ok { if params[IsiPathParam] == "" { - isiPath = s.opts.Path + isiPath = isiConfig.IsiPath } else { isiPath = params[IsiPathParam] } } else { // use the default isiPath if not set in the storage class - isiPath = s.opts.Path + isiPath = isiConfig.IsiPath } - srcVolumeID, snapshotName, err := s.validateCreateSnapshotRequest(req, isiPath) + srcVolumeID, snapshotName, err := s.validateCreateSnapshotRequest(ctx, req, isiPath, isiConfig) if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, err.Error())) } + log.Infof("snapshot name is '%s' and source volume ID is '%s' ", snapshotName, srcVolumeID) // check if snapshot already exists var snapshotByName isi.Snapshot - if snapshotByName, err = s.isiSvc.GetSnapshot(snapshotName); snapshotByName != nil { + log.Infof("check for existence of snapshot '%s'", snapshotName) + if snapshotByName, err = isiConfig.isiSvc.GetSnapshot(ctx, snapshotName); snapshotByName != nil { if path.Base(snapshotByName.Path) == srcVolumeID { // return the existent snapshot - return s.getCreateSnapshotResponse(strconv.FormatInt(snapshotByName.Id, 10), req.GetSourceVolumeId(), snapshotByName.Created, s.isiSvc.GetSnapshotSize(isiPath, snapshotName)), nil + return s.getCreateSnapshotResponse(ctx, strconv.FormatInt(snapshotByName.Id, 10), req.GetSourceVolumeId(), snapshotByName.Created, 
isiConfig.isiSvc.GetSnapshotSize(ctx, isiPath, snapshotName), clusterName), nil } // return already exists error return nil, status.Error(codes.AlreadyExists, - fmt.Sprintf("a snapshot with name '%s' already exists but is incompatible with the specified source volume id '%s'", snapshotName, req.GetSourceVolumeId())) + utils.GetMessageWithRunID(runID, "a snapshot with name '%s' already exists but is "+ + "incompatible with the specified source volume id '%s'", snapshotName, req.GetSourceVolumeId())) } // create new snapshot for source direcory path := utils.GetPathForVolume(isiPath, srcVolumeID) - if snapshotNew, err = s.isiSvc.CreateSnapshot(path, snapshotName); err != nil { - return nil, status.Error(codes.Internal, err.Error()) + if snapshotNew, err = isiConfig.isiSvc.CreateSnapshot(ctx, path, snapshotName); err != nil { + return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(runID, err.Error())) } - _, _ = s.isiSvc.GetSnapshot(snapshotName) + _, _ = isiConfig.isiSvc.GetSnapshot(ctx, snapshotName) + log.Infof("snapshot creation is successful") // return the response - return s.getCreateSnapshotResponse(strconv.FormatInt(snapshotNew.Id, 10), req.GetSourceVolumeId(), snapshotNew.Created, s.isiSvc.GetSnapshotSize(isiPath, snapshotName)), nil + return s.getCreateSnapshotResponse(ctx, strconv.FormatInt(snapshotNew.Id, 10), req.GetSourceVolumeId(), snapshotNew.Created, isiConfig.isiSvc.GetSnapshotSize(ctx, isiPath, snapshotName), clusterName), nil } // validateCreateSnapshotRequest validate the input params in CreateSnapshotRequest func (s *service) validateCreateSnapshotRequest( - req *csi.CreateSnapshotRequest, isiPath string) (string, string, error) { - srcVolumeID, _, _, err := utils.ParseNormalizedVolumeID(req.GetSourceVolumeId()) + ctx context.Context, + req *csi.CreateSnapshotRequest, isiPath string, isiConfig *IsilonClusterConfig) (string, string, error) { + + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + + srcVolumeID, _, _, clusterName, err := utils.ParseNormalizedVolumeID(ctx, req.GetSourceVolumeId()) if err != nil { - return "", "", status.Error(codes.InvalidArgument, err.Error()) + return "", "", status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, err.Error())) } - if !s.isiSvc.IsVolumeExistent(isiPath, "", srcVolumeID) { + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + if !isiConfig.isiSvc.IsVolumeExistent(ctx, isiPath, "", srcVolumeID) { return "", "", status.Error(codes.InvalidArgument, - "source volume id is invalid") + utils.GetMessageWithRunID(runID, "source volume id is invalid")) } snapshotName := req.GetName() if snapshotName == "" { return "", "", status.Error(codes.InvalidArgument, - "name cannot be empty") + utils.GetMessageWithRunID(runID, "name cannot be empty")) } return srcVolumeID, snapshotName, nil } -func (s *service) getCreateSnapshotResponse(snapshotID string, sourceVolumeID string, creationTime, sizeInBytes int64) *csi.CreateSnapshotResponse { +func (s *service) getCreateSnapshotResponse(ctx context.Context, snapshotID string, sourceVolumeID string, creationTime, sizeInBytes int64, clusterName string) *csi.CreateSnapshotResponse { + snapID := utils.GetNormalizedSnapshotID(ctx, snapshotID, clusterName) return &csi.CreateSnapshotResponse{ - Snapshot: s.getCSISnapshot(snapshotID, sourceVolumeID, creationTime, sizeInBytes), + Snapshot: s.getCSISnapshot(snapID, sourceVolumeID, creationTime, sizeInBytes), } } @@ -1180,18 +1411,38 @@ func (s *service) DeleteSnapshot( ctx 
context.Context, req *csi.DeleteSnapshotRequest) ( *csi.DeleteSnapshotResponse, error) { - if err := s.autoProbe(ctx); err != nil { + + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + + log.Infof("DeleteSnapshot started") + if req.GetSnapshotId() == "" { + return nil, status.Errorf(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, "snapshot id to be deleted is required")) + } + + // parse the input snapshot id and fetch it's components + snapshotID, clusterName, err := utils.ParseNormalizedSnapshotID(ctx, req.GetSnapshotId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse snapshot ID '%s', error : '%v'", req.GetSnapshotId(), err)) + } + + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { return nil, err } - if req.GetSnapshotId() == "" { - return nil, status.Errorf(codes.FailedPrecondition, "snapshot id to be deleted is required") + if err := s.autoProbe(ctx, isiConfig); err != nil { + return nil, err } - id, err := strconv.ParseInt(req.GetSnapshotId(), 10, 64) + + id, err := strconv.ParseInt(snapshotID, 10, 64) if err != nil { - return nil, status.Errorf(codes.Internal, "cannot convert snapshot to integer: '%s'", err.Error()) + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot convert snapshot to integer: '%s'", err.Error())) } - snapshot, err := s.isiSvc.GetSnapshot(req.SnapshotId) + snapshot, err := isiConfig.isiSvc.GetSnapshot(ctx, snapshotID) // Idempotency check if err != nil { jsonError, ok := err.(*isiApi.JSONError) @@ -1203,24 +1454,24 @@ func (s *service) DeleteSnapshot( } // Internal server error if the error is not about "not found" if err != nil { - return nil, status.Errorf(codes.Internal, "cannot check the existence of the snapshot: '%s'", err.Error()) + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot check the existence of the snapshot: '%s'", err.Error())) } } else { if jsonError.StatusCode == 404 { return &csi.DeleteSnapshotResponse{}, nil } - return nil, status.Errorf(codes.Internal, "cannot check the existence of the snapshot: '%s'", err.Error()) + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "cannot check the existence of the snapshot: '%s'", err.Error())) } } // Get snapshot path - snapshotIsiPath, err := s.isiSvc.GetSnapshotIsiPath(s.opts.Path, req.GetSnapshotId()) + snapshotIsiPath, err := isiConfig.isiSvc.GetSnapshotIsiPath(ctx, isiConfig.IsiPath, snapshotID) if err != nil { - return nil, status.Error(codes.Internal, err.Error()) + return nil, status.Error(codes.Internal, utils.GetMessageWithRunID(runID, err.Error())) } log.Debugf("The Isilon directory path of snapshot is= %v", snapshotIsiPath) - export, err := s.isiSvc.GetExportWithPathAndZone(snapshotIsiPath, "") + export, err := isiConfig.isiSvc.GetExportWithPathAndZone(ctx, snapshotIsiPath, "") if err != nil { // internal error return nil, err @@ -1230,45 +1481,51 @@ func (s *service) DeleteSnapshot( // Check if there are any RO volumes created from this snapshot // Note: This is true only for RO volumes from snapshots if export != nil { - if err := s.processSnapshotTrackingDirectoryDuringDeleteSnapshot(export, snapshotIsiPath, &deleteSnapshot); err != nil { + if err := s.processSnapshotTrackingDirectoryDuringDeleteSnapshot(ctx, export, snapshotIsiPath, &deleteSnapshot, isiConfig); err != nil { return nil, err } } if 
deleteSnapshot { - err = s.isiSvc.DeleteSnapshot(id, "") + err = isiConfig.isiSvc.DeleteSnapshot(ctx, id, "") if err != nil { - return nil, status.Errorf(codes.Internal, "error deleteing snapshot: '%s'", err.Error()) + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "error deleting snapshot: '%s'", err.Error())) } } + log.Infof("Snapshot with id '%s' deleted", snapshotID) return &csi.DeleteSnapshotResponse{}, nil } func (s *service) processSnapshotTrackingDirectoryDuringDeleteSnapshot( + ctx context.Context, export isi.Export, snapshotIsiPath string, - deleteSnapshot *bool) error { + deleteSnapshot *bool, + isiConfig *IsilonClusterConfig) error { + + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) // Populate names for snapshot's tracking dir and snapshot delete marker - isiPath, snapshotName, _ := s.isiSvc.GetSnapshotIsiPathComponents(snapshotIsiPath) - snapshotTrackingDir := s.isiSvc.GetSnapshotTrackingDirName(snapshotName) + isiPath, snapshotName, _ := isiConfig.isiSvc.GetSnapshotIsiPathComponents(snapshotIsiPath) + snapshotTrackingDir := isiConfig.isiSvc.GetSnapshotTrackingDirName(snapshotName) snapshotTrackingDirDeleteMarker := path.Join(snapshotTrackingDir, DeleteSnapshotMarker) // Check if the snapshot tracking dir is present (this indicates // there were some RO volumes created from this snapshot) // Get subdirectories count of snapshot tracking dir. // Every directory will have two subdirectory entries . and .. - totalSubDirectories, _ := s.isiSvc.GetSubDirectoryCount(isiPath, snapshotTrackingDir) + totalSubDirectories, _ := isiConfig.isiSvc.GetSubDirectoryCount(ctx, isiPath, snapshotTrackingDir) // There are no more volumes present which were created using this snapshot // Every directory will have two subdirectories . and .. 
if totalSubDirectories == IgnoreDotAndDotDotSubDirs || totalSubDirectories == 0 { - if err := s.isiSvc.UnexportByIDWithZone(export.ID, ""); err != nil { + if err := isiConfig.isiSvc.UnexportByIDWithZone(ctx, export.ID, ""); err != nil { return err } // Delete snapshot tracking directory - if err := s.isiSvc.DeleteVolume(isiPath, snapshotTrackingDir); err != nil { + if err := isiConfig.isiSvc.DeleteVolume(ctx, isiPath, snapshotTrackingDir); err != nil { log.Errorf("error while deleting snapshot tracking directory '%s'", path.Join(isiPath, snapshotTrackingDir)) } } else { @@ -1276,7 +1533,7 @@ func (s *service) processSnapshotTrackingDirectoryDuringDeleteSnapshot( // Set a marker in snapshot tracking dir to delete snapshot, once // all the volumes created from this snapshot were deleted log.Debugf("set DeleteSnapshotMarker marker in snapshot tracking dir") - if err := s.isiSvc.CreateVolume(isiPath, snapshotTrackingDirDeleteMarker); err != nil { + if err := isiConfig.isiSvc.CreateVolume(ctx, isiPath, snapshotTrackingDirDeleteMarker); err != nil { return err } } @@ -1343,3 +1600,20 @@ func checkValidAccessTypes(vcs []*csi.VolumeCapability) bool { } return true } + +func addMetaData(params map[string]string) map[string]string { + // CSI specific metadata header for authorization + var headerMetadata = make(map[string]string) + if _, ok := params[csiPersistentVolumeName]; ok { + headerMetadata[headerPersistentVolumeName] = params[csiPersistentVolumeName] + } + + if _, ok := params[csiPersistentVolumeClaimName]; ok { + headerMetadata[headerPersistentVolumeClaimName] = params[csiPersistentVolumeClaimName] + } + + if _, ok := params[csiPersistentVolumeClaimNamespace]; ok { + headerMetadata[headerPersistentVolumeClaimNamespace] = params[csiPersistentVolumeClaimNamespace] + } + return headerMetadata +} diff --git a/service/features/controller_create_delete_snapshot.feature b/service/features/controller_create_delete_snapshot.feature index 9e30980..3f0ddb3 100644 --- a/service/features/controller_create_delete_snapshot.feature +++ b/service/features/controller_create_delete_snapshot.feature @@ -11,6 +11,18 @@ Feature: Isilon CSI interface And I call CreateSnapshot "volume2=_=_=19=_=_=System" "create_snapshot_name" "/ifs/data/csi-isilon" Then a valid CreateSnapshotResponse is returned + Scenario: Create snapshot with cluster name in volume id good scenario + Given a Isilon service + When I call Probe + And I call CreateSnapshot "volume2=_=_=19=_=_=System=_=_=cluster1" "create_snapshot_name" "/ifs/data/csi-isilon" + Then a valid CreateSnapshotResponse is returned + + Scenario: Create snapshot with cluster name in volume id whose config doesn't exists + Given a Isilon service + When I call Probe + And I call CreateSnapshot "volume2=_=_=19=_=_=System=_=_=cluster2" "create_snapshot_name" "/ifs/data/csi-isilon" + Then the error contains "failed to get cluster config details for clusterName: 'cluster2'" + Scenario: Create snapshot with internal server error Given a Isilon service When I call Probe @@ -30,7 +42,7 @@ Feature: Isilon CSI interface | "volume2=_=_=19=_=_=System" | "existent_comp_snapshot_name" | "/ifs/data/csi-isilon" | "none" | | "volume2=_=_=19=_=_=System" | "existent_comp_snapshot_name" | "/ifs/data/csi-isilon" | "none" | | "volume2=_=_=19=_=_=System" | "existent_comp_snapshot_name_longer_than_max" | "/ifs/data/csi-isilon" | "already exists but is incompatible" | - | "volume2=_=_=19" | "existent_comp_snapshot_name" | "/ifs/data/csi-isilon" | "cannot match the expected" | + | "volume2=_=_=19" | 
"existent_comp_snapshot_name" | "/ifs/data/csi-isilon" | "cannot be split into tokens" | | "volume2=_=_=19=_=_=System" | "create_snapshot_name" | "" | "none" | | "volume2=_=_=19=_=_=System" | "create_snapshot_name" | "none" | "none" | | "volume2=_=_=19=_=_=System" | "" | "/ifs/data/csi-isilon" | "name cannot be empty" | @@ -50,7 +62,7 @@ Feature: Isilon CSI interface | "GetSnapshotError" | "cannot check the existence of the snapshot" | | "DeleteSnapshotError" | "error deleteing snapshot" | -@todo +@todo Scenario Outline: Controller delete snapshot various use cases from examples Given a Isilon service When I call Probe @@ -60,6 +72,7 @@ Feature: Isilon CSI interface Examples: | snapshotId | errormsg | | "34" | "none" | + | "34=_=_=cluster2" | "failed to get cluster config details for clusterName: 'cluster2'" | | "" | "snapshot id to be deleted is required" | | "404" | "none" | | "str" | "cannot convert snapshot to integer" | diff --git a/service/features/controller_create_delete_volume.feature b/service/features/controller_create_delete_volume.feature index cbc6ac8..4ba849e 100644 --- a/service/features/controller_create_delete_volume.feature +++ b/service/features/controller_create_delete_volume.feature @@ -11,6 +11,12 @@ Feature: Isilon CSI interface And I call CreateVolume "volume1" Then a valid CreateVolumeResponse is returned + Scenario: Create volume good scenario with persistent metadata + Given a Isilon service + When I call Probe + And I call CreateVolume with persistent metadata "volume1" + Then a valid CreateVolumeResponse is returned + Scenario: Create volume good scenario with quota enabled Given a Isilon service And I enable quota @@ -33,22 +39,31 @@ Feature: Isilon CSI interface | "GetExportInternalError" | "EOF" | | "none" | "none" | - Scenario Outline: Create volume with parameters + Scenario Outline: Create volume with parameters Given a Isilon service When I call Probe - And I call CreateVolume with params + And I call CreateVolume with params Then the error contains Examples: - | volumeName | rangeInGiB | accessZone | isiPath | AzServiceIP | errormsg | - | "volume1" | 8 | "" | "/ifs/data/csi-isilon" | "127.0.0.1" | "none" | - | "volume1" | 8 | "System" | "" | "127.0.0.1" | "none" | - | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "127.0.0.1" | "none" | - | "volume1" | 8 | "System" | "none" | "127.0.0.1" | "none" | - | "volume1" | 8 | "System" | "/ifs/data/csi-isilon" | "none" | "none" | - | "volume1" | -1 | "System" | "/ifs/data/csi-isilon" | "none" | "must not be negative" | - | "" | 0 | "System" | "/ifs/data/csi-isilon" | "none" | "name cannot be empty" | - | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "" | "none" | + | volumeName | rangeInGiB | accessZone | isiPath | AzServiceIP | clusterName | errormsg | + | "volume1" | 8 | "" | "/ifs/data/csi-isilon" | "127.0.0.1" | "none" | "none" | + | "volume1" | 8 | "System" | "" | "127.0.0.1" | "none" | "none" | + | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "127.0.0.1" | "none" | "none" | + | "volume1" | 8 | "System" | "none" | "127.0.0.1" | "none" | "none" | + | "volume1" | 8 | "System" | "/ifs/data/csi-isilon" | "none" | "none" | "none" | + | "volume1" | -1 | "System" | "/ifs/data/csi-isilon" | "none" | "none" | "must not be negative" | + | "" | 0 | "System" | "/ifs/data/csi-isilon" | "none" | "none" | "name cannot be empty" | + | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "none" | "none" | "none" | + | "volume1" | 8 | "" | "/ifs/data/csi-isilon" | "127.0.0.1" | "cluster1" | "none" | + | "volume1" | 
8 | "System" | "" | "127.0.0.1" | "cluster1" | "none" | + | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "127.0.0.1" | "cluster1" | "none" | + | "volume1" | 8 | "System" | "none" | "127.0.0.1" | "cluster1" | "none" | + | "volume1" | 8 | "System" | "/ifs/data/csi-isilon" | "none" | "cluster1" | "none" | + | "volume1" | -1 | "System" | "/ifs/data/csi-isilon" | "none" | "cluster1" | "must not be negative" | + | "" | 0 | "System" | "/ifs/data/csi-isilon" | "none" | "cluster1" | "name cannot be empty" | + | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "none" | "cluster1" | "none" | + | "volume1" | 8 | "none" | "/ifs/data/csi-isilon" | "127.0.0.1" | "cluster2" | "failed to get cluster config details for clusterName: 'cluster2'" | Scenario Outline: Create volume with different volume and export status and induce server errors Given a Isilon service @@ -103,10 +118,12 @@ Feature: Isilon CSI interface Then the error contains Examples: - | volumeID | errormsg | - | "volume1=_=_=43=_=_=System" | "none" | - | "volume1=_=_=43" | "failed to parse volume ID" | - | "" | "no volume id is provided by the DeleteVolumeRequest instance" | + | volumeID | errormsg | + | "volume1=_=_=43=_=_=System" | "none" | + | "volume1=_=_=43=_=_=System=_=_=cluster1" | "none" | + | "volume1=_=_=43" | "failed to parse volume ID" | + | "" | "no volume id is provided by the DeleteVolumeRequest instance" | + | "volume1=_=_=43=_=_=System=_=_=cluster2" | "failed to get cluster config details for clusterName: 'cluster2'" | Scenario Outline: Delete volume with induced errors Given a Isilon service diff --git a/service/features/controller_expand_volume.feature b/service/features/controller_expand_volume.feature index 08a2bb6..4ab0d7f 100644 --- a/service/features/controller_expand_volume.feature +++ b/service/features/controller_expand_volume.feature @@ -11,6 +11,18 @@ Feature: Isilon CSI interface When I call ControllerExpandVolume "volume1=_=_=557=_=_=System" "108589934592" Then a valid ControllerExpandVolumeResponse is returned + Scenario: Controller Expand volume good scenario with Quota enabled and non-existing volume + Given a Isilon service + And I enable quota + When I call ControllerExpandVolume "volume1=_=_=557=_=_=System=_=_=cluster1" "108589934592" + Then a valid ControllerExpandVolumeResponse is returned + + Scenario: Controller Expand volume negative scenario with Quota enabled + Given a Isilon service + And I enable quota + When I call ControllerExpandVolume "volume1=_=_=557=_=_=System=_=_=cluster2" "108589934592" + Then the error contains "failed to get cluster config details for clusterName: 'cluster2'" + Scenario: Controller Expand volume good scenario with Quota disabled Given a Isilon service When I call ControllerExpandVolume "volume1=_=_=557=_=_=System" "108589934592" diff --git a/service/features/service.feature b/service/features/service.feature index b702f49..d16ee66 100644 --- a/service/features/service.feature +++ b/service/features/service.feature @@ -59,6 +59,17 @@ Feature: Isilon CSI interface When I call GetCapacity Then a valid GetCapacityResponse is returned + Scenario: Call GetCapacity with cluster name + Given a Isilon service + When I call GetCapacity with params "cluster1" + Then a valid GetCapacityResponse is returned + + Scenario: Call GetCapacity with non-existing cluster config + Given a Isilon service + When I call Probe + And I call GetCapacity with params "cluster2" + Then the error contains "failed to get cluster config details for clusterName: 'cluster2'" + Scenario: Call 
GetCapacity with invalid capabilities Given a Isilon service When I call Probe @@ -74,8 +85,8 @@ Feature: Isilon CSI interface Examples: | induced | errormsg | - | "StatsError" | "Could not retrieve capacity. Data returned error 'Error'" | - | "InstancesError" | "Could not retrieve capacity. Error 'Error retrieving Statistics'" | + | "StatsError" | "runid=Could not retrieve capacity. Data returned error" | + | "InstancesError" | "runid=1 Could not retrieve capacity. Error 'Error retrieving Statistics'" | | "none" | "none" | Scenario: Call NodeGetInfo @@ -110,6 +121,8 @@ Feature: Isilon CSI interface Examples: | volumeID | accessType | errormsg | | "volume2=_=_=43=_=_=System" | "single-writer" | "none" | + | "volume2=_=_=43=_=_=System=_=_=cluster1" | "single-writer" | "none" | + | "volume2=_=_=43=_=_=System=_=_=cluster2" | "single-writer" | "failed to get cluster config details for clusterName: 'cluster2'" | | "volume2=_=_=43" | "single-writer" | "failed to parse volume ID" | | "volume2=_=_=0=_=_=System" | "single-writer" | "invalid export ID" | | "volume2=_=_=43=_=_=System" | "multiple-reader" | "none" | @@ -134,6 +147,7 @@ Feature: Isilon CSI interface | "" | "ControllerUnpublishVolumeRequest.VolumeId is empty" | | "volume2=_=_=43" | "failed to parse volume ID" | + @todo Scenario Outline: Calls to ListVolumes Given a Isilon service When I call ListVolumes with max entries starting token @@ -194,13 +208,13 @@ Feature: Isilon CSI interface Examples: | user | mode | serviceErr | connectionErr | errormsg | | "blah" | "controller" | "none" | "none" | "none" | - | "blah" | "controller" | "none" | "ControllerHasNoConnectionError" | "controller probe failed" | - | "" | "controller" | "none" | "none" | "controller probe failed" | + | "blah" | "controller" | "none" | "ControllerHasNoConnectionError" | "probe of all isilon clusters failed" | + | "" | "controller" | "none" | "none" | "probe of all isilon clusters failed" | | "blah" | "node" | "none" | "none" | "none" | - | "blah" | "node" | "none" | "NodeHasNoConnectionError" | "node probe failed" | - | "blah" | "unknown" | "none" | "none" | "Service mode not set" | - | "blah" | "controller" | "noIsiService" | "none" | "s.isiSvc (type isiService) is nil" | - | "blah" | "node" | "noIsiService" | "none" | "s.isiSvc (type isiService) is nil" | + | "blah" | "node" | "none" | "NodeHasNoConnectionError" | "probe of all isilon clusters failed" | + | "blah" | "unknown" | "none" | "none" | "probe of all isilon clusters failed" | + | "blah" | "controller" | "noIsiService" | "none" | "probe of all isilon clusters failed" | + | "blah" | "node" | "noIsiService" | "none" | "probe of all isilon clusters failed" | Scenario Outline: Calling logStatistics different times Given a Isilon service @@ -211,6 +225,7 @@ Feature: Isilon CSI interface | times | errormsg | | 100 | "none" | + @todo Scenario: Calling BeforeServe Given a Isilon service When I call BeforeServe @@ -232,7 +247,7 @@ Feature: Isilon CSI interface Given I induce error "noIsiService" And a Isilon service with params "blah" "controller" When I call autoProbe - Then the error contains "s.isiSvc (type isiService) is nil, probe failed" + Then the error contains "clusterConfig.isiSvc (type isiService) is nil, probe failed" Scenario: Calling functions with autoProbe failed Given a Isilon service diff --git a/service/identity.go b/service/identity.go index 2010d87..b8adb5e 100644 --- a/service/identity.go +++ b/service/identity.go @@ -87,7 +87,7 @@ func (s *service) Probe( rep := new(csi.ProbeResponse) 
rep.Ready = ready - if err := s.probe(ctx); err != nil { + if err := s.probeAllClusters(ctx); err != nil { rep.Ready.Value = false return rep, err } diff --git a/service/isiService.go b/service/isiService.go index f6dd43d..440616f 100644 --- a/service/isiService.go +++ b/service/isiService.go @@ -18,14 +18,14 @@ package service import ( "context" "fmt" - "github.com/dell/csi-isilon/common/constants" "path" "strings" + "github.com/dell/csi-isilon/common/constants" + utils "github.com/dell/csi-isilon/common/utils" isi "github.com/dell/goisilon" "github.com/dell/goisilon/api" - log "github.com/sirupsen/logrus" ) type isiService struct { @@ -33,12 +33,15 @@ type isiService struct { client *isi.Client } -func (svc *isiService) CopySnapshot(isiPath string, srcSnapshotID int64, dstVolumeName string) (isi.Volume, error) { +func (svc *isiService) CopySnapshot(ctx context.Context, isiPath string, srcSnapshotID int64, dstVolumeName string) (isi.Volume, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to copy snapshot '%d'", srcSnapshotID) var volumeNew isi.Volume var err error - if volumeNew, err = svc.client.CopySnapshotWithIsiPath(context.Background(), isiPath, srcSnapshotID, "", dstVolumeName); err != nil { + if volumeNew, err = svc.client.CopySnapshotWithIsiPath(ctx, isiPath, srcSnapshotID, "", dstVolumeName); err != nil { log.Errorf("copy snapshot failed, '%s'", err.Error()) return nil, err } @@ -46,12 +49,15 @@ func (svc *isiService) CopySnapshot(isiPath string, srcSnapshotID int64, dstVolu return volumeNew, nil } -func (svc *isiService) CopyVolume(isiPath, srcVolumeName, dstVolumeName string) (isi.Volume, error) { +func (svc *isiService) CopyVolume(ctx context.Context, isiPath, srcVolumeName, dstVolumeName string) (isi.Volume, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to copy volume '%s'", srcVolumeName) var volumeNew isi.Volume var err error - if volumeNew, err = svc.client.CopyVolumeWithIsiPath(context.Background(), isiPath, srcVolumeName, dstVolumeName); err != nil { + if volumeNew, err = svc.client.CopyVolumeWithIsiPath(ctx, isiPath, srcVolumeName, dstVolumeName); err != nil { log.Errorf("copy volume failed, '%s'", err.Error()) return nil, err } @@ -59,12 +65,15 @@ func (svc *isiService) CopyVolume(isiPath, srcVolumeName, dstVolumeName string) return volumeNew, nil } -func (svc *isiService) CreateSnapshot(path, snapshotName string) (isi.Snapshot, error) { +func (svc *isiService) CreateSnapshot(ctx context.Context, path, snapshotName string) (isi.Snapshot, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to create snapshot '%s'", snapshotName) var snapshot isi.Snapshot var err error - if snapshot, err = svc.client.CreateSnapshotWithPath(context.Background(), path, snapshotName); err != nil { + if snapshot, err = svc.client.CreateSnapshotWithPath(ctx, path, snapshotName); err != nil { log.Errorf("create snapshot failed, '%s'", err.Error()) return nil, err } @@ -72,10 +81,13 @@ func (svc *isiService) CreateSnapshot(path, snapshotName string) (isi.Snapshot, return snapshot, nil } -func (svc *isiService) CreateVolume(isiPath, volName string) error { +func (svc *isiService) CreateVolume(ctx context.Context, isiPath, volName string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to create volume '%s'", volName) - if _, err := svc.client.CreateVolumeWithIsipath(context.Background(), isiPath, volName); err != nil { + if _, err := 
svc.client.CreateVolumeWithIsipath(ctx, isiPath, volName); err != nil { log.Errorf("create volume failed, '%s'", err.Error()) return err } @@ -83,12 +95,30 @@ func (svc *isiService) CreateVolume(isiPath, volName string) error { return nil } -func (svc *isiService) GetExports() (isi.ExportList, error) { +func (svc *isiService) CreateVolumeWithMetaData(ctx context.Context, isiPath, volName string, metadata map[string]string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + + log.Debugf("begin to create volume '%s'", volName) + log.Debugf("header metadata '%v'", metadata) + + if _, err := svc.client.CreateVolumeWithIsipathMetaData(ctx, isiPath, volName, metadata); err != nil { + log.Errorf("create volume failed, '%s'", err.Error()) + return err + } + + return nil +} + +func (svc *isiService) GetExports(ctx context.Context) (isi.ExportList, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debug("begin getting exports for Isilon") var exports isi.ExportList var err error - if exports, err = svc.client.GetExports(context.Background()); err != nil { + if exports, err = svc.client.GetExports(ctx); err != nil { log.Error("failed to get exports") return nil, err } @@ -96,12 +126,15 @@ func (svc *isiService) GetExports() (isi.ExportList, error) { return exports, nil } -func (svc *isiService) GetExportByIDWithZone(exportID int, accessZone string) (isi.Export, error) { +func (svc *isiService) GetExportByIDWithZone(ctx context.Context, exportID int, accessZone string) (isi.Export, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin getting export by id '%d' with access zone '%s' for Isilon", exportID, accessZone) var export isi.Export var err error - if export, err = svc.client.GetExportByIDWithZone(context.Background(), exportID, accessZone); err != nil { + if export, err = svc.client.GetExportByIDWithZone(ctx, exportID, accessZone); err != nil { log.Error("failed to get export by id with access zone") return nil, err } @@ -109,14 +142,17 @@ func (svc *isiService) GetExportByIDWithZone(exportID int, accessZone string) (i return export, nil } -func (svc *isiService) ExportVolumeWithZone(isiPath, volName, accessZone, description string) (int, error) { +func (svc *isiService) ExportVolumeWithZone(ctx context.Context, isiPath, volName, accessZone, description string) (int, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to export volume '%s' with access zone '%s' in Isilon path '%s'", volName, accessZone, isiPath) var exportID int var err error path := utils.GetPathForVolume(isiPath, volName) - if exportID, err = svc.client.ExportVolumeWithZoneAndPath(context.Background(), path, accessZone, description); err != nil { + if exportID, err = svc.client.ExportVolumeWithZoneAndPath(ctx, path, accessZone, description); err != nil { log.Errorf("Export volume failed, volume '%s', access zone '%s' , id %d error '%s'", volName, accessZone, exportID, err.Error()) return -1, err } @@ -125,7 +161,10 @@ func (svc *isiService) ExportVolumeWithZone(isiPath, volName, accessZone, descri return exportID, nil } -func (svc *isiService) CreateQuota(path, volName string, sizeInBytes int64, quotaEnabled bool) (string, error) { +func (svc *isiService) CreateQuota(ctx context.Context, path, volName string, sizeInBytes int64, quotaEnabled bool) (string, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to create quota for '%s', size '%d', quota enabled: '%t'", volName, sizeInBytes, 
quotaEnabled) // if quotas are enabled, we need to set a quota on the volume @@ -140,7 +179,7 @@ func (svc *isiService) CreateQuota(path, volName string, sizeInBytes int64, quot var isQuotaActivated bool var checkLicErr error - isQuotaActivated, checkLicErr = svc.client.IsQuotaLicenseActivated(context.Background()) + isQuotaActivated, checkLicErr = svc.client.IsQuotaLicenseActivated(ctx) if checkLicErr != nil { log.Errorf("failed to check SmartQuotas license info: '%v'", checkLicErr) @@ -154,7 +193,7 @@ func (svc *isiService) CreateQuota(path, volName string, sizeInBytes int64, quot // create quota with container set to true var quotaID string var err error - if quotaID, err = svc.client.CreateQuotaWithPath(context.Background(), path, true, sizeInBytes); err != nil { + if quotaID, err = svc.client.CreateQuotaWithPath(ctx, path, true, sizeInBytes); err != nil { if (isQuotaActivated) && (checkLicErr == nil) { return "", fmt.Errorf("SmartQuotas is activated, but creating quota failed with error: '%v'", err) } @@ -173,20 +212,23 @@ func (svc *isiService) CreateQuota(path, volName string, sizeInBytes int64, quot return "", nil } -func (svc *isiService) DeleteQuotaByExportIDWithZone(volName string, exportID int, accessZone string) error { +func (svc *isiService) DeleteQuotaByExportIDWithZone(ctx context.Context, volName string, exportID int, accessZone string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to delete quota for volume name : '%s', export ID : '%d'", volName, exportID) var export isi.Export var err error var quotaID string - if export, err = svc.client.GetExportByIDWithZone(context.Background(), exportID, accessZone); err != nil { + if export, err = svc.client.GetExportByIDWithZone(ctx, exportID, accessZone); err != nil { return fmt.Errorf("failed to get export '%s':'%d' with access zone '%s', skip DeleteQuotaByID. 
error : '%s'", volName, exportID, accessZone, err.Error()) } log.Debugf("export (id : '%d') corresponding to path '%s' found, description field is '%s'", export.ID, volName, export.Description) - if quotaID, err = utils.GetQuotaIDFromDescription(export); err != nil { + if quotaID, err = utils.GetQuotaIDFromDescription(ctx, export); err != nil { return err } @@ -197,27 +239,30 @@ func (svc *isiService) DeleteQuotaByExportIDWithZone(volName string, exportID in log.Debugf("deleting quota with id '%s' for path '%s'", quotaID, volName) - if err = svc.client.ClearQuotaByID(context.Background(), quotaID); err != nil { + if err = svc.client.ClearQuotaByID(ctx, quotaID); err != nil { return err } return nil } -func (svc *isiService) GetVolumeQuota(volName string, exportID int, accessZone string) (isi.Quota, error) { +func (svc *isiService) GetVolumeQuota(ctx context.Context, volName string, exportID int, accessZone string) (isi.Quota, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to get quota for volume name : '%s', export ID : '%d'", volName, exportID) var export isi.Export var err error var quotaID string - if export, err = svc.client.GetExportByIDWithZone(context.Background(), exportID, accessZone); err != nil { + if export, err = svc.client.GetExportByIDWithZone(ctx, exportID, accessZone); err != nil { return nil, fmt.Errorf("failed to get export '%s':'%d' with access zone '%s', error: '%s'", volName, exportID, accessZone, err.Error()) } log.Debugf("export (id : '%d') corresponding to path '%s' found, description field is '%s'", export.ID, volName, export.Description) - if quotaID, err = utils.GetQuotaIDFromDescription(export); err != nil { + if quotaID, err = utils.GetQuotaIDFromDescription(ctx, export); err != nil { return nil, err } @@ -227,63 +272,77 @@ func (svc *isiService) GetVolumeQuota(volName string, exportID int, accessZone s } log.Debugf("get quota by id '%s'", quotaID) - return svc.client.GetQuotaByID(context.Background(), quotaID) + return svc.client.GetQuotaByID(ctx, quotaID) } -func (svc *isiService) UpdateQuotaSize(quotaID string, updatedSize int64) error { +func (svc *isiService) UpdateQuotaSize(ctx context.Context, quotaID string, updatedSize int64) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) log.Debugf("updating quota by id '%s' with size '%d'", quotaID, updatedSize) - if err := svc.client.UpdateQuotaSizeByID(context.Background(), quotaID, updatedSize); err != nil { + if err := svc.client.UpdateQuotaSizeByID(ctx, quotaID, updatedSize); err != nil { return fmt.Errorf("failed to update quota '%s' with size '%d', error: '%s'", quotaID, updatedSize, err.Error()) } return nil } -func (svc *isiService) UnexportByIDWithZone(exportID int, accessZone string) error { +func (svc *isiService) UnexportByIDWithZone(ctx context.Context, exportID int, accessZone string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to unexport NFS export with ID '%d' in access zone '%s'", exportID, accessZone) - if err := svc.client.UnexportByIDWithZone(context.Background(), exportID, accessZone); err != nil { + if err := svc.client.UnexportByIDWithZone(ctx, exportID, accessZone); err != nil { return fmt.Errorf("failed to unexport volume directory '%d' in access zone '%s' : '%s'", exportID, accessZone, err.Error()) } return nil } -func (svc *isiService) GetExportsWithParams(params api.OrderedValues) (isi.Exports, error) { +func (svc *isiService) GetExportsWithParams(ctx context.Context, params 
api.OrderedValues) (isi.Exports, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to get exports with params..") var exports isi.Exports var err error - if exports, err = svc.client.GetExportsWithParams(context.Background(), params); err != nil { + if exports, err = svc.client.GetExportsWithParams(ctx, params); err != nil { return nil, fmt.Errorf("failed to get exports with params") } return exports, nil } -func (svc *isiService) DeleteVolume(isiPath, volName string) error { +func (svc *isiService) DeleteVolume(ctx context.Context, isiPath, volName string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to delete volume directory '%s'", volName) - if err := svc.client.DeleteVolumeWithIsiPath(context.Background(), isiPath, volName); err != nil { + if err := svc.client.DeleteVolumeWithIsiPath(ctx, isiPath, volName); err != nil { return fmt.Errorf("failed to delete volume directory '%v' : '%v'", volName, err) } return nil } -func (svc *isiService) ClearQuotaByID(quotaID string) error { +func (svc *isiService) ClearQuotaByID(ctx context.Context, quotaID string) error { if quotaID != "" { - if err := svc.client.ClearQuotaByID(context.Background(), quotaID); err != nil { + if err := svc.client.ClearQuotaByID(ctx, quotaID); err != nil { return fmt.Errorf("failed to clear quota for '%s' : '%v'", quotaID, err) } } return nil } -func (svc *isiService) TestConnection() error { +func (svc *isiService) TestConnection(ctx context.Context) error { + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + log.Debugf("test connection client, user name : '%s'", svc.client.API.User()) - if _, err := svc.client.GetClusterConfig(context.Background()); err != nil { + if _, err := svc.client.GetClusterConfig(ctx); err != nil { log.Errorf("error encountered, test connection failed : '%v'", err) return err } @@ -297,12 +356,15 @@ func (svc *isiService) GetNFSExportURLForPath(ip string, dirPath string) string return fmt.Sprintf("%s:%s", ip, dirPath) } -func (svc *isiService) GetVolume(isiPath, volID, volName string) (isi.Volume, error) { +func (svc *isiService) GetVolume(ctx context.Context, isiPath, volID, volName string) (isi.Volume, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin getting volume with id '%s' and name '%s' for Isilon", volID, volName) var vol isi.Volume var err error - if vol, err = svc.client.GetVolumeWithIsiPath(context.Background(), isiPath, volID, volName); err != nil { + if vol, err = svc.client.GetVolumeWithIsiPath(ctx, isiPath, volID, volName); err != nil { log.Errorf("failed to get volume '%s'", err) return nil, err } @@ -310,10 +372,13 @@ func (svc *isiService) GetVolume(isiPath, volID, volName string) (isi.Volume, er return vol, nil } -func (svc *isiService) GetVolumeSize(isiPath, name string) int64 { +func (svc *isiService) GetVolumeSize(ctx context.Context, isiPath, name string) int64 { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin getting volume size with name '%s' for Isilon", name) - size, err := svc.client.GetVolumeSize(context.Background(), isiPath, name) + size, err := svc.client.GetVolumeSize(ctx, isiPath, name) if err != nil { log.Errorf("failed to get volume size '%s'", err.Error()) return 0 @@ -322,10 +387,13 @@ func (svc *isiService) GetVolumeSize(isiPath, name string) int64 { return size } -func (svc *isiService) GetStatistics(keys []string) (isi.Stats, error) { +func (svc *isiService) GetStatistics(ctx context.Context, keys 
[]string) (isi.Stats, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + var stat isi.Stats var err error - if stat, err = svc.client.GetStatistics(context.Background(), keys); err != nil { + if stat, err = svc.client.GetStatistics(ctx, keys); err != nil { log.Errorf("failed to get array statistics '%s'", err) return nil, err } @@ -333,25 +401,31 @@ func (svc *isiService) GetStatistics(keys []string) (isi.Stats, error) { return stat, nil } -func (svc *isiService) IsVolumeExistent(isiPath, volID, name string) bool { +func (svc *isiService) IsVolumeExistent(ctx context.Context, isiPath, volID, name string) bool { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("check if volume (id :'%s', name '%s') already exists", volID, name) - isExistent := svc.client.IsVolumeExistentWithIsiPath(context.Background(), isiPath, volID, name) + isExistent := svc.client.IsVolumeExistentWithIsiPath(ctx, isiPath, volID, name) log.Debugf("volume (id :'%s', name '%s') already exists : '%v'", volID, name, isExistent) return isExistent } -func (svc *isiService) OtherClientsAlreadyAdded(exportID int, accessZone string, nodeID string) bool { - export, _ := svc.GetExportByIDWithZone(exportID, accessZone) +func (svc *isiService) OtherClientsAlreadyAdded(ctx context.Context, exportID int, accessZone string, nodeID string) bool { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + + export, _ := svc.GetExportByIDWithZone(ctx, exportID, accessZone) if export == nil { log.Debugf("failed to get export by id '%d' with access zone '%s', return true for otherClientsAlreadyAdded as a safer return value", exportID, accessZone) return true } - clientName, clientFQDN, clientIP, err := utils.ParseNodeID(nodeID) + clientName, clientFQDN, clientIP, err := utils.ParseNodeID(ctx, nodeID) if err != nil { log.Debugf("failed to parse node ID '%s', return true for otherClientsAlreadyAdded as a safer return value", nodeID) return true @@ -359,6 +433,8 @@ func (svc *isiService) OtherClientsAlreadyAdded(exportID int, accessZone string, clientFieldsNotEmpty := len(*export.Clients) > 0 || len(*export.ReadOnlyClients) > 0 || len(*export.ReadWriteClients) > 0 || len(*export.RootClients) > 0 + clientFieldLength := len(*export.Clients) + isNodeInClientFields := utils.IsStringInSlices(clientName, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) isNodeFQDNInClientFields := utils.IsStringInSlices(clientFQDN, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) @@ -367,23 +443,43 @@ func (svc *isiService) OtherClientsAlreadyAdded(exportID int, accessZone string, isNodeInClientFields = isNodeInClientFields || utils.IsStringInSlices(clientIP, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) } + clientName, clientFQDN, clientIP, err = utils.ParseNodeID(ctx, utils.DummyHostNodeID) + if err != nil { + log.Debugf("failed to parse node ID '%s', return true for otherClientsAlreadyAdded as a safer return value", nodeID) + return true + } + + // Additional check for dummy localhost entry + isLocalHostInClientFields := utils.IsStringInSlices(clientName, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + if !isLocalHostInClientFields { + isLocalHostInClientFields = utils.IsStringInSlices(clientFQDN, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + if !isLocalHostInClientFields { + isLocalHostInClientFields = 
utils.IsStringInSlices(clientIP, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + } + } + + if clientFieldLength == 1 && isLocalHostInClientFields { + clientFieldsNotEmpty = false + } return clientFieldsNotEmpty && !isNodeInClientFields && !isNodeFQDNInClientFields } -func (svc *isiService) AddExportClientNetworkIdentifierByIDWithZone(exportID int, accessZone, nodeID string, addClientFunc func(exportID int, accessZone, clientIP string) error) error { +func (svc *isiService) AddExportClientNetworkIdentifierByIDWithZone(ctx context.Context, exportID int, accessZone, nodeID string, addClientFunc func(ctx context.Context, exportID int, accessZone, clientIP string) error) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) // try adding by client FQDN first as it is preferred over IP for its stableness. // OneFS API will return error if it cannot resolve the client FQDN , // in that case, fall back to adding by IP - _, clientFQDN, clientIP, err := utils.ParseNodeID(nodeID) + _, clientFQDN, clientIP, err := utils.ParseNodeID(ctx, nodeID) if err != nil { return err } log.Debugf("AddExportClientNetworkIdentifierByID client FQDN '%s' client IP '%s'", clientFQDN, clientIP) - if err = addClientFunc(exportID, accessZone, clientFQDN); err == nil { + if err = addClientFunc(ctx, exportID, accessZone, clientFQDN); err == nil { //adding by client FQDN is successful, no need to trying adding by IP return nil @@ -391,40 +487,52 @@ func (svc *isiService) AddExportClientNetworkIdentifierByIDWithZone(exportID int log.Errorf("failed to add client FQDN '%s' to export id '%d' : '%v'", clientFQDN, exportID, err) - if err := addClientFunc(exportID, accessZone, clientIP); err != nil { + if err := addClientFunc(ctx, exportID, accessZone, clientIP); err != nil { return fmt.Errorf("failed to add client ip '%s' to export id '%d' : '%v'", clientIP, exportID, err) } return nil } -func (svc *isiService) AddExportClientByIDWithZone(exportID int, accessZone, clientIP string) error { +func (svc *isiService) AddExportClientByIDWithZone(ctx context.Context, exportID int, accessZone, clientIP string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("AddExportClientByID client '%s'", clientIP) - if err := svc.client.AddExportClientsByIDWithZone(context.Background(), exportID, accessZone, []string{clientIP}); err != nil { + if err := svc.client.AddExportClientsByIDWithZone(ctx, exportID, accessZone, []string{clientIP}); err != nil { return fmt.Errorf("failed to add client to export id '%d' with access zone '%s' : '%s'", exportID, accessZone, err.Error()) } return nil } -func (svc *isiService) AddExportRootClientByIDWithZone(exportID int, accessZone, clientIP string) error { +func (svc *isiService) AddExportRootClientByIDWithZone(ctx context.Context, exportID int, accessZone, clientIP string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("AddExportRootClientByID client '%s'", clientIP) - if err := svc.client.AddExportRootClientsByIDWithZone(context.Background(), exportID, accessZone, []string{clientIP}); err != nil { + if err := svc.client.AddExportRootClientsByIDWithZone(ctx, exportID, accessZone, []string{clientIP}); err != nil { return fmt.Errorf("failed to add client to export id '%d' with access zone '%s' : '%s'", exportID, accessZone, err.Error()) } return nil } -func (svc *isiService) AddExportReadOnlyClientByIDWithZone(exportID int, accessZone, clientIP string) error { +func (svc *isiService) 
AddExportReadOnlyClientByIDWithZone(ctx context.Context, exportID int, accessZone, clientIP string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("AddExportReadOnlyClientByID client '%s'", clientIP) - if err := svc.client.AddExportReadOnlyClientsByIDWithZone(context.Background(), exportID, accessZone, []string{clientIP}); err != nil { + if err := svc.client.AddExportReadOnlyClientsByIDWithZone(ctx, exportID, accessZone, []string{clientIP}); err != nil { return fmt.Errorf("failed to add read only client to export id '%d' with access zone '%s' : '%s'", exportID, accessZone, err.Error()) } return nil } -func (svc *isiService) RemoveExportClientByIDWithZone(exportID int, accessZone, nodeID string) error { +func (svc *isiService) RemoveExportClientByIDWithZone(ctx context.Context, exportID int, accessZone, nodeID string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + // it could either be IP or FQDN that has been added to the export's client fields, should consider both during the removal - clientName, clientFQDN, clientIP, err := utils.ParseNodeID(nodeID) + clientName, clientFQDN, clientIP, err := utils.ParseNodeID(ctx, nodeID) if err != nil { return err } @@ -435,7 +543,7 @@ func (svc *isiService) RemoveExportClientByIDWithZone(exportID int, accessZone, log.Debugf("RemoveExportClientByName client '%v'", clientsToRemove) - if err := svc.client.RemoveExportClientsByIDWithZone(context.Background(), exportID, accessZone, clientsToRemove); err != nil { + if err := svc.client.RemoveExportClientsByIDWithZone(ctx, exportID, accessZone, clientsToRemove); err != nil { //Return success if export doesn't exist if notFoundErr, ok := err.(*api.JSONError); ok { if notFoundErr.StatusCode == 404 { @@ -449,42 +557,54 @@ func (svc *isiService) RemoveExportClientByIDWithZone(exportID int, accessZone, return nil } -func (svc *isiService) GetExportsWithLimit(limit string) (isi.ExportList, string, error) { +func (svc *isiService) GetExportsWithLimit(ctx context.Context, limit string) (isi.ExportList, string, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debug("begin getting exports for Isilon") var exports isi.Exports var err error - if exports, err = svc.client.GetExportsWithLimit(context.Background(), limit); err != nil { + if exports, err = svc.client.GetExportsWithLimit(ctx, limit); err != nil { log.Error("failed to get exports") return nil, "", err } return exports.Exports, exports.Resume, nil } -func (svc *isiService) GetExportsWithResume(resume string) (isi.ExportList, string, error) { +func (svc *isiService) GetExportsWithResume(ctx context.Context, resume string) (isi.ExportList, string, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debug("begin getting exports for Isilon") var exports isi.Exports var err error - if exports, err = svc.client.GetExportsWithResume(context.Background(), resume); err != nil { + if exports, err = svc.client.GetExportsWithResume(ctx, resume); err != nil { log.Error("failed to get exports: " + err.Error()) return nil, "", err } return exports.Exports, exports.Resume, nil } -func (svc *isiService) DeleteSnapshot(id int64, name string) error { +func (svc *isiService) DeleteSnapshot(ctx context.Context, id int64, name string) error { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin to delete snapshot '%s'", name) - if err := svc.client.RemoveSnapshot(context.Background(), id, name); err != nil { + if err := svc.client.RemoveSnapshot(ctx, id, name); 
err != nil { log.Errorf("delete snapshot failed, '%s'", err.Error()) return err } return nil } -func (svc *isiService) GetSnapshot(idendity string) (isi.Snapshot, error) { - log.Debugf("begin getting snapshot with id|name '%s' for Isilon", idendity) +func (svc *isiService) GetSnapshot(ctx context.Context, identity string) (isi.Snapshot, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + + log.Debugf("begin getting snapshot with id|name '%s' for Isilon", identity) var snapshot isi.Snapshot var err error - if snapshot, err = svc.client.GetIsiSnapshotByIdentity(context.Background(), idendity); err != nil { + if snapshot, err = svc.client.GetIsiSnapshotByIdentity(ctx, identity); err != nil { log.Errorf("failed to get snapshot '%s'", err.Error()) return nil, err } @@ -492,9 +612,12 @@ func (svc *isiService) GetSnapshot(idendity string) (isi.Snapshot, error) { return snapshot, nil } -func (svc *isiService) GetSnapshotSize(isiPath, name string) int64 { +func (svc *isiService) GetSnapshotSize(ctx context.Context, isiPath, name string) int64 { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin getting snapshot size with name '%s' for Isilon", name) - size, err := svc.client.GetSnapshotFolderSize(context.Background(), isiPath, name) + size, err := svc.client.GetSnapshotFolderSize(ctx, isiPath, name) if err != nil { log.Errorf("failed to get snapshot size '%s'", err.Error()) return 0 @@ -503,11 +626,14 @@ func (svc *isiService) GetSnapshotSize(isiPath, name string) int64 { return size } -func (svc *isiService) GetExportWithPathAndZone(path, accessZone string) (isi.Export, error) { +func (svc *isiService) GetExportWithPathAndZone(ctx context.Context, path, accessZone string) (isi.Export, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + log.Debugf("begin getting export with target path '%s' and access zone '%s' for Isilon", path, accessZone) var export isi.Export var err error - if export, err = svc.client.GetExportWithPathAndZone(context.Background(), path, accessZone); err != nil { + if export, err = svc.client.GetExportWithPathAndZone(ctx, path, accessZone); err != nil { log.Error("failed to get export with target path '" + path + "' and access zone '" + accessZone + "': '" + err.Error() + "'") return nil, err } @@ -515,8 +641,8 @@ func (svc *isiService) GetExportWithPathAndZone(path, accessZone string) (isi.Ex return export, nil } -func (svc *isiService) GetSnapshotIsiPath(isiPath string, sourceSnapshotID string) (string, error) { - return svc.client.GetSnapshotIsiPath(context.Background(), isiPath, sourceSnapshotID) +func (svc *isiService) GetSnapshotIsiPath(ctx context.Context, isiPath string, sourceSnapshotID string) (string, error) { + return svc.client.GetSnapshotIsiPath(ctx, isiPath, sourceSnapshotID) } func (svc *isiService) isROVolumeFromSnapshot(isiPath string) bool { @@ -526,7 +652,10 @@ func (svc *isiService) isROVolumeFromSnapshot(isiPath string) bool { return false } -func (svc *isiService) GetSnapshotNameFromIsiPath(snapshotIsiPath string) (string, error) { +func (svc *isiService) GetSnapshotNameFromIsiPath(ctx context.Context, snapshotIsiPath string) (string, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + if !svc.isROVolumeFromSnapshot(snapshotIsiPath) { log.Debugf("invalid snapshot isilon path- '%s'", snapshotIsiPath) return "", fmt.Errorf("invalid snapshot isilon path") @@ -557,11 +686,14 @@ func (svc *isiService) GetSnapshotTrackingDirName(snapshotName string) string { return "." 
+ "csi-" + snapshotName + "-tracking-dir" } -func (svc *isiService) GetSubDirectoryCount(isiPath, directory string) (int64, error) { +func (svc *isiService) GetSubDirectoryCount(ctx context.Context, isiPath, directory string) (int64, error) { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + var totalSubDirectories int64 - if svc.IsVolumeExistent(isiPath, "", directory) { + if svc.IsVolumeExistent(ctx, isiPath, "", directory) { // Check if there are any entries for volumes present in snapshot tracking dir - dirDetails, err := svc.GetVolume(isiPath, "", directory) + dirDetails, err := svc.GetVolume(ctx, isiPath, "", directory) if err != nil { return 0, err } @@ -586,3 +718,33 @@ func (svc *isiService) GetSubDirectoryCount(isiPath, directory string) (int64, e return 0, fmt.Errorf("failed to get subdirectory count for directory '%s'", directory) } + +func (svc *isiService) IsHostAlreadyAdded(ctx context.Context, exportID int, accessZone string, nodeID string) bool { + // Fetch log handler + log := utils.GetRunIDLogger(ctx) + + export, _ := svc.GetExportByIDWithZone(ctx, exportID, accessZone) + + if export == nil { + log.Debugf("failed to get export by id '%d' with access zone '%s', return true for LocalhostAlreadyAdded as a safer return value", exportID, accessZone) + return true + } + + clientName, clientFQDN, clientIP, err := utils.ParseNodeID(ctx, nodeID) + if err != nil { + log.Debugf("failed to parse node ID '%s', return true for LocalhostAlreadyAdded as a safer return value", nodeID) + return true + } + + clientFieldsNotEmpty := len(*export.Clients) > 0 || len(*export.ReadOnlyClients) > 0 || len(*export.ReadWriteClients) > 0 || len(*export.RootClients) > 0 + + isNodeInClientFields := utils.IsStringInSlices(clientName, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + + isNodeFQDNInClientFields := utils.IsStringInSlices(clientFQDN, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + + if clientIP != "" { + isNodeInClientFields = isNodeInClientFields || utils.IsStringInSlices(clientIP, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) + } + + return clientFieldsNotEmpty && isNodeInClientFields || isNodeFQDNInClientFields +} diff --git a/service/mount.go b/service/mount.go index 2c258eb..c0cf6b4 100644 --- a/service/mount.go +++ b/service/mount.go @@ -17,22 +17,26 @@ package service */ import ( "fmt" + "github.com/sirupsen/logrus" "os" "strings" csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/gofsutil" - log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func publishVolume( + ctx context.Context, req *csi.NodePublishVolumeRequest, nfsExportURL string, nfsV3 bool) error { + // Fetch log handler + ctx, log := GetLogger(ctx) + volCap := req.GetVolumeCapability() if volCap == nil { return status.Error(codes.InvalidArgument, @@ -60,7 +64,7 @@ func publishVolume( } // make sure target is created - _, err := mkdir(target) + _, err := mkdir(ctx, target) if err != nil { return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create '%s': '%s'", target, err.Error())) } @@ -76,14 +80,13 @@ func publishVolume( mntOptions = append(mntOptions, rwOption) - f := log.Fields{ + f := logrus.Fields{ "ID": req.VolumeId, "TargetPath": target, "ExportPath": nfsExportURL, "AccessMode": accMode.GetMode(), } log.WithFields(f).Info("Node publish volume params ") - ctx := 
context.Background() mnts, err := gofsutil.GetMounts(ctx) if err != nil { return status.Errorf(codes.Internal, @@ -128,7 +131,12 @@ func publishVolume( // unpublishVolume removes the mount to the target path func unpublishVolume( + ctx context.Context, req *csi.NodeUnpublishVolumeRequest, filterStr string) error { + + // Fetch log handler + ctx, log := GetLogger(ctx) + target := req.GetTargetPath() if target == "" { return status.Error(codes.InvalidArgument, @@ -136,7 +144,6 @@ func unpublishVolume( } log.Debugf("attempting to unmount '%s'", target) - ctx := context.Background() mnts, err := gofsutil.GetMounts(ctx) if err != nil { return status.Errorf(codes.Internal, @@ -174,7 +181,9 @@ func unpublishVolume( // mkdir creates the directory specified by path if needed. // return pair is a bool flag of whether dir was created, and an error -func mkdir(path string) (bool, error) { +func mkdir(ctx context.Context, path string) (bool, error) { + // Fetch log handler + ctx, log := GetLogger(ctx) st, err := os.Stat(path) if os.IsNotExist(err) { if err := os.Mkdir(path, 0750); err != nil { diff --git a/service/node.go b/service/node.go index 3042dd9..b01342c 100644 --- a/service/node.go +++ b/service/node.go @@ -21,7 +21,7 @@ import ( csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/csi-isilon/common/constants" "github.com/dell/csi-isilon/common/utils" - log "github.com/sirupsen/logrus" + csiutils "github.com/dell/csi-isilon/csi-utils" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -63,43 +63,64 @@ func (s *service) NodePublishVolume( ctx context.Context, req *csi.NodePublishVolumeRequest) ( *csi.NodePublishVolumeResponse, error) { - // Probe the node if required and make sure startup called - if err := s.autoProbe(ctx); err != nil { - log.Error("nodeProbe failed with error :" + err.Error()) - return nil, err - } + + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) volumeContext := req.GetVolumeContext() if volumeContext == nil { - return nil, status.Error(codes.InvalidArgument, "VolumeContext is nil, skip NodePublishVolume") + return nil, status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "VolumeContext is nil, skip NodePublishVolume")) } - - utils.LogMap("VolumeContext", volumeContext) + utils.LogMap(ctx, "VolumeContext", volumeContext) isEphemeralVolume := volumeContext["csi.storage.k8s.io/ephemeral"] == "true" + var clusterName string + var err error + if isEphemeralVolume { + clusterName = volumeContext["ClusterName"] + } else { + // parse the input volume id and fetch it's components + _, _, _, clusterName, _ = utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) + } + + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + + // Probe the node if required and make sure startup called + if err := s.autoProbe(ctx, isiConfig); err != nil { + log.Error("nodeProbe failed with error :" + err.Error()) + return nil, err + } + if isEphemeralVolume { return s.ephemeralNodePublish(ctx, req) } path := volumeContext["Path"] if path == "" { - return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("no entry keyed by 'Path' found in VolumeContext of volume id : '%s', name '%s', skip NodePublishVolume", req.GetVolumeId(), volumeContext["name"])) + return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, "no entry keyed by 'Path' found 
in VolumeContext of volume id : '%s', name '%s', skip NodePublishVolume", req.GetVolumeId(), volumeContext["name"])) } volName := volumeContext["Name"] if volName == "" { - return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("no entry keyed by 'Name' found in VolumeContext of volume id : '%s', name '%s', skip NodePublishVolume", req.GetVolumeId(), volumeContext["name"])) + return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, "no entry keyed by 'Name' found in VolumeContext of volume id : '%s', name '%s', skip NodePublishVolume", req.GetVolumeId(), volumeContext["name"])) } - isROVolumeFromSnapshot := s.isiSvc.isROVolumeFromSnapshot(path) + isROVolumeFromSnapshot := isiConfig.isiSvc.isROVolumeFromSnapshot(path) if isROVolumeFromSnapshot { log.Info("Volume source is snapshot") - if export, err := s.isiSvc.GetExportWithPathAndZone(path, ""); err != nil || export == nil { - return nil, status.Errorf(codes.Internal, "error retrieving export for '%s'", path) + if export, err := isiConfig.isiSvc.GetExportWithPathAndZone(ctx, path, ""); err != nil || export == nil { + return nil, status.Errorf(codes.Internal, utils.GetMessageWithRunID(runID, "error retrieving export for '%s'", path)) } } else { // Parse the target path and empty volume name to get the volume isiPath := utils.GetIsiPathFromExportPath(path) - if _, err := s.getVolByName(isiPath, volName); err != nil { + + if _, err := s.getVolByName(ctx, isiPath, volName, isiConfig); err != nil { log.Errorf("Error in getting '%s' Volume '%v'", volName, err) return nil, err } @@ -109,19 +130,19 @@ func (s *service) NodePublishVolume( // Set azServiceIP to updated endpoint when custom topology is enabled var azServiceIP string if s.opts.CustomTopologyEnabled { - azServiceIP = s.opts.Endpoint + azServiceIP = isiConfig.IsiIP } else { azServiceIP = volumeContext[AzServiceIPParam] } - f := log.Fields{ + f := map[string]interface{}{ "ID": req.VolumeId, "Name": volumeContext["Name"], "TargetPath": req.GetTargetPath(), "AzServiceIP": azServiceIP, } log.WithFields(f).Info("Calling publishVolume") - if err := publishVolume(req, s.isiSvc.GetNFSExportURLForPath(azServiceIP, path), s.opts.NfsV3); err != nil { + if err := publishVolume(ctx, req, isiConfig.isiSvc.GetNFSExportURLForPath(azServiceIP, path), s.opts.NfsV3); err != nil { return nil, err } @@ -132,19 +153,34 @@ func (s *service) NodeUnpublishVolume( ctx context.Context, req *csi.NodeUnpublishVolumeRequest) ( *csi.NodeUnpublishVolumeResponse, error) { + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) log.Debug("executing NodeUnpublishVolume") volID := req.GetVolumeId() if volID == "" { - return nil, status.Error(codes.FailedPrecondition, "no VolumeID found in request") + return nil, status.Error(codes.FailedPrecondition, utils.GetMessageWithRunID(runID, "no VolumeID found in request")) } log.Infof("The volume ID fetched from NodeUnPublish req is %s", volID) - volName, exportID, accessZone, _ := utils.ParseNormalizedVolumeID(req.GetVolumeId()) + volName, exportID, accessZone, clusterName, _ := utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) if volName == "" { volName = volID } + ctx, log = setClusterContext(ctx, clusterName) + log.Debugf("Cluster Name: %v", clusterName) + isiConfig, err := s.getIsilonConfig(ctx, &clusterName) + if err != nil { + return nil, err + } + + // Probe the node if required + if err := s.autoProbe(ctx, isiConfig); err != nil { + log.Error("nodeProbe failed with error :" + err.Error()) + return nil, err + } + 
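+	// Ephemeral volumes are tracked via a file named "ephemeral-<volID>" placed
+	// directly under the request's target path (constructed below).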
ephemeralVolName := fmt.Sprintf("ephemeral-%s", volID) filePath := req.TargetPath + "/" + ephemeralVolName var isEphemeralVolume bool @@ -169,19 +205,19 @@ func (s *service) NodeUnpublishVolume( // Check if it is a RO volume from snapshot // We need not execute this logic for ephemeral volumes. if !isExportIDEmpty { - export, err := s.isiSvc.GetExportByIDWithZone(exportID, accessZone) + export, err := isiConfig.isiSvc.GetExportByIDWithZone(ctx, exportID, accessZone) if err != nil { return nil, err } exportPath := (*export.Paths)[0] - isROVolumeFromSnapshot := s.isiSvc.isROVolumeFromSnapshot(exportPath) + isROVolumeFromSnapshot := isiConfig.isiSvc.isROVolumeFromSnapshot(exportPath) // If it is a RO volume from snapshot if isROVolumeFromSnapshot { volName = exportPath } } - if err := unpublishVolume(req, volName); err != nil { + if err := unpublishVolume(ctx, req, volName); err != nil { return nil, err } @@ -195,22 +231,24 @@ func (s *service) NodeUnpublishVolume( return &csi.NodeUnpublishVolumeResponse{}, nil } -func (s *service) nodeProbe(ctx context.Context) error { +func (s *service) nodeProbe(ctx context.Context, isiConfig *IsilonClusterConfig) error { - if err := s.validateOptsParameters(); err != nil { + // Fetch log handler + ctx, _, _ = GetRunIDLog(ctx) + + if err := s.validateOptsParameters(isiConfig); err != nil { return fmt.Errorf("node probe failed : '%v'", err) } - if s.isiSvc == nil { - - return errors.New("s.isiSvc (type isiService) is nil, probe failed") - + if isiConfig.isiSvc == nil { + return errors.New("clusterConfig.isiSvc (type isiService) is nil, probe failed") } - if err := s.isiSvc.TestConnection(); err != nil { + if err := isiConfig.isiSvc.TestConnection(ctx); err != nil { return fmt.Errorf("node probe failed : '%v'", err) } + ctx, log := setClusterContext(ctx, isiConfig.ClusterName) log.Debug("node probe succeeded") return nil @@ -253,35 +291,50 @@ func (s *service) NodeGetInfo( req *csi.NodeGetInfoRequest) ( *csi.NodeGetInfoResponse, error) { + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + nodeID, err := s.getPowerScaleNodeID(ctx) + log.Debugf("Node ID of worker node is '%s'", nodeID) if (err) != nil { return nil, err } + // If Custom Topology is enabled we do not add node labels to the worker node if s.opts.CustomTopologyEnabled { return &csi.NodeGetInfoResponse{NodeId: nodeID}, nil } - // As NodeGetInfo is invoked only once during driver registration, we validate - // connectivity with backend PowerScale Array upto MaxIsiConnRetries, before adding topology keys - var connErr error - for i := 0; i < constants.MaxIsiConnRetries; i++ { - connErr = s.isiSvc.TestConnection() - if connErr == nil { - break + // If Custom Topology is not enabled, proceed with adding node labels for all + // PowerScale clusters part of secret.json + isiClusters := s.getIsilonClusters() + topology := make(map[string]string) + + for cluster := range isiClusters { + // Validate if we have valid clusterConfig + if isiClusters[cluster].isiSvc == nil { + continue } - time.Sleep(RetrySleepTime) - } - if connErr != nil { - return &csi.NodeGetInfoResponse{NodeId: nodeID}, nil - } + // As NodeGetInfo is invoked only once during driver registration, we validate + // connectivity with backend PowerScale Array upto MaxIsiConnRetries, before adding topology keys + var connErr error + for i := 0; i < constants.MaxIsiConnRetries; i++ { + connErr = isiClusters[cluster].isiSvc.TestConnection(ctx) + if connErr == nil { + break + } + time.Sleep(RetrySleepTime) + } - // Create the topology keys - 
// .dellemc.com/: - topology := map[string]string{} - topology[constants.PluginName+"/"+s.opts.Endpoint] = constants.PluginName + if connErr != nil { + continue + } + // Create the topology keys + // .dellemc.com/: + topology[constants.PluginName+"/"+isiClusters[cluster].IsiIP] = constants.PluginName + } // Create NodeGetInfoResponse including nodeID and AccessibleTopology information return &csi.NodeGetInfoResponse{ NodeId: nodeID, @@ -297,6 +350,9 @@ func (s *service) NodeGetVolumeStats( } func (s *service) ephemeralNodePublish(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + log.Info("Received request to node publish Ephemeral Volume..") volID := req.GetVolumeId() @@ -408,11 +464,14 @@ func (s *service) ephemeralNodePublish(ctx context.Context, req *csi.NodePublish func (s *service) ephemeralNodeUnpublish( ctx context.Context, req *csi.NodeUnpublishVolumeRequest) error { + // Fetch log handler + ctx, log, runID := GetRunIDLog(ctx) + log.Infof("Request received for Ephemeral NodeUnpublish..") volumeID := req.GetVolumeId() log.Infof("The volID is %s", volumeID) if volumeID == "" { - return status.Error(codes.InvalidArgument, "volume ID is required") + return status.Error(codes.InvalidArgument, utils.GetMessageWithRunID(runID, "volume ID is required")) } nodeID, nodeIDErr := s.getPowerScaleNodeID(ctx) @@ -432,7 +491,7 @@ func (s *service) ephemeralNodeUnpublish( // Before deleting the volume on PowerScale, // Cleaning up the directories we created. - volName, _, _, err := utils.ParseNormalizedVolumeID(req.GetVolumeId()) + volName, _, _, _, err := utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) if err != nil { return err } @@ -456,13 +515,28 @@ func (s *service) ephemeralNodeUnpublish( } func (s *service) getPowerScaleNodeID(ctx context.Context) (string, error) { + var nodeIP string + var err error - nodeIP, err := s.GetCSINodeIP(ctx) - if (err) != nil { - return "", err + // Fetch log handler + ctx, log, _ := GetRunIDLog(ctx) + + // When valid list of allowedNetworks is being given as part of values.yaml, we need + // to fetch first IP from matching network + if len(s.opts.allowedNetworks) > 0 { + log.Debugf("Fetching IP address of custom network for NFS I/O traffic") + nodeIP, err = csiutils.GetNFSClientIP(s.opts.allowedNetworks) + if err != nil { + return "", err + } + } else { + nodeIP, err = s.GetCSINodeIP(ctx) + if (err) != nil { + return "", err + } } - nodeFQDN, err := utils.GetFQDNByIP(nodeIP) + nodeFQDN, err := utils.GetFQDNByIP(ctx, nodeIP) if (err) != nil { return "", err } diff --git a/service/service.go b/service/service.go index 2c405eb..18c37af 100644 --- a/service/service.go +++ b/service/service.go @@ -17,27 +17,40 @@ package service */ import ( "context" + "encoding/json" "errors" "fmt" - "github.com/dell/csi-isilon/common/k8sutils" + "google.golang.org/grpc/metadata" + "io/ioutil" "net" + "path/filepath" "runtime" "strings" + "sync" + "sync/atomic" "time" + "github.com/dell/csi-isilon/common/k8sutils" + csi "github.com/container-storage-interface/spec/lib/go/csi" "github.com/dell/csi-isilon/common/constants" "github.com/dell/csi-isilon/common/utils" "github.com/dell/csi-isilon/core" + "github.com/dell/gocsi" + csictx "github.com/dell/gocsi/context" isi "github.com/dell/goisilon" - "github.com/rexray/gocsi" - csictx "github.com/rexray/gocsi/context" + "github.com/fsnotify/fsnotify" + "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" 
"google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +//To maintain runid for Non debug mode. Note: CSI will not generate runid if CSI_DEBUG=false +var runid int64 +var isilonConfigFile string + // Manifest is the SP's manifest. var Manifest = map[string]string{ "url": "http://github.com/dell/csi-isilon", @@ -56,11 +69,7 @@ type Service interface { // Opts defines service configuration options. type Opts struct { - Endpoint string Port string - EndpointURL string - User string - Password string AccessZone string Path string Insecure bool @@ -71,15 +80,42 @@ type Opts struct { NfsV3 bool CustomTopologyEnabled bool KubeConfigPath string + allowedNetworks []string } type service struct { - opts Opts - mode string - nodeID string - nodeIP string - isiSvc *isiService - statisticsCounter int + opts Opts + mode string + nodeID string + nodeIP string + statisticsCounter int + isiClusters *sync.Map + defaultIsiClusterName string +} + +//IsilonClusters To unmarshal secret.json file +type IsilonClusters struct { + IsilonClusters []IsilonClusterConfig `json:"isilonClusters"` +} + +//IsilonClusterConfig To hold config details of a isilon cluster +type IsilonClusterConfig struct { + ClusterName string `json:"clusterName"` + IsiIP string `json:"isiIP"` + IsiPort string `json:"isiPort,omitempty"` + EndpointURL string + User string `json:"username"` + Password string `json:"password"` + IsiInsecure *bool `json:"isiInsecure,omitempty"` + IsiPath string `json:"isiPath,omitempty"` + IsDefaultCluster bool `json:"isDefaultCluster,omitempty"` + isiSvc *isiService +} + +//To display the IsilonClusterConfig of a cluster +func (s IsilonClusterConfig) String() string { + return fmt.Sprintf("ClusterName: %s, IsiIP: %s, IsiPort: %s, EndpointURL: %s, User: %s, IsiInsecure: %v, IsiPath: %s, IsDefaultCluster: %v, isiSvc: %v", + s.ClusterName, s.IsiIP, s.IsiPort, s.EndpointURL, s.User, *s.IsiInsecure, s.IsiPath, s.IsDefaultCluster, s.isiSvc) } // New returns a new Service. @@ -87,7 +123,7 @@ func New() Service { return &service{} } -func (s *service) initializeService(ctx context.Context) { +func (s *service) initializeServiceOpts(ctx context.Context) { // Get the SP's operating mode. 
s.mode = csictx.Getenv(ctx, gocsi.EnvVarMode) @@ -100,17 +136,6 @@ func (s *service) initializeService(ctx context.Context) { opts.Port = constants.DefaultPortNumber } - if ep, ok := csictx.LookupEnv(ctx, constants.EnvEndpoint); ok { - opts.Endpoint = ep - opts.EndpointURL = fmt.Sprintf("https://%s:%s", ep, opts.Port) - } - if user, ok := csictx.LookupEnv(ctx, constants.EnvUser); ok { - opts.User = user - } - if pw, ok := csictx.LookupEnv(ctx, constants.EnvPassword); ok { - opts.Password = pw - } - if path, ok := csictx.LookupEnv(ctx, constants.EnvPath); ok { if path == "" { path = constants.DefaultIsiPath @@ -141,6 +166,13 @@ func (s *service) initializeService(ctx context.Context) { opts.KubeConfigPath = kubeConfigPath } + if cfgFile, ok := csictx.LookupEnv(ctx, constants.EnvIsilonConfigFile); ok { + isilonConfigFile = cfgFile + } else { + isilonConfigFile = constants.IsilonConfigFile + } + + opts.allowedNetworks = utils.ParseArrayFromContext(ctx, constants.EnvAllowedNetworks) opts.QuotaEnabled = utils.ParseBooleanFromContext(ctx, constants.EnvQuotaEnabled) opts.Insecure = utils.ParseBooleanFromContext(ctx, constants.EnvInsecure) opts.AutoProbe = utils.ParseBooleanFromContext(ctx, constants.EnvAutoProbe) @@ -150,10 +182,6 @@ func (s *service) initializeService(ctx context.Context) { opts.CustomTopologyEnabled = utils.ParseBooleanFromContext(ctx, constants.EnvCustomTopologyEnabled) s.opts = opts - - clientCtx := utils.ConfigureLogger(opts.DebugEnabled) - - s.isiSvc, _ = s.GetIsiService(clientCtx) } // ValidateCreateVolumeRequest validates the CreateVolumeRequest parameter for a CreateVolume operation @@ -190,7 +218,7 @@ func isVolumeTypeBlock(vcs []*csi.VolumeCapability) bool { } // ValidateDeleteVolumeRequest validates the DeleteVolumeRequest parameter for a DeleteVolume operation -func (s *service) ValidateDeleteVolumeRequest( +func (s *service) ValidateDeleteVolumeRequest(ctx context.Context, req *csi.DeleteVolumeRequest) error { if req.GetVolumeId() == "" { @@ -198,7 +226,7 @@ func (s *service) ValidateDeleteVolumeRequest( "no volume id is provided by the DeleteVolumeRequest instance") } - _, _, _, err := utils.ParseNormalizedVolumeID(req.GetVolumeId()) + _, _, _, _, err := utils.ParseNormalizedVolumeID(ctx, req.GetVolumeId()) if err != nil { return status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse volume ID '%s', error : '%v'", req.GetVolumeId(), err)) } @@ -206,23 +234,43 @@ func (s *service) ValidateDeleteVolumeRequest( return nil } -func (s *service) probe(ctx context.Context) error { +func (s *service) probeAllClusters(ctx context.Context) error { + isilonClusters := s.getIsilonClusters() - log.Debug("calling probe") + probeSuccessCount := 0 + for i := range isilonClusters { + err := s.probe(ctx, isilonClusters[i]) + if err == nil { + probeSuccessCount++ + } else { + log.Debugf("Probe failed for isilon cluster '%s' error:'%s'", isilonClusters[i].ClusterName, err) + } + } + + if probeSuccessCount == 0 { + return fmt.Errorf("probe of all isilon clusters failed") + } + + return nil +} + +func (s *service) probe(ctx context.Context, clusterConfig *IsilonClusterConfig) error { + log.Debugf("calling probe for cluster '%s'", clusterConfig.ClusterName) // Do a controller probe if strings.EqualFold(s.mode, constants.ModeController) { - if err := s.controllerProbe(ctx); err != nil { + if err := s.controllerProbe(ctx, clusterConfig); err != nil { return err } } else if strings.EqualFold(s.mode, constants.ModeNode) { - if err := s.nodeProbe(ctx); err != nil { + if err := 
s.nodeProbe(ctx, clusterConfig); err != nil { return err } } else { return status.Error(codes.FailedPrecondition, "Service mode not set") } + return nil } @@ -237,12 +285,12 @@ func (s *service) probeOnStart(ctx context.Context) error { log.Debug("X_CSI_ISILON_NO_PROBE_ON_START is false, executing 'probeOnStart'") - return s.probe(ctx) + return s.probeAllClusters(ctx) } -func (s *service) autoProbe(ctx context.Context) error { +func (s *service) autoProbe(ctx context.Context, isiConfig *IsilonClusterConfig) error { - if s.isiSvc != nil { + if isiConfig.isiSvc != nil { log.Debug("isiSvc already initialized, skip probing") return nil } @@ -253,10 +301,10 @@ func (s *service) autoProbe(ctx context.Context) error { } log.Debug("start auto-probing") - return s.probe(ctx) + return s.probe(ctx, isiConfig) } -func (s *service) GetIsiClient(clientCtx context.Context) (*isi.Client, error) { +func (s *service) GetIsiClient(clientCtx context.Context, isiConfig *IsilonClusterConfig) (*isi.Client, error) { // First we fetch node labels using kubernetes API and check, if label // .dellemc.com/: @@ -281,12 +329,12 @@ func (s *service) GetIsiClient(clientCtx context.Context) (*isi.Client, error) { log.Infof("Topology label %s:%s available on node", lkey, lval) tList := strings.SplitAfter(lkey, "/") if len(tList) != 0 { - s.opts.Endpoint = tList[1] - s.opts.EndpointURL = fmt.Sprintf("https://%s:%s", s.opts.Endpoint, s.opts.Port) + isiConfig.IsiIP = tList[1] + isiConfig.EndpointURL = fmt.Sprintf("https://%s:%s", isiConfig.IsiIP, isiConfig.IsiPort) customTopologyFound = true } else { log.Errorf("Fetching PowerScale FQDN/IP from topology label %s:%s failed, using isiIP "+ - "%s as PowerScale FQDN/IP", lkey, lval, s.opts.Endpoint) + "%s as PowerScale FQDN/IP", lkey, lval, isiConfig.IsiIP) } break } @@ -300,41 +348,38 @@ func (s *service) GetIsiClient(clientCtx context.Context) (*isi.Client, error) { client, err := isi.NewClientWithArgs( clientCtx, - s.opts.EndpointURL, - s.opts.Insecure, + isiConfig.EndpointURL, + *isiConfig.IsiInsecure, s.opts.Verbose, - s.opts.User, + isiConfig.User, "", - s.opts.Password, - s.opts.Path, + isiConfig.Password, + isiConfig.IsiPath, ) if err != nil { - log.Errorf("init client failed : '%s'", err.Error()) + log.Errorf("init client failed for isilon cluster '%s': '%s'", isiConfig.ClusterName, err.Error()) return nil, err } return client, nil } -func (s *service) GetIsiService(clientCtx context.Context) (*isiService, error) { +func (s *service) GetIsiService(clientCtx context.Context, isiConfig *IsilonClusterConfig) (*isiService, error) { var isiClient *isi.Client var err error - if isiClient, err = s.GetIsiClient(clientCtx); err != nil { - + if isiClient, err = s.GetIsiClient(clientCtx, isiConfig); err != nil { return nil, err } return &isiService{ - endpoint: s.opts.Endpoint, + endpoint: isiConfig.IsiIP, client: isiClient, }, nil } -func (s *service) validateOptsParameters() error { - - if s.opts.User == "" || s.opts.Password == "" || s.opts.Endpoint == "" { - - return fmt.Errorf("invalid isi service parameters, at least one of endpoint, userame and password is empty. endpoint : endpoint '%s', username : '%s'", s.opts.Endpoint, s.opts.User) +func (s *service) validateOptsParameters(clusterConfig *IsilonClusterConfig) error { + if clusterConfig.User == "" || clusterConfig.Password == "" || clusterConfig.IsiIP == "" { + return fmt.Errorf("invalid isi service parameters, at least one of endpoint, username and password is empty. 
endpoint : endpoint '%s', username : '%s'", clusterConfig.IsiIP, clusterConfig.User) } return nil @@ -343,9 +388,6 @@ func (s *service) validateOptsParameters() error { func (s *service) logServiceStats() { fields := map[string]interface{}{ - "endpoint": s.opts.Endpoint, - "user": s.opts.User, - "password": "", "path": s.opts.Path, "insecure": s.opts.Insecure, "autoprobe": s.opts.AutoProbe, @@ -354,20 +396,225 @@ func (s *service) logServiceStats() { "mode": s.mode, } - if s.opts.Password != "" { - fields["password"] = "******" - } - log.WithFields(fields).Infof("Configured '%s'", constants.PluginName) } func (s *service) BeforeServe( ctx context.Context, sp *gocsi.StoragePlugin, lis net.Listener) error { - s.initializeService(ctx) + s.initializeServiceOpts(ctx) s.logServiceStats() + + //Update the storage array list + s.isiClusters = new(sync.Map) + err := s.syncIsilonConfigs(ctx) + if err != nil { + return err + } + + //Dynamically load the config + go s.loadIsilonConfigs(ctx, isilonConfigFile) + return s.probeOnStart(ctx) } +func (s *service) loadIsilonConfigs(ctx context.Context, configFile string) error { + log.Info("Updating cluster config details") + watcher, _ := fsnotify.NewWatcher() + defer watcher.Close() + + parentFolder, _ := filepath.Abs(filepath.Dir(configFile)) + log.Debug("Config folder: ", parentFolder) + done := make(chan bool) + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + if event.Op&fsnotify.Create == fsnotify.Create && event.Name == parentFolder+"/..data" { + log.Infof("**************** Cluster config file modified. Updating cluster config details: %s****************", event.Name) + err := s.syncIsilonConfigs(ctx) + if err != nil { + log.Debug("Cluster configuration array length:", s.getIsilonClusterLength()) + log.Error("Invalid configuration in secret.json. Error:", err) + } + } + + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Error("cluster config file load error:", err) + } + } + }() + err := watcher.Add(parentFolder) + if err != nil { + log.Error("Unable to add file watcher for folder ", parentFolder) + return err + } + <-done + return nil +} + +//Returns the size of arrays +func (s *service) getIsilonClusterLength() (length int) { + length = 0 + s.isiClusters.Range(func(_, _ interface{}) bool { + length++ + return true + }) + return +} + +var syncMutex sync.Mutex + +//Reads the credentials from secrets and initialize all arrays. 
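+//The configuration file referenced by isilonConfigFile (the isilon-creds secret)
+//is expected to contain JSON of the following shape; field names mirror the json
+//tags on IsilonClusterConfig, and the values shown are illustrative only:
+//
+//  {
+//    "isilonClusters": [
+//      {
+//        "clusterName": "cluster1",
+//        "isiIP": "127.0.0.1",
+//        "isiPort": "8080",
+//        "username": "user",
+//        "password": "password",
+//        "isiInsecure": true,
+//        "isiPath": "/ifs/data/csi-isilon",
+//        "isDefaultCluster": true
+//      }
+//    ]
+//  }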
+func (s *service) syncIsilonConfigs(ctx context.Context) error { + log.Info("************* Synchronizing Isilon Clusters' config **************") + syncMutex.Lock() + defer syncMutex.Unlock() + + configBytes, err := ioutil.ReadFile(isilonConfigFile) + if err != nil { + return fmt.Errorf("file ('%s') error: %v", isilonConfigFile, err) + } + + if string(configBytes) != "" { + log.Debugf("Current isilon configs:") + s.isiClusters.Range(handler) + newIsilonConfigs, defaultClusterName, err := s.getNewIsilonConfigs(configBytes) + if err != nil { + return err + } + + // Update the isiClusters sync.Map + s.isiClusters.Range(func(key interface{}, value interface{}) bool { + s.isiClusters.Delete(key) + return true + }) + + for k, v := range newIsilonConfigs { + s.isiClusters.Store(k, v) + } + log.Debugf("New isilon configs:") + s.isiClusters.Range(handler) + + s.defaultIsiClusterName = defaultClusterName + if s.defaultIsiClusterName == "" { + log.Warnf("no default cluster name/config available") + } + } else { + return errors.New("isilon cluster details are not provided in isilon-creds secret") + } + + return nil +} + +func (s *service) getNewIsilonConfigs(configBytes []byte) (map[interface{}]interface{}, string, error) { + var noOfDefaultClusters int + var defaultIsiClusterName string + + jsonConfig := new(IsilonClusters) + err := json.Unmarshal(configBytes, &jsonConfig) + if err != nil { + return nil, defaultIsiClusterName, fmt.Errorf("unable to parse islon clusters' config details [%v]", err) + } + + if len(jsonConfig.IsilonClusters) == 0 { + return nil, defaultIsiClusterName, errors.New("cluster details are not provided in isilon-creds secret") + } + + if len(jsonConfig.IsilonClusters) > 1 && s.opts.CustomTopologyEnabled { + return nil, defaultIsiClusterName, errors.New("custom topology is enabled and it expects single cluster config in secret") + } + + newIsiClusters := make(map[interface{}]interface{}) + for i, config := range jsonConfig.IsilonClusters { + log.Debugf("parsing config details for cluster %v", config.ClusterName) + if config.ClusterName == "" { + return nil, defaultIsiClusterName, fmt.Errorf("invalid value for clusterName at index [%d]", i) + } + if config.User == "" { + return nil, defaultIsiClusterName, fmt.Errorf("invalid value for username at index [%d]", i) + } + if config.Password == "" { + return nil, defaultIsiClusterName, fmt.Errorf("invalid value for password at index [%d]", i) + } + if config.IsiIP == "" { + return nil, defaultIsiClusterName, fmt.Errorf("invalid value for isiIP at index [%d]", i) + } + if config.IsiPort == "" { + config.IsiPort = s.opts.Port + } + if config.IsiInsecure == nil { + config.IsiInsecure = &s.opts.Insecure + } + if config.IsiPath == "" { + config.IsiPath = s.opts.Path + } + + config.EndpointURL = fmt.Sprintf("https://%s:%s", config.IsiIP, config.IsiPort) + clientCtx := utils.ConfigureLogger(s.opts.DebugEnabled) + config.isiSvc, _ = s.GetIsiService(clientCtx, &config) + + newConfig := IsilonClusterConfig{} + newConfig = config + + if config.IsDefaultCluster { + noOfDefaultClusters++ + if noOfDefaultClusters > 1 { + return nil, defaultIsiClusterName, fmt.Errorf("'isDefaultCluster' attribute set for multiple isilon cluster configs in 'isilonClusters': %s. 
Only one cluster should be marked as default cluster", config.ClusterName) + } + } + + if _, ok := newIsiClusters[config.ClusterName]; ok { + return nil, defaultIsiClusterName, fmt.Errorf("duplicate cluster name [%s] found in input isilonClusters", config.ClusterName) + } + newIsiClusters[config.ClusterName] = &newConfig + if config.IsDefaultCluster { + defaultIsiClusterName = config.ClusterName + } + + fields := map[string]interface{}{ + "ClusterName": config.ClusterName, + "IsiIP": config.IsiIP, + "IsiPort": config.IsiPort, + "Username": config.User, + "Password": "*******", + "IsiInsecure": *config.IsiInsecure, + "IsiPath": config.IsiPath, + "IsDefaultCluster": config.IsDefaultCluster, + } + log.WithFields(fields).Infof("new config details for cluster %s", config.ClusterName) + } + return newIsiClusters, defaultIsiClusterName, nil +} + +func handler(key, value interface{}) bool { + log.Debugf(value.(*IsilonClusterConfig).String()) + return true +} + +// Returns details of a cluster with name clusterName +func (s *service) getIsilonClusterConfig(clusterName string) *IsilonClusterConfig { + if cluster, ok := s.isiClusters.Load(clusterName); ok { + return cluster.(*IsilonClusterConfig) + } + return nil +} + +// Returns details of all isilon clusters +func (s *service) getIsilonClusters() []*IsilonClusterConfig { + list := make([]*IsilonClusterConfig, 0) + s.isiClusters.Range(func(key interface{}, value interface{}) bool { + list = append(list, value.(*IsilonClusterConfig)) + return true + }) + return list +} + // GetCSINodeID gets the id of the CSI node which regards the node name as node id func (s *service) GetCSINodeID(ctx context.Context) (string, error) { // if the node id has already been initialized, return it @@ -375,7 +622,7 @@ func (s *service) GetCSINodeID(ctx context.Context) (string, error) { return s.nodeID, nil } // node id couldn't be read from env variable while initializing service, return with error - return "", errors.New("Cannot get node id") + return "", errors.New("cannot get node id") } // GetCSINodeIP gets the IP of the CSI node @@ -385,14 +632,14 @@ func (s *service) GetCSINodeIP(ctx context.Context) (string, error) { return s.nodeIP, nil } // node id couldn't be read from env variable while initializing service, return with error - return "", errors.New("Cannot get node IP") + return "", errors.New("cannot get node IP") } -func (s *service) getVolByName(isiPath, volName string) (isi.Volume, error) { +func (s *service) getVolByName(ctx context.Context, isiPath, volName string, isiConfig *IsilonClusterConfig) (isi.Volume, error) { // The `GetVolume` API returns a slice of volumes, but when only passing // in a volume ID, the response will be just the one volume - vol, err := s.isiSvc.GetVolume(isiPath, "", volName) + vol, err := isiConfig.isiSvc.GetVolume(ctx, isiPath, "", volName) if err != nil { return nil, err } @@ -416,3 +663,135 @@ func (s *service) logStatistics() { } } } + +func (s *service) getIsiPathForVolumeFromClusterConfig(clusterConfig *IsilonClusterConfig) string { + if clusterConfig.IsiPath == "" { + return s.opts.Path + } + return clusterConfig.IsiPath +} + +//Set cluster name in log messages and re-initialize the context +func setClusterContext(ctx context.Context, clusterName string) (context.Context, *logrus.Entry) { + return setLogFieldsInContext(ctx, clusterName, utils.ClusterName) +} + +//Set runID in log messages and re-initialize the context +func setRunIDContext(ctx context.Context, runID string) (context.Context, *logrus.Entry) { + return 
setLogFieldsInContext(ctx, runID, utils.RunID) +} + +var logMutex sync.Mutex + +//Common method to get log and context +func setLogFieldsInContext(ctx context.Context, logParam string, logType string) (context.Context, *logrus.Entry) { + logMutex.Lock() + defer logMutex.Unlock() + + fields := logrus.Fields{} + fields, ok := ctx.Value(utils.LogFields).(logrus.Fields) + if !ok { + fields = logrus.Fields{} + } + if fields == nil { + fields = logrus.Fields{} + } + fields[logType] = logParam + ulog, ok := ctx.Value(utils.PowerScaleLogger).(*logrus.Entry) + if !ok { + ulog = utils.GetLogger().WithFields(fields) + } + ulog = ulog.WithFields(fields) + ctx = context.WithValue(ctx, utils.PowerScaleLogger, ulog) + ctx = context.WithValue(ctx, utils.LogFields, fields) + return ctx, ulog +} + +// GetLogger creates custom logger handler +func GetLogger(ctx context.Context) (context.Context, *logrus.Entry) { + var rid string + fields := logrus.Fields{} + if ctx == nil { + return ctx, utils.GetLogger().WithFields(fields) + } + + headers, ok := metadata.FromIncomingContext(ctx) + if ok { + reqid, ok := headers[csictx.RequestIDKey] + if ok && len(reqid) > 0 { + rid = reqid[0] + } + } + + fields, _ = ctx.Value(utils.LogFields).(logrus.Fields) + if fields == nil { + fields = logrus.Fields{} + } + + if ok { + fields[utils.RequestID] = rid + } + + logMutex.Lock() + defer logMutex.Unlock() + l := utils.GetLogger() + logWithFields := l.WithFields(fields) + ctx = context.WithValue(ctx, utils.PowerScaleLogger, logWithFields) + ctx = context.WithValue(ctx, utils.LogFields, fields) + return ctx, logWithFields +} + +// GetRunIDLog function returns logger with runID +func GetRunIDLog(ctx context.Context) (context.Context, *logrus.Entry, string) { + var rid string + fields := logrus.Fields{} + if ctx == nil { + return ctx, utils.GetLogger().WithFields(fields), rid + } + + headers, ok := metadata.FromIncomingContext(ctx) + if ok { + reqid, ok := headers[csictx.RequestIDKey] + if ok && len(reqid) > 0 { + rid = reqid[0] + } else { + atomic.AddInt64(&runid, 1) + rid = fmt.Sprintf("%d", runid) + } + } + + fields, _ = ctx.Value(utils.LogFields).(logrus.Fields) + if fields == nil { + fields = logrus.Fields{} + } + + if ok { + fields[utils.RunID] = rid + } + + logMutex.Lock() + defer logMutex.Unlock() + l := utils.GetLogger() + log := l.WithFields(fields) + ctx = context.WithValue(ctx, utils.PowerScaleLogger, log) + ctx = context.WithValue(ctx, utils.LogFields, fields) + return ctx, log, rid +} + +// getIsilonConfig returns the cluster config +func (s *service) getIsilonConfig(ctx context.Context, clusterName *string) (*IsilonClusterConfig, error) { + if *clusterName == "" { + log.Infof("Request doesn't include cluster name. 
Use default cluster '%s'", s.defaultIsiClusterName) + *clusterName = s.defaultIsiClusterName + if s.defaultIsiClusterName == "" { + return nil, fmt.Errorf("no default cluster config available to continue with request") + } + } + + isiConfig := s.getIsilonClusterConfig(*clusterName) + if isiConfig == nil { + return nil, fmt.Errorf("failed to get cluster config details for clusterName: '%s'", *clusterName) + } + + return isiConfig, nil +} diff --git a/service/service_test.go b/service/service_test.go index 0b94990..036df9b 100644 --- a/service/service_test.go +++ b/service/service_test.go @@ -22,7 +22,7 @@ import ( "os" "testing" - "github.com/DATA-DOG/godog" + "github.com/cucumber/godog" ) func TestMain(m *testing.M) { diff --git a/service/step_defs_test.go b/service/step_defs_test.go index 3793019..ace9c88 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -26,12 +26,13 @@ import ( "os" "runtime" "strings" + "sync" - "github.com/DATA-DOG/godog" csi "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/cucumber/godog" "github.com/dell/csi-isilon/common/utils" + "github.com/dell/gocsi" "github.com/dell/gofsutil" - "github.com/rexray/gocsi" "golang.org/x/net/context" "google.golang.org/grpc/metadata" "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -102,11 +103,12 @@ var inducedErrors struct { } const ( - Volume1 = "d0f055a700000000" - datafile = "test/tmp/datafile" - datadir = "test/tmp/datadir" - datafile2 = "test/tmp/datafile2" - datadir2 = "test/tmp/datadir2" + Volume1 = "d0f055a700000000" + datafile = "test/tmp/datafile" + datadir = "test/tmp/datadir" + datafile2 = "test/tmp/datafile2" + datadir2 = "test/tmp/datadir2" + clusterName1 = "cluster1" ) func (f *feature) aIsilonService() error { @@ -148,16 +150,21 @@ func (f *feature) aIsilonService() error { handler := getHandler() // Get or reuse the cached service f.getService() + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" { if f.server == nil { f.server = httptest.NewServer(handler) } log.Printf("server url: %s\n", f.server.URL) - f.service.opts.EndpointURL = f.server.URL + clusterConfig.EndpointURL = f.server.URL + //f.service.opts.EndpointURL = f.server.URL } else { f.server = nil } - f.service.isiSvc, _ = f.service.GetIsiService(context.Background()) + isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig) + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) f.checkGoRoutines("end aIsilonService") f.service.logServiceStats() return nil @@ -179,36 +186,48 @@ func (f *feature) getService() *service { testNodeHasNoConnection = false svc := new(service) var opts Opts - opts.User = "blah" - opts.Password = "blah" - opts.Endpoint = "127.0.0.1" - opts.EndpointURL = "http://127.0.0.1" + opts.AccessZone = "System" + opts.Path = "/ifs/data/csi-isilon" + opts.Insecure = true + opts.DebugEnabled = true + opts.Verbose = 1 + + newConfig := IsilonClusterConfig{} + newConfig.ClusterName = clusterName1 + newConfig.IsiIP = "127.0.0.1" + newConfig.IsiPort = "8080" + newConfig.EndpointURL = "http://127.0.0.1" + newConfig.User = "blah" + newConfig.Password = "blah" + newConfig.IsiInsecure = &opts.Insecure + newConfig.IsiPath = "/ifs/data/csi-isilon" + newConfig.IsDefaultCluster = true + if os.Getenv("CSI_ISILON_ENDPOINT") != "" { - opts.EndpointURL = os.Getenv("CSI_ISILON_ENDPOINT") + 
newConfig.EndpointURL = os.Getenv("CSI_ISILON_ENDPOINT") } if os.Getenv("CSI_ISILON_USERID") != "" { - opts.User = os.Getenv("CSI_ISILON_USERID") + newConfig.User = os.Getenv("CSI_ISILON_USERID") } if os.Getenv("CSI_ISILON_PASSWORD") != "" { - opts.Password = os.Getenv("CSI_ISILON_PASSWORD") + newConfig.Password = os.Getenv("CSI_ISILON_PASSWORD") } if os.Getenv("CSI_ISILON_PATH") != "" { - opts.Path = os.Getenv("CSI_ISILON_PATH") + newConfig.IsiPath = os.Getenv("CSI_ISILON_PATH") } if os.Getenv("CSI_ISILON_ZONE") != "" { opts.AccessZone = os.Getenv("CSI_ISILON_ZONE") } - opts.Path = "/ifs/data/csi-isilon" - opts.Insecure = true - opts.DebugEnabled = true - opts.Verbose = 1 svc.opts = opts svc.mode = "controller" f.service = svc - f.service.nodeID = "k8s-rhel76-qual=#=#=1.2.3.4" + f.service.nodeID = fmt.Sprintf("k8s-rhel76-qual=#=#=1.2.3.4=#=#=#{clusterName1}") f.service.nodeIP = "1.2.3.4" + f.service.defaultIsiClusterName = clusterName1 + f.service.isiClusters = new(sync.Map) + f.service.isiClusters.Store(newConfig.ClusterName, &newConfig) utils.ConfigureLogger(opts.DebugEnabled) @@ -216,7 +235,9 @@ func (f *feature) getService() *service { } func (f *feature) iSetEmptyPassword() error { - f.service.opts.Password = "" + cluster, _ := f.service.isiClusters.Load(clusterName1) + cluster.(*IsilonClusterConfig).Password = "" + f.service.isiClusters.Store(clusterName1, cluster) return nil } @@ -244,7 +265,8 @@ func FeatureContext(s *godog.Suite) { s.Step(`^an invalid ProbeResponse is returned$`, f.anInvalidProbeResponseIsReturned) s.Step(`^I set empty password for Isilon service$`, f.iSetEmptyPassword) s.Step(`^I call CreateVolume "([^"]*)"$`, f.iCallCreateVolume) - s.Step(`^I call CreateVolume with params "([^"]*)" (-?\d+) "([^"]*)" "([^"]*)" "([^"]*)"$`, f.iCallCreateVolumeWithParams) + s.Step(`^I call CreateVolume with persistent metadata "([^"]*)"$`, f.iCallCreateVolumeWithPersistentMetadata) + s.Step(`^I call CreateVolume with params "([^"]*)" (-?\d+) "([^"]*)" "([^"]*)" "([^"]*)" "([^"]*)"$`, f.iCallCreateVolumeWithParams) s.Step(`^I call DeleteVolume "([^"]*)"$`, f.iCallDeleteVolume) s.Step(`^a valid CreateVolumeResponse is returned$`, f.aValidCreateVolumeResponseIsReturned) s.Step(`^a valid DeleteVolumeResponse is returned$`, f.aValidDeleteVolumeResponseIsReturned) @@ -254,6 +276,7 @@ func FeatureContext(s *godog.Suite) { s.Step(`^a valid ControllerGetCapabilitiesResponse is returned$`, f.aValidControllerGetCapabilitiesResponseIsReturned) s.Step(`^I call ValidateVolumeCapabilities with voltype "([^"]*)" access "([^"]*)"$`, f.iCallValidateVolumeCapabilitiesWithVoltypeAccess) s.Step(`^I call GetCapacity$`, f.iCallGetCapacity) + s.Step(`^I call GetCapacity with params "([^"]*)"$`, f.iCallGetCapacityWithParams) s.Step(`^a valid GetCapacityResponse is returned$`, f.aValidGetCapacityResponseIsReturned) s.Step(`^I call GetCapacity with Invalid access mode$`, f.iCallGetCapacityWithInvalidAccessMode) s.Step(`^I call NodeGetInfo$`, f.iCallNodeGetInfo) @@ -301,9 +324,8 @@ func FeatureContext(s *godog.Suite) { // GetPluginInfo func (f *feature) iCallGetPluginInfo() error { - ctx := new(context.Context) req := new(csi.GetPluginInfoRequest) - f.getPluginInfoResponse, f.err = f.service.GetPluginInfo(*ctx, req) + f.getPluginInfoResponse, f.err = f.service.GetPluginInfo(context.Background(), req) if f.err != nil { return f.err } @@ -321,9 +343,8 @@ func (f *feature) aValidGetPlugInfoResponseIsReturned() error { } func (f *feature) iCallGetPluginCapabilities() error { - ctx := new(context.Context) req := 
new(csi.GetPluginCapabilitiesRequest) - f.getPluginCapabilitiesResponse, f.err = f.service.GetPluginCapabilities(*ctx, req) + f.getPluginCapabilitiesResponse, f.err = f.service.GetPluginCapabilities(context.Background(), req) if f.err != nil { return f.err } @@ -346,17 +367,16 @@ func (f *feature) aValidGetPluginCapabilitiesResponseIsReturned() error { } func (f *feature) iCallProbe() error { - ctx := new(context.Context) req := new(csi.ProbeRequest) f.checkGoRoutines("before probe") - f.probeResponse, f.err = f.service.Probe(*ctx, req) + f.probeResponse, f.err = f.service.Probe(context.Background(), req) f.checkGoRoutines("after probe") return nil } func (f *feature) iCallAutoProbe() error { f.checkGoRoutines("before auto probe") - f.err = f.service.autoProbe(context.Background()) + f.err = f.service.autoProbe(context.Background(), f.service.getIsilonClusterConfig(clusterName1)) f.checkGoRoutines("after auto probe") return nil } @@ -399,7 +419,34 @@ func getTypicalCreateVolumeRequest() *csi.CreateVolumeRequest { return req } -func getCreateVolumeRequestWithParams(rangeInGiB int64, accessZone, isiPath, AzServiceIP string) *csi.CreateVolumeRequest { +func getCreateVolumeRequestWithMetaData() *csi.CreateVolumeRequest { + req := new(csi.CreateVolumeRequest) + req.Name = "volume1" + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 8 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + mount := new(csi.VolumeCapability_MountVolume) + capability := new(csi.VolumeCapability) + accessType := new(csi.VolumeCapability_Mount) + accessType.Mount = mount + capability.AccessType = accessType + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + parameters := make(map[string]string) + parameters[AccessZoneParam] = "System" + parameters[IsiPathParam] = "/ifs/data/csi-isilon" + parameters[csiPersistentVolumeName] = "pv-name" + parameters[csiPersistentVolumeClaimName] = "pv-claimname" + parameters[csiPersistentVolumeClaimNamespace] = "pv-namespace" + req.Parameters = parameters + req.VolumeCapabilities = capabilities + return req +} + +func getCreateVolumeRequestWithParams(rangeInGiB int64, accessZone, isiPath, AzServiceIP, clusterName string) *csi.CreateVolumeRequest { req := new(csi.CreateVolumeRequest) req.Name = "volume1" capacityRange := new(csi.CapacityRange) @@ -425,6 +472,12 @@ func getCreateVolumeRequestWithParams(rangeInGiB int64, accessZone, isiPath, AzS if AzServiceIP != "none" { parameters[AzServiceIPParam] = AzServiceIP } + if clusterName != "none" { + parameters[ClusterNameParam] = clusterName + } + parameters[csiPersistentVolumeName] = "pv-name" + parameters[csiPersistentVolumeClaimName] = "pv-claimname" + parameters[csiPersistentVolumeClaimNamespace] = "pv-namespace" req.Parameters = parameters req.VolumeCapabilities = capabilities return req @@ -479,11 +532,10 @@ func getAccessMode(accessType string) *csi.VolumeCapability_AccessMode { } func (f *feature) iCallCreateVolume(name string) error { - ctx := new(context.Context) req := getTypicalCreateVolumeRequest() f.createVolumeRequest = req req.Name = name - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req) if f.err != nil { log.Printf("CreateVolume call failed: %s\n", f.err.Error()) } @@ -495,14 +547,29 @@ 
func (f *feature) iCallCreateVolume(name string) error { return nil } -func (f *feature) iCallCreateVolumeWithParams(name string, rangeInGiB int, accessZone, isiPath, AzServiceIP string) error { - ctx := new(context.Context) - req := getCreateVolumeRequestWithParams(int64(rangeInGiB), accessZone, isiPath, AzServiceIP) +func (f *feature) iCallCreateVolumeWithPersistentMetadata(name string) error { + req := getCreateVolumeRequestWithMetaData() + f.createVolumeRequest = req + req.Name = name + f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req) + if f.err != nil { + log.Printf("CreateVolume call failed: %s\n", f.err.Error()) + } + if f.createVolumeResponse != nil { + log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId) + stepHandlersErrors.ExportNotFoundError = false + stepHandlersErrors.VolumeNotExistError = false + } + return nil +} + +func (f *feature) iCallCreateVolumeWithParams(name string, rangeInGiB int, accessZone, isiPath, AzServiceIP, clusterName string) error { + req := getCreateVolumeRequestWithParams(int64(rangeInGiB), accessZone, isiPath, AzServiceIP, clusterName) f.createVolumeRequest = req req.Name = name stepHandlersErrors.ExportNotFoundError = true stepHandlersErrors.VolumeNotExistError = true - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req) if f.err != nil { log.Printf("CreateVolume call failed: %s\n", f.err.Error()) } @@ -515,7 +582,6 @@ func (f *feature) iCallCreateVolumeWithParams(name string, rangeInGiB int, acces } func (f *feature) iCallDeleteVolume(name string) error { - ctx := new(context.Context) if f.deleteVolumeRequest == nil { req := getTypicalDeleteVolumeRequest() f.deleteVolumeRequest = req @@ -523,7 +589,9 @@ func (f *feature) iCallDeleteVolume(name string) error { req := f.deleteVolumeRequest req.VolumeId = name - f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + ctx, log, _ := GetRunIDLog(context.Background()) + + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(ctx, req) if f.err != nil { log.Printf("DeleteVolume call failed: '%v'\n", f.err) } @@ -569,7 +637,9 @@ func (f *feature) iInduceError(errtype string) error { case "autoProbeNotEnabled": inducedErrors.autoProbeNotEnabled = true case "autoProbeFailed": - f.service.isiSvc = nil + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) f.service.opts.AutoProbe = false case "GOFSMockDevMountsError": gofsutil.GOFSMock.InduceDevMountsError = true @@ -693,9 +763,8 @@ func (f *feature) theErrorContains(arg1 string) error { } func (f *feature) iCallControllerGetCapabilities() error { - ctx := new(context.Context) req := new(csi.ControllerGetCapabilitiesRequest) - f.controllerGetCapabilitiesResponse, f.err = f.service.ControllerGetCapabilities(*ctx, req) + f.controllerGetCapabilitiesResponse, f.err = f.service.ControllerGetCapabilities(context.Background(), req) if f.err != nil { log.Printf("ControllerGetCapabilities call failed: %s\n", f.err.Error()) return f.err @@ -742,7 +811,6 @@ func (f *feature) aValidControllerGetCapabilitiesResponseIsReturned() error { } func (f *feature) iCallValidateVolumeCapabilitiesWithVoltypeAccess(voltype, access string) error { - ctx := new(context.Context) req := new(csi.ValidateVolumeCapabilitiesRequest) if inducedErrors.invalidVolumeID || f.createVolumeResponse == nil 
{ req.VolumeId = "000-000" @@ -781,7 +849,8 @@ func (f *feature) iCallValidateVolumeCapabilitiesWithVoltypeAccess(voltype, acce capabilities = append(capabilities, capability) req.VolumeCapabilities = capabilities log.Printf("Calling ValidateVolumeCapabilities") - f.validateVolumeCapabilitiesResponse, f.err = f.service.ValidateVolumeCapabilities(*ctx, req) + ctx, _, _ := GetRunIDLog(context.Background()) + f.validateVolumeCapabilitiesResponse, f.err = f.service.ValidateVolumeCapabilities(ctx, req) if f.err != nil { return nil } @@ -854,9 +923,26 @@ func getTypicalCapacityRequest(valid bool) *csi.GetCapacityRequest { } func (f *feature) iCallGetCapacity() error { + header := metadata.New(map[string]string{"csi.requestid": "1"}) + ctx, _, _ := GetRunIDLog(context.Background()) + ctx = metadata.NewIncomingContext(ctx, header) + req := getTypicalCapacityRequest(true) + f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req) + if f.err != nil { + log.Printf("GetCapacity call failed: %s\n", f.err.Error()) + return nil + } + return nil +} + +func (f *feature) iCallGetCapacityWithParams(clusterName string) error { header := metadata.New(map[string]string{"csi.requestid": "1"}) ctx := metadata.NewIncomingContext(context.Background(), header) req := getTypicalCapacityRequest(true) + params := make(map[string]string) + params[ClusterNameParam] = clusterName + req.Parameters = params + f.getCapacityResponse, f.err = f.service.GetCapacity(ctx, req) if f.err != nil { log.Printf("GetCapacity call failed: %s\n", f.err.Error()) @@ -893,9 +979,8 @@ func (f *feature) aValidGetCapacityResponseIsReturned() error { } func (f *feature) iCallNodeGetInfo() error { - ctx := new(context.Context) req := new(csi.NodeGetInfoRequest) - f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(*ctx, req) + f.nodeGetInfoResponse, f.err = f.service.NodeGetInfo(context.Background(), req) if f.err != nil { log.Printf("NodeGetInfo call failed: %s\n", f.err.Error()) return f.err @@ -904,9 +989,8 @@ func (f *feature) iCallNodeGetInfo() error { } func (f *feature) iCallNodeGetCapabilities() error { - ctx := new(context.Context) req := new(csi.NodeGetCapabilitiesRequest) - f.nodeGetCapabilitiesResponse, f.err = f.service.NodeGetCapabilities(*ctx, req) + f.nodeGetCapabilitiesResponse, f.err = f.service.NodeGetCapabilities(context.Background(), req) if f.err != nil { log.Printf("NodeGetCapabilities call failed: %s\n", f.err.Error()) return f.err @@ -1017,7 +1101,6 @@ func (f *feature) aValidNodeUnstageVolumeResponseIsReturned() error { } func (f *feature) iCallNodeUnpublishVolume() error { - ctx := new(context.Context) req := f.nodeUnpublishVolumeRequest if req == nil { _ = f.getNodeUnpublishVolumeRequest() @@ -1028,7 +1111,7 @@ func (f *feature) iCallNodeUnpublishVolume() error { } fmt.Printf("Calling NodePublishVolume\n") - f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(*ctx, req) + f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(context.Background(), req) if f.err != nil { log.Printf("NodePublishVolume call failed: %s\n", f.err.Error()) if strings.Contains(f.err.Error(), "Target Path is required") { @@ -1047,7 +1130,6 @@ func (f *feature) iCallNodeUnpublishVolume() error { } func (f *feature) iCallEphemeralNodeUnpublishVolume() error { - ctx := new(context.Context) req := f.nodeUnpublishVolumeRequest if req == nil { _ = f.getNodeUnpublishVolumeRequest() @@ -1058,7 +1140,7 @@ func (f *feature) iCallEphemeralNodeUnpublishVolume() error { } fmt.Printf("Calling NodePublishVolume\n") - 
f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(*ctx, req) + f.nodeUnpublishVolumeResponse, f.err = f.service.NodeUnpublishVolume(context.Background(), req) if f.err != nil { log.Printf("NodePublishVolume call failed: %s\n", f.err.Error()) if strings.Contains(f.err.Error(), "Target Path is required") { @@ -1355,13 +1437,12 @@ func (f *feature) iCallControllerPublishVolume(volID string, accessMode string, } func (f *feature) iCallControllerUnPublishVolume(volID string, accessMode string, nodeID string) error { - ctx := new(context.Context) req := f.getControllerUnPublishVolumeRequest(accessMode, nodeID) f.unpublishVolumeRequest = req // a customized volume ID can be specified to overwrite the default one req.VolumeId = volID - f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(*ctx, req) + f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(context.Background(), req) if f.err != nil { log.Printf("ControllerUnPublishVolume call failed: %s\n", f.err.Error()) } @@ -1373,8 +1454,6 @@ func (f *feature) iCallControllerUnPublishVolume(volID string, accessMode string } func (f *feature) iCallNodeStageVolume(volID string, accessType string) error { - - ctx := new(context.Context) req := getTypicalNodeStageVolumeRequest(accessType) f.nodeStageVolumeRequest = req @@ -1383,7 +1462,7 @@ func (f *feature) iCallNodeStageVolume(volID string, accessType string) error { req.VolumeId = volID } - f.nodeStageVolumeResponse, f.err = f.service.NodeStageVolume(*ctx, req) + f.nodeStageVolumeResponse, f.err = f.service.NodeStageVolume(context.Background(), req) if f.err != nil { log.Printf("NodeStageVolume call failed: %s\n", f.err.Error()) } @@ -1396,10 +1475,9 @@ func (f *feature) iCallNodeStageVolume(volID string, accessType string) error { } func (f *feature) iCallNodeUnstageVolume(volID string) error { - ctx := new(context.Context) req := getTypicalNodeUnstageVolumeRequest(volID) f.nodeUnstageVolumeRequest = req - f.nodeUnstageVolumeResponse, f.err = f.service.NodeUnstageVolume(*ctx, req) + f.nodeUnstageVolumeResponse, f.err = f.service.NodeUnstageVolume(context.Background(), req) if f.err != nil { log.Printf("NodeUnstageVolume call failed: %s\n", f.err.Error()) } @@ -1411,7 +1489,6 @@ func (f *feature) iCallNodeUnstageVolume(volID string) error { } func (f *feature) iCallListVolumesWithMaxEntriesStartingToken(arg1 int, arg2 string) error { - ctx := new(context.Context) req := new(csi.ListVolumesRequest) // The starting token is not valid if arg2 == "invalid" { @@ -1419,7 +1496,7 @@ func (f *feature) iCallListVolumesWithMaxEntriesStartingToken(arg1 int, arg2 str } req.MaxEntries = int32(arg1) req.StartingToken = arg2 - f.listVolumesResponse, f.err = f.service.ListVolumes(*ctx, req) + f.listVolumesResponse, f.err = f.service.ListVolumes(context.Background(), req) if f.err != nil { log.Printf("ListVolumes call failed: %s\n", f.err.Error()) return nil @@ -1437,11 +1514,10 @@ func (f *feature) aValidListVolumesResponseIsReturned() error { } func (f *feature) iCallDeleteSnapshot(snapshotID string) error { - ctx := new(context.Context) req := new(csi.DeleteSnapshotRequest) req.SnapshotId = snapshotID f.deleteSnapshotRequest = req - _, err := f.service.DeleteSnapshot(*ctx, f.deleteSnapshotRequest) + _, err := f.service.DeleteSnapshot(context.Background(), f.deleteSnapshotRequest) if err != nil { log.Printf("DeleteSnapshot call failed: %s\n", err.Error()) f.err = err @@ -1464,11 +1540,10 @@ func getCreateSnapshotRequest(srcVolumeID, name, isiPath string) 
*csi.CreateSnap } func (f *feature) iCallCreateSnapshot(srcVolumeID, name, isiPath string) error { - ctx := new(context.Context) f.createSnapshotRequest = getCreateSnapshotRequest(srcVolumeID, name, isiPath) req := f.createSnapshotRequest - f.createSnapshotResponse, f.err = f.service.CreateSnapshot(*ctx, req) + f.createSnapshotResponse, f.err = f.service.CreateSnapshot(context.Background(), req) if f.err != nil { log.Printf("CreateSnapshot call failed: %s\n", f.err.Error()) } @@ -1503,11 +1578,11 @@ func getControllerExpandVolumeRequest(volumeID string, requiredBytes int64) *csi func (f *feature) iCallControllerExpandVolume(volumeID string, requiredBytes int64) error { log.Printf("###") - ctx := new(context.Context) f.controllerExpandVolumeRequest = getControllerExpandVolumeRequest(volumeID, requiredBytes) req := f.controllerExpandVolumeRequest - f.controllerExpandVolumeResponse, f.err = f.service.ControllerExpandVolume(*ctx, req) + ctx, log, _ := GetRunIDLog(context.Background()) + f.controllerExpandVolumeResponse, f.err = f.service.ControllerExpandVolume(ctx, req) if f.err != nil { log.Printf("ControllerExpandVolume call failed: %s\n", f.err.Error()) } @@ -1554,12 +1629,11 @@ func (f *feature) setVolumeContent(isSnapshotType bool, identity string) *csi.Cr } func (f *feature) iCallCreateVolumeFromSnapshot(srcSnapshotID, name string) error { - ctx := new(context.Context) req := getTypicalCreateVolumeRequest() f.createVolumeRequest = req req.Name = name req = f.setVolumeContent(true, srcSnapshotID) - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req) if f.err != nil { log.Printf("CreateVolume call failed: '%s'\n", f.err.Error()) } @@ -1570,12 +1644,11 @@ func (f *feature) iCallCreateVolumeFromSnapshot(srcSnapshotID, name string) erro } func (f *feature) iCallCreateVolumeFromVolume(srcVolumeName, name string) error { - ctx := new(context.Context) req := getTypicalCreateVolumeRequest() f.createVolumeRequest = req req.Name = name req = f.setVolumeContent(false, srcVolumeName) - f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + f.createVolumeResponse, f.err = f.service.CreateVolume(context.Background(), req) if f.err != nil { log.Printf("CreateVolume call failed: '%s'\n", f.err.Error()) } @@ -1586,7 +1659,7 @@ func (f *feature) iCallCreateVolumeFromVolume(srcVolumeName, name string) error } func (f *feature) iCallInitializeRealIsilonService() error { - f.service.initializeService(context.Background()) + f.service.initializeServiceOpts(context.Background()) return nil } @@ -1629,20 +1702,26 @@ func (f *feature) aIsilonServiceWithParams(user, mode string) error { handler := getHandler() // Get or reuse the cached service f.getServiceWithParams(user, mode) + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" { if f.server == nil { f.server = httptest.NewServer(handler) } log.Printf("server url: %s\n", f.server.URL) - f.service.opts.EndpointURL = f.server.URL + clusterConfig.EndpointURL = f.server.URL } else { f.server = nil } - f.service.isiSvc, f.err = f.service.GetIsiService(context.Background()) + isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig) + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) f.checkGoRoutines("end aIsilonService") 
f.service.logServiceStats() if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled { - f.service.isiSvc = nil + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) } return nil } @@ -1686,23 +1765,31 @@ func (f *feature) aIsilonServiceWithParamsForCustomTopology(user, mode string) e handler := getHandler() // Get or reuse the cached service f.getServiceWithParamsForCustomTopology(user, mode, true) + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" { if f.server == nil { f.server = httptest.NewServer(handler) } log.Printf("server url: %s\n", f.server.URL) - f.service.opts.EndpointURL = f.server.URL + clusterConfig.EndpointURL = f.server.URL urlList := strings.Split(f.server.URL, ":") log.Printf("urlList: %v", urlList) - f.service.opts.Port = urlList[2] + clusterConfig.IsiPort = urlList[2] } else { f.server = nil } - f.service.isiSvc, f.err = f.service.GetIsiService(context.Background()) + isiSvc, err := f.service.GetIsiService(context.Background(), clusterConfig) + f.err = err + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) + f.checkGoRoutines("end aIsilonService") f.service.logServiceStats() if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled { - f.service.isiSvc = nil + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) } return nil } @@ -1746,23 +1833,29 @@ func (f *feature) aIsilonServiceWithParamsForCustomTopologyNoLabel(user, mode st handler := getHandler() // Get or reuse the cached service f.getServiceWithParamsForCustomTopology(user, mode, false) + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) if handler != nil && os.Getenv("CSI_ISILON_ENDPOINT") == "" { if f.server == nil { f.server = httptest.NewServer(handler) } log.Printf("server url: %s\n", f.server.URL) - f.service.opts.EndpointURL = f.server.URL + clusterConfig.EndpointURL = f.server.URL urlList := strings.Split(f.server.URL, ":") log.Printf("urlList: %v", urlList) - f.service.opts.Port = urlList[2] + clusterConfig.IsiPort = urlList[2] } else { f.server = nil } - f.service.isiSvc, f.err = f.service.GetIsiService(context.Background()) + isiSvc, _ := f.service.GetIsiService(context.Background(), clusterConfig) + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = isiSvc + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) f.checkGoRoutines("end aIsilonService") f.service.logServiceStats() if inducedErrors.noIsiService || inducedErrors.autoProbeNotEnabled { - f.service.isiSvc = nil + updatedClusterConfig, _ := f.service.isiClusters.Load(clusterName1) + updatedClusterConfig.(*IsilonClusterConfig).isiSvc = nil + f.service.isiClusters.Store(clusterName1, updatedClusterConfig) } return nil } @@ -1808,10 +1901,7 @@ func (f *feature) getServiceWithParamsForCustomTopology(user, mode string, apply testNodeHasNoConnection = false svc := new(service) var opts Opts - opts.User = user - opts.Password = "blah" - opts.Endpoint = "127.0.0.1" - opts.EndpointURL = "http://127.0.0.1" + opts.AccessZone = "System" 
opts.Path = "/ifs/data/csi-isilon" opts.Insecure = true @@ -1820,6 +1910,17 @@ func (f *feature) getServiceWithParamsForCustomTopology(user, mode string, apply opts.CustomTopologyEnabled = true opts.KubeConfigPath = "/etc/kubernetes/admin.conf" + newConfig := IsilonClusterConfig{} + newConfig.ClusterName = clusterName1 + newConfig.IsiIP = "127.0.0.1" + newConfig.IsiPort = "8080" + newConfig.EndpointURL = "http://127.0.0.1" + newConfig.User = user + newConfig.Password = "blah" + newConfig.IsiInsecure = &opts.Insecure + newConfig.IsiPath = "/ifs/data/csi-isilon" + newConfig.IsDefaultCluster = true + host, _ := os.Hostname() result := removeNodeLabels(host) if !result { @@ -1843,7 +1944,11 @@ func (f *feature) getServiceWithParamsForCustomTopology(user, mode string, apply svc.mode = mode f.service = svc f.service.nodeID = host + // TODO - IP has to be updated before release f.service.nodeIP = "1.2.3.4" + f.service.defaultIsiClusterName = clusterName1 + f.service.isiClusters = new(sync.Map) + f.service.isiClusters.Store(newConfig.ClusterName, &newConfig) utils.ConfigureLogger(opts.DebugEnabled) return svc } @@ -1853,15 +1958,23 @@ func (f *feature) getServiceWithParams(user, mode string) *service { testNodeHasNoConnection = false svc := new(service) var opts Opts - opts.User = user - opts.Password = "blah" - opts.Endpoint = "127.0.0.1" - opts.EndpointURL = "http://127.0.0.1" opts.AccessZone = "System" opts.Path = "/ifs/data/csi-isilon" opts.Insecure = true opts.DebugEnabled = true opts.Verbose = 1 + + newConfig := IsilonClusterConfig{} + newConfig.ClusterName = clusterName1 + newConfig.IsiIP = "127.0.0.1" + newConfig.IsiPort = "8080" + newConfig.EndpointURL = "http://127.0.0.1" + newConfig.User = user + newConfig.Password = "blah" + newConfig.IsiInsecure = &opts.Insecure + newConfig.IsiPath = "/ifs/data/csi-isilon" + newConfig.IsDefaultCluster = true + if inducedErrors.autoProbeNotEnabled { opts.AutoProbe = false } else { @@ -1870,8 +1983,11 @@ func (f *feature) getServiceWithParams(user, mode string) *service { svc.opts = opts svc.mode = mode f.service = svc - f.service.nodeID = "k8s-rhel76-qual=#=#=1.2.3.4" + f.service.nodeID = fmt.Sprintf("k8s-rhel76-qual=#=#=1.2.3.4=#=#=#{clusterName1}") f.service.nodeIP = "1.2.3.4" + f.service.defaultIsiClusterName = clusterName1 + f.service.isiClusters = new(sync.Map) + f.service.isiClusters.Store(newConfig.ClusterName, &newConfig) utils.ConfigureLogger(opts.DebugEnabled) return svc } @@ -1891,15 +2007,19 @@ func (f *feature) iCallBeforeServe() error { } func (f *feature) ICallCreateQuotaInIsiServiceWithNegativeSizeInBytes() error { - _, f.err = f.service.isiSvc.CreateQuota(f.service.opts.Path, "volume1", -1, true) + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) + ctx, _, _ := GetRunIDLog(context.Background()) + _, f.err = clusterConfig.isiSvc.CreateQuota(ctx, f.service.opts.Path, "volume1", -1, true) return nil } func (f *feature) iCallGetExportRelatedFunctionsInIsiService() error { - _, f.err = f.service.isiSvc.GetExports() - _, f.err = f.service.isiSvc.GetExportByIDWithZone(557, "System") - f.err = f.service.isiSvc.DeleteQuotaByExportIDWithZone("volume1", 557, "System") - _, _, f.err = f.service.isiSvc.GetExportsWithLimit("2") + clusterConfig := f.service.getIsilonClusterConfig(clusterName1) + ctx, _, _ := GetRunIDLog(context.Background()) + _, f.err = clusterConfig.isiSvc.GetExports(ctx) + _, f.err = clusterConfig.isiSvc.GetExportByIDWithZone(ctx, 557, "System") + f.err = clusterConfig.isiSvc.DeleteQuotaByExportIDWithZone(ctx, 
"volume1", 557, "System") + _, _, f.err = clusterConfig.isiSvc.GetExportsWithLimit(ctx, "2") return nil } diff --git a/test/ingestion/README.md b/test/ingestion/README.md index e2ceff3..b6b7125 100644 --- a/test/ingestion/README.md +++ b/test/ingestion/README.md @@ -10,11 +10,13 @@ The details of various Quotas can be obtained via the following REST API of OneF GET /platform/1/quota/quotas -Volume Handle is expected to be present in this pattern VolName + VolumeIDSeparator + exportID + VolumeIDSeparator + accessZone for ex. "demovol1=\_=\_=303=\_=\_=System" +Starting from csi-powerscale 1.5, volume handle should include cluster name also (due to multi array support) + +Volume Handle is expected to be present in this pattern VolName + VolumeIDSeparator + exportID + VolumeIDSeparator + accessZone + clusterName for ex. "demovol1=\_=\_=303=\_=\_=System_=\_=cluster" ## Running the script Command Line inputs are taken in this order: volumename, volumehandle, storageclassname, accessmode, storage size, pvname, pvcname ## Examples -To provision a Volume named sample14 in access zone csi-zone having export-id as 6, volumehandle will be sample14=\_=\_=6=\_=\_=csi-zone. Here we are using a custom storage class named as customstorageclass1, access mode as ReadWriteMany, storage size as 500M, pv name as pv1, pvc name as pvc1, we will be running : +To provision a Volume named sample14 in access zone csi-zone having export-id as 6 and cluster name 'cluster1', volumehandle will be sample14=\_=\_=6=\_=\_=csi-zone=\_=\_=cluster1. Here we are using a custom storage class named as customstorageclass1, access mode as ReadWriteMany, storage size as 500M, pv name as pv1, pvc name as pvc1, we will be running : -./ingestion_test.sh sample14 sample14=\_=\_=6=\_=\_=csi-zone customstorageclass1 ReadWriteMany 500M pv1 pvc1 +./ingestion_test.sh sample14 sample14=\_=\_=6=\_=\_=csi-zone=\_=\_=cluster1 customstorageclass1 ReadWriteMany 500M pv1 pvc1 diff --git a/test/ingestion/ingestion_test.sh b/test/ingestion/ingestion_test.sh index 3e160d9..e4781a8 100755 --- a/test/ingestion/ingestion_test.sh +++ b/test/ingestion/ingestion_test.sh @@ -1,10 +1,10 @@ #!/bin/bash # Copyright: (c) 2020, Dell EMC -# volumename volumehandle storageclass accessmode storagesize pvname pvcname -# 1 2 3 4 5 6 7 +# volumename volumehandle storageclass accessmode storagesize pvname pvcname clustername +# 1 2 3 4 5 6 7 8 -if [[ $2 =~ ^[a-zA-Z0-9_-]+=_=_=[0-9]+=_=_=[a-zA-Z0-9_-]+$ ]] +if [[ $2 =~ ^[a-zA-Z0-9_-]+=_=_=[0-9]+=_=_=[a-zA-Z0-9_-]+=_=_=[a-zA-Z0-9_-]+$ ]] then echo "Volume handle pattern matched" diff --git a/test/ingestion/sample/isilonstaticpv.yaml b/test/ingestion/sample/isilonstaticpv.yaml index 78a32cc..b41b013 100644 --- a/test/ingestion/sample/isilonstaticpv.yaml +++ b/test/ingestion/sample/isilonstaticpv.yaml @@ -16,7 +16,7 @@ spec: Path: "/ifs/data/csi/isilonvol" Name: "isilonvol" AzServiceIP: 'XX.XX.XX.XX' - volumeHandle: isilonvol=_=_=652=_=_=System + volumeHandle: isilonvol=_=_=652=_=_=System=_=_=cluster claimRef: name: isilonstaticpvc namespace: default diff --git a/test/integration/README.md b/test/integration/README.md index aceabdd..971864c 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -5,4 +5,6 @@ PowerScale. There are four scripts to set environment variables, env_Quota_Enabled.sh, env_Quota_notEnabled.sh, env_nodeIP1.sh, env_nodeIP2.sh. All files should be populated with values for PowerScale. env_Quota_Enabled.sh is used for Quota enabled, env_Quota_notEnabled.sh is for Quota not enabled. 
 The file env_nodeIP1.sh and file env_nodeIP2.sh added to mock different node IPs for NodeStageVolume with different accessModes, the corresponding feature file is mock_different_nodeIPs.feature. The file main_integration.feature is used to test most scenarios.
-To launch the integration test, just run run.sh. Which environment script, feature file and tag needed can be specified in this script.
\ No newline at end of file
+There is a config file that holds the cluster secret details; either update this file or change the path it is read from in each environment file via the variable 'X_CSI_ISILON_CONFIG_PATH'.
+
+To launch the integration test, run make integration-test from the csi-powerscale root directory. The environment script, feature file and tag to use can be specified in the run.sh script.
diff --git a/test/integration/config b/test/integration/config
new file mode 100644
index 0000000..e4e4444
--- /dev/null
+++ b/test/integration/config
@@ -0,0 +1,20 @@
+{
+  "isilonClusters": [
+    {
+      "clusterName": "cluster1",
+      "username": "user",
+      "password": "password",
+      "isiIP": "1.2.3.4",
+      "isDefaultCluster": true
+    },
+    {
+      "clusterName": "cluster2",
+      "username": "user",
+      "password": "password",
+      "isiIP": "1.2.3.5",
+      "isiPort": "8080",
+      "isiInsecure": true,
+      "isiPath": "/ifs/data/csi"
+    }
+  ]
+}
diff --git a/test/integration/env_Custom_Topology_Enabled.sh b/test/integration/env_Custom_Topology_Enabled.sh
index 16c3326..7b8d825 100644
--- a/test/integration/env_Custom_Topology_Enabled.sh
+++ b/test/integration/env_Custom_Topology_Enabled.sh
@@ -1,9 +1,7 @@
 #!/bin/sh
 # This should be like 111.222.333.444
-export X_CSI_ISI_ENDPOINT="1.1.1.1"
-export X_CSI_ISI_USER=""
-export X_CSI_ISI_PASSWORD=""
+export X_CSI_CLUSTER_NAME="cluster1"
 export X_CSI_ISI_PATH="/ifs/data/csi/integration"
 export X_CSI_ISI_PORT="8080"
 export X_CSI_ISI_QUOTA_ENABLED="true"
@@ -15,6 +13,7 @@ export X_CSI_ISI_INSECURE="true"
 export X_CSI_ISI_AUTOPROBE="false"
 export X_CSI_ISILON_NO_PROBE_ON_START="true"
 export X_CSI_MODE=""
+export X_CSI_ISILON_CONFIG_PATH=`pwd`/config

 # Variables for using tests
 export CSI_ENDPOINT=`pwd`/unix_sock
diff --git a/test/integration/env_Quota_Enabled.sh b/test/integration/env_Quota_Enabled.sh
index 4c82d7d..edc8da1 100644
--- a/test/integration/env_Quota_Enabled.sh
+++ b/test/integration/env_Quota_Enabled.sh
@@ -1,9 +1,7 @@
 #!/bin/sh
 # This should be like 111.222.333.444
-export X_CSI_ISI_ENDPOINT="1.1.1.1"
-export X_CSI_ISI_USER="root"
-export X_CSI_ISI_PASSWORD=""
+export X_CSI_CLUSTER_NAME="cluster1"
 export X_CSI_ISI_PATH="/ifs/data/csi/integration"
 export X_CSI_ISI_PORT="8080"
 export X_CSI_ISI_QUOTA_ENABLED="true"
@@ -17,6 +15,7 @@ export X_CSI_ISI_INSECURE="true"
 export X_CSI_ISI_AUTOPROBE="false"
 export X_CSI_ISILON_NO_PROBE_ON_START="true"
 export X_CSI_MODE=""
+export X_CSI_ISILON_CONFIG_PATH=`pwd`/config

 # Variables for using tests
 export CSI_ENDPOINT=`pwd`/unix_sock
diff --git a/test/integration/env_Quota_notEnabled.sh b/test/integration/env_Quota_notEnabled.sh
index c130507..7510ea1 100644
--- a/test/integration/env_Quota_notEnabled.sh
+++ b/test/integration/env_Quota_notEnabled.sh
@@ -1,9 +1,7 @@
 #!/bin/sh
 # This should be like 111.222.333.444
-export X_CSI_ISI_ENDPOINT="1.1.1.1"
-export X_CSI_ISI_USER="root"
-export X_CSI_ISI_PASSWORD=""
+export X_CSI_CLUSTER_NAME="cluster1"
 export X_CSI_ISI_PATH="/ifs/data/csi/integration"
 export X_CSI_ISI_PORT="8080"
 export X_CSI_ISI_QUOTA_ENABLED="false"
@@ -17,6 +15,7 @@ export X_CSI_ISI_INSECURE="true"
 export
X_CSI_ISI_AUTOPROBE="false" export X_CSI_ISILON_NO_PROBE_ON_START="true" export X_CSI_MODE="" +export X_CSI_ISILON_CONFIG_PATH=`pwd`/config # Variables for using tests export CSI_ENDPOINT=`pwd`/unix_sock diff --git a/test/integration/env_nodeIP1.sh b/test/integration/env_nodeIP1.sh index ad0fd18..e82baa7 100644 --- a/test/integration/env_nodeIP1.sh +++ b/test/integration/env_nodeIP1.sh @@ -2,9 +2,7 @@ # This is used for mocking different node IPs in order to test accessModes # This should be like 111.222.333.444 -export X_CSI_ISI_ENDPOINT="1.1.1.1" -export X_CSI_ISI_USER="root" -export X_CSI_ISI_PASSWORD="" +export X_CSI_CLUSTER_NAME="cluster1" export X_CSI_ISI_PATH="/ifs/data/csi/integration" export X_CSI_ISI_PORT="8080" export X_CSI_ISI_QUOTA_ENABLED="false" @@ -18,6 +16,7 @@ export X_CSI_ISI_INSECURE="true" export X_CSI_ISI_AUTOPROBE="false" export X_CSI_ISILON_NO_PROBE_ON_START="true" export X_CSI_MODE="" +export X_CSI_ISILON_CONFIG_PATH=`pwd`/config # Variables for using tests export CSI_ENDPOINT=`pwd`/unix_sock diff --git a/test/integration/env_nodeIP2.sh b/test/integration/env_nodeIP2.sh index 2735256..8ca8a80 100644 --- a/test/integration/env_nodeIP2.sh +++ b/test/integration/env_nodeIP2.sh @@ -2,18 +2,17 @@ # This is used for mocking different node IPs in order to test accessModes # This should be like 111.222.333.444 -export X_CSI_ISI_ENDPOINT="1.1.1.1" -export X_CSI_ISI_USER="root" -export X_CSI_ISI_PASSWORD="" +export X_CSI_CLUSTER_NAME="cluster1" export X_CSI_ISI_PATH="/ifs/data/csi/integration" export X_CSI_ISI_PORT="8080" export X_CSI_ISI_QUOTA_ENABLED="false" -export X_CSI_NODE_NAME="nodename=#=#=nodename.com=#=#=1.1.1.2" +export X_CSI_NODE_NAME="xyz=#=#=xyz.com=#=#=1.1.1.2" export X_CSI_NODE_IP="1.1.1.2" export X_CSI_ISI_INSECURE="true" export X_CSI_ISI_AUTOPROBE="false" export X_CSI_ISILON_NO_PROBE_ON_START="true" export X_CSI_MODE="" +export X_CSI_ISILON_CONFIG_PATH=`pwd`/config # Variables for using tests export CSI_ENDPOINT=`pwd`/unix_sock diff --git a/test/integration/features/main_integration.feature b/test/integration/features/main_integration.feature index 256b7a4..0cc7220 100644 --- a/test/integration/features/main_integration.feature +++ b/test/integration/features/main_integration.feature @@ -45,7 +45,7 @@ Feature: Isilon CSI interface Then there is not a directory "integration0" Then there is not an export "integration0" - @v1.0 + @todo Scenario Outline: ListVolumes with different max entries and starting token Given a Isilon service When I call ListVolumes with max entries starting token @@ -77,11 +77,11 @@ Feature: Isilon CSI interface @v1.0 Scenario: Ephemeral Inline Volume basic and idempotency tests Given a Isilon service - When I call EphemeralNodePublishVolume "datadir0" - And I call EphemeralNodePublishVolume "datadir0" + When I call EphemeralNodePublishVolume "datadir9" + And I call EphemeralNodePublishVolume "datadir9" Then there are no errors - When I call EphemeralNodeUnpublishVolume "datadir0" - And I call EphemeralNodeUnpublishVolume "datadir0" + When I call EphemeralNodeUnpublishVolume "datadir9" + And I call EphemeralNodeUnpublishVolume "datadir9" Then there are no errors @v1.0 @@ -548,3 +548,4 @@ Feature: Isilon CSI interface | numberOfVolumes | | 2 | | 4 | + diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index d0ab0f4..3684776 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -27,10 +27,10 @@ import ( "testing" "time" - "github.com/DATA-DOG/godog" csi 
"github.com/container-storage-interface/spec/lib/go/csi" + "github.com/cucumber/godog" "github.com/dell/csi-isilon/provider" - "github.com/rexray/gocsi/utils" + "github.com/dell/gocsi/utils" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -55,8 +55,10 @@ func TestMain(m *testing.M) { if hostFQDN == "unknown" { fmt.Printf("cannot get FQDN") } - nodeName := host + nodeIDSeparator + hostFQDN + nodeIDSeparator + os.Getenv("X_CSI_NODE_IP") - os.Setenv("X_CSI_NODE_NAME", nodeName) + if os.Getenv("X_CSI_CUSTOM_TOPOLOGY_ENABLED") == "true" { + nodeName := host + nodeIDSeparator + hostFQDN + nodeIDSeparator + os.Getenv("X_CSI_NODE_IP") + os.Setenv("X_CSI_NODE_NAME", nodeName) + } // Make the file needed for NodeStage: // /tmp/datadir -- for file system mounts @@ -68,10 +70,12 @@ func TestMain(m *testing.M) { fmt.Printf("'%s': '%s'\n", datadir, err) } + write, err := os.Create("powerscale_integration_test_results.xml") exitVal := godog.RunWithOptions("godog", func(s *godog.Suite) { FeatureContext(s) }, godog.Options{ - Format: "pretty", + Output: write, + Format: "junit", Paths: []string{os.Args[len(os.Args)-2]}, Tags: os.Args[len(os.Args)-1], }) diff --git a/test/integration/run.sh b/test/integration/run.sh index fb14a42..4c48ca7 100644 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -11,15 +11,20 @@ function runTest() { echo "Quota is enabled" runTest ./env_Quota_Enabled.sh ./features/main_integration.feature "v1.0" +mv ./Powerscale_integration_test_results.xml Powerscale_integration_test_results_QuotaEnabled.xml echo "Quota is not enabled" runTest ./env_Quota_notEnabled.sh ./features/integration.feature "v1.0" +mv ./Powerscale_integration_test_results.xml Powerscale_integration_test_results_QuotaNotEnabled.xml echo "test accessModes with nodeIP1" runTest ./env_nodeIP1.sh ./features/mock_different_nodeIPs.feature "first_run" +mv ./Powerscale_integration_test_results.xml Powerscale_integration_test_results_AccessModeIP1.xml echo "test accessModes with nodeIP2" runTest ./env_nodeIP2.sh ./features/mock_different_nodeIPs.feature "second_run" +mv ./Powerscale_integration_test_results.xml Powerscale_integration_test_results_AccessModesIP2.xml echo "Custom Topology is enabled" runTest ./env_Custom_Topology_Enabled.sh ./features/integration.feature "v1.0" +mv ./Powerscale_integration_test_results.xml Powerscale_integration_test_results_CustomTopology.xml diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index ad1d0dc..b6462c5 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -17,8 +17,10 @@ package integration_test import ( "context" + "encoding/json" "errors" "fmt" + "io/ioutil" "log" "os" "os/exec" @@ -26,22 +28,24 @@ import ( "strings" "time" - "github.com/dell/csi-isilon/common/utils" - - "github.com/DATA-DOG/godog" csi "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/cucumber/godog" "github.com/dell/csi-isilon/common/constants" + "github.com/dell/csi-isilon/common/utils" + "github.com/dell/csi-isilon/service" isi "github.com/dell/goisilon" apiv1 "github.com/dell/goisilon/api/v1" ) const ( - MaxRetries = 10 - RetrySleepTime = 1 * time.Second - SleepTime = 200 * time.Millisecond - AccessZoneParam = "AccessZone" - ExportPathParam = "Path" - IsiPathParam = "IsiPath" + MaxRetries = 10 + RetrySleepTime = 1 * time.Second + SleepTime = 200 * time.Millisecond + AccessZoneParam = "AccessZone" + ExportPathParam = "Path" + IsiPathParam = "IsiPath" + ClusterNameParam = 
"ClusterName" + EnvClusterName = "X_CSI_CLUSTER_NAME" ) var ( @@ -77,6 +81,7 @@ type feature struct { vol *csi.Volume isiPath string accssZone string + clusterName string } func (f *feature) addError(err error) { @@ -129,6 +134,9 @@ func (f *feature) aBasicVolumeRequest(name string, size int64) error { parameters := make(map[string]string) parameters[AccessZoneParam] = "csi0zone" parameters[IsiPathParam] = "/ifs/data/csi/integration" + if _, isPresent := os.LookupEnv(EnvClusterName); isPresent { + parameters[ClusterNameParam] = os.Getenv(EnvClusterName) + } req.Parameters = parameters f.createVolumeRequest = req f.isiPath = parameters[IsiPathParam] @@ -138,6 +146,8 @@ func (f *feature) aBasicVolumeRequest(name string, size int64) error { func (f *feature) iCallCreateVolume() error { time.Sleep(RetrySleepTime) + ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) volResp, err := f.createVolume(f.createVolumeRequest) if err != nil { fmt.Printf("CreateVolume: '%s'\n", err.Error()) @@ -151,7 +161,7 @@ func (f *feature) iCallCreateVolume() error { volResp.GetVolume().VolumeId, volResp.GetVolume().VolumeContext["CreationTime"]) fmt.Printf("The access zone is '%s'\n", volResp.GetVolume().VolumeContext[AccessZoneParam]) f.volID = volResp.GetVolume().VolumeId - f.volName, f.exportID, f.accssZone, err = utils.ParseNormalizedVolumeID(f.volID) + f.volName, f.exportID, f.accssZone, f.clusterName, err = utils.ParseNormalizedVolumeID(ctx, f.volID) f.volNameID[f.volName] = f.volID f.vol = volResp.Volume } @@ -222,14 +232,23 @@ func (f *feature) thereAreNoErrors() error { func createIsilonClient() (*isi.Client, error) { ctx := context.Background() + configBytes, err := ioutil.ReadFile(os.Getenv(constants.EnvIsilonConfigFile)) + if err != nil { + return nil, fmt.Errorf("file ('%s') error: %v", os.Getenv(constants.EnvIsilonConfigFile), err) + } + user, password, IPaddr, err := getDetails(configBytes, os.Getenv(EnvClusterName)) + if err != nil { + return nil, err + } + isiClient, err = isi.NewClientWithArgs( ctx, - "https://"+os.Getenv(constants.EnvEndpoint)+":"+os.Getenv(constants.EnvPort), + "https://"+IPaddr+":"+os.Getenv(constants.EnvPort), true, 1, - os.Getenv(constants.EnvUser), + user, "", - os.Getenv(constants.EnvPassword), + password, os.Getenv(constants.EnvPath)) if err != nil { fmt.Printf("error creating isilon client: '%s'\n", err.Error()) @@ -237,6 +256,29 @@ func createIsilonClient() (*isi.Client, error) { return isiClient, err } +func getDetails(configBytes []byte, clusterName string) (string, string, string, error) { + jsonConfig := new(service.IsilonClusters) + err := json.Unmarshal(configBytes, &jsonConfig) + if err != nil { + return "", "", "", fmt.Errorf("unable to parse isilon clusters' config details [%v]", err) + } + + if len(jsonConfig.IsilonClusters) == 0 { + return "", "", "", errors.New("cluster details are not provided in isilon-creds secret") + } + for _, config := range jsonConfig.IsilonClusters { + if len(clusterName) > 0 { + if config.ClusterName == clusterName { + return config.User, config.Password, config.IsiIP, nil + } + } else if config.IsDefaultCluster { + return config.User, config.Password, config.IsiIP, nil + } + } + err = errors.New("") + return "", "", "", err +} + func (f *feature) thereIsADirectory(name string) error { ctx := context.Background() isiClient, err = createIsilonClient() @@ -485,6 +527,9 @@ func (f *feature) aVolumeRequest(name string, size int64) error { parameters := make(map[string]string) parameters[AccessZoneParam] = "csi0zone" 
parameters[IsiPathParam] = "/ifs/data/csi/integration" + if _, isPresent := os.LookupEnv(EnvClusterName); isPresent { + parameters[ClusterNameParam] = os.Getenv(EnvClusterName) + } req.Parameters = parameters f.createVolumeRequest = req f.accssZone = parameters[AccessZoneParam] @@ -548,7 +593,7 @@ func (f *feature) checkNodeExistsForOneExport(am *csi.VolumeCapability_AccessMod return err } case csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER: - if len(*export.Clients) == 1 && utils.IsStringInSlice(nodeIP, *export.Clients) && !utils.IsStringInSlice(nodeIP, *export.ReadWriteClients) && !utils.IsStringInSlice(nodeIP, *export.ReadOnlyClients) && !utils.IsStringInSlice(nodeIP, *export.RootClients) { + if len(*export.Clients) == 2 && utils.IsStringInSlice(nodeIP, *export.Clients) && !utils.IsStringInSlice(nodeIP, *export.ReadWriteClients) && !utils.IsStringInSlice(nodeIP, *export.ReadOnlyClients) && !utils.IsStringInSlice(nodeIP, *export.RootClients) { break } else { err := fmt.Errorf("the location of nodeIP '%s' is wrong\n", nodeIP) @@ -564,6 +609,7 @@ func (f *feature) checkNodeExistsForOneExport(am *csi.VolumeCapability_AccessMod func (f *feature) checkIsilonClientExistsForOneExport(nodeIP string, exportID int, accessZone string) error { isiClient, _ = createIsilonClient() ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) export, _ := isiClient.GetExportByIDWithZone(ctx, exportID, accessZone) if export == nil { panic(fmt.Sprintf("failed to get export by id '%d' and zone '%s'\n", exportID, accessZone)) @@ -572,7 +618,7 @@ func (f *feature) checkIsilonClientExistsForOneExport(nodeIP string, exportID in var req *csi.ControllerPublishVolumeRequest req = f.controllerPublishVolumeRequest am, err = utils.GetAccessMode(req) - _, fqdn, clientIP, _ := utils.ParseNodeID(nodeIP) + _, fqdn, clientIP, _ := utils.ParseNodeID(ctx, nodeIP) // if fqdn exists, check fqdn firstly, then nodeIP if fqdn != "" { err = f.checkNodeExistsForOneExport(am, fqdn, export) @@ -635,11 +681,12 @@ func (f *feature) nodeUnstageVolume(req *csi.NodeUnstageVolumeRequest) error { func (f *feature) checkIsilonClientNotExistsForOneExport(nodeIP string, exportID int, accessZone string) error { isiClient, _ = createIsilonClient() ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) export, _ := isiClient.GetExportByIDWithZone(ctx, exportID, accessZone) if export == nil { panic(fmt.Sprintf("failed to get export by id '%d' and zone '%s'\n", exportID, accessZone)) } - _, fqdn, clientIP, _ := utils.ParseNodeID(nodeIP) + _, fqdn, clientIP, _ := utils.ParseNodeID(ctx, nodeIP) if fqdn != "" { isNodeIPInClientFields := utils.IsStringInSlices(clientIP, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) isNodeFqdnInClientFields := utils.IsStringInSlices(fqdn, *export.Clients, *export.ReadOnlyClients, *export.ReadWriteClients, *export.RootClients) @@ -699,7 +746,7 @@ func (f *feature) getNodePublishVolumeRequest(path string) *csi.NodePublishVolum // For ephemeral volumes capability := new(csi.VolumeCapability) accessMode := new(csi.VolumeCapability_AccessMode) - accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER capability.AccessMode = accessMode mount := new(csi.VolumeCapability_MountVolume) mount.FsType = "" @@ -1011,6 +1058,7 @@ func (f *feature) iCallCreateVolumeFromVolume(newVolume, srcVolume string, size func (f *feature) createAVolume(req *csi.CreateVolumeRequest, voltype 
string) error { time.Sleep(SleepTime) ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) client := csi.NewControllerClient(grpcClient) volResp, err := client.CreateVolume(ctx, req) if err != nil { @@ -1020,7 +1068,7 @@ func (f *feature) createAVolume(req *csi.CreateVolumeRequest, voltype string) er fmt.Printf("CreateVolume from snap %s (%s) %s\n", volResp.GetVolume().VolumeContext["Name"], volResp.GetVolume().VolumeId, volResp.GetVolume().VolumeContext["CreationTime"]) f.volID = volResp.GetVolume().VolumeId - f.volName, f.exportID, f.accssZone, err = utils.ParseNormalizedVolumeID(f.volID) + f.volName, f.exportID, f.accssZone, f.clusterName, err = utils.ParseNormalizedVolumeID(ctx, f.volID) f.volNameID[f.volName] = f.volID f.vol = volResp.Volume } @@ -1079,6 +1127,9 @@ func (f *feature) getMountVolumeRequest(name string) *csi.CreateVolumeRequest { parameters := make(map[string]string) parameters[AccessZoneParam] = "csi0zone" parameters[IsiPathParam] = "/ifs/data/csi/integration" + if _, isPresent := os.LookupEnv(EnvClusterName); isPresent { + parameters[ClusterNameParam] = os.Getenv(EnvClusterName) + } req.Parameters = parameters f.createVolumeRequest = req f.accssZone = parameters[AccessZoneParam] @@ -1087,6 +1138,8 @@ func (f *feature) getMountVolumeRequest(name string) *csi.CreateVolumeRequest { } func (f *feature) iCreateVolumesInParallel(nVols int) error { + ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) idchan := make(chan string, nVols) errchan := make(chan error, nVols) t0 := time.Now() @@ -1116,8 +1169,9 @@ func (f *feature) iCreateVolumesInParallel(nVols int) error { var err error id = <-idchan if id != "" { - f.volName, f.exportID, f.accssZone, err = utils.ParseNormalizedVolumeID(id) + f.volName, f.exportID, f.accssZone, f.clusterName, err = utils.ParseNormalizedVolumeID(ctx, id) f.volNameID[f.volName] = id + f.vol = f.volIDContext[id] } err = <-errchan if err != nil { @@ -1241,10 +1295,12 @@ func (f *feature) iNodeStageVolumesInParallel(nVols int) error { } func (f *feature) checkIsilonClientsExist(nVols int) error { + ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) for i := 0; i < nVols; i++ { volName := fmt.Sprintf("scale%d", i) volId := f.volNameID[volName] - _, exportID, accessZone, _ := utils.ParseNormalizedVolumeID(volId) + _, exportID, accessZone, _, _ := utils.ParseNormalizedVolumeID(ctx, volId) var nodeIP = os.Getenv("X_CSI_NODE_NAME") err := f.checkIsilonClientExistsForOneExport(nodeIP, exportID, accessZone) if err != nil { @@ -1255,10 +1311,12 @@ func (f *feature) checkIsilonClientsExist(nVols int) error { } func (f *feature) checkIsilonClientsNotExist(nVols int) error { + ctx := context.Background() + ctx, _, _ = service.GetRunIDLog(ctx) for i := 0; i < nVols; i++ { volName := fmt.Sprintf("scale%d", i) volID := f.volNameID[volName] - _, exportID, accessZone, _ := utils.ParseNormalizedVolumeID(volID) + _, exportID, accessZone, _, _ := utils.ParseNormalizedVolumeID(ctx, volID) var nodeIP = os.Getenv("X_CSI_NODE_NAME") err := f.checkIsilonClientNotExistsForOneExport(nodeIP, exportID, accessZone) if err != nil { diff --git a/test/sample_files/inlinevolume.yaml b/test/sample_files/inlinevolume.yaml index 094c960..39c499b 100644 --- a/test/sample_files/inlinevolume.yaml +++ b/test/sample_files/inlinevolume.yaml @@ -16,3 +16,4 @@ spec: driver: csi-isilon.dellemc.com volumeAttributes: size: "2Gi" + ClusterName: "cluster1"
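To close, a short illustration of how a test helper might pick cluster credentials out of the new test/integration/config file when X_CSI_CLUSTER_NAME is set, falling back to the default cluster otherwise. This is a hedged sketch in the spirit of the getDetails helper added to step_defs_test.go above, not the driver's own code; the struct and function names below are local to the example, while the JSON field names are the ones shown in the config file earlier in this diff.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// clusterEntry mirrors the JSON fields used in test/integration/config.
type clusterEntry struct {
	ClusterName      string `json:"clusterName"`
	Username         string `json:"username"`
	Password         string `json:"password"`
	IsiIP            string `json:"isiIP"`
	IsDefaultCluster bool   `json:"isDefaultCluster"`
}

type clusterList struct {
	IsilonClusters []clusterEntry `json:"isilonClusters"`
}

// pickCluster returns the entry matching clusterName, or the default
// cluster when clusterName is empty (similar to how getIsilonConfig falls
// back when a request carries no ClusterName parameter).
func pickCluster(configBytes []byte, clusterName string) (*clusterEntry, error) {
	var cfg clusterList
	if err := json.Unmarshal(configBytes, &cfg); err != nil {
		return nil, fmt.Errorf("unable to parse isilon clusters' config details: %v", err)
	}
	for i := range cfg.IsilonClusters {
		c := &cfg.IsilonClusters[i]
		if clusterName != "" && c.ClusterName == clusterName {
			return c, nil
		}
		if clusterName == "" && c.IsDefaultCluster {
			return c, nil
		}
	}
	return nil, errors.New("no matching or default cluster found in config")
}

func main() {
	data := []byte(`{"isilonClusters":[{"clusterName":"cluster1","username":"user","password":"password","isiIP":"1.2.3.4","isDefaultCluster":true}]}`)
	c, err := pickCluster(data, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.ClusterName, c.IsiIP) // cluster1 1.2.3.4
}
```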