From 774f5dabefa0a8b647b33da9b9ef42ee972b3022 Mon Sep 17 00:00:00 2001
From: David Ko
Date: Mon, 12 Dec 2022 14:43:09 +0800
Subject: [PATCH] fix: typos

Signed-off-by: David Ko
---
 .codespellignore                              |  1 +
 .github/workflows/codespell.yml               |  2 +-
 api/backup.go                                 |  2 +-
 api/orphan.go                                 |  2 +-
 controller/backup_controller.go               |  2 +-
 controller/engine_controller.go               |  8 ++--
 controller/kubernetes_pod_controller.go       | 10 ++---
 controller/monitor/snapshot_monitor.go        |  4 +-
 controller/node_controller.go                 |  2 +-
 controller/node_controller_test.go            |  2 +-
 controller/orphan_controller.go               |  2 +-
 controller/system_backup_controller.go        |  4 +-
 controller/system_rollout_controller.go       |  2 +-
 controller/volume_controller.go               |  8 ++--
 controller/volume_controller_test.go          | 20 ++++-----
 csi/controller_server.go                      |  2 +-
 csi/node_server.go                            |  2 +-
 datastore/kubernetes.go                       | 45 ++++++++++---------
 datastore/longhorn.go                         |  2 +-
 engineapi/backup_monitor.go                   |  2 +-
 k8s/pkg/apis/longhorn/v1beta1/backingimage.go |  2 +-
 k8s/pkg/apis/longhorn/v1beta1/backuptarget.go |  2 +-
 k8s/pkg/apis/longhorn/v1beta1/engineimage.go  |  2 +-
 k8s/pkg/apis/longhorn/v1beta1/node.go         |  2 +-
 k8s/pkg/apis/longhorn/v1beta1/volume.go       |  5 ++-
 types/setting.go                              |  2 +-
 webhook/admission/README.md                   |  2 +-
 webhook/conversion/conversion.go              |  6 +--
 webhook/resources/snapshot/mutator.go         |  2 +-
 webhook/resources/snapshot/validator.go       |  6 +--
 webhook/resources/volume/validator.go         |  2 +-
 31 files changed, 80 insertions(+), 77 deletions(-)

diff --git a/.codespellignore b/.codespellignore
index 91c974b556..5ee08d6579 100644
--- a/.codespellignore
+++ b/.codespellignore
@@ -3,3 +3,4 @@ ec2
 eks
 gce
 gcp
+atleast
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 327a9666a8..d5abd9659d 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -20,4 +20,4 @@ jobs:
       with:
         check_filenames: true
         ignore_words_file: .codespellignore
-        skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum "
+        skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum"
diff --git a/api/backup.go b/api/backup.go
index e3c9fb466c..7e22ee7f63 100644
--- a/api/backup.go
+++ b/api/backup.go
@@ -56,7 +56,7 @@ func (s *Server) BackupVolumeGet(w http.ResponseWriter, req *http.Request) error
 func (s *Server) BackupVolumeDelete(w http.ResponseWriter, req *http.Request) error {
 	volName := mux.Vars(req)["volName"]
 	if err := s.m.DeleteBackupVolume(volName); err != nil {
-		return errors.Wrapf(err, "failed to delet backup volume '%s'", volName)
+		return errors.Wrapf(err, "failed to delete backup volume '%s'", volName)
 	}
 	return nil
 }
diff --git a/api/orphan.go b/api/orphan.go
index aaa7207411..c55eb2b218 100644
--- a/api/orphan.go
+++ b/api/orphan.go
@@ -24,7 +24,7 @@ func (s *Server) OrphanList(rw http.ResponseWriter, req *http.Request) (err erro
 func (s *Server) orphanList(apiContext *api.ApiContext) (*client.GenericCollection, error) {
 	list, err := s.m.ListOrphans()
 	if err != nil {
-		return nil, errors.Wrap(err, "error listing orhpan")
+		return nil, errors.Wrap(err, "error listing orphan")
 	}
 	return toOrphanCollection(list), nil
 }
diff --git a/controller/backup_controller.go b/controller/backup_controller.go
index 799311a3e8..fe77df4e6c 100644
--- a/controller/backup_controller.go
+++ b/controller/backup_controller.go
@@ -394,7 +394,7 @@ func (bc *BackupController) reconcile(backupName string) (err error) {
 	}
 
 	// Remove the Backup Volume recurring jobs/groups information.
-	// Only record the lastest recurring jobs/groups information in backup volume CR and volume.cfg on remote backup target.
+	// Only record the latest recurring jobs/groups information in backup volume CR and volume.cfg on remote backup target.
 	delete(backupInfo.Labels, types.VolumeRecurringJobInfoLabel)
 
 	// Update Backup CR status
diff --git a/controller/engine_controller.go b/controller/engine_controller.go
index 4f2ac11181..b8b39b3f76 100644
--- a/controller/engine_controller.go
+++ b/controller/engine_controller.go
@@ -994,8 +994,8 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
 			return nil
 		}
 
-		m.aquireRestoringCounter(true)
-		defer m.aquireRestoringCounter(false)
+		m.acquireRestoringCounter(true)
+		defer m.acquireRestoringCounter(false)
 	}
 
 	if err = m.restoreBackup(engine, rsMap, cliAPIVersion, engineClientProxy); err != nil {
@@ -1026,8 +1026,8 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
 	return nil
 }
 
-func (m *EngineMonitor) aquireRestoringCounter(aquire bool) {
-	if !aquire {
+func (m *EngineMonitor) acquireRestoringCounter(acquire bool) {
+	if !acquire {
 		m.restoringCounter.DecreaseCount()
 		return
 	}
diff --git a/controller/kubernetes_pod_controller.go b/controller/kubernetes_pod_controller.go
index dd0d2d3858..c97908e40d 100644
--- a/controller/kubernetes_pod_controller.go
+++ b/controller/kubernetes_pod_controller.go
@@ -202,11 +202,11 @@ func (kc *KubernetesPodController) handlePodDeletionIfNodeDown(pod *v1.Pod, node
 
 	// make sure the volumeattachments of the pods are gone first
 	// ref: https://github.com/longhorn/longhorn/issues/2947
-	vas, err := kc.getVolumeAttachmentsOfPod(pod)
+	volumeAttachments, err := kc.getVolumeAttachmentsOfPod(pod)
 	if err != nil {
 		return err
 	}
-	for _, va := range vas {
+	for _, va := range volumeAttachments {
 		if va.DeletionTimestamp == nil {
 			err := kc.kubeClient.StorageV1().VolumeAttachments().Delete(context.TODO(), va.Name, metav1.DeleteOptions{})
 			if err != nil {
@@ -239,8 +239,8 @@ func (kc *KubernetesPodController) handlePodDeletionIfNodeDown(pod *v1.Pod, node
 }
 
 func (kc *KubernetesPodController) getVolumeAttachmentsOfPod(pod *v1.Pod) ([]*storagev1.VolumeAttachment, error) {
-	res := []*storagev1.VolumeAttachment{}
-	vas, err := kc.ds.ListVolumeAttachmentsRO()
+	var res []*storagev1.VolumeAttachment
+	volumeAttachments, err := kc.ds.ListVolumeAttachmentsRO()
 	if err != nil {
 		return nil, err
 	}
@@ -262,7 +262,7 @@ func (kc *KubernetesPodController) getVolumeAttachmentsOfPod(pod *v1.Pod) ([]*st
 		pvs[pvc.Spec.VolumeName] = true
 	}
 
-	for _, va := range vas {
+	for _, va := range volumeAttachments {
 		if va.Spec.NodeName != pod.Spec.NodeName {
 			continue
 		}
diff --git a/controller/monitor/snapshot_monitor.go b/controller/monitor/snapshot_monitor.go
index 731a4e7ce3..fb3b1dd2e7 100644
--- a/controller/monitor/snapshot_monitor.go
+++ b/controller/monitor/snapshot_monitor.go
@@ -528,8 +528,8 @@ func determineChecksumFromHashStatus(log logrus.FieldLogger, snapshotName, exist
 		return "", fmt.Errorf(prefixChecksumDetermineFailure+"since snapshot disk files are silently corrupted", snapshotName)
 	}
 
-	// The checksums from replicas might be different than previous values because of purge, trim, corruption and etc.
-	// So, the vote mechanism is alway executed to get the latest checksum and then update the status.checksum.
+	// The checksums from replicas might be different from previous values because of purge, trim, corruption and etc.
+	// So, the vote mechanism is always executed to get the latest checksum and then update the status.checksum.
 	// If the checksum cannot be determined by the ones from replicas, the existingChecksum (snapshot.status.checksum) will
 	// help to determine the final checksum.
 	found, checksum, maxVotes := determineChecksum(checksums)
diff --git a/controller/node_controller.go b/controller/node_controller.go
index 50625c3d8f..1aabf95e54 100644
--- a/controller/node_controller.go
+++ b/controller/node_controller.go
@@ -410,7 +410,7 @@ func (nc *NodeController) syncNode(key string) (err error) {
 			nc.eventRecorder, node, v1.EventTypeWarning)
 	}
 
-	// sync node state with kuberentes node status
+	// sync node state with kubernetes node status
 	kubeNode, err := nc.ds.GetKubernetesNode(name)
 	if err != nil {
 		// if kubernetes node has been removed from cluster
diff --git a/controller/node_controller_test.go b/controller/node_controller_test.go
index e70179a906..42138fcfd5 100644
--- a/controller/node_controller_test.go
+++ b/controller/node_controller_test.go
@@ -700,7 +700,7 @@ func (s *TestSuite) TestSyncNode(c *C) {
 	err = sIndexer.Add(imImageSetting)
 	c.Assert(err, IsNil)
 
-	// create kuberentes node
+	// create kubernetes node
 	for _, kubeNode := range tc.kubeNodes {
 		n, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), kubeNode, metav1.CreateOptions{})
 		c.Assert(err, IsNil)
diff --git a/controller/orphan_controller.go b/controller/orphan_controller.go
index 8c3a843df4..ede702349c 100644
--- a/controller/orphan_controller.go
+++ b/controller/orphan_controller.go
@@ -278,7 +278,7 @@ func (oc *OrphanController) cleanupOrphanedData(orphan *longhorn.Orphan) (err er
 	// Make sure if the orphan nodeID and controller ID are same.
 	// If NO, just delete the orphan resource object and don't touch the data.
 	if orphan.Spec.NodeID != oc.controllerID {
-		log.Infof("Orphan nodeID %v is different from controllerID %v, so just delete the orphan resouce object",
+		log.Infof("Orphan nodeID %v is different from controllerID %v, so just delete the orphan resource object",
 			orphan.Name, oc.controllerID)
 		return nil
 	}
diff --git a/controller/system_backup_controller.go b/controller/system_backup_controller.go
index 9d076936a5..c31fed6dfe 100644
--- a/controller/system_backup_controller.go
+++ b/controller/system_backup_controller.go
@@ -498,7 +498,7 @@ func (c *SystemBackupController) UploadSystemBackup(systemBackup *longhorn.Syste
 	var recordErr error
 	existingSystemBackup := systemBackup.DeepCopy()
 
-	// Handle the CR status update here because this method is called by a seperate goroutine.
+	// Handle the CR status update here because this method is called by a separate goroutine.
 	defer func() {
 		record := &systemBackupRecord{}
 		if recordErr != nil {
@@ -579,7 +579,7 @@ func (c *SystemBackupController) cleanupRemoteSystemBackupFiles(systemBackup *lo
 
 	systemBackupsFromBackupTarget, err := backupTargetClient.ListSystemBackup()
 	if err != nil {
-		return errors.Wrapf(err, "failed to list system backups in bakup target %v", backupTargetSetting.Value)
+		return errors.Wrapf(err, "failed to list system backups in backup target %v", backupTargetSetting.Value)
 	}
 
 	if _, exist := systemBackupsFromBackupTarget[systembackupstore.Name(systemBackup.Name)]; !exist {
diff --git a/controller/system_rollout_controller.go b/controller/system_rollout_controller.go
index 9de342969f..1912532474 100644
--- a/controller/system_rollout_controller.go
+++ b/controller/system_rollout_controller.go
@@ -679,7 +679,7 @@ func (c *SystemRolloutController) Unpack(log logrus.FieldLogger) error {
 	}
 
 	if err := c.cacheAPIExtensionsResources(); err != nil {
-		return errors.Wrap(err, "failed to extract API Extentions resources")
+		return errors.Wrap(err, "failed to extract API Extensions resources")
 	}
 
 	if err := c.cacheLonghornResources(); err != nil {
diff --git a/controller/volume_controller.go b/controller/volume_controller.go
index 6a035d36c4..b77fa46b62 100644
--- a/controller/volume_controller.go
+++ b/controller/volume_controller.go
@@ -3518,11 +3518,11 @@ func (vc *VolumeController) processMigration(v *longhorn.Volume, es map[string]*
 
 	log = log.WithField("migrationEngine", migrationEngine.Name)
 
-	allReady := false
-	if allReady, revertRequired, err = vc.prepareReplicasAndEngineForMigration(v, currentEngine, migrationEngine, rs); err != nil {
+	ready := false
+	if ready, revertRequired, err = vc.prepareReplicasAndEngineForMigration(v, currentEngine, migrationEngine, rs); err != nil {
 		return err
 	}
-	if !allReady || revertRequired {
+	if !ready || revertRequired {
 		return nil
 	}
 
@@ -3530,7 +3530,7 @@ func (vc *VolumeController) processMigration(v *longhorn.Volume, es map[string]*
 	return nil
 }
 
-func (vc *VolumeController) prepareReplicasAndEngineForMigration(v *longhorn.Volume, currentEngine, migrationEngine *longhorn.Engine, rs map[string]*longhorn.Replica) (allReady, revertRequired bool, err error) {
+func (vc *VolumeController) prepareReplicasAndEngineForMigration(v *longhorn.Volume, currentEngine, migrationEngine *longhorn.Engine, rs map[string]*longhorn.Replica) (ready, revertRequired bool, err error) {
 	log := getLoggerForVolume(vc.logger, v).WithFields(logrus.Fields{"migrationNodeID": v.Spec.MigrationNodeID, "migrationEngine": migrationEngine.Name})
 
 	// Check the migration engine current status
diff --git a/controller/volume_controller_test.go b/controller/volume_controller_test.go
index 4f994f602a..4e9f89ad73 100644
--- a/controller/volume_controller_test.go
+++ b/controller/volume_controller_test.go
@@ -831,12 +831,12 @@ func (s *TestSuite) TestVolumeLifeCycle(c *C) {
 		tc.expectVolume.Status.State = longhorn.VolumeStateAttaching
 		tc.expectVolume.Status.CurrentImage = tc.volume.Spec.EngineImage
 		tc.expectVolume.Status.CurrentNodeID = tc.volume.Spec.NodeID
-		expectEs := map[string]*longhorn.Engine{}
+		expectEngines := map[string]*longhorn.Engine{}
 		for _, e := range tc.expectEngines {
 			e.Spec.RevisionCounterDisabled = true
-			expectEs[e.Name] = e
+			expectEngines[e.Name] = e
 		}
-		tc.expectEngines = expectEs
+		tc.expectEngines = expectEngines
 		expectRs := map[string]*longhorn.Replica{}
 		for _, r := range tc.expectReplicas {
 			r.Spec.DesireState = longhorn.InstanceStateRunning
@@ -871,12 +871,12 @@ func (s *TestSuite) TestVolumeLifeCycle(c *C) {
 
 		tc.copyCurrentToExpect()
 
-		expectEs = map[string]*longhorn.Engine{}
+		expectEngines = map[string]*longhorn.Engine{}
 		for _, e := range tc.expectEngines {
 			e.Spec.SalvageRequested = true
-			expectEs[e.Name] = e
+			expectEngines[e.Name] = e
 		}
-		tc.expectEngines = expectEs
+		tc.expectEngines = expectEngines
 
 		expectRs = map[string]*longhorn.Replica{}
 		for _, r := range tc.expectReplicas {
@@ -1484,12 +1484,12 @@ func (s *TestSuite) runTestCases(c *C, testCases map[string]*VolumeTestCase) {
 		for _, retE := range retEs.Items {
 			if tc.engines == nil {
 				// test creation, name would be different
-				var expectE *longhorn.Engine
-				for _, expectE = range tc.expectEngines {
+				var expectEngine *longhorn.Engine
+				for _, expectEngine = range tc.expectEngines {
 					break
 				}
-				c.Assert(retE.Spec, DeepEquals, expectE.Spec)
-				c.Assert(retE.Status, DeepEquals, expectE.Status)
+				c.Assert(retE.Spec, DeepEquals, expectEngine.Spec)
+				c.Assert(retE.Status, DeepEquals, expectEngine.Status)
 			} else {
 				c.Assert(retE.Spec, DeepEquals, tc.expectEngines[retE.Name].Spec)
 				c.Assert(retE.Status, DeepEquals, tc.expectEngines[retE.Name].Status)
diff --git a/csi/controller_server.go b/csi/controller_server.go
index 8ee385998b..060e91488a 100644
--- a/csi/controller_server.go
+++ b/csi/controller_server.go
@@ -401,7 +401,7 @@ func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *cs
 		return nil, status.Error(codes.InvalidArgument, "volume capability missing in request")
 	}
 
-	// TODO: #1875 API returns error instead of not found, so we cannot differenciate between a retrieval failure and non existing resource
+	// TODO: #1875 API returns error instead of not found, so we cannot differentiate between a retrieval failure and non existing resource
 	if _, err := cs.apiClient.Node.ById(nodeID); err != nil {
 		return nil, status.Errorf(codes.NotFound, "node %s not found", nodeID)
 	}
diff --git a/csi/node_server.go b/csi/node_server.go
index 4d110404ee..c2c832479c 100644
--- a/csi/node_server.go
+++ b/csi/node_server.go
@@ -454,7 +454,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
 		logrus.WithError(err).Errorf("mounted volume %v on node %v failed required filesystem resize", volumeID, ns.nodeID)
 		return nil, status.Error(codes.Internal, err.Error())
 	} else if resized {
-		logrus.Infof("mounted volume %v on node %v succesfully resized filesystem after mount", volumeID, ns.nodeID)
+		logrus.Infof("mounted volume %v on node %v successfully resized filesystem after mount", volumeID, ns.nodeID)
 	} else {
 		logrus.Debugf("mounted volume %v on node %v already has correct filesystem size", volumeID, ns.nodeID)
 	}
diff --git a/datastore/kubernetes.go b/datastore/kubernetes.go
index d39e8aaee9..aba90c9d15 100644
--- a/datastore/kubernetes.go
+++ b/datastore/kubernetes.go
@@ -411,7 +411,7 @@ func (s *DataStore) ListInstanceManagerPods() ([]*corev1.Pod, error) {
 	return s.ListPodsBySelector(selector)
 }
 
-// ListInstanceManagerPodsBy returns a list of instance manager pods that fullfill the below conditions
+// ListInstanceManagerPodsBy returns a list of instance manager pods that fulfill the below conditions
 func (s *DataStore) ListInstanceManagerPodsBy(node string, image string, imType longhorn.InstanceManagerType) ([]*corev1.Pod, error) {
 	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
 		MatchLabels: types.GetInstanceManagerLabels(node, image, imType),
@@ -464,7 +464,7 @@ func (s *DataStore) GetSupportBundleManagerPod(supportBundle *longhorn.SupportBu
 	if count := len(supportBundleManagerPods); count == 0 {
 		return nil, fmt.Errorf("cannot find support bundle manager pod")
 	} else if count > 1 {
-		return nil, fmt.Errorf("found unexpect number of %v support bundle manager pod", count)
+		return nil, fmt.Errorf("found unexpected number of %v support bundle manager pod", count)
 	}
 
 	return supportBundleManagerPods[0], nil
@@ -770,26 +770,27 @@ func NewPVCManifest(size int64, pvName, ns, pvcName, storageClassName string, ac
 // apiVersion: v1
 // kind: Pod
 // metadata:
-//   annotations:
-//     k8s.v1.cni.cncf.io/network-status: |-
-//       [{
-//           "name": "cbr0",
-//           "interface": "eth0",
-//           "ips": [
-//               "10.42.0.175"
-//           ],
-//           "mac": "be:67:b2:19:17:84",
-//           "default": true,
-//           "dns": {}
-//       },{
-//           "name": "kube-system/demo-192-168-0-0",
-//           "interface": "lhnet1",
-//           "ips": [
-//               "192.168.1.175"
-//           ],
-//           "mac": "02:59:e5:d4:ae:ea",
-//           "dns": {}
-//       }]
+//
+//	annotations:
+//	  k8s.v1.cni.cncf.io/network-status: |-
+//	    [{
+//	        "name": "cbr0",
+//	        "interface": "eth0",
+//	        "ips": [
+//	            "10.42.0.175"
+//	        ],
+//	        "mac": "be:67:b2:19:17:84",
+//	        "default": true,
+//	        "dns": {}
+//	    },{
+//	        "name": "kube-system/demo-192-168-0-0",
+//	        "interface": "lhnet1",
+//	        "ips": [
+//	            "192.168.1.175"
+//	        ],
+//	        "mac": "02:59:e5:d4:ae:ea",
+//	        "dns": {}
+//	    }]
 func (s *DataStore) GetStorageIPFromPod(pod *corev1.Pod) string {
 	storageNetwork, err := s.GetSetting(types.SettingNameStorageNetwork)
 	if err != nil {
diff --git a/datastore/longhorn.go b/datastore/longhorn.go
index a9abaa8a41..eff740214f 100644
--- a/datastore/longhorn.go
+++ b/datastore/longhorn.go
@@ -457,7 +457,7 @@ func (s *DataStore) ListSettings() (map[types.SettingName]*longhorn.Setting, err
 	for _, itemRO := range list {
 		// Cannot use cached object from lister
 		settingField := types.SettingName(itemRO.Name)
-		// Ignore the items that we don't recongize
+		// Ignore the items that we don't recognize
 		if _, ok := types.GetSettingDefinition(settingField); ok {
 			itemMap[settingField] = itemRO.DeepCopy()
 		}
diff --git a/engineapi/backup_monitor.go b/engineapi/backup_monitor.go
index cd19450fbd..602a7ebeec 100644
--- a/engineapi/backup_monitor.go
+++ b/engineapi/backup_monitor.go
@@ -244,7 +244,7 @@ func (m *BackupMonitor) exponentialBackOffTimer() bool {
 			// Keep in exponential backoff timer
 		case <-ctx.Done():
 			// Give it the last try to prevent if the snapshot backup succeed between
-			// the last trigged backoff time and the max retry period
+			// the last triggered backoff time and the max retry period
 			currentBackupStatus, err := m.syncBackupStatusFromEngineReplica()
 			if err == nil {
 				m.logger.Info("Change to liner timer to monitor it")
diff --git a/k8s/pkg/apis/longhorn/v1beta1/backingimage.go b/k8s/pkg/apis/longhorn/v1beta1/backingimage.go
index 61eedaef68..eb86716177 100644
--- a/k8s/pkg/apis/longhorn/v1beta1/backingimage.go
+++ b/k8s/pkg/apis/longhorn/v1beta1/backingimage.go
@@ -88,7 +88,7 @@ type BackingImageList struct {
 	Items []BackingImage `json:"items"`
 }
 
-// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
+// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
 func (bi *BackingImage) ConvertTo(dst conversion.Hub) error {
 	switch t := dst.(type) {
 	case *v1beta2.BackingImage:
diff --git a/k8s/pkg/apis/longhorn/v1beta1/backuptarget.go b/k8s/pkg/apis/longhorn/v1beta1/backuptarget.go
index 7c30136520..25cc457599 100644
--- a/k8s/pkg/apis/longhorn/v1beta1/backuptarget.go
+++ b/k8s/pkg/apis/longhorn/v1beta1/backuptarget.go
@@ -73,7 +73,7 @@ type BackupTargetList struct {
 	Items []BackupTarget `json:"items"`
 }
 
-// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
+// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
 func (bt *BackupTarget) ConvertTo(dst conversion.Hub) error {
 	switch t := dst.(type) {
 	case *v1beta2.BackupTarget:
diff --git a/k8s/pkg/apis/longhorn/v1beta1/engineimage.go b/k8s/pkg/apis/longhorn/v1beta1/engineimage.go
index f0c285eaf8..766c9aeea4 100644
--- a/k8s/pkg/apis/longhorn/v1beta1/engineimage.go
+++ b/k8s/pkg/apis/longhorn/v1beta1/engineimage.go
@@ -89,7 +89,7 @@ type EngineImageList struct {
 	Items []EngineImage `json:"items"`
 }
 
-// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
+// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
 func (ei *EngineImage) ConvertTo(dst conversion.Hub) error {
 	switch t := dst.(type) {
 	case *v1beta2.EngineImage:
diff --git a/k8s/pkg/apis/longhorn/v1beta1/node.go b/k8s/pkg/apis/longhorn/v1beta1/node.go
index 3a3b33d0af..738c1909b1 100644
--- a/k8s/pkg/apis/longhorn/v1beta1/node.go
+++ b/k8s/pkg/apis/longhorn/v1beta1/node.go
@@ -107,7 +107,7 @@ type NodeList struct {
 	Items []Node `json:"items"`
 }
 
-// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
+// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
 func (n *Node) ConvertTo(dst conversion.Hub) error {
 	switch t := dst.(type) {
 	case *v1beta2.Node:
diff --git a/k8s/pkg/apis/longhorn/v1beta1/volume.go b/k8s/pkg/apis/longhorn/v1beta1/volume.go
index ec24724a67..c68743b25e 100644
--- a/k8s/pkg/apis/longhorn/v1beta1/volume.go
+++ b/k8s/pkg/apis/longhorn/v1beta1/volume.go
@@ -103,7 +103,8 @@ const (
 
 // VolumeRecurringJobSpec is a deprecated struct.
 // TODO: Should be removed when recurringJobs gets removed from the volume
-// spec.
+//
+//	spec.
 type VolumeRecurringJobSpec struct {
 	Name   string   `json:"name"`
 	Groups []string `json:"groups,omitempty"`
@@ -226,7 +227,7 @@ type VolumeList struct {
 	Items []Volume `json:"items"`
 }
 
-// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
+// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
 func (v *Volume) ConvertTo(dst conversion.Hub) error {
 	switch t := dst.(type) {
 	case *v1beta2.Volume:
diff --git a/types/setting.go b/types/setting.go
index f4efd374dd..7d60771970 100644
--- a/types/setting.go
+++ b/types/setting.go
@@ -1379,7 +1379,7 @@ func validateAndUnmarshalLabel(label string) (key, value string, err error) {
 	label = strings.Trim(label, " ")
 	parts := strings.Split(label, ":")
 	if len(parts) != 2 {
-		return "", "", fmt.Errorf("invalid label %v: should contain the seprator ':'", label)
+		return "", "", fmt.Errorf("invalid label %v: should contain the separator ':'", label)
 	}
 	return strings.Trim(parts[0], " "), strings.Trim(parts[1], " "), nil
 }
diff --git a/webhook/admission/README.md b/webhook/admission/README.md
index 24e488df6b..810da142f2 100644
--- a/webhook/admission/README.md
+++ b/webhook/admission/README.md
@@ -8,7 +8,7 @@ folders at the host as usual.
 
 Most of CRDs in the uninstallation are deleted without checks. Thus, the
 validation logics for DELETE operations should be added carefully to prevent from the
-unstallation failure.
+uninstallation failure.
 
 Reference: https://github.com/longhorn/longhorn-manager/pull/1279
diff --git a/webhook/conversion/conversion.go b/webhook/conversion/conversion.go
index 439afa2824..42383eb1ca 100644
--- a/webhook/conversion/conversion.go
+++ b/webhook/conversion/conversion.go
@@ -118,7 +118,7 @@ func (h *Handler) convertObject(src, dst runtime.Object) error {
 		return fmt.Errorf("%T is not convertible", src)
 	}
 
-	// neigher src nor dst are Hub, means both of them are spoke, so lets get the hub
+	// neither src nor dst are Hub, means both of them are spoke, so lets get the hub
 	// version type.
 	hub, err := getHub(h.scheme, src)
 	if err != nil {
@@ -146,7 +146,7 @@ func (h *Handler) convertObject(src, dst runtime.Object) error {
 func getHub(scheme *runtime.Scheme, obj runtime.Object) (conversion.Hub, error) {
 	gvks, _, err := scheme.ObjectKinds(obj)
 	if err != nil {
-		return nil, fmt.Errorf("error retriving object kinds for given object : %v", err)
+		return nil, fmt.Errorf("failed to retrieve object kinds for given object : %v", err)
 	}
 
 	var hub conversion.Hub
@@ -203,7 +203,7 @@ func statusErrorWithMessage(msg string, params ...interface{}) metav1.Status {
 	}
 }
 
-// statusSucceed is a helper function to createa an metav1 success status
+// statusSucceed is a helper function to create a metav1 success status
 func statusSucceed() metav1.Status {
 	return metav1.Status{Status: metav1.StatusSuccess}
 }
diff --git a/webhook/resources/snapshot/mutator.go b/webhook/resources/snapshot/mutator.go
index cfad7cde0d..26a0c28a0f 100644
--- a/webhook/resources/snapshot/mutator.go
+++ b/webhook/resources/snapshot/mutator.go
@@ -45,7 +45,7 @@ func (s *snapShotMutator) Create(request *admission.Request, newObj runtime.Obje
 
 	snapshot, ok := newObj.(*longhorn.Snapshot)
 	if !ok {
-		return nil, werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snaphot", newObj), "")
+		return nil, werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snapshot", newObj), "")
 	}
 
 	volume, err := s.ds.GetVolumeRO(snapshot.Spec.Volume)
diff --git a/webhook/resources/snapshot/validator.go b/webhook/resources/snapshot/validator.go
index 6491c14d7c..65112fd424 100644
--- a/webhook/resources/snapshot/validator.go
+++ b/webhook/resources/snapshot/validator.go
@@ -38,7 +38,7 @@ func (o *snapshotValidator) Resource() admission.Resource {
 func (o *snapshotValidator) Create(request *admission.Request, newObj runtime.Object) error {
 	_, ok := newObj.(*longhorn.Snapshot)
 	if !ok {
-		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snaphot", newObj), "")
+		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snapshot", newObj), "")
 	}
 
 	return nil
@@ -47,11 +47,11 @@ func (o *snapshotValidator) Create(request *admission.Request, newObj runtime.Ob
 func (o *snapshotValidator) Update(request *admission.Request, oldObj runtime.Object, newObj runtime.Object) error {
 	oldSnapshot, ok := oldObj.(*longhorn.Snapshot)
 	if !ok {
-		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snaphot", oldObj), "")
+		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snapshot", oldObj), "")
 	}
 	newSnapshot, ok := newObj.(*longhorn.Snapshot)
 	if !ok {
-		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snaphot", newObj), "")
+		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.Snapshot", newObj), "")
 	}
 
 	if newSnapshot.Spec.Volume != oldSnapshot.Spec.Volume {
diff --git a/webhook/resources/volume/validator.go b/webhook/resources/volume/validator.go
index 4532915a4d..dd3784940a 100644
--- a/webhook/resources/volume/validator.go
+++ b/webhook/resources/volume/validator.go
@@ -200,7 +200,7 @@ func validateReplicaCount(dataLocality longhorn.DataLocality, replicaCount int)
 	}
 	if dataLocality == longhorn.DataLocalityStrictLocal {
 		if replicaCount != 1 {
-			return werror.NewInvalidError(fmt.Sprintf("number of replica count should be 1 whe data locality is %v", longhorn.DataLocalityStrictLocal), "")
+			return werror.NewInvalidError(fmt.Sprintf("number of replica count should be 1 when data locality is %v", longhorn.DataLocalityStrictLocal), "")
 		}
 	}
 	return nil