
Commit

fix: typos
Signed-off-by: David Ko <[email protected]>
innobead committed Dec 12, 2022
1 parent 90d218d commit 774f5da
Showing 31 changed files with 80 additions and 77 deletions.
1 change: 1 addition & 0 deletions .codespellignore
@@ -3,3 +3,4 @@ ec2
eks
gce
gcp
atleast
2 changes: 1 addition & 1 deletion .github/workflows/codespell.yml
@@ -20,4 +20,4 @@ jobs:
with:
check_filenames: true
ignore_words_file: .codespellignore
skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum "
skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum"
2 changes: 1 addition & 1 deletion api/backup.go
@@ -56,7 +56,7 @@ func (s *Server) BackupVolumeGet(w http.ResponseWriter, req *http.Request) error
func (s *Server) BackupVolumeDelete(w http.ResponseWriter, req *http.Request) error {
volName := mux.Vars(req)["volName"]
if err := s.m.DeleteBackupVolume(volName); err != nil {
return errors.Wrapf(err, "failed to delet backup volume '%s'", volName)
return errors.Wrapf(err, "failed to delete backup volume '%s'", volName)
}
return nil
}
2 changes: 1 addition & 1 deletion api/orphan.go
@@ -24,7 +24,7 @@ func (s *Server) OrphanList(rw http.ResponseWriter, req *http.Request) (err erro
func (s *Server) orphanList(apiContext *api.ApiContext) (*client.GenericCollection, error) {
list, err := s.m.ListOrphans()
if err != nil {
return nil, errors.Wrap(err, "error listing orhpan")
return nil, errors.Wrap(err, "error listing orphan")
}
return toOrphanCollection(list), nil
}
2 changes: 1 addition & 1 deletion controller/backup_controller.go
@@ -394,7 +394,7 @@ func (bc *BackupController) reconcile(backupName string) (err error) {
}

// Remove the Backup Volume recurring jobs/groups information.
// Only record the lastest recurring jobs/groups information in backup volume CR and volume.cfg on remote backup target.
// Only record the latest recurring jobs/groups information in backup volume CR and volume.cfg on remote backup target.
delete(backupInfo.Labels, types.VolumeRecurringJobInfoLabel)

// Update Backup CR status
8 changes: 4 additions & 4 deletions controller/engine_controller.go
@@ -994,8 +994,8 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
return nil
}

m.aquireRestoringCounter(true)
defer m.aquireRestoringCounter(false)
m.acquireRestoringCounter(true)
defer m.acquireRestoringCounter(false)
}

if err = m.restoreBackup(engine, rsMap, cliAPIVersion, engineClientProxy); err != nil {
@@ -1026,8 +1026,8 @@ func (m *EngineMonitor) refresh(engine *longhorn.Engine) error {
return nil
}

func (m *EngineMonitor) aquireRestoringCounter(aquire bool) {
if !aquire {
func (m *EngineMonitor) acquireRestoringCounter(acquire bool) {
if !acquire {
m.restoringCounter.DecreaseCount()
return
}
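For context on the rename above: the monitor bumps a shared restoring counter before restoring a backup and releases it with a deferred call to the same helper, where passing false performs the release. A minimal standalone sketch of that acquire/release-with-defer pattern, using a bare atomic counter as a hypothetical stand-in for Longhorn's restoringCounter type:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type monitor struct{ restoringCounter int64 }

// acquireRestoringCounter increments the counter when acquire is true and
// decrements it when acquire is false, mirroring the shape of the method in
// the hunk above (illustrative only).
func (m *monitor) acquireRestoringCounter(acquire bool) {
	if !acquire {
		atomic.AddInt64(&m.restoringCounter, -1)
		return
	}
	atomic.AddInt64(&m.restoringCounter, 1)
}

func (m *monitor) restore() {
	m.acquireRestoringCounter(true)
	defer m.acquireRestoringCounter(false)
	// ... restore the backup while the counter is held ...
	fmt.Println("restoring with counter =", atomic.LoadInt64(&m.restoringCounter))
}

func main() {
	m := &monitor{}
	m.restore()
	fmt.Println("after restore, counter =", atomic.LoadInt64(&m.restoringCounter)) // 0
}
```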
10 changes: 5 additions & 5 deletions controller/kubernetes_pod_controller.go
@@ -202,11 +202,11 @@ func (kc *KubernetesPodController) handlePodDeletionIfNodeDown(pod *v1.Pod, node

// make sure the volumeattachments of the pods are gone first
// ref: https://github.com/longhorn/longhorn/issues/2947
vas, err := kc.getVolumeAttachmentsOfPod(pod)
volumeAttachments, err := kc.getVolumeAttachmentsOfPod(pod)
if err != nil {
return err
}
for _, va := range vas {
for _, va := range volumeAttachments {
if va.DeletionTimestamp == nil {
err := kc.kubeClient.StorageV1().VolumeAttachments().Delete(context.TODO(), va.Name, metav1.DeleteOptions{})
if err != nil {
@@ -239,8 +239,8 @@ func (kc *KubernetesPodController) handlePodDeletionIfNodeDown(pod *v1.Pod, node
}

func (kc *KubernetesPodController) getVolumeAttachmentsOfPod(pod *v1.Pod) ([]*storagev1.VolumeAttachment, error) {
res := []*storagev1.VolumeAttachment{}
vas, err := kc.ds.ListVolumeAttachmentsRO()
var res []*storagev1.VolumeAttachment
volumeAttachments, err := kc.ds.ListVolumeAttachmentsRO()
if err != nil {
return nil, err
}
@@ -262,7 +262,7 @@ func (kc *KubernetesPodController) getVolumeAttachmentsOfPod(pod *v1.Pod) ([]*st
pvs[pvc.Spec.VolumeName] = true
}

for _, va := range vas {
for _, va := range volumeAttachments {
if va.Spec.NodeName != pod.Spec.NodeName {
continue
}
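A hedged sketch of the filtering getVolumeAttachmentsOfPod performs in the hunks above: resolve the pod's PVCs to PV names, then keep only the VolumeAttachments that sit on the pod's node and reference one of those PVs. The trimmed types below are illustrative stand-ins for the corev1/storagev1 objects, not the controller's real code:

```go
package main

import "fmt"

type volumeAttachment struct {
	Name     string
	NodeName string
	PVName   string
}

// attachmentsOfPod mirrors the selection logic shown above: pvNames is the set
// of PV names resolved from the pod's PVCs (pvs[pvc.Spec.VolumeName] = true),
// and attachments on other nodes or unrelated PVs are skipped.
func attachmentsOfPod(nodeName string, pvNames map[string]bool, all []volumeAttachment) []volumeAttachment {
	var res []volumeAttachment
	for _, va := range all {
		if va.NodeName != nodeName {
			continue
		}
		if !pvNames[va.PVName] {
			continue
		}
		res = append(res, va)
	}
	return res
}

func main() {
	all := []volumeAttachment{
		{Name: "csi-aaa", NodeName: "node-1", PVName: "pvc-123"},
		{Name: "csi-bbb", NodeName: "node-2", PVName: "pvc-123"},
	}
	got := attachmentsOfPod("node-1", map[string]bool{"pvc-123": true}, all)
	fmt.Println(len(got), got[0].Name) // 1 csi-aaa
}
```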
4 changes: 2 additions & 2 deletions controller/monitor/snapshot_monitor.go
@@ -528,8 +528,8 @@ func determineChecksumFromHashStatus(log logrus.FieldLogger, snapshotName, exist
return "", fmt.Errorf(prefixChecksumDetermineFailure+"since snapshot disk files are silently corrupted", snapshotName)
}

// The checksums from replicas might be different than previous values because of purge, trim, corruption and etc.
// So, the vote mechanism is alway executed to get the latest checksum and then update the status.checksum.
// The checksums from replicas might be different from previous values because of purge, trim, corruption and etc.
// So, the vote mechanism is always executed to get the latest checksum and then update the status.checksum.
// If the checksum cannot be determined by the ones from replicas, the existingChecksum (snapshot.status.checksum) will
// help to determine the final checksum.
found, checksum, maxVotes := determineChecksum(checksums)
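A minimal sketch of the voting idea described in the comment above: pick the checksum reported by the most replicas, and report no winner on a tie so the caller can fall back to the previously recorded status.checksum. Function and variable names are illustrative; this is not Longhorn's actual determineChecksum:

```go
package main

import "fmt"

// voteChecksum returns the checksum with the most votes among replica reports.
// If the highest vote count is not unique, it returns found=false so the
// caller can fall back to the existing checksum.
func voteChecksum(checksums map[string]string) (found bool, checksum string, maxVotes int) {
	votes := map[string]int{}
	for _, c := range checksums { // key: replica name, value: reported checksum
		if c == "" {
			continue
		}
		votes[c]++
	}
	ties := 0
	for c, n := range votes {
		switch {
		case n > maxVotes:
			checksum, maxVotes, ties = c, n, 1
		case n == maxVotes:
			ties++
		}
	}
	return ties == 1 && maxVotes > 0, checksum, maxVotes
}

func main() {
	reports := map[string]string{"r1": "abc", "r2": "abc", "r3": "def"}
	found, checksum, votes := voteChecksum(reports)
	fmt.Println(found, checksum, votes) // true abc 2
}
```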
2 changes: 1 addition & 1 deletion controller/node_controller.go
@@ -410,7 +410,7 @@ func (nc *NodeController) syncNode(key string) (err error) {
nc.eventRecorder, node, v1.EventTypeWarning)
}

// sync node state with kuberentes node status
// sync node state with kubernetes node status
kubeNode, err := nc.ds.GetKubernetesNode(name)
if err != nil {
// if kubernetes node has been removed from cluster
2 changes: 1 addition & 1 deletion controller/node_controller_test.go
@@ -700,7 +700,7 @@ func (s *TestSuite) TestSyncNode(c *C) {
err = sIndexer.Add(imImageSetting)
c.Assert(err, IsNil)

// create kuberentes node
// create kubernetes node
for _, kubeNode := range tc.kubeNodes {
n, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), kubeNode, metav1.CreateOptions{})
c.Assert(err, IsNil)
2 changes: 1 addition & 1 deletion controller/orphan_controller.go
@@ -278,7 +278,7 @@ func (oc *OrphanController) cleanupOrphanedData(orphan *longhorn.Orphan) (err er
// Make sure if the orphan nodeID and controller ID are same.
// If NO, just delete the orphan resource object and don't touch the data.
if orphan.Spec.NodeID != oc.controllerID {
log.Infof("Orphan nodeID %v is different from controllerID %v, so just delete the orphan resouce object",
log.Infof("Orphan nodeID %v is different from controllerID %v, so just delete the orphan resource object",
orphan.Name, oc.controllerID)
return nil
}
4 changes: 2 additions & 2 deletions controller/system_backup_controller.go
@@ -498,7 +498,7 @@ func (c *SystemBackupController) UploadSystemBackup(systemBackup *longhorn.Syste

var recordErr error
existingSystemBackup := systemBackup.DeepCopy()
// Handle the CR status update here because this method is called by a seperate goroutine.
// Handle the CR status update here because this method is called by a separate goroutine.
defer func() {
record := &systemBackupRecord{}
if recordErr != nil {
@@ -579,7 +579,7 @@ func (c *SystemBackupController) cleanupRemoteSystemBackupFiles(systemBackup *lo

systemBackupsFromBackupTarget, err := backupTargetClient.ListSystemBackup()
if err != nil {
return errors.Wrapf(err, "failed to list system backups in bakup target %v", backupTargetSetting.Value)
return errors.Wrapf(err, "failed to list system backups in backup target %v", backupTargetSetting.Value)
}

if _, exist := systemBackupsFromBackupTarget[systembackupstore.Name(systemBackup.Name)]; !exist {
2 changes: 1 addition & 1 deletion controller/system_rollout_controller.go
@@ -679,7 +679,7 @@ func (c *SystemRolloutController) Unpack(log logrus.FieldLogger) error {
}

if err := c.cacheAPIExtensionsResources(); err != nil {
return errors.Wrap(err, "failed to extract API Extentions resources")
return errors.Wrap(err, "failed to extract API Extensions resources")
}

if err := c.cacheLonghornResources(); err != nil {
8 changes: 4 additions & 4 deletions controller/volume_controller.go
@@ -3518,19 +3518,19 @@ func (vc *VolumeController) processMigration(v *longhorn.Volume, es map[string]*

log = log.WithField("migrationEngine", migrationEngine.Name)

allReady := false
if allReady, revertRequired, err = vc.prepareReplicasAndEngineForMigration(v, currentEngine, migrationEngine, rs); err != nil {
ready := false
if ready, revertRequired, err = vc.prepareReplicasAndEngineForMigration(v, currentEngine, migrationEngine, rs); err != nil {
return err
}
if !allReady || revertRequired {
if !ready || revertRequired {
return nil
}

log.Info("volume migration engine is ready")
return nil
}

func (vc *VolumeController) prepareReplicasAndEngineForMigration(v *longhorn.Volume, currentEngine, migrationEngine *longhorn.Engine, rs map[string]*longhorn.Replica) (allReady, revertRequired bool, err error) {
func (vc *VolumeController) prepareReplicasAndEngineForMigration(v *longhorn.Volume, currentEngine, migrationEngine *longhorn.Engine, rs map[string]*longhorn.Replica) (ready, revertRequired bool, err error) {
log := getLoggerForVolume(vc.logger, v).WithFields(logrus.Fields{"migrationNodeID": v.Spec.MigrationNodeID, "migrationEngine": migrationEngine.Name})

// Check the migration engine current status
20 changes: 10 additions & 10 deletions controller/volume_controller_test.go
@@ -831,12 +831,12 @@ func (s *TestSuite) TestVolumeLifeCycle(c *C) {
tc.expectVolume.Status.State = longhorn.VolumeStateAttaching
tc.expectVolume.Status.CurrentImage = tc.volume.Spec.EngineImage
tc.expectVolume.Status.CurrentNodeID = tc.volume.Spec.NodeID
expectEs := map[string]*longhorn.Engine{}
expectEngines := map[string]*longhorn.Engine{}
for _, e := range tc.expectEngines {
e.Spec.RevisionCounterDisabled = true
expectEs[e.Name] = e
expectEngines[e.Name] = e
}
tc.expectEngines = expectEs
tc.expectEngines = expectEngines
expectRs := map[string]*longhorn.Replica{}
for _, r := range tc.expectReplicas {
r.Spec.DesireState = longhorn.InstanceStateRunning
@@ -871,12 +871,12 @@ func (s *TestSuite) TestVolumeLifeCycle(c *C) {

tc.copyCurrentToExpect()

expectEs = map[string]*longhorn.Engine{}
expectEngines = map[string]*longhorn.Engine{}
for _, e := range tc.expectEngines {
e.Spec.SalvageRequested = true
expectEs[e.Name] = e
expectEngines[e.Name] = e
}
tc.expectEngines = expectEs
tc.expectEngines = expectEngines

expectRs = map[string]*longhorn.Replica{}
for _, r := range tc.expectReplicas {
@@ -1484,12 +1484,12 @@ func (s *TestSuite) runTestCases(c *C, testCases map[string]*VolumeTestCase) {
for _, retE := range retEs.Items {
if tc.engines == nil {
// test creation, name would be different
var expectE *longhorn.Engine
for _, expectE = range tc.expectEngines {
var expectEngine *longhorn.Engine
for _, expectEngine = range tc.expectEngines {
break
}
c.Assert(retE.Spec, DeepEquals, expectE.Spec)
c.Assert(retE.Status, DeepEquals, expectE.Status)
c.Assert(retE.Spec, DeepEquals, expectEngine.Spec)
c.Assert(retE.Status, DeepEquals, expectEngine.Status)
} else {
c.Assert(retE.Spec, DeepEquals, tc.expectEngines[retE.Name].Spec)
c.Assert(retE.Status, DeepEquals, tc.expectEngines[retE.Name].Status)
2 changes: 1 addition & 1 deletion csi/controller_server.go
@@ -401,7 +401,7 @@ func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *cs
return nil, status.Error(codes.InvalidArgument, "volume capability missing in request")
}

// TODO: #1875 API returns error instead of not found, so we cannot differenciate between a retrieval failure and non existing resource
// TODO: #1875 API returns error instead of not found, so we cannot differentiate between a retrieval failure and non existing resource
if _, err := cs.apiClient.Node.ById(nodeID); err != nil {
return nil, status.Errorf(codes.NotFound, "node %s not found", nodeID)
}
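A hedged sketch of the differentiation the TODO above asks for: map only a genuine "not found" lookup result to codes.NotFound and surface other retrieval failures as codes.Internal. ErrNotFound and getNode are placeholders that assume the API client can eventually distinguish the two cases (see issue #1875); this is not the CSI driver's current behavior:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var ErrNotFound = errors.New("resource not found")

// getNode stands in for the API client call; here an empty ID simulates a
// true "not found" response while any other failure would be a retrieval error.
func getNode(nodeID string) error {
	if nodeID == "" {
		return ErrNotFound
	}
	return nil
}

// checkNode converts lookup results into gRPC status codes, keeping NotFound
// distinct from internal retrieval failures.
func checkNode(nodeID string) error {
	if err := getNode(nodeID); err != nil {
		if errors.Is(err, ErrNotFound) {
			return status.Errorf(codes.NotFound, "node %s not found", nodeID)
		}
		return status.Errorf(codes.Internal, "failed to look up node %s: %v", nodeID, err)
	}
	return nil
}

func main() {
	fmt.Println(checkNode(""))       // codes.NotFound
	fmt.Println(checkNode("node-1")) // <nil>
}
```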
2 changes: 1 addition & 1 deletion csi/node_server.go
@@ -454,7 +454,7 @@ func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
logrus.WithError(err).Errorf("mounted volume %v on node %v failed required filesystem resize", volumeID, ns.nodeID)
return nil, status.Error(codes.Internal, err.Error())
} else if resized {
logrus.Infof("mounted volume %v on node %v succesfully resized filesystem after mount", volumeID, ns.nodeID)
logrus.Infof("mounted volume %v on node %v successfully resized filesystem after mount", volumeID, ns.nodeID)
} else {
logrus.Debugf("mounted volume %v on node %v already has correct filesystem size", volumeID, ns.nodeID)
}
45 changes: 23 additions & 22 deletions datastore/kubernetes.go
@@ -411,7 +411,7 @@ func (s *DataStore) ListInstanceManagerPods() ([]*corev1.Pod, error) {
return s.ListPodsBySelector(selector)
}

// ListInstanceManagerPodsBy returns a list of instance manager pods that fullfill the below conditions
// ListInstanceManagerPodsBy returns a list of instance manager pods that fulfill the below conditions
func (s *DataStore) ListInstanceManagerPodsBy(node string, image string, imType longhorn.InstanceManagerType) ([]*corev1.Pod, error) {
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
MatchLabels: types.GetInstanceManagerLabels(node, image, imType),
Expand Down Expand Up @@ -464,7 +464,7 @@ func (s *DataStore) GetSupportBundleManagerPod(supportBundle *longhorn.SupportBu
if count := len(supportBundleManagerPods); count == 0 {
return nil, fmt.Errorf("cannot find support bundle manager pod")
} else if count > 1 {
return nil, fmt.Errorf("found unexpect number of %v support bundle manager pod", count)
return nil, fmt.Errorf("found unexpected number of %v support bundle manager pod", count)
}

return supportBundleManagerPods[0], nil
@@ -770,26 +770,27 @@ func NewPVCManifest(size int64, pvName, ns, pvcName, storageClassName string, ac
// apiVersion: v1
// kind: Pod
// metadata:
// annotations:
// k8s.v1.cni.cncf.io/network-status: |-
// [{
// "name": "cbr0",
// "interface": "eth0",
// "ips": [
// "10.42.0.175"
// ],
// "mac": "be:67:b2:19:17:84",
// "default": true,
// "dns": {}
// },{
// "name": "kube-system/demo-192-168-0-0",
// "interface": "lhnet1",
// "ips": [
// "192.168.1.175"
// ],
// "mac": "02:59:e5:d4:ae:ea",
// "dns": {}
// }]
//
// annotations:
// k8s.v1.cni.cncf.io/network-status: |-
// [{
// "name": "cbr0",
// "interface": "eth0",
// "ips": [
// "10.42.0.175"
// ],
// "mac": "be:67:b2:19:17:84",
// "default": true,
// "dns": {}
// },{
// "name": "kube-system/demo-192-168-0-0",
// "interface": "lhnet1",
// "ips": [
// "192.168.1.175"
// ],
// "mac": "02:59:e5:d4:ae:ea",
// "dns": {}
// }]
func (s *DataStore) GetStorageIPFromPod(pod *corev1.Pod) string {
storageNetwork, err := s.GetSetting(types.SettingNameStorageNetwork)
if err != nil {
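A small, assumption-laden sketch of reading the k8s.v1.cni.cncf.io/network-status annotation shown in the comment above and picking the IP of a named storage network. The annotation layout follows the example manifest; the selection logic is illustrative only and is not GetStorageIPFromPod itself:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type networkStatus struct {
	Name      string   `json:"name"`
	Interface string   `json:"interface"`
	IPs       []string `json:"ips"`
	Default   bool     `json:"default"`
}

// storageIPFromAnnotation returns the first IP of the network whose name
// matches storageNetwork, or "" when it is not present.
func storageIPFromAnnotation(annotation, storageNetwork string) (string, error) {
	var statuses []networkStatus
	if err := json.Unmarshal([]byte(annotation), &statuses); err != nil {
		return "", err
	}
	for _, s := range statuses {
		if s.Name == storageNetwork && len(s.IPs) > 0 {
			return s.IPs[0], nil
		}
	}
	return "", nil
}

func main() {
	annotation := `[{"name":"cbr0","interface":"eth0","ips":["10.42.0.175"],"default":true},
	{"name":"kube-system/demo-192-168-0-0","interface":"lhnet1","ips":["192.168.1.175"]}]`
	ip, err := storageIPFromAnnotation(annotation, "kube-system/demo-192-168-0-0")
	fmt.Println(ip, err) // 192.168.1.175 <nil>
}
```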
2 changes: 1 addition & 1 deletion datastore/longhorn.go
@@ -457,7 +457,7 @@ func (s *DataStore) ListSettings() (map[types.SettingName]*longhorn.Setting, err
for _, itemRO := range list {
// Cannot use cached object from lister
settingField := types.SettingName(itemRO.Name)
// Ignore the items that we don't recongize
// Ignore the items that we don't recognize
if _, ok := types.GetSettingDefinition(settingField); ok {
itemMap[settingField] = itemRO.DeepCopy()
}
2 changes: 1 addition & 1 deletion engineapi/backup_monitor.go
@@ -244,7 +244,7 @@ func (m *BackupMonitor) exponentialBackOffTimer() bool {
// Keep in exponential backoff timer
case <-ctx.Done():
// Give it the last try to prevent if the snapshot backup succeed between
// the last trigged backoff time and the max retry period
// the last triggered backoff time and the max retry period
currentBackupStatus, err := m.syncBackupStatusFromEngineReplica()
if err == nil {
m.logger.Info("Change to liner timer to monitor it")
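A hypothetical sketch of the pattern in the hunk above: poll with exponential backoff, and when the overall deadline fires, make one last check so a backup that succeeded between the final backoff tick and the deadline is not reported as a failure. checkOnce is a stand-in for syncBackupStatusFromEngineReplica, and the intervals are arbitrary:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func waitWithBackoff(ctx context.Context, checkOnce func() error) error {
	wait := 100 * time.Millisecond
	for {
		if err := checkOnce(); err == nil {
			return nil
		}
		select {
		case <-time.After(wait):
			if wait *= 2; wait > 2*time.Second {
				wait = 2 * time.Second // cap the backoff interval
			}
		case <-ctx.Done():
			// Give it one last try before giving up for good.
			if err := checkOnce(); err == nil {
				return nil
			}
			return ctx.Err()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	err := waitWithBackoff(ctx, func() error { return errors.New("not ready") })
	fmt.Println(err) // context deadline exceeded
}
```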
2 changes: 1 addition & 1 deletion k8s/pkg/apis/longhorn/v1beta1/backingimage.go
@@ -88,7 +88,7 @@ type BackingImageList struct {
Items []BackingImage `json:"items"`
}

// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (bi *BackingImage) ConvertTo(dst conversion.Hub) error {
switch t := dst.(type) {
case *v1beta2.BackingImage:
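An illustrative sketch of the spoke-to-hub conversion pattern these ConvertTo methods follow: the spoke type switches on the destination's concrete type and copies its fields into the hub version. The toy types below only mirror the shape of the v1beta1/v1beta2 APIs and the conversion.Hub interface; they are not the real ones:

```go
package main

import "fmt"

// Hub loosely mimics sigs.k8s.io/controller-runtime's conversion.Hub marker.
type Hub interface{ Hub() }

type BackingImageV1beta2 struct{ SourceType string }

func (*BackingImageV1beta2) Hub() {}

type BackingImageV1beta1 struct{ SourceType string }

// ConvertTo converts from the spoke version (v1beta1) to the hub version (v1beta2).
func (bi *BackingImageV1beta1) ConvertTo(dst Hub) error {
	switch t := dst.(type) {
	case *BackingImageV1beta2:
		t.SourceType = bi.SourceType
		return nil
	default:
		return fmt.Errorf("unsupported conversion target %T", t)
	}
}

func main() {
	src := &BackingImageV1beta1{SourceType: "download"}
	dst := &BackingImageV1beta2{}
	fmt.Println(src.ConvertTo(dst), dst.SourceType) // <nil> download
}
```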
2 changes: 1 addition & 1 deletion k8s/pkg/apis/longhorn/v1beta1/backuptarget.go
@@ -73,7 +73,7 @@ type BackupTargetList struct {
Items []BackupTarget `json:"items"`
}

// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (bt *BackupTarget) ConvertTo(dst conversion.Hub) error {
switch t := dst.(type) {
case *v1beta2.BackupTarget:
2 changes: 1 addition & 1 deletion k8s/pkg/apis/longhorn/v1beta1/engineimage.go
@@ -89,7 +89,7 @@ type EngineImageList struct {
Items []EngineImage `json:"items"`
}

// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (ei *EngineImage) ConvertTo(dst conversion.Hub) error {
switch t := dst.(type) {
case *v1beta2.EngineImage:
2 changes: 1 addition & 1 deletion k8s/pkg/apis/longhorn/v1beta1/node.go
@@ -107,7 +107,7 @@ type NodeList struct {
Items []Node `json:"items"`
}

// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (n *Node) ConvertTo(dst conversion.Hub) error {
switch t := dst.(type) {
case *v1beta2.Node:
5 changes: 3 additions & 2 deletions k8s/pkg/apis/longhorn/v1beta1/volume.go
@@ -103,7 +103,8 @@ const (

// VolumeRecurringJobSpec is a deprecated struct.
// TODO: Should be removed when recurringJobs gets removed from the volume
// spec.
//
// spec.
type VolumeRecurringJobSpec struct {
Name string `json:"name"`
Groups []string `json:"groups,omitempty"`
@@ -226,7 +227,7 @@ type VolumeList struct {
Items []Volume `json:"items"`
}

// ConvertTo converts from spoke verion (v1beta1) to hub version (v1beta2)
// ConvertTo converts from spoke version (v1beta1) to hub version (v1beta2)
func (v *Volume) ConvertTo(dst conversion.Hub) error {
switch t := dst.(type) {
case *v1beta2.Volume:
2 changes: 1 addition & 1 deletion types/setting.go
@@ -1379,7 +1379,7 @@ func validateAndUnmarshalLabel(label string) (key, value string, err error) {
label = strings.Trim(label, " ")
parts := strings.Split(label, ":")
if len(parts) != 2 {
return "", "", fmt.Errorf("invalid label %v: should contain the seprator ':'", label)
return "", "", fmt.Errorf("invalid label %v: should contain the separator ':'", label)
}
return strings.Trim(parts[0], " "), strings.Trim(parts[1], " "), nil
}
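A standalone re-creation of the parsing rule in the hunk above, for illustration: a label must contain exactly one ':' separator, and surrounding spaces are trimmed from both the key and the value. This mirrors the snippet rather than exposing the real Longhorn helper:

```go
package main

import (
	"fmt"
	"strings"
)

// parseLabel splits a "key:value" label, rejecting anything that does not
// contain exactly one ':' separator.
func parseLabel(label string) (key, value string, err error) {
	label = strings.TrimSpace(label)
	parts := strings.Split(label, ":")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid label %v: should contain the separator ':'", label)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	k, v, err := parseLabel(" backup : weekly ")
	fmt.Println(k, v, err) // backup weekly <nil>

	_, _, err = parseLabel("no-separator")
	fmt.Println(err) // invalid label no-separator: should contain the separator ':'
}
```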