diff --git a/pkg/controller/cronjob/cronjob_controllerv2.go b/pkg/controller/cronjob/cronjob_controllerv2.go
index 082185fc08523..fcb40e2407375 100644
--- a/pkg/controller/cronjob/cronjob_controllerv2.go
+++ b/pkg/controller/cronjob/cronjob_controllerv2.go
@@ -169,7 +169,7 @@ func (jm *ControllerV2) sync(cronJobKey string) (*time.Duration, error) {
 	cronJob, err := jm.cronJobLister.CronJobs(ns).Get(name)
 	switch {
 	case errors.IsNotFound(err):
-		// may be cronjob is deleted, dont need to requeue this key
+		// may be cronjob is deleted, don't need to requeue this key
 		klog.V(4).InfoS("cronjob not found, may be it is deleted", "cronjob", klog.KRef(ns, name), "err", err)
 		return nil, nil
 	case err != nil:
diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go
index 3abdbb8c4c85a..66759e7aab2ca 100644
--- a/pkg/kubelet/eviction/eviction_manager_test.go
+++ b/pkg/kubelet/eviction/eviction_manager_test.go
@@ -889,7 +889,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	// induce disk pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
-	// Dont reclaim any disk
+	// Don't reclaim any disk
 	diskGC.summaryAfterGC = summaryStatsMaker("400Mi", "200Gi", podStats)
 	manager.synchronize(diskInfoProvider, activePodsFunc)
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 00646b120b89d..1216b890bed1f 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -464,7 +464,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		kubeInformers.Start(wait.NeverStop)
 		klog.InfoS("Kubelet client is not nil")
 	} else {
-		// we dont have a client to sync!
+		// we don't have a client to sync!
 		nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
 		nodeLister = corelisters.NewNodeLister(nodeIndexer)
 		nodeHasSynced = func() bool { return true }
diff --git a/pkg/kubelet/stats/provider.go b/pkg/kubelet/stats/provider.go
index 3ea8633200f57..360c740315d76 100644
--- a/pkg/kubelet/stats/provider.go
+++ b/pkg/kubelet/stats/provider.go
@@ -144,7 +144,7 @@ func (p *Provider) RootFsStats() (*statsapi.FsStats, error) {
 	}
 
 	// Get the root container stats's timestamp, which will be used as the
-	// imageFs stats timestamp. Dont force a stats update, as we only want the timestamp.
+	// imageFs stats timestamp. Don't force a stats update, as we only want the timestamp.
 	rootStats, err := getCgroupStats(p.cadvisor, "/", false)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get root container stats: %v", err)
diff --git a/pkg/quota/v1/evaluator/core/persistent_volume_claims.go b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go
index a8caa892439c2..879c6c75566b8 100644
--- a/pkg/quota/v1/evaluator/core/persistent_volume_claims.go
+++ b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go
@@ -111,7 +111,7 @@ func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.Scope
 }
 
 // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
 func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
 	return []corev1.ScopedResourceSelectorRequirement{}, nil
 }
diff --git a/pkg/quota/v1/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go
index 6051d24eb85ae..97aa587c0254d 100644
--- a/pkg/quota/v1/evaluator/core/pods.go
+++ b/pkg/quota/v1/evaluator/core/pods.go
@@ -190,7 +190,7 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []core
 }
 
 // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
 func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
 	uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{}
 	for _, selector := range limitedScopes {
diff --git a/pkg/quota/v1/evaluator/core/services.go b/pkg/quota/v1/evaluator/core/services.go
index ab5c8a9f48e7b..5d5115a1401a4 100644
--- a/pkg/quota/v1/evaluator/core/services.go
+++ b/pkg/quota/v1/evaluator/core/services.go
@@ -90,7 +90,7 @@ func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.S
 }
 
 // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
 func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
 	return []corev1.ScopedResourceSelectorRequirement{}, nil
 }
diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go
index 4648d7d1e7d9a..e8a0193b8b584 100644
--- a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go
+++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go
@@ -386,7 +386,7 @@ func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions {
 // addUnsetLabelsToMap backfills missing values with values we find in a map.
 func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
 	for _, l := range labelsToAdd {
-		// if the label is already there, dont overwrite it.
+		// if the label is already there, don't overwrite it.
 		if _, exists := aL[l]; exists {
 			continue
 		}
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index c9dfd7f8cb925..4a0404342fe0d 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -719,7 +719,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
 	scache := internalcache.New(10*time.Minute, stop)
 
-	// Design the baseline for the pods, and we will make nodes that dont fit it later.
+	// Design the baseline for the pods, and we will make nodes that don't fit it later.
 	var cpu = int64(4)
 	var mem = int64(500)
 	podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
diff --git a/pkg/volume/emptydir/empty_dir.go b/pkg/volume/emptydir/empty_dir.go
index c21160e885955..f2bf8e2f33345 100644
--- a/pkg/volume/emptydir/empty_dir.go
+++ b/pkg/volume/emptydir/empty_dir.go
@@ -117,7 +117,7 @@ func calculateEmptyDirMemorySize(nodeAllocatableMemory *resource.Quantity, spec
 		return sizeLimit
 	}
 
-	// size limit defaults to node allocatable (pods cant consume more memory than all pods)
+	// size limit defaults to node allocatable (pods can't consume more memory than all pods)
 	sizeLimit = nodeAllocatableMemory
 	zero := resource.MustParse("0")
diff --git a/pkg/volume/scaleio/sio_client.go b/pkg/volume/scaleio/sio_client.go
index 25dfd3d876d1e..fc761071a11f9 100644
--- a/pkg/volume/scaleio/sio_client.go
+++ b/pkg/volume/scaleio/sio_client.go
@@ -477,7 +477,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error {
 			go func() {
 				klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
 			}()
-			// cant find vol id, then ok.
+			// can't find vol id, then ok.
 			if _, ok := devMap[token]; !ok {
 				return nil
 			}
diff --git a/pkg/volume/vsphere_volume/vsphere_volume_block_test.go b/pkg/volume/vsphere_volume/vsphere_volume_block_test.go
index 1b46ab3f51992..f4036531ea8af 100644
--- a/pkg/volume/vsphere_volume/vsphere_volume_block_test.go
+++ b/pkg/volume/vsphere_volume/vsphere_volume_block_test.go
@@ -43,7 +43,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
 	// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
 	tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
 	if err != nil {
-		t.Fatalf("cant' make a temp dir: %s", err)
+		t.Fatalf("can't make a temp dir: %s", err)
 	}
 	// deferred clean up
 	defer os.RemoveAll(tmpVDir)
@@ -80,7 +80,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
 func TestGetPodAndPluginMapPaths(t *testing.T) {
 	tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
 	if err != nil {
-		t.Fatalf("cant' make a temp dir: %s", err)
+		t.Fatalf("can't make a temp dir: %s", err)
 	}
 	// deferred clean up
 	defer os.RemoveAll(tmpVDir)
diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission.go b/plugin/pkg/admission/storage/persistentvolume/label/admission.go
index 7a7fe94dca73c..6ef598fb7a64c 100644
--- a/plugin/pkg/admission/storage/persistentvolume/label/admission.go
+++ b/plugin/pkg/admission/storage/persistentvolume/label/admission.go
@@ -175,7 +175,7 @@ func (l *persistentVolumeLabel) findVolumeLabels(volume *api.PersistentVolume) (
 		topologyLabelGA := true
 		domain, domainOK := existingLabels[v1.LabelTopologyZone]
 		region, regionOK := existingLabels[v1.LabelTopologyRegion]
-		// If they dont have GA labels we should check for failuredomain beta labels
+		// If they don't have GA labels we should check for failuredomain beta labels
 		// TODO: remove this once all the cloud provider change to GA topology labels
 		if !domainOK || !regionOK {
 			topologyLabelGA = false
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go
index a2ff535187b50..e87efe355274d 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/status_strategy.go
@@ -40,7 +40,7 @@ func (a statusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set
 		fieldpath.APIVersion(a.customResourceStrategy.kind.GroupVersion().String()): fieldpath.NewSet(
 			// Note that if there are other top level fields unique to CRDs,
 			// those will also get removed by the apiserver prior to persisting,
-			// but wont be added to the resetFields set.
+			// but won't be added to the resetFields set.
 			// This isn't an issue now, but if it becomes an issue in the future
 			// we might need a mechanism that is the inverse of resetFields where
diff --git a/staging/src/k8s.io/apiserver/pkg/quota/v1/generic/evaluator.go b/staging/src/k8s.io/apiserver/pkg/quota/v1/generic/evaluator.go
index 7ba48c94286a3..805f8159d607c 100644
--- a/staging/src/k8s.io/apiserver/pkg/quota/v1/generic/evaluator.go
+++ b/staging/src/k8s.io/apiserver/pkg/quota/v1/generic/evaluator.go
@@ -270,7 +270,7 @@ func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []core
 }
 
 // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
 func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
 	return []corev1.ScopedResourceSelectorRequirement{}, nil
 }
diff --git a/staging/src/k8s.io/apiserver/pkg/quota/v1/interfaces.go b/staging/src/k8s.io/apiserver/pkg/quota/v1/interfaces.go
index 15f8b7613d319..511e8818c7365 100644
--- a/staging/src/k8s.io/apiserver/pkg/quota/v1/interfaces.go
+++ b/staging/src/k8s.io/apiserver/pkg/quota/v1/interfaces.go
@@ -54,7 +54,7 @@ type Evaluator interface {
 	Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error)
 	// MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object.
 	MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
-	// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+	// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
 	UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
 	// MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches.
 	MatchingResources(input []corev1.ResourceName) []corev1.ResourceName
diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
index 5b27a1127a2b3..6739a3fa46dab 100644
--- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
+++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
@@ -359,7 +359,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
 			}
 		} else {
 			// User has not specified any override for this group version.
-			// filter out types which dont have genclient.
+			// filter out types which don't have genclient.
 			if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient {
 				continue
 			}
diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
index 496145b14a670..8ada494690312 100644
--- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
+++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
@@ -162,7 +162,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
 func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
 	generatingForPackage := false
 	for _, t := range p.Types {
-		// filter out types which dont have genclient.
+		// filter out types which don't have genclient.
 		if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
 			continue
 		}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go
index ba4ad047b48a4..a9684379ae849 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go
@@ -1404,7 +1404,7 @@ func getAvailabilitySetName(az *Cloud, vmIndex int, numAS int) string {
 }
 
 // test supporting on 1 nic per vm
-// we really dont care about the name of the nic
+// we really don't care about the name of the nic
 // just using the vm name for testing purposes
 func getNICName(vmIndex int) string {
 	return getVMName(vmIndex)
diff --git a/test/e2e/common/network/networking.go b/test/e2e/common/network/networking.go
index 55dc0d3e00a11..79f9c0f235516 100644
--- a/test/e2e/common/network/networking.go
+++ b/test/e2e/common/network/networking.go
@@ -47,7 +47,7 @@ var _ = SIGDescribe("Networking", func() {
 			// Second time, we pass through pods more carefully...
 			framework.Logf("Going to retry %v out of %v pods....", len(failedPodsByHost), len(config.EndpointPods))
 			for host, failedPods := range failedPodsByHost {
-				framework.Logf("Doublechecking %v pods in host %v which werent seen the first time.", len(failedPods), host)
+				framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
 				for _, endpointPod := range failedPods {
 					framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
 					if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go
index 9e61470a5e44d..691e2e9791af5 100644
--- a/test/e2e/framework/network/utils.go
+++ b/test/e2e/framework/network/utils.go
@@ -438,7 +438,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
 //   (See the TODO about checking probability, which isnt implemented yet).
 // - maxTries is the maximum number of curl/echo attempts before an error is returned. The
 //   smaller this number is, the less 'slack' there is for declaring success.
-// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints wont be hit.
+// - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit.
 // - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without
 //   success on all endpoints).
 // In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic.
diff --git a/test/e2e/network/netpol/network_policy.go b/test/e2e/network/netpol/network_policy.go
index f254e39bb5f4a..928c739d39edc 100644
--- a/test/e2e/network/netpol/network_policy.go
+++ b/test/e2e/network/netpol/network_policy.go
@@ -636,7 +636,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
 				{
 					Ports: []networkingv1.NetworkPolicyPort{
 						{
-							// dont use named ports
+							// don't use named ports
 							Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80},
 						},
 						{
diff --git a/test/e2e/storage/drivers/proxy/portproxy.go b/test/e2e/storage/drivers/proxy/portproxy.go
index aef56974ad884..e9cb70482e9e0 100644
--- a/test/e2e/storage/drivers/proxy/portproxy.go
+++ b/test/e2e/storage/drivers/proxy/portproxy.go
@@ -43,7 +43,7 @@ import (
 // Maximum number of forwarded connections. In practice we don't
 // need more than one per sidecar and kubelet. Keeping this reasonably
 // small ensures that we don't establish connections through the apiserver
-// and the remote kernel which then arent' needed.
+// and the remote kernel which then aren't needed.
 const maxConcurrentConnections = 10
 
 // Listen creates a listener which returns new connections whenever someone connects
diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go
index 894d250a55331..79ec33822d068 100644
--- a/test/e2e/windows/dns.go
+++ b/test/e2e/windows/dns.go
@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {
 		ginkgo.By("Verifying that curl queries FAIL for wrong URLs")
 
-		// the below tests use curl because nslookup doesnt seem to use ndots properly
+		// the below tests use curl because nslookup doesn't seem to use ndots properly
 		// ideally we'd use the powershell native ResolveDns but, that is not a part of agnhost images (as of k8s 1.20)
 		// TODO @jayunit100 add ResolveHost to agn images
diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index 6d579130e7e79..a190f737e72ef 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -597,7 +597,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
 		framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
 	}
 
-	ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
+	ginkgo.By("checking eviction ordering and ensuring important pods don't fail")
 	done := true
 	for _, priorityPodSpec := range testSpecs {
 		var priorityPod v1.Pod
diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go
index e9c8f7f17ed48..94e6dae32cb82 100644
--- a/test/integration/apiserver/apiserver_test.go
+++ b/test/integration/apiserver/apiserver_test.go
@@ -102,7 +102,7 @@ func setupWithResourcesWithOptions(t *testing.T, opts *framework.MasterConfigOpt
 }
 
 func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode int) {
-	// We dont use the typed Go client to send this request to be able to verify the response status code.
+	// We don't use the typed Go client to send this request to be able to verify the response status code.
 	bodyBytes := bytes.NewReader([]byte(body))
 	req, err := http.NewRequest(verb, URL, bodyBytes)
 	if err != nil {