Fixed several spelling mistakes
Niekvdplas committed Mar 30, 2021
1 parent 6572fe4 commit fec272a
Showing 26 changed files with 27 additions and 27 deletions.
2 changes: 1 addition & 1 deletion pkg/controller/cronjob/cronjob_controllerv2.go
@@ -169,7 +169,7 @@ func (jm *ControllerV2) sync(cronJobKey string) (*time.Duration, error) {
cronJob, err := jm.cronJobLister.CronJobs(ns).Get(name)
switch {
case errors.IsNotFound(err):
- // may be cronjob is deleted, dont need to requeue this key
+ // may be cronjob is deleted, don't need to requeue this key
klog.V(4).InfoS("cronjob not found, may be it is deleted", "cronjob", klog.KRef(ns, name), "err", err)
return nil, nil
case err != nil:
2 changes: 1 addition & 1 deletion pkg/kubelet/eviction/eviction_manager_test.go
@@ -889,7 +889,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
// induce disk pressure!
fakeClock.Step(1 * time.Minute)
summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
- // Dont reclaim any disk
+ // Don't reclaim any disk
diskGC.summaryAfterGC = summaryStatsMaker("400Mi", "200Gi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)

2 changes: 1 addition & 1 deletion pkg/kubelet/kubelet.go
@@ -464,7 +464,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Kubelet client is not nil")
} else {
- // we dont have a client to sync!
+ // we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
2 changes: 1 addition & 1 deletion pkg/kubelet/stats/provider.go
@@ -144,7 +144,7 @@ func (p *Provider) RootFsStats() (*statsapi.FsStats, error) {
}

// Get the root container stats's timestamp, which will be used as the
- // imageFs stats timestamp. Dont force a stats update, as we only want the timestamp.
+ // imageFs stats timestamp. Don't force a stats update, as we only want the timestamp.
rootStats, err := getCgroupStats(p.cadvisor, "/", false)
if err != nil {
return nil, fmt.Errorf("failed to get root container stats: %v", err)
2 changes: 1 addition & 1 deletion pkg/quota/v1/evaluator/core/persistent_volume_claims.go
@@ -111,7 +111,7 @@ func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.Scope
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
- // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+ // It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}
2 changes: 1 addition & 1 deletion pkg/quota/v1/evaluator/core/pods.go
@@ -190,7 +190,7 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []core
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
- // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+ // It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{}
for _, selector := range limitedScopes {
2 changes: 1 addition & 1 deletion pkg/quota/v1/evaluator/core/services.go
@@ -90,7 +90,7 @@ func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.S
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
- // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+ // It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}
@@ -386,7 +386,7 @@ func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions {
// addUnsetLabelsToMap backfills missing values with values we find in a map.
func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
for _, l := range labelsToAdd {
- // if the label is already there, dont overwrite it.
+ // if the label is already there, don't overwrite it.
if _, exists := aL[l]; exists {
continue
}
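The hunk above breaks off just before the backfill itself; the remainder of the loop body reduces to a copy-if-present step, sketched here under the assumption that labels.Set indexes like a plain map[string]string (an illustration, not the scheduler's verbatim code):

		// Copy the value from the node's label set only when the node
		// actually carries that label; existing entries in aL always win.
		if v, ok := labelSet[l]; ok {
			aL[l] = v
		}
	}
}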
2 changes: 1 addition & 1 deletion pkg/scheduler/scheduler_test.go
@@ -719,7 +719,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
scache := internalcache.New(10*time.Minute, stop)

- // Design the baseline for the pods, and we will make nodes that dont fit it later.
+ // Design the baseline for the pods, and we will make nodes that don't fit it later.
var cpu = int64(4)
var mem = int64(500)
podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
2 changes: 1 addition & 1 deletion pkg/volume/emptydir/empty_dir.go
@@ -117,7 +117,7 @@ func calculateEmptyDirMemorySize(nodeAllocatableMemory *resource.Quantity, spec
return sizeLimit
}

- // size limit defaults to node allocatable (pods cant consume more memory than all pods)
+ // size limit defaults to node allocatable (pods can't consume more memory than all pods)
sizeLimit = nodeAllocatableMemory
zero := resource.MustParse("0")

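The comment above states the whole rule, which can be summarized as a standalone sketch; the helper name and the nil handling are assumptions, while resource.Quantity is the real apimachinery type:

import "k8s.io/apimachinery/pkg/api/resource"

// effectiveEmptyDirMemorySize: an explicit non-zero limit wins; otherwise the
// ceiling defaults to node allocatable memory, since a memory-backed emptyDir
// can never consume more than the node grants to all of its pods.
func effectiveEmptyDirMemorySize(nodeAllocatable, sizeLimit *resource.Quantity) *resource.Quantity {
	if sizeLimit != nil && !sizeLimit.IsZero() {
		return sizeLimit
	}
	return nodeAllocatable
}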
2 changes: 1 addition & 1 deletion pkg/volume/scaleio/sio_client.go
@@ -477,7 +477,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error {
go func() {
klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
}()
- // cant find vol id, then ok.
+ // can't find vol id, then ok.
if _, ok := devMap[token]; !ok {
return nil
}
4 changes: 2 additions & 2 deletions pkg/volume/vsphere_volume/vsphere_volume_block_test.go
@@ -43,7 +43,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
@@ -80,7 +80,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
@@ -175,7 +175,7 @@ func (l *persistentVolumeLabel) findVolumeLabels(volume *api.PersistentVolume) (
topologyLabelGA := true
domain, domainOK := existingLabels[v1.LabelTopologyZone]
region, regionOK := existingLabels[v1.LabelTopologyRegion]
- // If they dont have GA labels we should check for failuredomain beta labels
+ // If they don't have GA labels we should check for failuredomain beta labels
// TODO: remove this once all the cloud provider change to GA topology labels
if !domainOK || !regionOK {
topologyLabelGA = false
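Spelled out, the GA-first, beta-fallback lookup in this hunk looks like the following self-contained sketch; the label constants are the real k8s.io/api/core/v1 names, while the function itself is illustrative:

import v1 "k8s.io/api/core/v1"

// zoneAndRegion prefers the GA topology labels and, when either is missing,
// falls back to the deprecated beta failure-domain labels that older cloud
// providers still set.
func zoneAndRegion(labels map[string]string) (string, string, bool) {
	zone, zoneOK := labels[v1.LabelTopologyZone]
	region, regionOK := labels[v1.LabelTopologyRegion]
	if !zoneOK || !regionOK {
		zone, zoneOK = labels[v1.LabelFailureDomainBetaZone]
		region, regionOK = labels[v1.LabelFailureDomainBetaRegion]
	}
	return zone, region, zoneOK && regionOK
}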
@@ -40,7 +40,7 @@ func (a statusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set
fieldpath.APIVersion(a.customResourceStrategy.kind.GroupVersion().String()): fieldpath.NewSet(
// Note that if there are other top level fields unique to CRDs,
// those will also get removed by the apiserver prior to persisting,
- // but wont be added to the resetFields set.
+ // but won't be added to the resetFields set.

// This isn't an issue now, but if it becomes an issue in the future
// we might need a mechanism that is the inverse of resetFields where
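For orientation, a reset-fields set is a set of top-level field paths that the apiserver prunes from the incoming object before persisting; a minimal sketch with the structured-merge-diff fieldpath package, where the chosen path is an assumption for illustration:

import "sigs.k8s.io/structured-merge-diff/v4/fieldpath"

// resetFieldsFor builds a one-path reset set; a status-style endpoint would
// list the paths (here .spec, as an example) that updates through it ignore.
func resetFieldsFor(gv string) map[fieldpath.APIVersion]*fieldpath.Set {
	return map[fieldpath.APIVersion]*fieldpath.Set{
		fieldpath.APIVersion(gv): fieldpath.NewSet(
			fieldpath.MakePathOrDie("spec"),
		),
	}
}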
@@ -270,7 +270,7 @@ func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []core
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
- // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+ // It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}
2 changes: 1 addition & 1 deletion staging/src/k8s.io/apiserver/pkg/quota/v1/interfaces.go
@@ -54,7 +54,7 @@ type Evaluator interface {
Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error)
// MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object.
MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
- // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+ // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error)
// MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches.
MatchingResources(input []corev1.ResourceName) []corev1.ResourceName
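Of the evaluators touched by this commit, only the pod evaluator returns anything but an empty slice here; its shape reduces to a set difference over scopes, roughly like this sketch (coverage is keyed on ScopeName alone, a simplification of the real matching rules):

import corev1 "k8s.io/api/core/v1"

// uncoveredScopes returns the limited scopes with no covering matched scope.
func uncoveredScopes(limited, matched []corev1.ScopedResourceSelectorRequirement) []corev1.ScopedResourceSelectorRequirement {
	covered := map[corev1.ResourceQuotaScope]bool{}
	for _, m := range matched {
		covered[m.ScopeName] = true
	}
	uncovered := []corev1.ScopedResourceSelectorRequirement{}
	for _, l := range limited {
		if !covered[l.ScopeName] {
			uncovered = append(uncovered, l)
		}
	}
	return uncovered
}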
@@ -359,7 +359,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
}
} else {
// User has not specified any override for this group version.
- // filter out types which dont have genclient.
+ // filter out types which don't have genclient.
if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient {
continue
}
@@ -162,7 +162,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
generatingForPackage := false
for _, t := range p.Types {
- // filter out types which dont have genclient.
+ // filter out types which don't have genclient.
if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
continue
}
@@ -1404,7 +1404,7 @@ func getAvailabilitySetName(az *Cloud, vmIndex int, numAS int) string {
}

// test supporting on 1 nic per vm
- // we really dont care about the name of the nic
+ // we really don't care about the name of the nic
// just using the vm name for testing purposes
func getNICName(vmIndex int) string {
return getVMName(vmIndex)
2 changes: 1 addition & 1 deletion test/e2e/common/network/networking.go
@@ -47,7 +47,7 @@ var _ = SIGDescribe("Networking", func() {
// Second time, we pass through pods more carefully...
framework.Logf("Going to retry %v out of %v pods....", len(failedPodsByHost), len(config.EndpointPods))
for host, failedPods := range failedPodsByHost {
framework.Logf("Doublechecking %v pods in host %v which werent seen the first time.", len(failedPods), host)
framework.Logf("Doublechecking %v pods in host %v which weren't seen the first time.", len(failedPods), host)
for _, endpointPod := range failedPods {
framework.Logf("Now attempting to probe pod [[[ %v ]]]", endpointPod.Status.PodIP)
if err := config.DialFromTestContainer(protocol, endpointPod.Status.PodIP, port, config.MaxTries, 0, sets.NewString(endpointPod.Name)); err != nil {
2 changes: 1 addition & 1 deletion test/e2e/framework/network/utils.go
@@ -438,7 +438,7 @@ func (config *NetworkingTestConfig) GetHTTPCodeFromTestContainer(path, targetIP
// (See the TODO about checking probability, which isnt implemented yet).
// - maxTries is the maximum number of curl/echo attempts before an error is returned. The
// smaller this number is, the less 'slack' there is for declaring success.
- // - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints wont be hit.
+ // - if maxTries < expectedEps, this test is guaranteed to return an error, because all endpoints won't be hit.
// - maxTries == minTries will return as soon as all endpoints succeed (or fail once maxTries is reached without
// success on all endpoints).
// In general its prudent to have a high enough level of minTries to guarantee that all pods get a fair chance at receiving traffic.
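The maxTries >= expectedEps requirement in that comment is a pigeonhole argument: each curl/echo attempt reaches exactly one endpoint, so observing expectedEps distinct endpoints needs at least expectedEps tries. As a guard, illustrative only, not a framework function:

// dialParamsFeasible reports whether the try budget can possibly cover all
// expected endpoints.
func dialParamsFeasible(minTries, maxTries, expectedEps int) bool {
	return minTries <= maxTries && maxTries >= expectedEps
}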
2 changes: 1 addition & 1 deletion test/e2e/network/netpol/network_policy.go
@@ -636,7 +636,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
{
Ports: []networkingv1.NetworkPolicyPort{
{
- // dont use named ports
+ // don't use named ports
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80},
},
{
2 changes: 1 addition & 1 deletion test/e2e/storage/drivers/proxy/portproxy.go
@@ -43,7 +43,7 @@ import (
// Maximum number of forwarded connections. In practice we don't
// need more than one per sidecar and kubelet. Keeping this reasonably
// small ensures that we don't establish connections through the apiserver
- // and the remote kernel which then arent' needed.
+ // and the remote kernel which then aren't needed.
const maxConcurrentConnections = 10

// Listen creates a listener which returns new connections whenever someone connects
2 changes: 1 addition & 1 deletion test/e2e/windows/dns.go
@@ -95,7 +95,7 @@ var _ = SIGDescribe("[Feature:Windows] DNS", func() {

ginkgo.By("Verifying that curl queries FAIL for wrong URLs")

- // the below tests use curl because nslookup doesnt seem to use ndots properly
+ // the below tests use curl because nslookup doesn't seem to use ndots properly
// ideally we'd use the powershell native ResolveDns but, that is not a part of agnhost images (as of k8s 1.20)
// TODO @jayunit100 add ResolveHost to agn images

2 changes: 1 addition & 1 deletion test/e2e_node/eviction_test.go
@@ -597,7 +597,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
}

ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
ginkgo.By("checking eviction ordering and ensuring important pods don't fail")
done := true
for _, priorityPodSpec := range testSpecs {
var priorityPod v1.Pod
2 changes: 1 addition & 1 deletion test/integration/apiserver/apiserver_test.go
@@ -102,7 +102,7 @@ func setupWithResourcesWithOptions(t *testing.T, opts *framework.MasterConfigOpt
}

func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode int) {
- // We dont use the typed Go client to send this request to be able to verify the response status code.
+ // We don't use the typed Go client to send this request to be able to verify the response status code.
bodyBytes := bytes.NewReader([]byte(body))
req, err := http.NewRequest(verb, URL, bodyBytes)
if err != nil {
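The hunk breaks off inside the error check for building the request; a hedged sketch of how the verification then proceeds (http.DefaultClient and the exact failure messages are assumptions, not necessarily the test's code):

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("request %s %s failed: %v", verb, URL, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != expectedStatusCode {
		t.Errorf("got status %d, want %d for %s %s", resp.StatusCode, expectedStatusCode, verb, URL)
	}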
