Skip to content

Commit 043fa69

Browse files
committed
Update golangci-lint (1.44.0 -> 1.48.0) and Go version (1.17 -> 1.19) for lint workflow
Also fix new lint findings
1 parent 50d525e commit 043fa69

File tree

35 files changed

+238
-213
lines changed

35 files changed

+238
-213
lines changed

.github/workflows/golangci-lint.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,9 @@ jobs:
2121
- uses: actions/checkout@v3
2222
- uses: actions/setup-go@v3
2323
with:
24-
go-version: 1.17
24+
go-version: 1.19
2525
- name: golangci-lint
2626
uses: golangci/[email protected]
2727
with:
28-
version: v1.44.0
28+
version: v1.48.0
2929
working-directory: ${{matrix.working-directory}}

cmd/clusterctl/api/v1alpha3/metadata_type_test.go

+1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
See the License for the specific language governing permissions and
1414
limitations under the License.
1515
*/
16+
1617
package v1alpha3
1718

1819
import (

cmd/clusterctl/client/cluster/proxy.go

+6-6
Original file line numberDiff line numberDiff line change
@@ -206,12 +206,12 @@ func (k *proxy) CheckClusterAvailable() error {
206206
// This is done to avoid errors when listing resources of providers which have already been deleted/scaled down to 0 replicas/with
207207
// malfunctioning webhooks.
208208
// For example:
209-
// * The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity.
210-
// * The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred
211-
// version is v1beta1)
212-
// * If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources
213-
// as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted.
214-
// * Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider.
209+
// - The AWS provider has already been deleted, but there are still cluster-wide resources of AWSClusterControllerIdentity.
210+
// - The AWSClusterControllerIdentity resources are still stored in an older version (e.g. v1alpha4, when the preferred
211+
// version is v1beta1)
212+
// - If we now want to delete e.g. the kubeadm bootstrap provider, we cannot list AWSClusterControllerIdentity resources
213+
// as the conversion would fail, because the AWS controller hosting the conversion webhook has already been deleted.
214+
// - Thus we exclude resources of other providers if we detect that ListResources is called to list resources of a provider.
215215
func (k *proxy) ListResources(labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) {
216216
cs, err := k.newClientSet()
217217
if err != nil {

cmd/clusterctl/client/cluster/topology.go

+16-14
Original file line numberDiff line numberDiff line change
@@ -257,9 +257,9 @@ func (t *topologyClient) validateInput(in *TopologyPlanInput) error {
257257
}
258258

259259
// prepareInput does the following on the input objects:
260-
// - Set the target namespace on the objects if not set (this operation is generally done by kubectl)
261-
// - Prepare cluster objects so that the state of the cluster, if modified, correctly represents
262-
// the expected changes.
260+
// - Set the target namespace on the objects if not set (this operation is generally done by kubectl)
261+
// - Prepare cluster objects so that the state of the cluster, if modified, correctly represents
262+
// the expected changes.
263263
func (t *topologyClient) prepareInput(ctx context.Context, in *TopologyPlanInput, apiReader client.Reader) error {
264264
if err := t.setMissingNamespaces(in.TargetNamespace, in.Objs); err != nil {
265265
return errors.Wrap(err, "failed to set missing namespaces")
@@ -297,18 +297,20 @@ func (t *topologyClient) setMissingNamespaces(currentNamespace string, objs []*u
297297
}
298298

299299
// prepareClusters does the following operations on each Cluster in the input.
300-
// - Check if the Cluster exists in the real apiserver.
301-
// - If the Cluster exists in the real apiserver we merge the object from the
302-
// server with the object from the input. This final object correctly represents the
303-
// modified cluster object.
304-
// Note: We are using a simple 2-way merge to calculate the final object in this function
305-
// to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly
306-
// the same way as kubectl does.
300+
// - Check if the Cluster exists in the real apiserver.
301+
// - If the Cluster exists in the real apiserver we merge the object from the
302+
// server with the object from the input. This final object correctly represents the
303+
// modified cluster object.
304+
// Note: We are using a simple 2-way merge to calculate the final object in this function
305+
// to keep the function simple. In reality kubectl does a lot more. This function does not behave exactly
306+
// the same way as kubectl does.
307+
//
307308
// *Important note*: We do this above operation because the topology reconciler in a
308-
// real run takes as input a cluster object from the apiserver that has merged spec of
309-
// the changes in the input and the one stored in the server. For example: the cluster
310-
// object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef
311-
// but the merged object will have these fields set.
309+
//
310+
// real run takes as input a cluster object from the apiserver that has merged spec of
311+
// the changes in the input and the one stored in the server. For example: the cluster
312+
// object in the input will not have cluster.spec.infrastructureRef and cluster.spec.controlPlaneRef
313+
// but the merged object will have these fields set.
312314
func (t *topologyClient) prepareClusters(ctx context.Context, clusters []*unstructured.Unstructured, apiReader client.Reader) error {
313315
if apiReader == nil {
314316
// If there is no backing server there is nothing more to do here.

cmd/clusterctl/client/repository/template.go

+6-6
Original file line numberDiff line numberDiff line change
@@ -144,12 +144,12 @@ func NewTemplate(input TemplateInput) (Template, error) {
144144

145145
// MergeTemplates merges the provided Templates into one Template.
146146
// Notes on the merge operation:
147-
// - The merge operation returns an error if all the templates do not have the same TargetNamespace.
148-
// - The Variables of the resulting template is a union of all Variables in the templates.
149-
// - The default value is picked from the first template that defines it.
150-
// The defaults of the same variable in the subsequent templates will be ignored.
151-
// (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence)
152-
// - The Objs of the final template will be a union of all the Objs in the templates.
147+
// - The merge operation returns an error if all the templates do not have the same TargetNamespace.
148+
// - The Variables of the resulting template is a union of all Variables in the templates.
149+
// - The default value is picked from the first template that defines it.
150+
// The defaults of the same variable in the subsequent templates will be ignored.
151+
// (e.g when merging a cluster template and its ClusterClass, the default value from the template takes precedence)
152+
// - The Objs of the final template will be a union of all the Objs in the templates.
153153
func MergeTemplates(templates ...Template) (Template, error) {
154154
templates = filterNilTemplates(templates...)
155155
if len(templates) == 0 {

cmd/clusterctl/client/tree/doc.go

+17-17
Original file line numberDiff line numberDiff line change
@@ -21,29 +21,29 @@ understanding if there are problems and where.
2121
The "at a glance" view is based on the idea that we should avoid overloading the user with information, but instead
2222
surface problems, if any; in practice:
2323
24-
- The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md.
25-
As a consequence each object should have a Ready condition summarizing the object state.
24+
- The view assumes we are processing objects conforming with https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20200506-conditions.md.
25+
As a consequence each object should have a Ready condition summarizing the object state.
2626
27-
- The view organizes objects in a hierarchical tree, however it is not required that the
28-
tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status
29-
e.g. secrets or templates.
27+
- The view organizes objects in a hierarchical tree, however it is not required that the
28+
tree reflects the ownerReference tree so it is possible to skip objects not relevant for triaging the cluster status
29+
e.g. secrets or templates.
3030
31-
- It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users,
32-
e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.).
31+
- It is possible to add "meta names" to object, thus making hierarchical tree more consistent for the users,
32+
e.g. use MachineInfrastructure instead of using all the different infrastructure machine kinds (AWSMachine, VSphereMachine etc.).
3333
34-
- It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users,
35-
e.g. adding a Workers object to group all the MachineDeployments.
34+
- It is possible to add "virtual nodes", thus allowing to make the hierarchical tree more meaningful for the users,
35+
e.g. adding a Workers object to group all the MachineDeployments.
3636
37-
- It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true
38-
in a single node instead of listing each one of them.
37+
- It is possible to "group" siblings objects by ready condition e.g. group all the machines with Ready=true
38+
in a single node instead of listing each one of them.
3939
40-
- Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
41-
(reporting the same condition at the parent/child) e.g. if a machine's Ready condition is already
42-
surface an error from the infrastructure machine, let's avoid to show the InfrastructureMachine
43-
given that representing its state is redundant in this case.
40+
- Given that the ready condition of the child object bubbles up to the parents, it is possible to avoid the "echo"
41+
(reporting the same condition at the parent/child) e.g. if a machine's Ready condition is already
42+
surface an error from the infrastructure machine, let's avoid to show the InfrastructureMachine
43+
given that representing its state is redundant in this case.
4444
45-
- In order to avoid long list of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
46-
same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready
45+
- In order to avoid long list of objects (think e.g. a cluster with 50 worker machines), sibling objects with the
46+
same value for the ready condition can be grouped together into a virtual node, e.g. 10 Machines ready
4747
4848
The ObjectTree object defined implements all the above behaviors of the "at glance" visualization, by generating
4949
a tree of Kubernetes objects; each object gets a set of annotation, reflecting its own visualization specific attributes,

cmd/clusterctl/client/yamlprocessor/simple_processor_test.go

+1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
See the License for the specific language governing permissions and
1414
limitations under the License.
1515
*/
16+
1617
package yamlprocessor
1718

1819
import (

cmd/clusterctl/log/logger.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -150,11 +150,11 @@ func copySlice(in []interface{}) []interface{} {
150150

151151
// flatten returns a human readable/machine parsable text representing the LogEntry.
152152
// Most notable difference with the klog implementation are:
153-
// - The message is printed at the beginning of the line, without the Msg= variable name e.g.
154-
// "Msg"="This is a message" --> This is a message
155-
// - Variables name are not quoted, eg.
156-
// This is a message "Var1"="value" --> This is a message Var1="value"
157-
// - Variables are not sorted, thus allowing full control to the developer on the output.
153+
// - The message is printed at the beginning of the line, without the Msg= variable name e.g.
154+
// "Msg"="This is a message" --> This is a message
155+
// - Variables name are not quoted, eg.
156+
// This is a message "Var1"="value" --> This is a message Var1="value"
157+
// - Variables are not sorted, thus allowing full control to the developer on the output.
158158
func flatten(entry logEntry) (string, error) {
159159
var msgValue string
160160
var errorValue error

controllers/noderefutil/providerid.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,10 @@ type ProviderID struct {
4141
}
4242

4343
/*
44-
- must start with at least one non-colon
45-
- followed by ://
46-
- followed by any number of characters
47-
- must end with a non-slash
44+
- must start with at least one non-colon
45+
- followed by ://
46+
- followed by any number of characters
47+
- must end with a non-slash.
4848
*/
4949
var providerIDRegex = regexp.MustCompile("^[^:]+://.*[^/]$")
5050

controllers/remote/cluster_cache_reconciler_test.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ func TestClusterCacheReconciler(t *testing.T) {
4545

4646
// createAndWatchCluster creates a new cluster and ensures the clusterCacheTracker has a clusterAccessor for it
4747
createAndWatchCluster := func(clusterName string, testNamespace *corev1.Namespace, g *WithT) {
48-
t.Log(fmt.Sprintf("Creating a cluster %q", clusterName))
48+
t.Logf("Creating a cluster %q", clusterName)
4949
testCluster := &clusterv1.Cluster{
5050
ObjectMeta: metav1.ObjectMeta{
5151
Name: clusterName,
@@ -136,7 +136,7 @@ func TestClusterCacheReconciler(t *testing.T) {
136136
defer teardown(t, g, testNamespace)
137137

138138
for _, clusterName := range []string{"cluster-1", "cluster-2", "cluster-3"} {
139-
t.Log(fmt.Sprintf("Deleting cluster %q", clusterName))
139+
t.Logf("Deleting cluster %q", clusterName)
140140
obj := &clusterv1.Cluster{
141141
ObjectMeta: metav1.ObjectMeta{
142142
Namespace: testNamespace.Name,
@@ -145,7 +145,7 @@ func TestClusterCacheReconciler(t *testing.T) {
145145
}
146146
g.Expect(k8sClient.Delete(ctx, obj)).To(Succeed())
147147

148-
t.Log(fmt.Sprintf("Checking cluster %q's clusterAccessor is removed", clusterName))
148+
t.Logf("Checking cluster %q's clusterAccessor is removed", clusterName)
149149
g.Eventually(func() bool { return cct.clusterAccessorExists(util.ObjectKey(obj)) }, timeout).Should(BeFalse())
150150
}
151151
})

controlplane/kubeadm/internal/controllers/remediation.go

+7-7
Original file line numberDiff line numberDiff line change
@@ -190,13 +190,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
190190
//
191191
// The answer mostly depend on the existence of other failing members on top of the one being deleted, and according
192192
// to the etcd fault tolerance specification (see https://etcd.io/docs/v3.3/faq/#what-is-failure-tolerance):
193-
// - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target
194-
// cluster size after deletion is 2, fault tolerance 0)
195-
// - 5 CP cluster tolerates 1 additional failing members on top of the one being deleted (the target
196-
// cluster size after deletion is 4, fault tolerance 1)
197-
// - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target
198-
// cluster size after deletion is 6, fault tolerance 2)
199-
// - etc.
193+
// - 3 CP cluster does not tolerate additional failing members on top of the one being deleted (the target
194+
// cluster size after deletion is 2, fault tolerance 0)
195+
// - 5 CP cluster tolerates 1 additional failing members on top of the one being deleted (the target
196+
// cluster size after deletion is 4, fault tolerance 1)
197+
// - 7 CP cluster tolerates 2 additional failing members on top of the one being deleted (the target
198+
// cluster size after deletion is 6, fault tolerance 2)
199+
// - etc.
200200
//
201201
// NOTE: this func assumes the list of members in sync with the list of machines/nodes, it is required to call reconcileEtcdMembers
202202
as well as reconcileControlPlaneConditions before this.

hack/tools/conversion-verifier/doc.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,9 @@ limitations under the License.
1717
// This command line application runs verification steps for conversion types.
1818
//
1919
// The following checks are performed:
20-
// - For each API Kind and Group, only one storage version must exist.
21-
// - Each storage version type and its List counterpart, if there are multiple API versions,
22-
// the type MUST have a Hub() method.
23-
// - For each type with multiple versions, that has a Hub() and storage version,
24-
// the type MUST have ConvertFrom() and ConvertTo() methods.
20+
// - For each API Kind and Group, only one storage version must exist.
21+
// - Each storage version type and its List counterpart, if there are multiple API versions,
22+
// the type MUST have a Hub() method.
23+
// - For each type with multiple versions, that has a Hub() and storage version,
24+
// the type MUST have ConvertFrom() and ConvertTo() methods.
2525
package main

internal/controllers/machinedeployment/machinedeployment_sync.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -68,10 +68,10 @@ func (r *Reconciler) sync(ctx context.Context, d *clusterv1.MachineDeployment, m
6868
// msList should come from getMachineSetsForDeployment(d).
6969
// machineMap should come from getMachineMapForDeployment(d, msList).
7070
//
71-
// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
72-
// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1),
73-
// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
74-
// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
71+
// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV).
72+
// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1),
73+
// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
74+
// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
7575
//
7676
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
7777
// This may lead to stale reads of machine sets, thus incorrect deployment status.

internal/controllers/machinedeployment/mdutil/util.go

+4-3
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,8 @@ var annotationsToSkip = map[string]bool{
138138

139139
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
140140
// TODO(tbd): How to decide which annotations should / should not be copied?
141-
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
141+
//
142+
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
142143
func skipCopyAnnotation(key string) bool {
143144
return annotationsToSkip[key]
144145
}
@@ -411,8 +412,8 @@ func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*cluste
411412

412413
// FindOldMachineSets returns the old machine sets targeted by the given Deployment, with the given slice of MSes.
413414
// Returns two list of machine sets
414-
// - the first contains all old machine sets with all non-zero replicas
415-
// - the second contains all old machine sets
415+
// - the first contains all old machine sets with all non-zero replicas
416+
// - the second contains all old machine sets
416417
func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) ([]*clusterv1.MachineSet, []*clusterv1.MachineSet) {
417418
var requiredMSs []*clusterv1.MachineSet
418419
allMSs := make([]*clusterv1.MachineSet, 0, len(msList))

internal/controllers/machinehealthcheck/machinehealthcheck_controller_test.go

+1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1313
See the License for the specific language governing permissions and
1414
limitations under the License.
1515
*/
16+
1617
package machinehealthcheck
1718

1819
import (

internal/controllers/topology/cluster/cluster_controller_test.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -854,11 +854,11 @@ func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) error {
854854
}
855855

856856
// assertControlPlaneReconcile checks if the ControlPlane object:
857-
// 1) Is created.
858-
// 2) Has the correct labels and annotations.
859-
// 3) If it requires ControlPlane Infrastructure and if so:
860-
// i) That the infrastructureMachineTemplate is created correctly.
861-
// ii) That the infrastructureMachineTemplate has the correct labels and annotations
857+
// 1. Is created.
858+
// 2. Has the correct labels and annotations.
859+
// 3. If it requires ControlPlane Infrastructure and if so:
860+
// i) That the infrastructureMachineTemplate is created correctly.
861+
// ii) That the infrastructureMachineTemplate has the correct labels and annotations
862862
func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error {
863863
cp, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name)
864864
if err != nil {

0 commit comments

Comments
 (0)