fix linter findings & tests
sbueringer committed Jan 10, 2024

1 parent aa29d37 commit 5a08f09
Showing 35 changed files with 132 additions and 134 deletions.
2 changes: 1 addition & 1 deletion bootstrap/kubeadm/main.go
@@ -168,7 +168,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
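Worth noting for the other files repeating this hunk: the dropped `%v` is the actual linter finding. `setupLog` is a logr-style logger, and `Error(err, msg)` treats the message as a literal string rather than a printf format, so the verb could never be expanded. A standalone sketch of the behavior (not from the commit):

```go
package main

import (
	"errors"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	ctrl.SetLogger(zap.New())
	setupLog := ctrl.Log.WithName("setup")
	err := errors.New("flag not found")

	// Before: logr does no printf expansion, so "%v" is printed verbatim.
	setupLog.Error(err, "failed to set log level: %v")

	// After: a plain message; the error is already attached as a structured field.
	setupLog.Error(err, "failed to set default log level")
}
```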
6 changes: 3 additions & 3 deletions bootstrap/util/configowner.go
@@ -191,20 +191,20 @@ func GetTypedOwnerByRef(ctx context.Context, c client.Client, ref *corev1.Object
if err != nil {
return nil, errors.Wrapf(err, "failed to construct object of type %s", ref.GroupVersionKind())
}
metaObj, ok := obj.(client.Object)
clientObj, ok := obj.(client.Object)
if !ok {
return nil, errors.Errorf("expected owner reference to refer to a client.Object, is actually %T", obj)
}
key := types.NamespacedName{
Namespace: ref.Namespace,
Name: ref.Name,
}
err = c.Get(ctx, key, metaObj)
err = c.Get(ctx, key, clientObj)
if err != nil {
return nil, err
}

content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(metaObj)
content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(clientObj)
if err != nil {
return nil, err
}
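The rename from `metaObj` to `clientObj` matches the asserted type: `client.Object` is controller-runtime's union of `runtime.Object` and `metav1.Object`, which is what `Client.Get` requires. A hedged sketch of the surrounding pattern (helper name is illustrative, not from the commit):

```go
package example

import (
	"context"

	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getTyped mirrors the shape of GetTypedOwnerByRef above.
func getTyped(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, key types.NamespacedName) (client.Object, error) {
	obj, err := c.Scheme().New(gvk) // yields a bare runtime.Object
	if err != nil {
		return nil, err
	}
	// All registered API types implement client.Object; the assertion makes that explicit.
	clientObj, ok := obj.(client.Object)
	if !ok {
		return nil, errors.Errorf("expected a client.Object, got %T", obj)
	}
	if err := c.Get(ctx, key, clientObj); err != nil {
		return nil, err
	}
	return clientObj, nil
}
```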
24 changes: 16 additions & 8 deletions bootstrap/util/configowner_test.go
@@ -184,7 +184,8 @@ func TestHasNodeRefs(t *testing.T) {
g := NewWithT(t)
machine := &clusterv1.Machine{
TypeMeta: metav1.TypeMeta{
Kind: "Machine",
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-name",
@@ -205,7 +206,8 @@ func TestHasNodeRefs(t *testing.T) {
g := NewWithT(t)
machine := &clusterv1.Machine{
TypeMeta: metav1.TypeMeta{
Kind: "Machine",
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-name",
@@ -237,7 +239,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// No replicas specified (default is 1). No nodeRefs either.
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -247,7 +250,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 1 replica but no nodeRefs
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -260,7 +264,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 2 replicas but only 1 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -300,7 +305,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 1 replica (default) and 1 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -319,7 +325,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 2 replicas and nodeRefs
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
@@ -346,7 +353,8 @@ func TestHasNodeRefs(t *testing.T) {
{
// 0 replicas and 0 nodeRef
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: expv1.GroupVersion.String(),
Kind: "MachinePool",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
3 changes: 3 additions & 0 deletions cmd/clusterctl/client/cluster/mover.go
@@ -1315,6 +1315,9 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M
return nil
}

// applyMutators applies mutators to an object.
// Note: TypeMeta must always be set in the object because otherwise after conversion the
// resulting Unstructured would have an empty GVK.
func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) {
if object == nil {
return nil, nil
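The new comment is easy to verify: `runtime.DefaultUnstructuredConverter` only copies what is present in the struct, so a typed object without `TypeMeta` converts to an `Unstructured` with an empty `apiVersion`/`kind`. A small self-contained illustration:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}}

	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cluster)
	if err != nil {
		panic(err)
	}
	u := &unstructured.Unstructured{Object: raw}
	fmt.Println(u.GroupVersionKind()) // empty GVK: TypeMeta was never set

	cluster.TypeMeta = metav1.TypeMeta{
		APIVersion: clusterv1.GroupVersion.String(),
		Kind:       "Cluster",
	}
	raw, err = runtime.DefaultUnstructuredConverter.ToUnstructured(cluster)
	if err != nil {
		panic(err)
	}
	u = &unstructured.Unstructured{Object: raw}
	fmt.Println(u.GroupVersionKind()) // cluster.x-k8s.io/v1beta1, Kind=Cluster
}
```

The same reasoning is behind the `APIVersion` fields added to the fixtures in configowner_test.go above.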
16 changes: 0 additions & 16 deletions cmd/clusterctl/client/upgrade_test.go
@@ -181,10 +181,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // both providers should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
@@ -209,10 +205,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // only one provider should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
@@ -237,10 +229,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{ // only one provider should be upgraded
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.0", "cluster-api-system"),
@@ -265,10 +253,6 @@ func Test_clusterctlClient_ApplyUpgrade(t *testing.T) {
},
},
wantProviders: &clusterctlv1.ProviderList{
TypeMeta: metav1.TypeMeta{
APIVersion: clusterctlv1.GroupVersion.String(),
Kind: "ProviderList",
},
ListMeta: metav1.ListMeta{},
Items: []clusterctlv1.Provider{
fakeProvider("cluster-api", clusterctlv1.CoreProviderType, "v1.0.1", "cluster-api-system"),
5 changes: 2 additions & 3 deletions controllers/remote/cluster_cache_healthcheck_test.go
@@ -28,7 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -76,9 +76,8 @@ func TestClusterCacheHealthCheck(t *testing.T) {
k8sClient = mgr.GetClient()

t.Log("Setting up a ClusterCacheTracker")
log := klogr.New()
cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{
Log: &log,
Log: &ctrl.Log,
Indexes: []Index{NodeProviderIDIndex},
})
g.Expect(err).ToNot(HaveOccurred())
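Passing `&ctrl.Log` instead of a freshly built `klogr` works because `ctrl.Log` is controller-runtime's lazily bound global logger: it forwards to whatever `ctrl.SetLogger` installs, even when that happens after the tracker was constructed. A sketch, assuming the usual setup:

```go
package example

import (
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

func setupLogging() {
	// Anything already holding ctrl.Log (or a pointer to it) starts
	// logging through klog from this point on.
	ctrl.SetLogger(klog.Background())
	ctrl.Log.WithName("cluster-cache-tracker").Info("logger bound")
}
```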
12 changes: 6 additions & 6 deletions controlplane/kubeadm/internal/control_plane.go
@@ -96,8 +96,8 @@ func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains {
}

// MachineInFailureDomainWithMostMachines returns the first matching failure domain with machines that has the most control-plane machines on it.
func (c *ControlPlane) MachineInFailureDomainWithMostMachines(machines collections.Machines) (*clusterv1.Machine, error) {
fd := c.FailureDomainWithMostMachines(machines)
func (c *ControlPlane) MachineInFailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) (*clusterv1.Machine, error) {
fd := c.FailureDomainWithMostMachines(ctx, machines)
machinesInFailureDomain := machines.Filter(collections.InFailureDomains(fd))
machineToMark := machinesInFailureDomain.Oldest()
if machineToMark == nil {
@@ -116,7 +116,7 @@ func (c *ControlPlane) MachineWithDeleteAnnotation(machines collections.Machines

// FailureDomainWithMostMachines returns a fd which exists both in machines and control-plane machines and has the most
// control-plane machines on it.
func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machines) *string {
func (c *ControlPlane) FailureDomainWithMostMachines(ctx context.Context, machines collections.Machines) *string {
// See if there are any Machines that are not in currently defined failure domains first.
notInFailureDomains := machines.Filter(
collections.Not(collections.InFailureDomains(c.FailureDomains().FilterControlPlane().GetIDs()...)),
@@ -127,15 +127,15 @@ func (c *ControlPlane) FailureDomainWithMostMachines(machines collections.Machin
// in the cluster status.
return notInFailureDomains.Oldest().Spec.FailureDomain
}
return failuredomains.PickMost(c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
return failuredomains.PickMost(ctx, c.Cluster.Status.FailureDomains.FilterControlPlane(), c.Machines, machines)
}

// NextFailureDomainForScaleUp returns the failure domain with the fewest number of up-to-date machines.
func (c *ControlPlane) NextFailureDomainForScaleUp() *string {
func (c *ControlPlane) NextFailureDomainForScaleUp(ctx context.Context) *string {
if len(c.Cluster.Status.FailureDomains.FilterControlPlane()) == 0 {
return nil
}
return failuredomains.PickFewest(c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
return failuredomains.PickFewest(ctx, c.FailureDomains().FilterControlPlane(), c.UpToDateMachines())
}

// InitialControlPlaneConfig returns a new KubeadmConfigSpec that is to be used for an initializing control plane.
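These signature changes are the recurring refactor of the commit: helpers accept a `context.Context` instead of a `logr.Logger` parameter (or a throwaway `klogr.New()`), and recover the request-scoped logger inside. A minimal sketch of the convention:

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
)

func nextFailureDomain(ctx context.Context) {
	// LoggerFrom returns the logger stored in ctx by the controller
	// runtime, falling back to the global logger if none was injected.
	log := ctrl.LoggerFrom(ctx)
	log.Info("picking failure domain for scale up")
}
```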
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/control_plane_test.go
@@ -53,12 +53,12 @@ func TestControlPlane(t *testing.T) {
}

t.Run("With all machines in known failure domain, should return the FD with most number of machines", func(t *testing.T) {
g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("two"))
g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("two"))
})

t.Run("With some machines in non defined failure domains", func(t *testing.T) {
controlPlane.Machines.Insert(machine("machine-5", withFailureDomain("unknown")))
g.Expect(*controlPlane.FailureDomainWithMostMachines(controlPlane.Machines)).To(Equal("unknown"))
g.Expect(*controlPlane.FailureDomainWithMostMachines(ctx, controlPlane.Machines)).To(Equal("unknown"))
})
})
}
6 changes: 0 additions & 6 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -37,13 +37,11 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
@@ -1906,7 +1904,6 @@ kubernetesVersion: metav1.16.1`,
kubeadmCM.DeepCopy(),
}
fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := &fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -1964,7 +1961,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -2010,7 +2006,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
@@ -2071,7 +2066,6 @@ kubernetesVersion: metav1.16.1`,
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

workloadCluster := fakeWorkloadCluster{
Workload: &internal.Workload{
10 changes: 5 additions & 5 deletions controlplane/kubeadm/internal/controllers/scale.go
@@ -39,7 +39,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
logger := ctrl.LoggerFrom(ctx)

bootstrapSpec := controlPlane.InitialControlPlaneConfig()
fd := controlPlane.NextFailureDomainForScaleUp()
fd := controlPlane.NextFailureDomainForScaleUp(ctx)
if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
logger.Error(err, "Failed to create initial control plane Machine")
r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedInitialization", "Failed to create initial control plane Machine for cluster %s control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -60,7 +60,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,

// Create the bootstrap configuration
bootstrapSpec := controlPlane.JoinControlPlaneConfig()
fd := controlPlane.NextFailureDomainForScaleUp()
fd := controlPlane.NextFailureDomainForScaleUp(ctx)
if err := r.cloneConfigsAndGenerateMachine(ctx, controlPlane.Cluster, controlPlane.KCP, bootstrapSpec, fd); err != nil {
logger.Error(err, "Failed to create additional control plane Machine")
r.recorder.Eventf(controlPlane.KCP, corev1.EventTypeWarning, "FailedScaleUp", "Failed to create additional control plane Machine for cluster % control plane: %v", klog.KObj(controlPlane.Cluster), err)
@@ -79,7 +79,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
logger := ctrl.LoggerFrom(ctx)

// Pick the Machine that we should scale down.
machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines)
machineToDelete, err := selectMachineForScaleDown(ctx, controlPlane, outdatedMachines)
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to select machine for scale down")
}
@@ -223,7 +223,7 @@ func preflightCheckCondition(kind string, obj conditions.Getter, condition clust
return nil
}

func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
func selectMachineForScaleDown(ctx context.Context, controlPlane *internal.ControlPlane, outdatedMachines collections.Machines) (*clusterv1.Machine, error) {
machines := controlPlane.Machines
switch {
case controlPlane.MachineWithDeleteAnnotation(outdatedMachines).Len() > 0:
@@ -233,5 +233,5 @@ func selectMachineForScaleDown(controlPlane *internal.ControlPlane, outdatedMach
case outdatedMachines.Len() > 0:
machines = outdatedMachines
}
return controlPlane.MachineInFailureDomainWithMostMachines(machines)
return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines)
}
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/scale_test.go
@@ -464,7 +464,7 @@ func TestSelectMachineForScaleDown(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)

selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines)
selectedMachine, err := selectMachineForScaleDown(ctx, tc.cp, tc.outDatedMachines)

if tc.expectErr {
g.Expect(err).To(HaveOccurred())
7 changes: 0 additions & 7 deletions controlplane/kubeadm/internal/controllers/status_test.go
@@ -24,10 +24,8 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -72,7 +70,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())

fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -145,7 +142,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -219,7 +215,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -302,7 +297,6 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
objs = append(objs, n, m, kubeadmConfigMap())
machines[m.Name] = m
fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

r := &KubeadmControlPlaneReconciler{
Client: fakeClient,
@@ -383,7 +377,6 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr
}

fakeClient := newFakeClient(objs...)
log.SetLogger(klogr.New())

// Set all the machines to `not ready`
r := &KubeadmControlPlaneReconciler{
2 changes: 1 addition & 1 deletion controlplane/kubeadm/main.go
@@ -171,7 +171,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
Changes to another file (path not shown):
@@ -97,7 +97,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
5 changes: 1 addition & 4 deletions go.mod
@@ -46,7 +46,7 @@ require (
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
k8s.io/kubectl v0.29.0
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3
sigs.k8s.io/yaml v1.4.0
)

@@ -96,7 +96,6 @@ require (
github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
@@ -171,12 +170,10 @@ require (
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/cli-runtime v0.29.0 // indirect
k8s.io/component-helpers v0.29.0 // indirect
k8s.io/kms v0.29.0 // indirect
k8s.io/metrics v0.29.0 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
15 changes: 2 additions & 13 deletions go.sum
@@ -7,13 +7,8 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -82,8 +77,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/corefile-migration v1.0.21 h1:W/DCETrHDiFo0Wj03EyMkaQ9fwsmSgqTCQDHpceaSsE=
@@ -124,8 +117,6 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
@@ -859,8 +850,6 @@ k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=
k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=
k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kubectl v0.29.0 h1:Oqi48gXjikDhrBF67AYuZRTcJV4lg2l42GmvsP7FmYI=
@@ -874,8 +863,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d h1:+VI+1TP2lIsrMN3e50G/Iy9oz7lvPLtGnEvMihlcngg=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d/go.mod h1:a2fguE9/y45lR2Sc4Y/7jULuy8qi3Mnf1Gq5rGElNwk=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3 h1:X+B6Jz22aP0HMpW8ygVVcaqjLfPhmc2l8mNeEMEXmTs=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3/go.mod h1:2Bh+vGqK/uDH0rHQvlyzWWmVZw3JYetu+GOjd0pWKSQ=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
2 changes: 1 addition & 1 deletion hack/tools/go.mod
@@ -23,7 +23,7 @@ require (
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000
sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3
sigs.k8s.io/controller-tools v0.13.0
sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3
4 changes: 2 additions & 2 deletions hack/tools/go.sum
@@ -490,8 +490,8 @@ k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6R
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d h1:+VI+1TP2lIsrMN3e50G/Iy9oz7lvPLtGnEvMihlcngg=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d/go.mod h1:a2fguE9/y45lR2Sc4Y/7jULuy8qi3Mnf1Gq5rGElNwk=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3 h1:X+B6Jz22aP0HMpW8ygVVcaqjLfPhmc2l8mNeEMEXmTs=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3/go.mod h1:2Bh+vGqK/uDH0rHQvlyzWWmVZw3JYetu+GOjd0pWKSQ=
sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI=
sigs.k8s.io/controller-tools v0.13.0/go.mod h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
2 changes: 1 addition & 1 deletion hack/tools/runtime-openapi-gen/main.go
@@ -74,6 +74,6 @@ func main() {

err = os.WriteFile(*outputFile, openAPIBytes, 0600)
if err != nil {
klog.Exitf("Failed to write OpenAPI specification to file %q: %v", outputFile, err)
klog.Exitf("Failed to write OpenAPI specification to file %q: %v", *outputFile, err)
}
}
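A one-character fix with real effect: `outputFile` is the `*string` returned by `flag.String`, so formatting it with `%q` prints the pointer value, not the path. Standalone illustration:

```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	outputFile := flag.String("output-file", "openapi.json", "output path")
	flag.Parse()

	fmt.Printf("%q\n", outputFile)  // e.g. %!q(*string=0xc0000140a0)
	fmt.Printf("%q\n", *outputFile) // "openapi.json"
}
```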
Changes to another file (path not shown):
@@ -22,7 +22,6 @@ import (
"sort"
"time"

"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -150,7 +149,7 @@ func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1
log := ctrl.LoggerFrom(ctx)

// Compute the desired MachineSet.
updatedMS, err := r.computeDesiredMachineSet(deployment, ms, oldMSs, log)
updatedMS, err := r.computeDesiredMachineSet(ctx, deployment, ms, oldMSs)
if err != nil {
return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms))
}
@@ -172,7 +171,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl
log := ctrl.LoggerFrom(ctx)

// Compute the desired MachineSet.
newMS, err := r.computeDesiredMachineSet(deployment, nil, oldMSs, log)
newMS, err := r.computeDesiredMachineSet(ctx, deployment, nil, oldMSs)
if err != nil {
return nil, errors.Wrap(err, "failed to create new MachineSet")
}
@@ -213,7 +212,7 @@ func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *cl
// There are small differences in how we calculate the MachineSet depending on if it
// is a create or update. Example: for a new MachineSet we have to calculate a new name,
// while for an existing MachineSet we have to use the name of the existing MachineSet.
func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, log logr.Logger) (*clusterv1.MachineSet, error) {
func (r *Reconciler) computeDesiredMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) {
var name string
var uid types.UID
var finalizers []string
@@ -328,7 +327,7 @@ func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeplo
desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue)

// Set annotations and .spec.template.annotations.
if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(log, deployment, oldMSs, existingMS); err != nil {
if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(ctx, deployment, oldMSs, existingMS); err != nil {
return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations")
}
desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations)
Changes to another file (path not shown):
@@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/types"
apirand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -555,8 +554,6 @@ func TestComputeDesiredMachineSet(t *testing.T) {
},
}

log := klogr.New()

skeletonMSBasedOnMD := &clusterv1.MachineSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
@@ -577,7 +574,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
expectedMS := skeletonMSBasedOnMD.DeepCopy()

g := NewWithT(t)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, nil, log)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, nil)
g.Expect(err).ToNot(HaveOccurred())
assertMachineSet(g, actualMS, expectedMS)
})
@@ -590,7 +587,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
expectedMS.Spec.Replicas = ptr.To[int32](2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2

g := NewWithT(t)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, nil, []*clusterv1.MachineSet{oldMS})
g.Expect(err).ToNot(HaveOccurred())
assertMachineSet(g, actualMS, expectedMS)
})
@@ -627,7 +624,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID

g := NewWithT(t)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil)
g.Expect(err).ToNot(HaveOccurred())
assertMachineSet(g, actualMS, expectedMS)
})
@@ -668,7 +665,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID

g := NewWithT(t)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, []*clusterv1.MachineSet{oldMS}, log)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, []*clusterv1.MachineSet{oldMS})
g.Expect(err).ToNot(HaveOccurred())
assertMachineSet(g, actualMS, expectedMS)
})
@@ -714,7 +711,7 @@ func TestComputeDesiredMachineSet(t *testing.T) {
expectedMS.Spec.DeletePolicy = ""

g := NewWithT(t)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log)
actualMS, err := (&Reconciler{}).computeDesiredMachineSet(ctx, deployment, existingMS, nil)
g.Expect(err).ToNot(HaveOccurred())
assertMachineSet(g, actualMS, expectedMS)
})
13 changes: 8 additions & 5 deletions internal/controllers/machinedeployment/mdutil/util.go
@@ -18,6 +18,7 @@ limitations under the License.
package mdutil

import (
"context"
"fmt"
"sort"
"strconv"
@@ -33,6 +34,7 @@ import (
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/klog/v2"
"k8s.io/utils/integer"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/conversion"
@@ -114,13 +116,14 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str
}

// MaxRevision finds the highest revision in the machine sets.
func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 {
func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 {
log := ctrl.LoggerFrom(ctx)

max := int64(0)
for _, ms := range allMSs {
if v, err := Revision(ms); err != nil {
// Skip the machine sets when it failed to parse their revision information
logger.Error(err, "Couldn't parse revision for machine set, deployment controller will skip it when reconciling revisions",
"machineset", ms.Name)
log.Error(err, fmt.Sprintf("Couldn't parse revision for MachineSet %s, deployment controller will skip it when reconciling revisions", ms.Name))
} else if v > max {
max = v
}
@@ -185,7 +188,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger

// ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet.
// Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet.
func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) {
func ComputeMachineSetAnnotations(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) {
// Copy annotations from Deployment annotations while filtering out some annotations
// that we don't want to propagate.
annotations := map[string]string{}
@@ -199,7 +202,7 @@ func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.Machine
// The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number
// of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and
// newRevision becomes smaller than newMS's revision. We will never decrease a revision of a MachineSet.
maxOldRevision := MaxRevision(oldMSs, log)
maxOldRevision := MaxRevision(ctx, oldMSs)
newRevisionInt := maxOldRevision + 1
newRevision := strconv.FormatInt(newRevisionInt, 10)
if newMS != nil {
9 changes: 6 additions & 3 deletions internal/controllers/machinedeployment/mdutil/util_test.go
@@ -30,12 +30,16 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

var (
ctx = ctrl.SetupSignalHandler()
)

func newDControllerRef(md *clusterv1.MachineDeployment) *metav1.OwnerReference {
isController := true
return &metav1.OwnerReference{
@@ -939,11 +943,10 @@ func TestComputeMachineSetAnnotations(t *testing.T) {
},
}

log := klogr.New()
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
got, err := ComputeMachineSetAnnotations(log, tt.deployment, tt.oldMSs, tt.ms)
got, err := ComputeMachineSetAnnotations(ctx, tt.deployment, tt.oldMSs, tt.ms)
if tt.wantErr {
g.Expect(err).ShouldNot(HaveOccurred())
} else {
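Because `ComputeMachineSetAnnotations` now needs a context, the test file defines one package-wide via `ctrl.SetupSignalHandler()`, which returns a `context.Context` that is cancelled on SIGINT/SIGTERM and may only be called once per process. The equivalent in isolation:

```go
package example

import ctrl "sigs.k8s.io/controller-runtime"

// ctx is shared by every test in the package; the signal handler cancels
// it on SIGINT/SIGTERM so long-running tests shut down cleanly.
var ctx = ctrl.SetupSignalHandler()
```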
6 changes: 2 additions & 4 deletions internal/test/envtest/environment.go
@@ -42,7 +42,6 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
@@ -72,11 +71,10 @@ import (
)

func init() {
klog.InitFlags(nil)
logger := klogr.New()
logger := klog.Background()
// Use klog as the internal logger for this envtest environment.
log.SetLogger(logger)
// Additionally force all of the controllers to use the Ginkgo logger.
// Additionally force all controllers to use the Ginkgo logger.
ctrl.SetLogger(logger)
// Add logger for ginkgo.
klog.SetOutput(ginkgo.GinkgoWriter)
2 changes: 1 addition & 1 deletion main.go
@@ -231,7 +231,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
2 changes: 1 addition & 1 deletion test/extension/main.go
@@ -110,7 +110,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
4 changes: 1 addition & 3 deletions test/go.mod
@@ -32,7 +32,7 @@ require (
k8s.io/klog/v2 v2.110.1
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3
sigs.k8s.io/kind v0.20.0
sigs.k8s.io/yaml v1.4.0
)
@@ -151,12 +151,10 @@ require (
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.4.0 // indirect
k8s.io/cluster-bootstrap v0.29.0 // indirect
k8s.io/kms v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
6 changes: 2 additions & 4 deletions test/go.sum
@@ -498,16 +498,14 @@ k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=
k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d h1:+VI+1TP2lIsrMN3e50G/Iy9oz7lvPLtGnEvMihlcngg=
sigs.k8s.io/controller-runtime v0.16.1-0.20240109002307-bf3d274c4d3d/go.mod h1:a2fguE9/y45lR2Sc4Y/7jULuy8qi3Mnf1Gq5rGElNwk=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3 h1:X+B6Jz22aP0HMpW8ygVVcaqjLfPhmc2l8mNeEMEXmTs=
sigs.k8s.io/controller-runtime v0.16.1-0.20240110160329-8f8247fdc1c3/go.mod h1:2Bh+vGqK/uDH0rHQvlyzWWmVZw3JYetu+GOjd0pWKSQ=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8=
2 changes: 1 addition & 1 deletion test/infrastructure/docker/main.go
@@ -172,7 +172,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
2 changes: 1 addition & 1 deletion test/infrastructure/inmemory/main.go
@@ -166,7 +166,7 @@ func main() {
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
// Set log level 2 as default.
if err := pflag.CommandLine.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level: %v")
setupLog.Error(err, "failed to set default log level")
os.Exit(1)
}
pflag.Parse()
26 changes: 17 additions & 9 deletions util/failuredomains/failure_domains.go
@@ -18,10 +18,12 @@ limitations under the License.
package failuredomains

import (
"context"
"fmt"
"sort"

"k8s.io/klog/v2/klogr"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/collections"
@@ -50,9 +52,9 @@ func (f failureDomainAggregations) Swap(i, j int) {
}

// PickMost returns a failure domain that is in machines and has most of the group of machines on.
func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string {
func PickMost(ctx context.Context, failureDomains clusterv1.FailureDomains, groupMachines, machines collections.Machines) *string {
// orderDescending sorts failure domains according to all machines belonging to the group.
fds := orderDescending(failureDomains, groupMachines)
fds := orderDescending(ctx, failureDomains, groupMachines)
for _, fd := range fds {
for _, m := range machines {
if m.Spec.FailureDomain == nil {
@@ -67,8 +69,8 @@ func PickMost(failureDomains clusterv1.FailureDomains, groupMachines, machines c
}

// orderDescending returns the sorted failure domains in decreasing order.
func orderDescending(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations {
aggregations := pick(failureDomains, machines)
func orderDescending(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations {
aggregations := pick(ctx, failureDomains, machines)
if len(aggregations) == 0 {
return nil
}
@@ -77,16 +79,18 @@ func orderDescending(failureDomains clusterv1.FailureDomains, machines collectio
}

// PickFewest returns the failure domain with the fewest number of machines.
func PickFewest(failureDomains clusterv1.FailureDomains, machines collections.Machines) *string {
aggregations := pick(failureDomains, machines)
func PickFewest(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) *string {
aggregations := pick(ctx, failureDomains, machines)
if len(aggregations) == 0 {
return nil
}
sort.Sort(aggregations)
return ptr.To(aggregations[0].id)
}

func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations {
func pick(ctx context.Context, failureDomains clusterv1.FailureDomains, machines collections.Machines) failureDomainAggregations {
log := ctrl.LoggerFrom(ctx)

if len(failureDomains) == 0 {
return failureDomainAggregations{}
}
@@ -105,7 +109,11 @@ func pick(failureDomains clusterv1.FailureDomains, machines collections.Machines
}
id := *m.Spec.FailureDomain
if _, ok := failureDomains[id]; !ok {
klogr.New().Info("unknown failure domain", "machine-name", m.GetName(), "failure-domain-id", id, "known-failure-domains", failureDomains)
var knownFailureDomains []string
for failureDomainID := range failureDomains {
knownFailureDomains = append(knownFailureDomains, failureDomainID)
}
log.Info(fmt.Sprintf("Unknown failure domain %q for Machine %s (known failure domains: %v)", id, m.GetName(), knownFailureDomains))
continue
}
counters[id]++
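Callers thread the context through so `pick` can log unknown failure domains via the request-scoped logger; apart from the new first argument, usage is unchanged. A hedged sketch (function and variable names illustrative):

```go
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/collections"
	"sigs.k8s.io/cluster-api/util/failuredomains"
)

func chooseDomains(ctx context.Context, cluster *clusterv1.Cluster, machines collections.Machines) (fewest, most *string) {
	fds := cluster.Status.FailureDomains.FilterControlPlane()
	fewest = failuredomains.PickFewest(ctx, fds, machines)       // scale-up target
	most = failuredomains.PickMost(ctx, fds, machines, machines) // scale-down target
	return fewest, most
}
```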
9 changes: 7 additions & 2 deletions util/failuredomains/failure_domains_test.go
@@ -21,11 +21,16 @@ import (

. "github.com/onsi/gomega"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/collections"
)

var (
ctx = ctrl.SetupSignalHandler()
)

func TestNewFailureDomainPicker(t *testing.T) {
a := ptr.To("us-west-1a")
b := ptr.To("us-west-1b")
@@ -87,7 +92,7 @@ func TestNewFailureDomainPicker(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)

fd := PickFewest(tc.fds, tc.machines)
fd := PickFewest(ctx, tc.fds, tc.machines)
if tc.expected == nil {
g.Expect(fd).To(BeNil())
} else {
@@ -167,7 +172,7 @@ func TestNewFailureDomainPickMost(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)

fd := PickMost(tc.fds, tc.machines, tc.machines)
fd := PickMost(ctx, tc.fds, tc.machines, tc.machines)
if tc.expected == nil {
g.Expect(fd).To(BeNil())
} else {
4 changes: 2 additions & 2 deletions util/patch/patch.go
@@ -65,7 +65,7 @@ func NewHelper(obj client.Object, crClient client.Client) (*Helper, error) {
}

// Convert the object to unstructured to compare against our before copy.
unstructuredObj, err := toUnstructured(obj)
unstructuredObj, err := toUnstructured(obj, gvk)
if err != nil {
return nil, errors.Wrapf(err, "failed to create patch helper for %s %s: failed to convert object to Unstructured", gvk.Kind, klog.KObj(obj))
}
@@ -105,7 +105,7 @@ func (h *Helper) Patch(ctx context.Context, obj client.Object, opts ...Option) e
}

// Convert the object to unstructured to compare against our before copy.
h.after, err = toUnstructured(obj)
h.after, err = toUnstructured(obj, gvk)
if err != nil {
return errors.Wrapf(err, "failed to patch %s %s: failed to convert object to Unstructured", h.gvk.Kind, klog.KObj(h.beforeObject))
}
10 changes: 8 additions & 2 deletions util/patch/utils.go
@@ -21,6 +21,7 @@ import (

"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type patchType string
@@ -47,7 +48,9 @@ func unstructuredHasStatus(u *unstructured.Unstructured) bool {
return ok
}

func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) {
// toUnstructured converts an object to Unstructured.
// We have to pass in a gvk as we can't rely on GVK being set in a runtime.Object.
func toUnstructured(obj runtime.Object, gvk schema.GroupVersionKind) (*unstructured.Unstructured, error) {
// If the incoming object is already unstructured, perform a deep copy first
// otherwise DefaultUnstructuredConverter ends up returning the inner map without
// making a copy.
@@ -58,7 +61,10 @@ func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) {
if err != nil {
return nil, err
}
return &unstructured.Unstructured{Object: rawMap}, nil
u := &unstructured.Unstructured{Object: rawMap}
u.SetGroupVersionKind(gvk)

return u, nil
}

// unsafeUnstructuredCopy returns a shallow copy of the unstructured object given as input.
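Callers now have to supply the GVK because it cannot be recovered from a typed `runtime.Object`. An illustrative wrapper showing how a caller can resolve it from the scheme (the use of `apiutil.GVKForObject` here is an assumption, not shown in the diff):

```go
package patchexample

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// toUnstructuredFor resolves the GVK from the scheme and stamps it onto
// the converted object, since ToUnstructured alone leaves it empty.
func toUnstructuredFor(obj runtime.Object, scheme *runtime.Scheme) (*unstructured.Unstructured, error) {
	gvk, err := apiutil.GVKForObject(obj, scheme)
	if err != nil {
		return nil, err
	}
	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
	if err != nil {
		return nil, err
	}
	u := &unstructured.Unstructured{Object: raw}
	u.SetGroupVersionKind(gvk)
	return u, nil
}
```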
20 changes: 18 additions & 2 deletions util/patch/utils_test.go
@@ -22,6 +22,7 @@ import (
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)
@@ -39,10 +40,17 @@ func TestToUnstructured(t *testing.T) {
Paused: true,
},
}
newObj, err := toUnstructured(obj)
gvk := schema.GroupVersionKind{
Group: clusterv1.GroupVersion.Group,
Kind: "Cluster",
Version: clusterv1.GroupVersion.Version,
}
newObj, err := toUnstructured(obj, gvk)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(newObj.GetName()).To(Equal(obj.Name))
g.Expect(newObj.GetNamespace()).To(Equal(obj.Namespace))
g.Expect(newObj.GetAPIVersion()).To(Equal(clusterv1.GroupVersion.String()))
g.Expect(newObj.GetKind()).To(Equal("Cluster"))

// Change a spec field and validate that it stays the same in the incoming object.
g.Expect(unstructured.SetNestedField(newObj.Object, false, "spec", "paused")).To(Succeed())
@@ -55,6 +63,7 @@ func TestToUnstructured(t *testing.T) {
obj := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "test.x.y.z/v1",
"kind": "TestKind",
"metadata": map[string]interface{}{
"name": "test-1",
"namespace": "namespace-1",
@@ -64,11 +73,18 @@ func TestToUnstructured(t *testing.T) {
},
},
}
gvk := schema.GroupVersionKind{
Group: "test.x.y.z",
Kind: "TestKind",
Version: "v1",
}

newObj, err := toUnstructured(obj)
newObj, err := toUnstructured(obj, gvk)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(newObj.GetName()).To(Equal(obj.GetName()))
g.Expect(newObj.GetNamespace()).To(Equal(obj.GetNamespace()))
g.Expect(newObj.GetAPIVersion()).To(Equal("test.x.y.z/v1"))
g.Expect(newObj.GetKind()).To(Equal("TestKind"))

// Validate that the maps point to different addresses.
g.Expect(obj.Object).ToNot(BeIdenticalTo(newObj.Object))
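For context, the public entry points whose internals changed above are used in the standard Cluster API pattern (sketch; error handling abbreviated):

```go
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func pauseCluster(ctx context.Context, c client.Client, cluster *clusterv1.Cluster) error {
	// NewHelper snapshots the object; the Unstructured copy now carries
	// its GVK thanks to the change above.
	helper, err := patch.NewHelper(cluster, c)
	if err != nil {
		return err
	}
	cluster.Spec.Paused = true
	// Patch diffs the snapshot against the mutated object and sends the result.
	return helper.Patch(ctx, cluster)
}
```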
