diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go
index 5ade870c39d1..3d78a6bac63f 100644
--- a/exp/internal/controllers/machinepool_controller.go
+++ b/exp/internal/controllers/machinepool_controller.go
@@ -80,6 +80,21 @@ type MachinePoolReconciler struct {
 	externalTracker external.ObjectTracker
 }
 
+// scope holds the different objects that are read and used during the reconcile.
+type scope struct {
+	// cluster is the Cluster object the MachinePool belongs to.
+	// It is set at the beginning of the reconcile function.
+	cluster *clusterv1.Cluster
+
+	// machinePool is the MachinePool object. It is set at the beginning
+	// of the reconcile function.
+	machinePool *expv1.MachinePool
+
+	// nodeRefMap is a map of providerIDs to Nodes that are associated with the Cluster.
+	// It is set after reconcileInfrastructure is called.
+	nodeRefMap map[string]*corev1.Node
+}
+
 func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	clusterToMachinePools, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &expv1.MachinePoolList{}, mgr.GetScheme())
 	if err != nil {
@@ -210,7 +225,11 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Handle normal reconciliation loop.
-	res, err := r.reconcile(ctx, cluster, mp)
+	scope := &scope{
+		cluster:     cluster,
+		machinePool: mp,
+	}
+	res, err := r.reconcile(ctx, scope)
 	// Requeue if the reconcile failed because the ClusterCacheTracker was locked for
 	// the current cluster because of concurrent access.
 	if errors.Is(err, remote.ErrClusterLocked) {
@@ -220,8 +239,10 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	return res, err
 }
 
-func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcile(ctx context.Context, s *scope) (ctrl.Result, error) {
 	// Ensure the MachinePool is owned by the Cluster it belongs to.
+	cluster := s.cluster
+	mp := s.machinePool
 	mp.SetOwnerReferences(util.EnsureOwnerRef(mp.GetOwnerReferences(), metav1.OwnerReference{
 		APIVersion: clusterv1.GroupVersion.String(),
 		Kind:       "Cluster",
@@ -229,7 +250,7 @@ func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv
 		UID:        cluster.UID,
 	}))
 
-	phases := []func(context.Context, *clusterv1.Cluster, *expv1.MachinePool) (ctrl.Result, error){
+	phases := []func(context.Context, *scope) (ctrl.Result, error){
 		r.reconcileBootstrap,
 		r.reconcileInfrastructure,
 		r.reconcileNodeRefs,
@@ -239,7 +260,7 @@ func (r *MachinePoolReconciler) reconcile(ctx context.Context, cluster *clusterv
 	errs := []error{}
 	for _, phase := range phases {
 		// Call the inner reconciliation methods.
-		phaseResult, err := phase(ctx, cluster, mp)
+		phaseResult, err := phase(ctx, s)
 		if err != nil {
 			errs = append(errs, err)
 		}
diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go
index a2afac9557ba..e6c27b42ce93 100644
--- a/exp/internal/controllers/machinepool_controller_noderef.go
+++ b/exp/internal/controllers/machinepool_controller_noderef.go
@@ -46,8 +46,10 @@ type getNodeReferencesResult struct {
 	ready int
 }
 
-func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
+func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
+	cluster := s.cluster
+	mp := s.machinePool
 
 	// Create a watch on the nodes in the Cluster.
 	if err := r.watchClusterNodes(ctx, cluster); err != nil {
@@ -81,8 +83,13 @@
 		return ctrl.Result{}, err
 	}
 
+	// Return early if nodeRefMap is nil.
+	if s.nodeRefMap == nil {
+		return ctrl.Result{}, errors.New("failed to get node references")
+	}
+
 	// Get the Node references.
-	nodeRefsResult, err := r.getNodeReferences(ctx, clusterClient, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds)
+	nodeRefsResult, err := r.getNodeReferences(ctx, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds, s.nodeRefMap)
 	if err != nil {
 		if err == errNoAvailableNodes {
 			log.Info("Cannot assign NodeRefs to MachinePool, no matching Nodes")
@@ -153,30 +160,10 @@ func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client
 	return nil
 }
 
-func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client.Client, providerIDList []string, minReadySeconds *int32) (getNodeReferencesResult, error) {
+func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, providerIDList []string, minReadySeconds *int32, nodeRefsMap map[string]*corev1.Node) (getNodeReferencesResult, error) {
 	log := ctrl.LoggerFrom(ctx, "providerIDList", len(providerIDList))
 
 	var ready, available int
-	nodeRefsMap := make(map[string]corev1.Node)
-	nodeList := corev1.NodeList{}
-	for {
-		if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
-			return getNodeReferencesResult{}, errors.Wrapf(err, "failed to List nodes")
-		}
-
-		for _, node := range nodeList.Items {
-			if node.Spec.ProviderID == "" {
-				log.V(2).Info("No ProviderID detected, skipping", "providerID", node.Spec.ProviderID)
-				continue
-			}
-
-			nodeRefsMap[node.Spec.ProviderID] = node
-		}
-
-		if nodeList.Continue == "" {
-			break
-		}
-	}
 
 	var nodeRefs []corev1.ObjectReference
 	for _, providerID := range providerIDList {
@@ -185,9 +172,9 @@ func (r *MachinePoolReconciler) getNodeReferences(ctx context.Context, c client.
continue } if node, ok := nodeRefsMap[providerID]; ok { - if noderefutil.IsNodeReady(&node) { + if noderefutil.IsNodeReady(node) { ready++ - if noderefutil.IsNodeAvailable(&node, *minReadySeconds, metav1.Now()) { + if noderefutil.IsNodeAvailable(node, *minReadySeconds, metav1.Now()) { available++ } } diff --git a/exp/internal/controllers/machinepool_controller_noderef_test.go b/exp/internal/controllers/machinepool_controller_noderef_test.go index b573cb47ba38..aa9c818b733b 100644 --- a/exp/internal/controllers/machinepool_controller_noderef_test.go +++ b/exp/internal/controllers/machinepool_controller_noderef_test.go @@ -36,9 +36,8 @@ func TestMachinePoolGetNodeReference(t *testing.T) { Client: fake.NewClientBuilder().Build(), recorder: record.NewFakeRecorder(32), } - - nodeList := []client.Object{ - &corev1.Node{ + nodeRefsMap := map[string]*corev1.Node{ + "aws://us-east-1/id-node-1": { ObjectMeta: metav1.ObjectMeta{ Name: "node-1", }, @@ -54,7 +53,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, }, }, - &corev1.Node{ + "aws://us-west-2/id-node-2": { ObjectMeta: metav1.ObjectMeta{ Name: "node-2", }, @@ -70,7 +69,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, }, }, - &corev1.Node{ + "aws://us-west-2/id-node-3": { ObjectMeta: metav1.ObjectMeta{ Name: "node-3", }, @@ -78,7 +77,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { ProviderID: "aws://us-west-2/id-node-3", }, }, - &corev1.Node{ + "gce://us-central1/gce-id-node-2": { ObjectMeta: metav1.ObjectMeta{ Name: "gce-node-2", }, @@ -94,7 +93,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, }, }, - &corev1.Node{ + "azure://westus2/id-node-4": { ObjectMeta: metav1.ObjectMeta{ Name: "azure-node-4", }, @@ -110,7 +109,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, }, }, - &corev1.Node{ + "azure://westus2/id-nodepool1/0": { ObjectMeta: metav1.ObjectMeta{ Name: "azure-nodepool1-0", }, @@ -126,7 +125,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, }, }, - &corev1.Node{ + "azure://westus2/id-nodepool2/0": { ObjectMeta: metav1.ObjectMeta{ Name: "azure-nodepool2-0", }, @@ -144,8 +143,6 @@ func TestMachinePoolGetNodeReference(t *testing.T) { }, } - client := fake.NewClientBuilder().WithObjects(nodeList...).Build() - testCases := []struct { name string providerIDList []string @@ -284,7 +281,7 @@ func TestMachinePoolGetNodeReference(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewWithT(t) - result, err := r.getNodeReferences(ctx, client, test.providerIDList, ptr.To(test.minReadySeconds)) + result, err := r.getNodeReferences(ctx, test.providerIDList, ptr.To(test.minReadySeconds), nodeRefsMap) if test.err == nil { g.Expect(err).ToNot(HaveOccurred()) } else { diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index 956627036a09..e46b58f8818c 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -177,9 +177,10 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster * } // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool. 
-func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) { +func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - + cluster := s.cluster + m := s.machinePool // Call generic external reconciler if we have an external reference. var bootstrapConfig *unstructured.Unstructured if m.Spec.Template.Spec.Bootstrap.ConfigRef != nil { @@ -241,9 +242,10 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster } // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool. -func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) { +func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - + cluster := s.cluster + mp := s.machinePool // Call generic external reconciler. infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef) if err != nil { @@ -283,8 +285,19 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), ) - if err := r.reconcileMachines(ctx, mp, infraConfig); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Machines for MachinePool %s", klog.KObj(mp)) + clusterClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) + if err != nil { + return ctrl.Result{}, err + } + + var getNodeRefsErr error + // Get the nodeRefsMap from the cluster. + s.nodeRefMap, getNodeRefsErr = r.getNodeRefMap(ctx, clusterClient) + + err = r.reconcileMachines(ctx, s, infraConfig) + + if err != nil || getNodeRefsErr != nil { + return ctrl.Result{}, kerrors.NewAggregate([]error{errors.Wrapf(err, "failed to reconcile Machines for MachinePool %s", klog.KObj(mp)), errors.Wrapf(getNodeRefsErr, "failed to get nodeRefs for MachinePool %s", klog.KObj(mp))}) } if !mp.Status.InfrastructureReady { @@ -328,8 +341,9 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu // infrastructure is created accordingly. // Note: When supported by the cloud provider implementation of the MachinePool, machines will provide a means to interact // with the corresponding infrastructure (e.g. delete a specific machine in case MachineHealthCheck detects it is unhealthy). 
-func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1.MachinePool, infraMachinePool *unstructured.Unstructured) error { +func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, s *scope, infraMachinePool *unstructured.Unstructured) error { log := ctrl.LoggerFrom(ctx) + mp := s.machinePool var infraMachineKind string if err := util.UnstructuredUnmarshalField(infraMachinePool, &infraMachineKind, "status", "infrastructureMachineKind"); err != nil { @@ -376,7 +390,7 @@ func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1 return err } - if err := r.createOrUpdateMachines(ctx, mp, machineList.Items, infraMachineList.Items); err != nil { + if err := r.createOrUpdateMachines(ctx, s, machineList.Items, infraMachineList.Items); err != nil { return errors.Wrapf(err, "failed to create machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace) } @@ -384,7 +398,7 @@ func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1 } // createOrUpdateMachines creates a MachinePool Machine for each infraMachine if it doesn't already exist and sets the owner reference and infraRef. -func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp *expv1.MachinePool, machines []clusterv1.Machine, infraMachines []unstructured.Unstructured) error { +func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, s *scope, machines []clusterv1.Machine, infraMachines []unstructured.Unstructured) error { log := ctrl.LoggerFrom(ctx) // Construct a set of names of infraMachines that already have a Machine. @@ -398,11 +412,22 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp * var errs []error for i := range infraMachines { infraMachine := &infraMachines[i] + + // Get Spec.ProviderID from the infraMachine. + var providerID string + var node *corev1.Node + if err := util.UnstructuredUnmarshalField(infraMachine, &providerID, "spec", "providerID"); err != nil { + log.V(4).Info("could not retrieve providerID for infraMachine", "infraMachine", klog.KObj(infraMachine)) + } else { + // Retrieve the Node for the infraMachine from the nodeRefsMap using the providerID. + node = s.nodeRefMap[providerID] + } + // If infraMachine already has a Machine, update it if needed. if existingMachine, ok := infraMachineToMachine[infraMachine.GetName()]; ok { log.V(2).Info("Patching existing Machine for infraMachine", infraMachine.GetKind(), klog.KObj(infraMachine), "Machine", klog.KObj(&existingMachine)) - desiredMachine := computeDesiredMachine(mp, infraMachine, &existingMachine) + desiredMachine := r.computeDesiredMachine(s.machinePool, infraMachine, &existingMachine, node) if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: &existingMachine}); err != nil { log.Error(err, "failed to update Machine", "Machine", klog.KObj(desiredMachine)) errs = append(errs, errors.Wrapf(err, "failed to update Machine %q", klog.KObj(desiredMachine))) @@ -410,7 +435,7 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp * } else { // Otherwise create a new Machine for the infraMachine. 
log.Info("Creating new Machine for infraMachine", "infraMachine", klog.KObj(infraMachine)) - machine := computeDesiredMachine(mp, infraMachine, nil) + machine := r.computeDesiredMachine(s.machinePool, infraMachine, nil, node) if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, machine); err != nil { errs = append(errs, errors.Wrapf(err, "failed to create new Machine for infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace())) @@ -432,7 +457,7 @@ func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp * // computeDesiredMachine constructs the desired Machine for an infraMachine. // If the Machine exists, it ensures the Machine always owned by the MachinePool. -func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, existingMachine *clusterv1.Machine) *clusterv1.Machine { +func (r *MachinePoolReconciler) computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, existingMachine *clusterv1.Machine, existingNode *corev1.Node) *clusterv1.Machine { infraRef := corev1.ObjectReference{ APIVersion: infraMachine.GetAPIVersion(), Kind: infraMachine.GetKind(), @@ -440,6 +465,11 @@ func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Uns Namespace: infraMachine.GetNamespace(), } + var kubernetesVersion *string + if existingNode != nil && existingNode.Status.NodeInfo.KubeletVersion != "" { + kubernetesVersion = &existingNode.Status.NodeInfo.KubeletVersion + } + machine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: infraMachine.GetName(), @@ -452,6 +482,7 @@ func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Uns Spec: clusterv1.MachineSpec{ ClusterName: mp.Spec.ClusterName, InfrastructureRef: infraRef, + Version: kubernetesVersion, }, } @@ -537,3 +568,29 @@ func (r *MachinePoolReconciler) waitForMachineCreation(ctx context.Context, mach return nil } + +func (r *MachinePoolReconciler) getNodeRefMap(ctx context.Context, c client.Client) (map[string]*corev1.Node, error) { + log := ctrl.LoggerFrom(ctx) + nodeRefsMap := make(map[string]*corev1.Node) + nodeList := corev1.NodeList{} + for { + if err := c.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil { + return nil, err + } + + for _, node := range nodeList.Items { + if node.Spec.ProviderID == "" { + log.V(2).Info("No ProviderID detected, skipping", "providerID", node.Spec.ProviderID) + continue + } + + nodeRefsMap[node.Spec.ProviderID] = &node + } + + if nodeList.Continue == "" { + break + } + } + + return nodeRefsMap, nil +} diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go index b416c818c874..a995c2a95838 100644 --- a/exp/internal/controllers/machinepool_controller_phases_test.go +++ b/exp/internal/controllers/machinepool_controller_phases_test.go @@ -126,11 +126,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, 
builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -155,11 +162,19 @@ func TestReconcileMachinePoolPhases(t *testing.T) { bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() + r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -182,11 +197,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") g.Expect(err).ToNot(HaveOccurred()) + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -231,7 +253,12 @@ func TestReconcileMachinePoolPhases(t *testing.T) { Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -288,7 +315,12 @@ func TestReconcileMachinePoolPhases(t *testing.T) { Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, 
Namespace: defaultCluster.Namespace}), } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -317,11 +349,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { // Set NodeRef. machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -363,7 +402,12 @@ func TestReconcileMachinePoolPhases(t *testing.T) { Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -418,7 +462,12 @@ func TestReconcileMachinePoolPhases(t *testing.T) { Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -473,11 +522,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.SetDeletionTimestamp(&deletionTimestamp) machinepool.Finalizers = []string{expv1.MachinePoolFinalizer} + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinepool, } - res, err := r.reconcile(ctx, defaultCluster, machinepool) + res, err := r.reconcile(ctx, scope) 
g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -530,11 +586,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinePool.Status.ReadyReplicas = 1 machinePool.Status.Replicas = 1 + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinePool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinePool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinePool, } - res, err := r.reconcile(ctx, defaultCluster, machinePool) + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -554,7 +617,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name = newBootstrapConfig.GetName() // Reconcile again. The new bootstrap config should be used. - res, err = r.reconcile(ctx, defaultCluster, machinePool) + res, err = r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -609,11 +672,18 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinePool.Status.ReadyReplicas = 1 machinePool.Status.Replicas = 1 + fakeClient := fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinePool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinePool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: machinePool, } - res, err := r.reconcile(ctx, defaultCluster, machinePool) + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -635,7 +705,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name = newBootstrapConfig.GetName() // Reconcile again. 
The new bootstrap config should be used - res, err = r.reconcile(ctx, defaultCluster, machinePool) + res, err = r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) // Controller should wait until bootstrap provider reports ready bootstrap config @@ -924,7 +994,12 @@ func TestReconcileMachinePoolBootstrap(t *testing.T) { Client: fake.NewClientBuilder().WithObjects(tc.machinepool, bootstrapConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), } - res, err := r.reconcileBootstrap(ctx, defaultCluster, tc.machinepool) + scope := &scope{ + cluster: defaultCluster, + machinePool: tc.machinepool, + } + + res, err := r.reconcileBootstrap(ctx, scope) g.Expect(res).To(BeComparableTo(tc.expectResult)) if tc.expectError { g.Expect(err).To(HaveOccurred()) @@ -1208,11 +1283,18 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { } infraConfig := &unstructured.Unstructured{Object: tc.infraConfig} + fakeClient := fake.NewClientBuilder().WithObjects(tc.machinepool, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(tc.machinepool, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: defaultCluster.Name, Namespace: defaultCluster.Namespace}), + } + + scope := &scope{ + cluster: defaultCluster, + machinePool: tc.machinepool, } - res, err := r.reconcileInfrastructure(ctx, defaultCluster, tc.machinepool) + res, err := r.reconcileInfrastructure(ctx, scope) if tc.expectRequeueAfter { g.Expect(res.RequeueAfter).To(BeNumerically(">=", 0)) } else { @@ -1289,8 +1371,10 @@ func TestReconcileMachinePoolMachines(t *testing.T) { Client: env, ssaCache: ssa.NewCache(), } - - err = r.reconcileMachines(ctx, &machinePool, &unstructured.Unstructured{Object: infraConfig}) + scope := &scope{ + machinePool: &machinePool, + } + err = r.reconcileMachines(ctx, scope, &unstructured.Unstructured{Object: infraConfig}) r.reconcilePhase(&machinePool) g.Expect(err).ToNot(HaveOccurred()) @@ -1351,7 +1435,11 @@ func TestReconcileMachinePoolMachines(t *testing.T) { ssaCache: ssa.NewCache(), } - err = r.reconcileMachines(ctx, &machinePool, &unstructured.Unstructured{Object: infraConfig}) + scope := &scope{ + machinePool: &machinePool, + } + + err = r.reconcileMachines(ctx, scope, &unstructured.Unstructured{Object: infraConfig}) r.reconcilePhase(&machinePool) g.Expect(err).ToNot(HaveOccurred()) @@ -1406,7 +1494,11 @@ func TestReconcileMachinePoolMachines(t *testing.T) { ssaCache: ssa.NewCache(), } - err = r.reconcileMachines(ctx, &machinePool, &unstructured.Unstructured{Object: infraConfig}) + scope := &scope{ + machinePool: &machinePool, + } + + err = r.reconcileMachines(ctx, scope, &unstructured.Unstructured{Object: infraConfig}) r.reconcilePhase(&machinePool) g.Expect(err).ToNot(HaveOccurred()) @@ -1701,7 +1793,12 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { recorder: record.NewFakeRecorder(32), } - res, err := r.reconcile(ctx, testCluster, machinepool) + scope := &scope{ + cluster: testCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -1758,7 +1855,12 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { recorder: 
record.NewFakeRecorder(32), } - res, err := r.reconcile(ctx, testCluster, machinepool) + scope := &scope{ + cluster: testCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -1791,12 +1893,19 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { err := unstructured.SetNestedField(infraConfig.Object, int64(0), "status", "replicas") g.Expect(err).ToNot(HaveOccurred()) + fakeClient := fake.NewClientBuilder().WithObjects(testCluster, kubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(testCluster, kubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, recorder: record.NewFakeRecorder(32), + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), } - res, err := r.reconcile(ctx, testCluster, machinepool) + scope := &scope{ + cluster: testCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -1825,12 +1934,19 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { err := unstructured.SetNestedField(infraConfig.Object, int64(0), "status", "replicas") g.Expect(err).ToNot(HaveOccurred()) + fakeClient := fake.NewClientBuilder().WithObjects(testCluster, kubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithObjects(testCluster, kubeconfigSecret, machinepool, bootstrapConfig, infraConfig, builder.TestBootstrapConfigCRD, builder.TestInfrastructureMachineTemplateCRD).Build(), + Client: fakeClient, recorder: record.NewFakeRecorder(32), + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), fakeClient, fakeClient, fakeClient.Scheme(), client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), } - res, err := r.reconcile(ctx, testCluster, machinepool) + scope := &scope{ + cluster: testCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -1888,7 +2004,12 @@ func TestReconcileMachinePoolScaleToFromZero(t *testing.T) { recorder: record.NewFakeRecorder(32), } - res, err := r.reconcile(ctx, testCluster, machinepool) + scope := &scope{ + cluster: testCluster, + machinePool: machinepool, + } + + res, err := r.reconcile(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) g.Expect(res.Requeue).To(BeFalse()) @@ -1986,3 +2107,139 @@ func getMachinePool(replicas int, mpName, clusterName, nsName string) expv1.Mach }, } } + +func TestMachinePoolReconciler_getNodeRefMap(t *testing.T) { + testCases := []struct { + name string + nodeList []client.Object + expected map[string]*corev1.Node + err error + }{ + { + name: "all valid provider ids", + nodeList: []client.Object{ + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + ProviderID: "aws://us-east-1/id-node-1", + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gce-node-2", + }, + Spec: 
corev1.NodeSpec{ + ProviderID: "gce://us-central1/gce-id-node-2", + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azure-node-4", + }, + Spec: corev1.NodeSpec{ + ProviderID: "azure://westus2/id-node-4", + }, + }, + }, + expected: map[string]*corev1.Node{ + "aws://us-east-1/id-node-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + ProviderID: "aws://us-east-1/id-node-1", + }, + }, + "gce://us-central1/gce-id-node-2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "gce-node-2", + }, + Spec: corev1.NodeSpec{ + ProviderID: "gce://us-central1/gce-id-node-2", + }, + }, + "azure://westus2/id-node-4": { + ObjectMeta: metav1.ObjectMeta{ + Name: "azure-node-4", + }, + Spec: corev1.NodeSpec{ + ProviderID: "azure://westus2/id-node-4", + }, + }, + }, + }, + { + name: "missing provider id", + nodeList: []client.Object{ + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + ProviderID: "aws://us-east-1/id-node-1", + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gce-node-2", + }, + Spec: corev1.NodeSpec{ + ProviderID: "gce://us-central1/gce-id-node-2", + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azure-node-4", + }, + }, + }, + expected: map[string]*corev1.Node{ + "aws://us-east-1/id-node-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + ProviderID: "aws://us-east-1/id-node-1", + }, + }, + "gce://us-central1/gce-id-node-2": { + ObjectMeta: metav1.ObjectMeta{ + Name: "gce-node-2", + }, + Spec: corev1.NodeSpec{ + ProviderID: "gce://us-central1/gce-id-node-2", + }, + }, + }, + }, + { + name: "empty node list", + nodeList: []client.Object{}, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + r := &MachinePoolReconciler{ + Client: fake.NewClientBuilder().Build(), + recorder: record.NewFakeRecorder(32), + } + client := fake.NewClientBuilder().WithObjects(tt.nodeList...).Build() + result, err := r.getNodeRefMap(ctx, client) + if tt.err == nil { + g.Expect(err).ToNot(HaveOccurred()) + } else { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(Equal(tt.err), "Expected error %v, got %v", tt.err, err) + } + g.Expect(result).To(HaveLen(len(tt.expected)), "Expected NodeRef count to be %v, got %v", len(result), len(tt.expected)) + for providerID, node := range result { + g.Expect(node).ToNot(BeNil()) + g.Expect(node.Spec).Should(Equal(tt.expected[providerID].Spec)) + g.Expect(node.GetObjectMeta().GetName()).Should(Equal(tt.expected[providerID].GetObjectMeta().GetName())) + } + }) + } +} diff --git a/exp/internal/controllers/machinepool_controller_test.go b/exp/internal/controllers/machinepool_controller_test.go index 731ad55b0200..56d61f88e118 100644 --- a/exp/internal/controllers/machinepool_controller_test.go +++ b/exp/internal/controllers/machinepool_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "testing" + "github.com/go-logr/logr" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,9 +29,11 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/util" @@ -426,6 +429,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) { r := &MachinePoolReconciler{ Client: clientFake, APIReader: clientFake, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, clientFake, clientFake.Scheme(), client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), } result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&tc.machinePool)}) @@ -864,6 +868,7 @@ func TestMachinePoolConditions(t *testing.T) { r := &MachinePoolReconciler{ Client: clientFake, APIReader: clientFake, + Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, clientFake, clientFake.Scheme(), client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), } _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(machinePool)})