diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go
index 790f2280e688..8d34d4d148e0 100644
--- a/exp/internal/controllers/machinepool_controller_noderef.go
+++ b/exp/internal/controllers/machinepool_controller_noderef.go
@@ -135,7 +135,7 @@ func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client
 	for _, nodeRef := range nodeRefs {
 		node := &corev1.Node{}
 		if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil {
-			log.V(2).Error(err, "Failed to get Node, skipping", "Node", klog.KRef("", nodeRef.Name))
+			log.Error(err, "Failed to get Node, skipping", "Node", klog.KRef("", nodeRef.Name))
 			continue
 		}
 
@@ -200,7 +200,7 @@ func (r *MachinePoolReconciler) patchNodes(ctx context.Context, c client.Client,
 	for _, nodeRef := range references {
 		node := &corev1.Node{}
 		if err := c.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node); err != nil {
-			log.V(2).Error(err, "Failed to get Node, skipping setting annotations", "Node", klog.KRef("", nodeRef.Name))
+			log.Error(err, "Failed to get Node, skipping setting annotations", "Node", klog.KRef("", nodeRef.Name))
 			continue
 		}
 		patchHelper, err := patch.NewHelper(node, c)
@@ -219,7 +219,7 @@ func (r *MachinePoolReconciler) patchNodes(ctx context.Context, c client.Client,
 		// Patch the node if needed.
 		if hasAnnotationChanges || hasTaintChanges {
 			if err := patchHelper.Patch(ctx, node); err != nil {
-				log.V(2).Error(err, "Failed patch Node to set annotations and drop taints", "Node", klog.KObj(node))
+				log.Error(err, "Failed patch Node to set annotations and drop taints", "Node", klog.KObj(node))
 				return err
 			}
 		}
diff --git a/internal/controllers/machine/drain/drain.go b/internal/controllers/machine/drain/drain.go
index b2b8016cfdef..f79ac46da093 100644
--- a/internal/controllers/machine/drain/drain.go
+++ b/internal/controllers/machine/drain/drain.go
@@ -215,7 +215,7 @@ evictionLoop:
 		case <-ctx.Done():
 			// Skip eviction if the eviction timeout is reached.
 			err := fmt.Errorf("eviction timeout of %s reached, eviction will be retried", evictionTimeout)
-			log.V(4).Error(err, "Error when evicting Pod")
+			log.V(4).Info("Error when evicting Pod", "err", err)
 			res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod)
 			continue evictionLoop
 		default:
@@ -250,17 +250,17 @@ evictionLoop:
 				err = errors.New(errorMessage)
 			}
 
-			log.V(4).Error(err, "Error when evicting Pod")
+			log.V(4).Info("Error when evicting Pod", "err", err)
 			res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod)
 		case apierrors.IsForbidden(err) && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause):
 			// Creating an eviction resource in a terminating namespace will throw a forbidden error, e.g.:
 			// "pods "pod-6-to-trigger-eviction-namespace-terminating" is forbidden: unable to create new content in namespace test-namespace because it is being terminated"
 			// The kube-controller-manager is supposed to set the deletionTimestamp on the Pod and then this error will go away.
 			msg := "Cannot evict pod from terminating namespace: unable to create eviction (kube-controller-manager should set deletionTimestamp)"
-			log.V(4).Error(err, msg)
+			log.V(4).Info(msg, "err", err)
 			res.PodsFailedEviction[msg] = append(res.PodsFailedEviction[msg], pd.Pod)
 		default:
-			log.V(4).Error(err, "Error when evicting Pod")
+			log.V(4).Info("Error when evicting Pod", "err", err)
 			res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod)
 		}
 	}
diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
index 061af36f4ab4..64d15d1e3361 100644
--- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
+++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
@@ -106,7 +106,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 		}
 		selectorMap, err := metav1.LabelSelectorAsMap(&oldMS.Spec.Selector)
 		if err != nil {
-			log.V(4).Error(err, "failed to convert MachineSet label selector to a map")
+			log.V(4).Info("Failed to convert MachineSet label selector to a map", "err", err)
 			continue
 		}
 		log.V(4).Info("Fetching Machines associated with MachineSet")
@@ -127,7 +127,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 		}
 		machineSetScaleDownAmountDueToMachineDeletion := *oldMS.Spec.Replicas - updatedReplicaCount
 		if machineSetScaleDownAmountDueToMachineDeletion < 0 {
-			log.V(4).Error(errors.Errorf("Unexpected negative scale down amount: %d", machineSetScaleDownAmountDueToMachineDeletion), fmt.Sprintf("Error reconciling MachineSet %s", oldMS.Name))
+			log.V(4).Info(fmt.Sprintf("Error reconciling MachineSet %s", oldMS.Name), "err", errors.Errorf("Unexpected negative scale down amount: %d", machineSetScaleDownAmountDueToMachineDeletion))
 		}
 		scaleDownAmount -= machineSetScaleDownAmountDueToMachineDeletion
 		log.V(4).Info("Adjusting replica count for deleted machines", "oldReplicas", oldMS.Spec.Replicas, "newReplicas", updatedReplicaCount)
diff --git a/util/util.go b/util/util.go
index 3892d81cdd55..10f1a6e963de 100644
--- a/util/util.go
+++ b/util/util.go
@@ -219,7 +219,7 @@ func ClusterToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersion
 		key := types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Spec.InfrastructureRef.Name}
 		if err := c.Get(ctx, key, providerCluster); err != nil {
-			log.V(4).Error(err, fmt.Sprintf("Failed to get %T", providerCluster))
+			log.V(4).Info(fmt.Sprintf("Failed to get %T", providerCluster), "err", err)
 			return nil
 		}