diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go
index 8a582f561..e5c2d0527 100644
--- a/cmd/hubagent/workload/setup.go
+++ b/cmd/hubagent/workload/setup.go
@@ -28,6 +28,7 @@ import (
 	"go.goms.io/fleet/pkg/controllers/clusterinventory/clusterprofile"
 	"go.goms.io/fleet/pkg/controllers/clusterresourcebindingwatcher"
 	"go.goms.io/fleet/pkg/controllers/clusterresourceplacement"
+	"go.goms.io/fleet/pkg/controllers/clusterresourceplacementeviction"
 	"go.goms.io/fleet/pkg/controllers/clusterresourceplacementwatcher"
 	"go.goms.io/fleet/pkg/controllers/clusterschedulingpolicysnapshot"
 	"go.goms.io/fleet/pkg/controllers/memberclusterplacement"
@@ -206,6 +207,14 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager,
 		return err
 	}
 
+	klog.Info("Setting up cluster resource placement eviction controller")
+	if err := (&clusterresourceplacementeviction.Reconciler{
+		Client: mgr.GetClient(),
+	}).SetupWithManager(mgr); err != nil {
+		klog.ErrorS(err, "Unable to set up cluster resource placement eviction controller")
+		return err
+	}
+
 	// Set up the work generator
 	klog.Info("Setting up work generator")
 	if err := (&workgenerator.Reconciler{
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller.go b/pkg/controllers/clusterresourceplacementeviction/controller.go
index 0229f8f69..6643cd115 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller.go
@@ -27,27 +27,6 @@ import (
 	"go.goms.io/fleet/pkg/utils/controller"
 )
 
-const (
-	clusterResourcePlacementEvictionValidReason       = "ClusterResourcePlacementEvictionValid"
-	clusterResourcePlacementEvictionInvalidReason     = "ClusterResourcePlacementEvictionInvalid"
-	clusterResourcePlacementEvictionExecutedReason    = "ClusterResourcePlacementEvictionExecuted"
-	clusterResourcePlacementEvictionNotExecutedReason = "ClusterResourcePlacementEvictionNotExecuted"
-
-	evictionInvalidMissingCRPMessage                = "Failed to find ClusterResourcePlacement targeted by eviction"
-	evictionInvalidDeletingCRPMessage               = "Found deleting ClusterResourcePlacement targeted by eviction"
-	evictionInvalidMissingCRBMessage                = "Failed to find scheduler decision for placement in cluster targeted by eviction"
-	evictionInvalidMultipleCRBMessage               = "Found more than one scheduler decision for placement in cluster targeted by eviction"
-	evictionValidMessage                            = "Eviction is valid"
-	evictionAllowedNoPDBMessage                     = "Eviction is allowed, no ClusterResourcePlacementDisruptionBudget specified"
-	evictionAllowedPlacementRemovedMessage          = "Eviction is allowed, resources propagated by placement is currently being removed from cluster targeted by eviction"
-	evictionAllowedPlacementFailedMessage           = "Eviction is allowed, placement has failed"
-	evictionBlockedMisconfiguredPDBSpecifiedMessage = "Eviction is blocked by misconfigured ClusterResourcePlacementDisruptionBudget, either MaxUnavailable is specified or MinAvailable is specified as a percentage for PickAll ClusterResourcePlacement"
-	evictionBlockedMissingPlacementMessage          = "Eviction is blocked, placement has not propagated resources to target cluster yet"
-
-	evictionAllowedPDBSpecifiedFmt = "Eviction is allowed by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
-	evictionBlockedPDBSpecifiedFmt = "Eviction is blocked by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
-)
-
 // Reconciler reconciles a ClusterResourcePlacementEviction object.
 type Reconciler struct {
 	client.Client
@@ -96,15 +75,15 @@ func (r *Reconciler) validateEviction(ctx context.Context, eviction *placementv1
 	var crp placementv1beta1.ClusterResourcePlacement
 	if err := r.Client.Get(ctx, types.NamespacedName{Name: eviction.Spec.PlacementName}, &crp); err != nil {
 		if k8serrors.IsNotFound(err) {
-			klog.V(2).InfoS(evictionInvalidMissingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-			markEvictionInvalid(eviction, evictionInvalidMissingCRPMessage)
+			klog.V(2).InfoS(condition.EvictionInvalidMissingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+			markEvictionInvalid(eviction, condition.EvictionInvalidMissingCRPMessage)
 			return validationResult, nil
 		}
 		return nil, controller.NewAPIServerError(true, err)
 	}
 	if crp.DeletionTimestamp != nil {
-		klog.V(2).InfoS(evictionInvalidDeletingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-		markEvictionInvalid(eviction, evictionInvalidDeletingCRPMessage)
+		klog.V(2).InfoS(condition.EvictionInvalidDeletingCRPMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+		markEvictionInvalid(eviction, condition.EvictionInvalidDeletingCRPMessage)
 		return validationResult, nil
 	}
 	validationResult.crp = &crp
@@ -121,15 +100,15 @@ func (r *Reconciler) validateEviction(ctx context.Context, eviction *placementv1
 			if evictionTargetBinding == nil {
 				evictionTargetBinding = &crbList.Items[i]
 			} else {
-				klog.V(2).InfoS(evictionInvalidMultipleCRBMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
-				markEvictionInvalid(eviction, evictionInvalidMultipleCRBMessage)
+				klog.V(2).InfoS(condition.EvictionInvalidMultipleCRBMessage, "clusterResourcePlacementEviction", eviction.Name, "clusterResourcePlacement", eviction.Spec.PlacementName)
+				markEvictionInvalid(eviction, condition.EvictionInvalidMultipleCRBMessage)
 				return validationResult, nil
 			}
 		}
 	}
 	if evictionTargetBinding == nil {
 		klog.V(2).InfoS("Failed to find cluster resource binding for cluster targeted by eviction", "clusterResourcePlacementEviction", eviction.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionInvalid(eviction, evictionInvalidMissingCRBMessage)
+		markEvictionInvalid(eviction, condition.EvictionInvalidMissingCRBMessage)
 		return validationResult, nil
 	}
 	validationResult.crb = evictionTargetBinding
@@ -174,14 +153,14 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 	if evictionTargetBinding.GetDeletionTimestamp() != nil {
 		klog.V(2).InfoS("ClusterResourceBinding targeted by eviction is being deleted", "clusterResourcePlacementEviction", eviction.Name, "clusterResourceBinding", evictionTargetBinding.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionExecuted(eviction, evictionAllowedPlacementRemovedMessage)
+		markEvictionExecuted(eviction, condition.EvictionAllowedPlacementRemovedMessage)
 		return nil
 	}
 
 	if !isPlacementPresent(evictionTargetBinding) {
 		klog.V(2).InfoS("No resources have been placed for ClusterResourceBinding in target cluster", "clusterResourcePlacementEviction", eviction.Name, "clusterResourceBinding", evictionTargetBinding.Name, "targetCluster", eviction.Spec.ClusterName)
-		markEvictionNotExecuted(eviction, evictionBlockedMissingPlacementMessage)
+		markEvictionNotExecuted(eviction, condition.EvictionBlockedMissingPlacementMessage)
 		return nil
 	}
@@ -192,7 +171,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 		if err := r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 			return err
 		}
-		markEvictionExecuted(eviction, evictionAllowedPlacementFailedMessage)
+		markEvictionExecuted(eviction, condition.EvictionAllowedPlacementFailedMessage)
 		return nil
 	}
@@ -202,7 +181,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 			if err = r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 				return err
 			}
-			markEvictionExecuted(eviction, evictionAllowedNoPDBMessage)
+			markEvictionExecuted(eviction, condition.EvictionAllowedNoPDBMessage)
 			return nil
 		}
 		return controller.NewAPIServerError(true, err)
@@ -211,7 +190,7 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 	// handle special case for PickAll CRP.
 	if crp.Spec.Policy.PlacementType == placementv1beta1.PickAllPlacementType {
 		if db.Spec.MaxUnavailable != nil || (db.Spec.MinAvailable != nil && db.Spec.MinAvailable.Type == intstr.String) {
-			markEvictionNotExecuted(eviction, evictionBlockedMisconfiguredPDBSpecifiedMessage)
+			markEvictionNotExecuted(eviction, condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage)
 			return nil
 		}
 	}
@@ -222,9 +201,9 @@ func (r *Reconciler) executeEviction(ctx context.Context, validationResult *evic
 		if err := r.deleteClusterResourceBinding(ctx, evictionTargetBinding); err != nil {
 			return err
 		}
-		markEvictionExecuted(eviction, fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, availableBindings, totalBindings))
+		markEvictionExecuted(eviction, fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, availableBindings, totalBindings))
 	} else {
-		markEvictionNotExecuted(eviction, fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, availableBindings, totalBindings))
+		markEvictionNotExecuted(eviction, fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, availableBindings, totalBindings))
 	}
 	return nil
 }
@@ -310,8 +289,8 @@ func markEvictionValid(eviction *placementv1alpha1.ClusterResourcePlacementEvict
 		Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 		Status:             metav1.ConditionTrue,
 		ObservedGeneration: eviction.Generation,
-		Reason:             clusterResourcePlacementEvictionValidReason,
-		Message:            evictionValidMessage,
+		Reason:             condition.ClusterResourcePlacementEvictionValidReason,
+		Message:            condition.EvictionValidMessage,
 	}
 	eviction.SetConditions(cond)
@@ -324,7 +303,7 @@ func markEvictionInvalid(eviction *placementv1alpha1.ClusterResourcePlacementEvi
 		Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 		Status:             metav1.ConditionFalse,
 		ObservedGeneration: eviction.Generation,
-		Reason:             clusterResourcePlacementEvictionInvalidReason,
+		Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
 		Message:            message,
 	}
 	eviction.SetConditions(cond)
@@ -337,7 +316,7 @@ func markEvictionExecuted(eviction *placementv1alpha1.ClusterResourcePlacementEv
 		Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 		Status:             metav1.ConditionTrue,
 		ObservedGeneration: eviction.Generation,
-		Reason:             clusterResourcePlacementEvictionExecutedReason,
+		Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
 		Message:            message,
 	}
 	eviction.SetConditions(cond)
@@ -350,7 +329,7 @@ func markEvictionNotExecuted(eviction *placementv1alpha1.ClusterResourcePlacemen
 		Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 		Status:             metav1.ConditionFalse,
 		ObservedGeneration: eviction.Generation,
-		Reason:             clusterResourcePlacementEvictionNotExecutedReason,
+		Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
 		Message:            message,
 	}
 	eviction.SetConditions(cond)
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
index d576a82b9..a91d5b877 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller_intergration_test.go
@@ -9,8 +9,6 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -23,6 +21,8 @@ import (
 	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/pkg/utils/condition"
+	testutilseviction "go.goms.io/fleet/test/utils/eviction"
 )
 
 const (
@@ -32,18 +32,6 @@ const (
 	evictionNameTemplate = "eviction-%d"
 )
 
-var (
-	lessFuncCondition = func(a, b metav1.Condition) bool {
-		return a.Type < b.Type
-	}
-
-	evictionStatusCmpOptions = cmp.Options{
-		cmpopts.SortSlices(lessFuncCondition),
-		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"),
-		cmpopts.EquateEmpty(),
-	}
-)
-
 const (
 	eventuallyDuration = time.Minute * 2
 	eventuallyInterval = time.Millisecond * 250
@@ -121,7 +109,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: false, msg: fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, 0, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 0, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -211,7 +202,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: true, msg: fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, 1, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 1, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -286,7 +280,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: false, msg: fmt.Sprintf(evictionBlockedPDBSpecifiedFmt, 0, 1)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 0, 1)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -399,7 +396,10 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 		})
 
 		By("Check eviction status", func() {
-			evictionStatusUpdatedActual := evictionStatusUpdatedActual(&isValidEviction{bool: true, msg: evictionValidMessage}, &isExecutedEviction{bool: true, msg: fmt.Sprintf(evictionAllowedPDBSpecifiedFmt, 2, 2)})
+			evictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+				ctx, k8sClient, evictionName,
+				&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+				&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 2, 2)})
 			Eventually(evictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed())
 		})
@@ -423,66 +423,6 @@ var _ = Describe("Test ClusterResourcePlacementEviction Controller", func() {
 	})
 })
 
-func evictionStatusUpdatedActual(isValid *isValidEviction, isExecuted *isExecutedEviction) func() error {
-	evictionName := fmt.Sprintf(evictionNameTemplate, GinkgoParallelProcess())
-	return func() error {
-		var eviction placementv1alpha1.ClusterResourcePlacementEviction
-		if err := k8sClient.Get(ctx, types.NamespacedName{Name: evictionName}, &eviction); err != nil {
-			return err
-		}
-		var conditions []metav1.Condition
-		if isValid != nil {
-			if isValid.bool {
-				validCondition := metav1.Condition{
-					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
-					Status:             metav1.ConditionTrue,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason:             clusterResourcePlacementEvictionValidReason,
-					Message:            isValid.msg,
-				}
-				conditions = append(conditions, validCondition)
-			} else {
-				invalidCondition := metav1.Condition{
-					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
-					Status:             metav1.ConditionFalse,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason:             clusterResourcePlacementEvictionInvalidReason,
-					Message:            isValid.msg,
-				}
-				conditions = append(conditions, invalidCondition)
-			}
-		}
-		if isExecuted != nil {
-			if isExecuted.bool {
-				executedCondition := metav1.Condition{
-					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
-					Status:             metav1.ConditionTrue,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason:             clusterResourcePlacementEvictionExecutedReason,
-					Message:            isExecuted.msg,
-				}
-				conditions = append(conditions, executedCondition)
-			} else {
-				notExecutedCondition := metav1.Condition{
-					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
-					Status:             metav1.ConditionFalse,
-					ObservedGeneration: eviction.GetGeneration(),
-					Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-					Message:            isExecuted.msg,
-				}
-				conditions = append(conditions, notExecutedCondition)
-			}
-		}
-		wantStatus := placementv1alpha1.PlacementEvictionStatus{
-			Conditions: conditions,
-		}
-		if diff := cmp.Diff(eviction.Status, wantStatus, evictionStatusCmpOptions...); diff != "" {
-			return fmt.Errorf("CRP status diff (-got, +want): %s", diff)
-		}
-		return nil
-	}
-}
-
 func buildTestPickNCRP(crpName string, clusterCount int32) placementv1beta1.ClusterResourcePlacement {
 	return placementv1beta1.ClusterResourcePlacement{
 		ObjectMeta: metav1.ObjectMeta{
@@ -597,13 +537,3 @@ func ensureAllBindingsAreRemoved(crpName string) {
 		ensureCRBRemoved(bindingList.Items[i].Name)
 	}
 }
-
-type isValidEviction struct {
-	bool
-	msg string
-}
-
-type isExecutedEviction struct {
-	bool
-	msg string
-}
diff --git a/pkg/controllers/clusterresourceplacementeviction/controller_test.go b/pkg/controllers/clusterresourceplacementeviction/controller_test.go
index 4a69de9c4..dfd99b154 100644
--- a/pkg/controllers/clusterresourceplacementeviction/controller_test.go
+++ b/pkg/controllers/clusterresourceplacementeviction/controller_test.go
@@ -21,6 +21,7 @@ import (
 	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/pkg/utils/condition"
 )
 
 const (
@@ -81,8 +82,8 @@ func TestValidateEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionInvalidReason,
-				Message:            evictionInvalidMissingCRPMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message:            condition.EvictionInvalidMissingCRPMessage,
 			},
 			wantErr: nil,
 		},
@@ -108,8 +109,8 @@ func TestValidateEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionInvalidReason,
-				Message:            evictionInvalidDeletingCRPMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message:            condition.EvictionInvalidDeletingCRPMessage,
 			},
 			wantErr: nil,
 		},
@@ -129,8 +130,8 @@ func TestValidateEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionInvalidReason,
-				Message:            evictionInvalidMultipleCRBMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message:            condition.EvictionInvalidMultipleCRBMessage,
 			},
 			wantErr: nil,
 		},
@@ -147,8 +148,8 @@ func TestValidateEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionInvalidReason,
-				Message:            evictionInvalidMissingCRBMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
+				Message:            condition.EvictionInvalidMissingCRBMessage,
 			},
 			wantErr: nil,
 		},
@@ -332,8 +333,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-				Message:            evictionBlockedMissingPlacementMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message:            condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -354,8 +355,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-				Message:            evictionBlockedMissingPlacementMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message:            condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -377,8 +378,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-				Message:            evictionBlockedMissingPlacementMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message:            condition.EvictionBlockedMissingPlacementMessage,
 			},
 			wantErr: nil,
 		},
@@ -402,8 +403,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionExecutedReason,
-				Message:            evictionAllowedPlacementRemovedMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message:            condition.EvictionAllowedPlacementRemovedMessage,
 			},
 			wantErr: nil,
 		},
@@ -435,8 +436,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionExecutedReason,
-				Message:            evictionAllowedPlacementFailedMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message:            condition.EvictionAllowedPlacementFailedMessage,
 			},
 			wantErr: nil,
 		},
@@ -474,8 +475,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionExecutedReason,
-				Message:            evictionAllowedPlacementFailedMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message:            condition.EvictionAllowedPlacementFailedMessage,
 			},
 			wantErr: nil,
 		},
@@ -494,8 +495,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionTrue,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionExecutedReason,
-				Message:            evictionAllowedNoPDBMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
+				Message:            condition.EvictionAllowedNoPDBMessage,
 			},
 			wantErr: nil,
 		},
@@ -530,8 +531,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-				Message:            evictionBlockedMisconfiguredPDBSpecifiedMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message:            condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage,
 			},
 			wantErr: nil,
 		},
@@ -566,8 +567,8 @@ func TestExecuteEviction(t *testing.T) {
 				Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
 				Status:             metav1.ConditionFalse,
 				ObservedGeneration: 1,
-				Reason:             clusterResourcePlacementEvictionNotExecutedReason,
-				Message:            evictionBlockedMisconfiguredPDBSpecifiedMessage,
+				Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+				Message:            condition.EvictionBlockedMisconfiguredPDBSpecifiedMessage,
 			},
 			wantErr: nil,
 		},
diff --git a/pkg/controllers/rollout/controller.go b/pkg/controllers/rollout/controller.go
index 464ce37cf..0f271b66a 100644
--- a/pkg/controllers/rollout/controller.go
+++ b/pkg/controllers/rollout/controller.go
@@ -381,7 +381,8 @@ func (r *Reconciler) pickBindingsToRoll(ctx context.Context, allBindings []*flee
 		case fleetv1beta1.BindingStateBound:
 			bindingFailed := false
 			schedulerTargetedBinds = append(schedulerTargetedBinds, binding)
-			if waitTime, bindingReady := isBindingReady(binding, readyTimeCutOff); bindingReady {
+			waitTime, bindingReady := isBindingReady(binding, readyTimeCutOff)
+			if bindingReady {
 				klog.V(3).InfoS("Found a ready bound binding", "clusterResourcePlacement", crpKObj, "binding", bindingKObj)
 				readyBindings = append(readyBindings, binding)
 			} else {
@@ -397,20 +398,27 @@ func (r *Reconciler) pickBindingsToRoll(ctx context.Context, allBindings []*flee
 			} else {
 				canBeReadyBindings = append(canBeReadyBindings, binding)
 			}
-			// PickFromResourceMatchedOverridesForTargetCluster always returns the ordered list of the overrides.
-			cro, ro, err := overrider.PickFromResourceMatchedOverridesForTargetCluster(ctx, r.Client, binding.Spec.TargetCluster, matchedCROs, matchedROs)
-			if err != nil {
-				return nil, nil, false, 0, err
-			}
-			// The binding needs update if it's not pointing to the latest resource resourceBinding or the overrides.
-			if binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name || !equality.Semantic.DeepEqual(binding.Spec.ClusterResourceOverrideSnapshots, cro) || !equality.Semantic.DeepEqual(binding.Spec.ResourceOverrideSnapshots, ro) {
-				updateInfo := createUpdateInfo(binding, crp, latestResourceSnapshot, cro, ro)
-				if bindingFailed {
-					// the binding has been applied but failed to apply, we can safely update it to latest resources without affecting max unavailable count
-					applyFailedUpdateCandidates = append(applyFailedUpdateCandidates, updateInfo)
-				} else {
-					updateCandidates = append(updateCandidates, updateInfo)
+
+			// check to see if binding is not being deleted.
+			if binding.DeletionTimestamp.IsZero() {
+				// PickFromResourceMatchedOverridesForTargetCluster always returns the ordered list of the overrides.
+				cro, ro, err := overrider.PickFromResourceMatchedOverridesForTargetCluster(ctx, r.Client, binding.Spec.TargetCluster, matchedCROs, matchedROs)
+				if err != nil {
+					return nil, nil, false, 0, err
+				}
+				// The binding needs an update if it's not pointing to the latest resource snapshot or the overrides.
+				if binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name || !equality.Semantic.DeepEqual(binding.Spec.ClusterResourceOverrideSnapshots, cro) || !equality.Semantic.DeepEqual(binding.Spec.ResourceOverrideSnapshots, ro) {
+					updateInfo := createUpdateInfo(binding, crp, latestResourceSnapshot, cro, ro)
+					if bindingFailed {
+						// the binding failed to apply; we can safely update it to the latest resources without affecting the max unavailable count
+						applyFailedUpdateCandidates = append(applyFailedUpdateCandidates, updateInfo)
+					} else {
+						updateCandidates = append(updateCandidates, updateInfo)
+					}
+				}
+			} else if bindingReady {
+				// it is being deleted; it can be removed from the cluster at any time, so it can be unavailable at any time
+				canBeUnavailableBindings = append(canBeUnavailableBindings, binding)
 			}
 		}
 	}
diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go
index 57bd67959..dbe271c77 100644
--- a/pkg/controllers/rollout/controller_integration_test.go
+++ b/pkg/controllers/rollout/controller_integration_test.go
@@ -12,12 +12,12 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"k8s.io/utils/ptr"
-
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"
 
 	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 	"go.goms.io/fleet/pkg/controllers/work"
@@ -607,6 +607,202 @@ var _ = Describe("Test the rollout Controller", func() {
 		}, 5*time.Minute, interval).Should(BeTrue(), "rollout controller should roll all the bindings to use the latest resource snapshot")
 	})
 
+	It("Rollout should be blocked, then unblocked by eviction - evict unscheduled binding", func() {
+		// create CRP
+		var targetCluster int32 = 2
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		// Set MaxSurge to 0.
+		rolloutCRP.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{
+			Type:   intstr.Int,
+			IntVal: 0,
+		}
+		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
+		// create master resource snapshot that is latest.
+		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
+		Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed())
+
+		// create scheduled bindings for master snapshot on target clusters
+		clusters := make([]string, targetCluster)
+		for i := 0; i < int(targetCluster); i++ {
+			clusters[i] = "cluster-" + utils.RandStr()
+			binding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, clusters[i])
+			Expect(k8sClient.Create(ctx, binding)).Should(Succeed())
+			By(fmt.Sprintf("resource binding %s created", binding.Name))
+			bindings = append(bindings, binding)
+		}
+
+		// check that all bindings are bound.
+		Eventually(func() bool {
+			for _, binding := range bindings {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.GetName()}, binding)
+				if err != nil {
+					return false
+				}
+				if binding.Spec.State != fleetv1beta1.BindingStateBound || binding.Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+			}
+			return true
+		}, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state")
+
+		// mark one binding as ready i.e. applied and available.
+		availableBinding := 1
+		for i := 0; i < availableBinding; i++ {
+			markBindingApplied(bindings[i], true)
+			markBindingAvailable(bindings[i], false)
+		}
+		// Current state: one ready binding and one canBeReadyBinding.
+		// create a new scheduled binding.
+		cluster3 = "cluster-" + utils.RandStr()
+		newScheduledBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, cluster3)
+		Expect(k8sClient.Create(ctx, newScheduledBinding)).Should(Succeed())
+		By(fmt.Sprintf("resource binding %s created", newScheduledBinding.Name))
+		// add new scheduled binding to list of bindings.
+		bindings = append(bindings, newScheduledBinding)
+
+		// ensure new binding exists.
+		Eventually(func() bool {
+			return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding))
+		}, timeout, interval).Should(BeTrue(), "new scheduled binding is not found")
+
+		// check if new scheduled binding is not bound.
+		Consistently(func() error {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding)
+			if err != nil {
+				return err
+			}
+			if newScheduledBinding.Spec.State == fleetv1beta1.BindingStateBound {
+				return fmt.Errorf("binding %s is in bound state, which is unexpected", newScheduledBinding.Name)
+			}
+			return nil
+		}, timeout, interval).Should(BeNil(), "rollout controller shouldn't roll new scheduled binding to bound state")
+
+		// Current state: rollout is blocked by maxSurge being 0.
+		// mark first available bound binding as unscheduled and ensure it's not removed.
+		unscheduledBinding := 1
+		for i := 0; i < unscheduledBinding; i++ {
+			Eventually(func() error {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].Name}, bindings[i])
+				if err != nil {
+					return err
+				}
+				bindings[i].Spec.State = fleetv1beta1.BindingStateUnscheduled
+				return k8sClient.Update(ctx, bindings[i])
+			}, timeout, interval).Should(BeNil(), "failed to update binding spec to unscheduled")
+
+			// Ensure unscheduled binding is not removed.
+			Consistently(func() bool {
+				return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].Name}, bindings[i]))
+			}, timeout, interval).Should(BeTrue(), "rollout controller doesn't remove unscheduled binding")
+		}
+
+		// simulate eviction by deleting unscheduled binding.
+		for i := 0; i < unscheduledBinding; i++ {
+			Expect(k8sClient.Delete(ctx, bindings[i])).Should(Succeed())
+		}
+
+		// check to see if rollout is unblocked due to eviction.
+		for i := unscheduledBinding; i < len(bindings); i++ {
+			Eventually(func() bool {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].GetName()}, bindings[i])
+				if err != nil {
+					return false
+				}
+				if bindings[i].Spec.State != fleetv1beta1.BindingStateBound || bindings[i].Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+				return true
+			}, timeout, interval).Should(BeTrue(), "rollout controller should roll all remaining bindings to Bound state")
+		}
+	})
+
+	It("Rollout should be blocked, then unblocked by eviction - evict bound binding", func() {
+		// create CRP
+		var targetCluster int32 = 2
+		rolloutCRP = clusterResourcePlacementForTest(testCRPName, createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, targetCluster))
+		// Set MaxSurge to 0.
+		rolloutCRP.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{
+			Type:   intstr.Int,
+			IntVal: 0,
+		}
+		Expect(k8sClient.Create(ctx, rolloutCRP)).Should(Succeed())
+		// create master resource snapshot that is latest.
+		masterSnapshot := generateResourceSnapshot(rolloutCRP.Name, 0, true)
+		Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed())
+
+		// create scheduled bindings for master snapshot on target clusters
+		clusters := make([]string, targetCluster)
+		for i := 0; i < int(targetCluster); i++ {
+			clusters[i] = "cluster-" + utils.RandStr()
+			binding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, clusters[i])
+			Expect(k8sClient.Create(ctx, binding)).Should(Succeed())
+			By(fmt.Sprintf("resource binding %s created", binding.Name))
+			bindings = append(bindings, binding)
+		}
+
+		// check that all bindings are bound.
+		Eventually(func() bool {
+			for _, binding := range bindings {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.GetName()}, binding)
+				if err != nil {
+					return false
+				}
+				if binding.Spec.State != fleetv1beta1.BindingStateBound || binding.Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+			}
+			return true
+		}, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state")
+
+		// Note: This scenario is very unlikely in production: the user has to change the target from 2->3->2,
+		// where the scheduler created a new scheduled binding but the user changed the target number from 3->2 again before the rollout controller read the CRP.
+		// create a new scheduled binding.
+		cluster3 = "cluster-" + utils.RandStr()
+		newScheduledBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, masterSnapshot.Name, cluster3)
+		Expect(k8sClient.Create(ctx, newScheduledBinding)).Should(Succeed())
+		By(fmt.Sprintf("resource binding %s created", newScheduledBinding.Name))
+		// add new scheduled binding to list of bindings.
+		bindings = append(bindings, newScheduledBinding)
+
+		// ensure new binding exists.
+		Eventually(func() bool {
+			return !apierrors.IsNotFound(k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding))
+		}, timeout, interval).Should(BeTrue(), "new scheduled binding is not found")
+
+		// Current state: rollout is blocked by maxSurge being 0.
+		// check if new scheduled binding is not bound.
+		Consistently(func() error {
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: newScheduledBinding.Name}, newScheduledBinding)
+			if err != nil {
+				return err
+			}
+			if newScheduledBinding.Spec.State == fleetv1beta1.BindingStateBound {
+				return fmt.Errorf("binding %s is in bound state, which is unexpected", newScheduledBinding.Name)
+			}
+			return nil
+		}, timeout, interval).Should(BeNil(), "rollout controller shouldn't roll new scheduled binding to bound state")
+
+		// simulate eviction by deleting first bound binding.
+		firstBoundBinding := 1
+		for i := 0; i < firstBoundBinding; i++ {
+			Expect(k8sClient.Delete(ctx, bindings[i])).Should(Succeed())
+		}
+
+		// check to see if the remaining two bindings are bound.
+		for i := firstBoundBinding; i < len(bindings); i++ {
+			Eventually(func() bool {
+				err := k8sClient.Get(ctx, types.NamespacedName{Name: bindings[i].GetName()}, bindings[i])
+				if err != nil {
+					return false
+				}
+				if bindings[i].Spec.State != fleetv1beta1.BindingStateBound || bindings[i].Spec.ResourceSnapshotName != masterSnapshot.Name {
+					return false
+				}
+				return true
+			}, timeout, interval).Should(BeTrue(), "rollout controller should roll all remaining bindings to Bound state")
+		}
+	})
+
 	// TODO: should update scheduled bindings to the latest snapshot when it is updated to bound state.
 	// TODO: should count the deleting bindings as can be Unavailable.
@@ -707,11 +903,3 @@ func generateResourceSnapshot(testCRPName string, resourceIndex int, isLatest bo
 	}
 	return clusterResourceSnapshot
 }
-
-func generateDeletingClusterResourceBinding(targetCluster string) *fleetv1beta1.ClusterResourceBinding {
-	binding := generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "anything", targetCluster)
-	binding.DeletionTimestamp = &metav1.Time{
-		Time: now,
-	}
-	return binding
-}
diff --git a/pkg/controllers/rollout/controller_test.go b/pkg/controllers/rollout/controller_test.go
index 68188d4cd..4b32d01c7 100644
--- a/pkg/controllers/rollout/controller_test.go
+++ b/pkg/controllers/rollout/controller_test.go
@@ -205,7 +205,7 @@ func TestWaitForResourcesToCleanUp(t *testing.T) {
 		"test deleting binding block schedule binding on the same cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster1),
-				generateDeletingClusterResourceBinding(cluster1),
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1),
 			},
 			wantWait: true,
 			wantErr:  false,
@@ -213,7 +213,7 @@ func TestWaitForResourcesToCleanUp(t *testing.T) {
 		"test deleting binding not block binding on different cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
 				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster1),
-				generateDeletingClusterResourceBinding(cluster2),
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster2),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster3),
 			},
 			wantWait: false,
@@ -221,7 +221,7 @@ func TestWaitForResourcesToCleanUp(t *testing.T) {
 		"test deleting binding cannot co-exsit with a bound binding on same cluster": {
 			allBindings: []*fleetv1beta1.ClusterResourceBinding{
-				generateDeletingClusterResourceBinding(cluster1),
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1),
 				generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1),
 			},
 			wantWait: false,
@@ -850,6 +850,52 @@
 			Reason: work.WorkNotTrackableReason, // Make it not ready
 		},
 	}
+
+	readyBoundDeletingBinding := generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateBound, cluster1)
+	readyBoundDeletingBinding.Generation = 15
+	readyBoundDeletingBinding.Status.Conditions = []metav1.Condition{
+		{
+			Type:               string(fleetv1beta1.ResourceBindingApplied),
+			Status:             metav1.ConditionTrue,
+			ObservedGeneration: 15,
+		},
+		{
+			Type:               string(fleetv1beta1.ResourceBindingAvailable),
+			Status:             metav1.ConditionTrue,
+			ObservedGeneration: 15,
+			LastTransitionTime: metav1.Time{
+				Time: now.Add(-35 * time.Second),
+			},
+			Reason: work.WorkNotTrackableReason,
+		},
+	}
+
+	failedToApplyBoundBinding := generateClusterResourceBinding(fleetv1beta1.BindingStateBound, "snapshot-1", cluster1)
+	failedToApplyBoundBinding.Generation = 2
+	failedToApplyBoundBinding.Status.Conditions = []metav1.Condition{
+		{
+			Type:               string(fleetv1beta1.ResourceBindingApplied),
+			Status:             metav1.ConditionFalse,
+			ObservedGeneration: 2,
+		},
+	}
+
+	readyUnscheduledDeletingBinding := generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1)
+	readyUnscheduledDeletingBinding.Generation = 15
+	readyUnscheduledDeletingBinding.Status.Conditions = []metav1.Condition{
+		{
+			Type:               string(fleetv1beta1.ResourceBindingApplied),
+			Status:             metav1.ConditionTrue,
+			ObservedGeneration: 15,
+		},
+		{
+			Type:               string(fleetv1beta1.ResourceBindingAvailable),
+			Status:             metav1.ConditionTrue,
+			ObservedGeneration: 15,
+			Reason:             work.WorkAvailableReason, // Make it ready
+		},
+	}
+
 	tests := map[string]struct {
 		allBindings                []*fleetv1beta1.ClusterResourceBinding
 		latestResourceSnapshotName string
@@ -1513,6 +1559,228 @@ func TestPickBindingsToRoll(t *testing.T) {
 			wantNeedRoll: true,
 			wantWaitTime: 25 * time.Second, // minWaitTime = (t - 35 seconds) - (t - 60 seconds) = 25 seconds
 		},
+		"test bound deleting binding - rollout blocked": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateBound, cluster1),
+			},
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: nil,
+			wantNeedRoll:                false,
+			wantWaitTime:                time.Second,
+		},
+		"test one scheduled, one deleting bound binding - rollout allowed": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-2", cluster1),
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateBound, cluster2),
+			},
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+			latestResourceSnapshotName:  "snapshot-2",
+			wantTobeUpdatedBindings:     []int{0},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster1,
+					ResourceSnapshotName: "snapshot-2",
+				},
+				// bound deleting binding does not have desired spec, so it's empty.
+				{},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one canBeReady bound binding, one deleting canBeReady bound binding, one update candidate - rollout blocked, only update stale binding status": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				canBeReadyBinding,
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateBound, cluster2),
+			},
+			latestResourceSnapshotName: "snapshot-2",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: []int{0},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster1,
+					ResourceSnapshotName: "snapshot-2",
+				},
+				// bound deleting binding does not have desired spec, so it's empty.
+				{},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one ready bound binding, one deleting ready bound binding, update candidate - rollout blocked, only update stale binding status": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				readyBoundDeletingBinding,
+				readyBoundBinding,
+			},
+			latestResourceSnapshotName: "snapshot-3", // readyBoundBinding's snapshot is snapshot-2 to force update.
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: []int{1},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// bound deleting binding does not have desired spec, so it's empty.
+				{},
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster2,
+					ResourceSnapshotName: "snapshot-3",
+				},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: 0,
+		},
+		"test one failedToApply binding, one deleting binding, update candidate - rollout allowed": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				failedToApplyBoundBinding,
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateBound, cluster2),
+			},
+			latestResourceSnapshotName: "snapshot-2", // failedToApplyBoundBinding's snapshot is snapshot-1 to force update.
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+			wantTobeUpdatedBindings:     []int{0},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster1,
+					ResourceSnapshotName: "snapshot-2",
+				},
+				// bound deleting binding does not have desired spec, so it's empty.
+				{},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one ready deleting bound binding, one unscheduled binding - rollout blocked, unscheduled binding is not removed": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				readyBoundDeletingBinding,
+				generateClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, "snapshot-1", cluster2),
+			},
+			latestResourceSnapshotName: "snapshot-1",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// bound deleting binding does not have desired spec, so it's empty.
+				{},
+				// unscheduled binding does not have desired spec, so it's empty.
+				{},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one deleting unscheduled binding - rollout blocked": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1),
+			},
+			latestResourceSnapshotName: "snapshot-1",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// unscheduled deleting binding does not have desired spec, so it's empty.
+				{},
+			},
+			wantNeedRoll: false,
+			wantWaitTime: time.Second,
+		},
+		"test one scheduled, one deleting unscheduled binding - rollout allowed for scheduled binding": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1),
+				generateClusterResourceBinding(fleetv1beta1.BindingStateScheduled, "snapshot-1", cluster2),
+			},
+			latestResourceSnapshotName: "snapshot-1",
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{1},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// unscheduled deleting binding does not have desired spec, so it's empty.
+				{},
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster2,
+					ResourceSnapshotName: "snapshot-1",
+				},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one ready bound binding, one not ready deleting unscheduled binding - rollout allowed for ready binding": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				generateDeletingClusterResourceBinding(fleetv1beta1.BindingStateUnscheduled, cluster1), // not ready unscheduled binding.
+				readyBoundBinding,
+			},
+			latestResourceSnapshotName: "snapshot-3", // readyBoundBinding's snapshot is snapshot-2 to force update.
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{1},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// unscheduled deleting binding does not have desired spec, so it's empty.
+				{},
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster2,
+					ResourceSnapshotName: "snapshot-3",
+				},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: time.Second,
+		},
+		"test one ready bound binding, one ready deleting unscheduled binding - rollout allowed for ready binding": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				readyUnscheduledDeletingBinding,
+				readyBoundBinding,
+			},
+			latestResourceSnapshotName: "snapshot-3", // readyBoundBinding's snapshot is snapshot-2 to force update.
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickAllPlacementType, 0)),
+			wantTobeUpdatedBindings:     []int{1},
+			wantStaleUnselectedBindings: []int{},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// unscheduled deleting binding does not have desired spec, so it's empty.
+				{},
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster2,
+					ResourceSnapshotName: "snapshot-3",
+				},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: 0,
+		},
+		"test one ready bound binding, one ready deleting unscheduled binding - rollout blocked for ready binding": {
+			allBindings: []*fleetv1beta1.ClusterResourceBinding{
+				readyUnscheduledDeletingBinding,
+				readyBoundBinding,
+			},
+			latestResourceSnapshotName: "snapshot-3", // readyBoundBinding's snapshot is snapshot-2 to force update.
+			crp: clusterResourcePlacementForTest("test",
+				createPlacementPolicyForTest(fleetv1beta1.PickNPlacementType, 2)),
+			wantTobeUpdatedBindings:     []int{},
+			wantStaleUnselectedBindings: []int{1},
+			wantDesiredBindingsSpec: []fleetv1beta1.ResourceBindingSpec{
+				// unscheduled deleting binding does not have desired spec, so it's empty.
+				{},
+				{
+					State:                fleetv1beta1.BindingStateBound,
+					TargetCluster:        cluster2,
+					ResourceSnapshotName: "snapshot-3",
+				},
+			},
+			wantNeedRoll: true,
+			wantWaitTime: 0,
+		},
 	}
 	for name, tt := range tests {
 		t.Run(name, func(t *testing.T) {
@@ -1544,15 +1812,25 @@ func TestPickBindingsToRoll(t *testing.T) {
 			wantTobeUpdatedBindings := make([]toBeUpdatedBinding, len(tt.wantTobeUpdatedBindings))
 			for i, index := range tt.wantTobeUpdatedBindings {
-				wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index]
-				wantTobeUpdatedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy()
-				wantTobeUpdatedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index]
+				// Unscheduled bindings are only removed in a single rollout cycle.
+				if tt.allBindings[index].Spec.State != fleetv1beta1.BindingStateUnscheduled {
+					wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index]
+					wantTobeUpdatedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy()
+					wantTobeUpdatedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index]
+				} else {
+					wantTobeUpdatedBindings[i].currentBinding = tt.allBindings[index]
+				}
 			}
 			wantStaleUnselectedBindings := make([]toBeUpdatedBinding, len(tt.wantStaleUnselectedBindings))
 			for i, index := range tt.wantStaleUnselectedBindings {
-				wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index]
-				wantStaleUnselectedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy()
-				wantStaleUnselectedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index]
+				// Unscheduled bindings are only removed in a single rollout cycle.
+				if tt.allBindings[index].Spec.State != fleetv1beta1.BindingStateUnscheduled {
+					wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index]
+					wantStaleUnselectedBindings[i].desiredBinding = tt.allBindings[index].DeepCopy()
+					wantStaleUnselectedBindings[i].desiredBinding.Spec = tt.wantDesiredBindingsSpec[index]
+				} else {
+					wantStaleUnselectedBindings[i].currentBinding = tt.allBindings[index]
+				}
 			}
 
 			if diff := cmp.Diff(wantTobeUpdatedBindings, gotUpdatedBindings, cmpOptions...); diff != "" {
@@ -2057,3 +2335,11 @@ func TestCheckAndUpdateStaleBindingsStatus(t *testing.T) {
 		})
 	}
 }
+
+func generateDeletingClusterResourceBinding(state fleetv1beta1.BindingState, targetCluster string) *fleetv1beta1.ClusterResourceBinding {
+	binding := generateClusterResourceBinding(state, "anything", targetCluster)
+	binding.DeletionTimestamp = &metav1.Time{
+		Time: now,
+	}
+	return binding
+}
diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go
index 7108b18ae..f3df6584f 100644
--- a/pkg/utils/condition/condition.go
+++ b/pkg/utils/condition/condition.go
@@ -145,6 +145,57 @@ const (
 	AfterStageTaskWaitTimeElapsedReason = "AfterStageTaskWaitTimeElapsed"
 )
 
+// A group of condition reason & message strings used to populate the ClusterResourcePlacementEviction condition.
+const (
+	// ClusterResourcePlacementEvictionValidReason is the reason string of condition if the eviction is valid.
+	ClusterResourcePlacementEvictionValidReason = "ClusterResourcePlacementEvictionValid"
+
+	// ClusterResourcePlacementEvictionInvalidReason is the reason string of condition if the eviction is invalid.
+	ClusterResourcePlacementEvictionInvalidReason = "ClusterResourcePlacementEvictionInvalid"
+
+	// ClusterResourcePlacementEvictionExecutedReason is the reason string of condition if the eviction is executed.
+	ClusterResourcePlacementEvictionExecutedReason = "ClusterResourcePlacementEvictionExecuted"
+
+	// ClusterResourcePlacementEvictionNotExecutedReason is the reason string of condition if the eviction is not executed.
+	ClusterResourcePlacementEvictionNotExecutedReason = "ClusterResourcePlacementEvictionNotExecuted"
+
+	// EvictionInvalidMissingCRPMessage is the message string of invalid eviction condition when CRP is missing.
+	EvictionInvalidMissingCRPMessage = "Failed to find ClusterResourcePlacement targeted by eviction"
+
+	// EvictionInvalidDeletingCRPMessage is the message string of invalid eviction condition when CRP is deleting.
+	EvictionInvalidDeletingCRPMessage = "Found deleting ClusterResourcePlacement targeted by eviction"
+
+	// EvictionInvalidMissingCRBMessage is the message string of invalid eviction condition when CRB is missing.
+	EvictionInvalidMissingCRBMessage = "Failed to find scheduler decision for placement in cluster targeted by eviction"
+
+	// EvictionInvalidMultipleCRBMessage is the message string of invalid eviction condition when more than one CRB is present for cluster targeted by eviction.
+	EvictionInvalidMultipleCRBMessage = "Found more than one scheduler decision for placement in cluster targeted by eviction"
+
+	// EvictionValidMessage is the message string of valid eviction condition.
+	EvictionValidMessage = "Eviction is valid"
+
+	// EvictionAllowedNoPDBMessage is the message string for executed condition when no PDB is specified.
+	EvictionAllowedNoPDBMessage = "Eviction is allowed, no ClusterResourcePlacementDisruptionBudget specified"
+
+	// EvictionAllowedPlacementRemovedMessage is the message string for executed condition when CRB targeted by eviction is being deleted.
+	EvictionAllowedPlacementRemovedMessage = "Eviction is allowed, resources propagated by placement is currently being removed from cluster targeted by eviction"
+
+	// EvictionAllowedPlacementFailedMessage is the message string for executed condition when placed resources have failed to apply or the resources are not available.
+	EvictionAllowedPlacementFailedMessage = "Eviction is allowed, placement has failed"
+
+	// EvictionBlockedMisconfiguredPDBSpecifiedMessage is the message string for not executed condition when PDB specified is misconfigured for PickAll CRP.
+	EvictionBlockedMisconfiguredPDBSpecifiedMessage = "Eviction is blocked by misconfigured ClusterResourcePlacementDisruptionBudget, either MaxUnavailable is specified or MinAvailable is specified as a percentage for PickAll ClusterResourcePlacement"
+
+	// EvictionBlockedMissingPlacementMessage is the message string for not executed condition when resources are yet to be placed in targeted cluster by eviction.
+	EvictionBlockedMissingPlacementMessage = "Eviction is blocked, placement has not propagated resources to target cluster yet"
+
+	// EvictionAllowedPDBSpecifiedMessageFmt is the message format for executed condition when eviction is allowed by the specified PDB.
+	EvictionAllowedPDBSpecifiedMessageFmt = "Eviction is allowed by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
+
+	// EvictionBlockedPDBSpecifiedMessageFmt is the message format for not executed condition when eviction is blocked by the specified PDB.
+	EvictionBlockedPDBSpecifiedMessageFmt = "Eviction is blocked by specified ClusterResourcePlacementDisruptionBudget, availablePlacements: %d, totalPlacements: %d"
+)
+
 // EqualCondition compares one condition with another; it ignores the LastTransitionTime and Message fields,
 // and will consider the ObservedGeneration values from the two conditions a match if the current
 // condition is newer.
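Note: the availablePlacements/totalPlacements pair in the two *PDBSpecifiedMessageFmt strings above comes from the budget check in executeEviction, which sits in unchanged code and is not visible in this diff. A rough sketch of how a MinAvailable-style gate typically resolves those numbers; the helper name and exact rounding behavior are assumptions, not the controller's implementation:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// isEvictionAllowed sketches a MinAvailable-style gate: evicting one available
// placement must still leave at least minAvailable placements standing.
// GetScaledValueFromIntOrPercent resolves an integer ("1") or percentage
// ("50%") value against the total number of placements.
func isEvictionAllowed(minAvailable intstr.IntOrString, availablePlacements, totalPlacements int) bool {
	threshold, err := intstr.GetScaledValueFromIntOrPercent(&minAvailable, totalPlacements, true)
	if err != nil {
		return false // treat an unparsable budget as blocking
	}
	return availablePlacements-1 >= threshold
}

func main() {
	minAvailable := intstr.FromInt(1)
	fmt.Println(isEvictionAllowed(minAvailable, 2, 3)) // true: one placement remains available
	fmt.Println(isEvictionAllowed(minAvailable, 1, 3)) // false: would drop below MinAvailable
}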
diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go
index dddf87dd4..d8e54cdd6 100644
--- a/test/e2e/actuals_test.go
+++ b/test/e2e/actuals_test.go
@@ -16,6 +16,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 	"go.goms.io/fleet/pkg/controllers/clusterresourceplacement"
 	"go.goms.io/fleet/pkg/controllers/work"
@@ -890,6 +891,16 @@ func crpRemovedActual(crpName string) func() error {
 	}
 }
 
+func crpEvictionRemovedActual(crpEvictionName string) func() error {
+	return func() error {
+		if err := hubClient.Get(ctx, types.NamespacedName{Name: crpEvictionName}, &placementv1alpha1.ClusterResourcePlacementEviction{}); !errors.IsNotFound(err) {
+			return fmt.Errorf("CRP eviction still exists or an unexpected error occurred: %w", err)
+		}
+
+		return nil
+	}
+}
+
 func validateCRPSnapshotRevisions(crpName string, wantPolicySnapshotRevision, wantResourceSnapshotRevision int) error {
 	matchingLabels := client.MatchingLabels{placementv1beta1.CRPTrackingLabel: crpName}
diff --git a/test/e2e/placement_eviction_test.go b/test/e2e/placement_eviction_test.go
new file mode 100644
index 000000000..6d49912d5
--- /dev/null
+++ b/test/e2e/placement_eviction_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package e2e
+
+import (
+	"fmt"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
+	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
+	"go.goms.io/fleet/pkg/utils/condition"
+	"go.goms.io/fleet/test/e2e/framework"
+	testutilseviction "go.goms.io/fleet/test/utils/eviction"
+)
+
+var _ = Describe("ClusterResourcePlacement eviction of bound binding", Ordered, Serial, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess())
+	taintClusterNames := []string{memberCluster1EastProdName}
+	selectedClusterNames1 := []string{memberCluster1EastProdName, memberCluster2EastCanaryName, memberCluster3WestProdName}
+	selectedClusterNames2 := []string{memberCluster2EastCanaryName, memberCluster3WestProdName}
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the CRP.
+		crp := &placementv1beta1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+				// Add a custom finalizer; this would allow us to better observe
+				// the behavior of the controllers.
+				Finalizers: []string{customDeletionBlockerFinalizer},
+			},
+			Spec: placementv1beta1.ClusterResourcePlacementSpec{
+				ResourceSelectors: workResourceSelector(),
+			},
+		}
+		Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName)
+	})
+
+	AfterAll(func() {
+		// Remove taint from member cluster 1.
+		removeTaintsFromMemberClusters(taintClusterNames)
+		ensureCRPEvictionDeletion(crpEvictionName)
+		ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), selectedClusterNames1, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("add taint to member cluster 1", func() {
+		addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames))
+	})
+
+	It("create cluster resource placement eviction targeting member cluster 1", func() {
+		crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpEvictionName,
+			},
+			Spec: placementv1alpha1.PlacementEvictionSpec{
+				PlacementName: crpName,
+				ClusterName:   memberCluster1EastProdName,
+			},
+		}
+		Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name)
+	})
+
+	It("should update cluster resource placement eviction status as expected", func() {
+		crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+			ctx, hubClient, crpEvictionName,
+			&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+			&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: condition.EvictionAllowedNoPDBMessage})
+		Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected")
+	})
+
+	It("should ensure no resources exist on evicted member cluster with taint", func() {
+		unSelectedClusters := []*framework.Cluster{memberCluster1EastProd}
+		for _, cluster := range unSelectedClusters {
+			resourceRemovedActual := workNamespaceRemovedFromClusterActual(cluster)
+			Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to verify resources are removed from member cluster")
+		}
+	})
+
+	It("should update cluster resource placement status as expected after eviction", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), selectedClusterNames2, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+})
diff --git a/test/e2e/resources_test.go b/test/e2e/resources_test.go
index 082962b7f..0f5bfd769 100644
--- a/test/e2e/resources_test.go
+++ b/test/e2e/resources_test.go
@@ -33,6 +33,7 @@ const (
 	internalServiceExportNameTemplate = "ise-%d"
 	internalServiceImportNameTemplate = "isi-%d"
 	endpointSliceExportNameTemplate   = "ep-%d"
+	crpEvictionNameTemplate           = "crpe-%d"
 
 	customDeletionBlockerFinalizer = "custom-deletion-blocker-finalizer"
 	workNamespaceLabelName         = "process"
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 99f33e997..98ac0c761 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -917,6 +917,17 @@ func ensureCRPAndRelatedResourcesDeletion(crpName string, memberClusters []*fram
 	cleanupWorkResources()
 }
 
+func ensureCRPEvictionDeletion(crpEvictionName string) {
+	crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: crpEvictionName,
+		},
+	}
+	Expect(hubClient.Delete(ctx, crpe)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete CRP eviction")
+	removedActual := crpEvictionRemovedActual(crpEvictionName)
+	Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "CRP eviction still exists")
+}
+
 // verifyWorkPropagationAndMarkAsAvailable verifies that works derived from a specific CPR have been created
 // for a specific cluster, and marks these works in the specific member cluster's
 // reserved namespace as applied and available.
diff --git a/test/utils/eviction/eviction_status.go b/test/utils/eviction/eviction_status.go
new file mode 100644
index 000000000..0db7f0831
--- /dev/null
+++ b/test/utils/eviction/eviction_status.go
@@ -0,0 +1,104 @@
+/*
+Copyright (c) Microsoft Corporation.
+Licensed under the MIT license.
+*/
+
+package eviction
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
+	"go.goms.io/fleet/pkg/utils/condition"
+)
+
+var (
+	lessFuncCondition = func(a, b metav1.Condition) bool {
+		return a.Type < b.Type
+	}
+	evictionStatusCmpOptions = cmp.Options{
+		cmpopts.SortSlices(lessFuncCondition),
+		cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"),
+		cmpopts.EquateEmpty(),
+	}
+)
+
+// StatusUpdatedActual returns a check, usable with Eventually/Consistently, that
+// verifies the eviction's Valid and Executed conditions match the given expectations.
+func StatusUpdatedActual(ctx context.Context, client client.Client, evictionName string, isValidEviction *IsValidEviction, isExecutedEviction *IsExecutedEviction) func() error {
+	return func() error {
+		var eviction placementv1alpha1.ClusterResourcePlacementEviction
+		if err := client.Get(ctx, types.NamespacedName{Name: evictionName}, &eviction); err != nil {
+			return err
+		}
+		var conditions []metav1.Condition
+		if isValidEviction != nil {
+			if isValidEviction.IsValid {
+				validCondition := metav1.Condition{
+					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
+					Status:             metav1.ConditionTrue,
+					ObservedGeneration: eviction.GetGeneration(),
+					Reason:             condition.ClusterResourcePlacementEvictionValidReason,
+					Message:            isValidEviction.Msg,
+				}
+				conditions = append(conditions, validCondition)
+			} else {
+				invalidCondition := metav1.Condition{
+					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeValid),
+					Status:             metav1.ConditionFalse,
+					ObservedGeneration: eviction.GetGeneration(),
+					Reason:             condition.ClusterResourcePlacementEvictionInvalidReason,
+					Message:            isValidEviction.Msg,
+				}
+				conditions = append(conditions, invalidCondition)
+			}
+		}
+		if isExecutedEviction != nil {
+			if isExecutedEviction.IsExecuted {
+				executedCondition := metav1.Condition{
+					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
+					Status:             metav1.ConditionTrue,
+					ObservedGeneration: eviction.GetGeneration(),
+					Reason:             condition.ClusterResourcePlacementEvictionExecutedReason,
+					Message:            isExecutedEviction.Msg,
+				}
+				conditions = append(conditions, executedCondition)
+			} else {
+				notExecutedCondition := metav1.Condition{
+					Type:               string(placementv1alpha1.PlacementEvictionConditionTypeExecuted),
+					Status:             metav1.ConditionFalse,
+					ObservedGeneration: eviction.GetGeneration(),
+					Reason:             condition.ClusterResourcePlacementEvictionNotExecutedReason,
+					Message:            isExecutedEviction.Msg,
+				}
+				conditions = append(conditions, notExecutedCondition)
+			}
+		}
+		wantStatus := placementv1alpha1.PlacementEvictionStatus{
+			Conditions: conditions,
+		}
+		if diff := cmp.Diff(eviction.Status, wantStatus, evictionStatusCmpOptions...); diff != "" {
+			return fmt.Errorf("CRP eviction status diff (-got, +want): %s", diff)
+		}
+		return nil
+	}
+}
+
+// IsValidEviction captures the expected state of the eviction's Valid condition.
+type IsValidEviction struct {
+	IsValid bool
+	Msg     string
+}
+
+// IsExecutedEviction captures the expected state of the eviction's Executed condition.
+type IsExecutedEviction struct {
+	IsExecuted bool
+	Msg        string
+}
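A usage sketch for StatusUpdatedActual, mirroring the e2e pattern in placement_eviction_test.go above; the blocked-eviction expectation and the 1/1 availablePlacements/totalPlacements counts are illustrative assumptions for a PDB-protected placement, not an assertion from this change:

// Expect the eviction to be valid but blocked by the disruption budget.
statusUpdatedActual := testutilseviction.StatusUpdatedActual(
	ctx, hubClient, crpEvictionName,
	&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
	&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 1, 1)})
Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to observe blocked eviction status")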