From b1b25d8245dbb3f77d9903459757cfb7b7cf1589 Mon Sep 17 00:00:00 2001
From: Arvind Thirumurugan
Date: Mon, 27 Jan 2025 09:47:26 -0800
Subject: [PATCH] chore: increase time to fire NS alert log (#1023)

---
 .../membercluster/v1beta1/membercluster_controller.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/controllers/membercluster/v1beta1/membercluster_controller.go b/pkg/controllers/membercluster/v1beta1/membercluster_controller.go
index a4d8128e3..c0d85d9ab 100644
--- a/pkg/controllers/membercluster/v1beta1/membercluster_controller.go
+++ b/pkg/controllers/membercluster/v1beta1/membercluster_controller.go
@@ -141,9 +141,9 @@ func (r *Reconciler) handleDelete(ctx context.Context, mc *clusterv1beta1.Member
 	if !currentNS.DeletionTimestamp.IsZero() {
 		klog.V(2).InfoS("The member cluster namespace is still being deleted", "memberCluster", mcObjRef, "deleteTimestamp", currentNS.DeletionTimestamp)
 		var stuckErr error
-		if time.Now().After(currentNS.DeletionTimestamp.Add(5 * time.Minute)) {
-			// alert if the namespace is stuck in deleting for more than 5 minutes
-			stuckErr = controller.NewUnexpectedBehaviorError(fmt.Errorf("the member cluster namespace %s has been deleting since %s", namespaceName, currentNS.DeletionTimestamp.Format(time.RFC3339)))
+		if time.Now().After(currentNS.DeletionTimestamp.Add(15 * time.Minute)) {
+			// alert if the namespace is stuck in deleting for more than 15 minutes
+			stuckErr = controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to delete the member cluster namespace %s, it has been deleting since %s", namespaceName, currentNS.DeletionTimestamp.Format(time.RFC3339)))
 		}
 		return runtime.Result{RequeueAfter: time.Second}, stuckErr
 	}
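
Note (not part of the patch): a minimal sketch of the threshold check the patch adjusts, factored into a standalone helper so the 5-minute vs. 15-minute behavior is easy to see in isolation. The helper name and its extracted threshold parameter are illustrative assumptions, not code that exists in the repository.

    package main

    import (
    	"fmt"
    	"time"
    )

    // isNamespaceDeletionStuck mirrors the condition in handleDelete: a namespace
    // is considered stuck once "now" has passed deletionTimestamp + threshold.
    // Hypothetical helper, shown only to illustrate the effect of the change.
    func isNamespaceDeletionStuck(deletionTimestamp time.Time, threshold time.Duration, now time.Time) bool {
    	return now.After(deletionTimestamp.Add(threshold))
    }

    func main() {
    	deletedAt := time.Now().Add(-10 * time.Minute)
    	// With the previous 5-minute threshold, a namespace terminating for 10 minutes already alerts.
    	fmt.Println(isNamespaceDeletionStuck(deletedAt, 5*time.Minute, time.Now())) // true
    	// With the new 15-minute threshold, the same namespace does not yet trigger the alert log.
    	fmt.Println(isNamespaceDeletionStuck(deletedAt, 15*time.Minute, time.Now())) // false
    }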