From b293f6076f9b24effc0d68120b52bff2d3e2bd33 Mon Sep 17 00:00:00 2001 From: Damian Debkowski Date: Fri, 20 Dec 2024 10:47:01 -0800 Subject: [PATCH] test(cluster): improve test runtime --- internal/daemon/controller/testing.go | 2 - internal/daemon/worker/testing.go | 5 +- .../cluster/parallel/anon_listing_test.go | 1 - .../cluster/parallel/ipv6_listener_test.go | 3 - .../parallel/multi_controller_worker_test.go | 71 +++++++------------ .../parallel/recursive_anon_listing_test.go | 1 - .../cluster/parallel/unix_listener_test.go | 16 ++--- .../sequential/session_cleanup_test.go | 45 +++++++----- .../sequential/worker_bytesupdown_test.go | 6 +- .../cluster/sequential/worker_proxy_test.go | 6 +- .../cluster/sequential/worker_tagging_test.go | 8 --- .../sequential/x509_verification_test.go | 10 +-- internal/tests/helper/testing_helper.go | 56 +++++++++++---- 13 files changed, 107 insertions(+), 123 deletions(-) diff --git a/internal/daemon/controller/testing.go b/internal/daemon/controller/testing.go index abf2e76759..7803b5ba96 100644 --- a/internal/daemon/controller/testing.go +++ b/internal/daemon/controller/testing.go @@ -529,7 +529,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController { var err error tc.c, err = New(ctx, conf) if err != nil { - tc.Shutdown() t.Fatal(err) } @@ -552,7 +551,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController { if !opts.DisableAutoStart { if err := tc.c.Start(); err != nil { - tc.Shutdown() t.Fatal(err) } } diff --git a/internal/daemon/worker/testing.go b/internal/daemon/worker/testing.go index db47dece74..6014bcd8ff 100644 --- a/internal/daemon/worker/testing.go +++ b/internal/daemon/worker/testing.go @@ -72,6 +72,9 @@ func (tw *TestWorker) Name() string { func (tw *TestWorker) UpstreamAddrs() []string { var addrs []string lastStatus := tw.w.LastStatusSuccess() + if lastStatus == nil { + return addrs + } for _, v := range lastStatus.GetCalculatedUpstreams() { addrs = append(addrs, v.Address) } @@ -360,7 +363,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker { tw.w, err = New(ctx, conf) if err != nil { - tw.Shutdown() t.Fatal(err) } @@ -387,7 +389,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker { if !opts.DisableAutoStart { if err := tw.w.Start(); err != nil { - tw.Shutdown() t.Fatal(err) } } diff --git a/internal/tests/cluster/parallel/anon_listing_test.go b/internal/tests/cluster/parallel/anon_listing_test.go index 73ebefbf82..75f02745fc 100644 --- a/internal/tests/cluster/parallel/anon_listing_test.go +++ b/internal/tests/cluster/parallel/anon_listing_test.go @@ -30,7 +30,6 @@ func TestAnonListing(t *testing.T) { InitialResourcesSuffix: "1234567890", Logger: logger, }) - defer c1.Shutdown() // Anon user has list and read permissions on scopes by default, // verify that list scopes returns expected scope without setting token diff --git a/internal/tests/cluster/parallel/ipv6_listener_test.go b/internal/tests/cluster/parallel/ipv6_listener_test.go index 282c4c8924..411bf6b47f 100644 --- a/internal/tests/cluster/parallel/ipv6_listener_test.go +++ b/internal/tests/cluster/parallel/ipv6_listener_test.go @@ -33,13 +33,11 @@ func TestIPv6Listener(t *testing.T) { Config: conf, Logger: logger.Named("c1"), }) - defer c1.Shutdown() c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{ Config: conf, Logger: c1.Config().Logger.ResetNamed("c2"), }) - defer c2.Shutdown() wg := new(sync.WaitGroup) wg.Add(2) @@ -62,7 +60,6 @@ func TestIPv6Listener(t 
*testing.T) { InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...), Logger: logger.Named("w1"), }) - defer w1.Shutdown() wg.Add(2) go func() { diff --git a/internal/tests/cluster/parallel/multi_controller_worker_test.go b/internal/tests/cluster/parallel/multi_controller_worker_test.go index 621a7fc898..5d271f4ce2 100644 --- a/internal/tests/cluster/parallel/multi_controller_worker_test.go +++ b/internal/tests/cluster/parallel/multi_controller_worker_test.go @@ -4,7 +4,7 @@ package parallel import ( - "context" + "slices" "sync" "testing" "time" @@ -14,8 +14,6 @@ import ( "github.com/hashicorp/boundary/internal/daemon/worker" "github.com/hashicorp/boundary/internal/tests/helper" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -34,12 +32,10 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) { Config: conf, Logger: logger.Named("c1"), }) - defer c1.Shutdown() c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{ Logger: c1.Config().Logger.ResetNamed("c2"), }) - defer c2.Shutdown() wg := new(sync.WaitGroup) wg.Add(2) @@ -58,7 +54,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) { InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...), Logger: logger.Named("w1"), }) - defer w1.Shutdown() wg.Add(2) go func() { @@ -74,7 +69,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) { w2 := w1.AddClusterWorkerMember(t, &worker.TestWorkerOpts{ Logger: logger.Named("w2"), }) - defer w2.Shutdown() wg.Add(2) go func() { @@ -100,46 +94,39 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) { }() wg.Wait() - w1 = worker.NewTestWorker(t, &worker.TestWorkerOpts{ + w3 := worker.NewTestWorker(t, &worker.TestWorkerOpts{ WorkerAuthKms: c1.Config().WorkerAuthKms, InitialUpstreams: c1.ClusterAddrs(), - Logger: logger.Named("w1"), + Logger: logger.Named("w3"), }) - defer w1.Shutdown() wg.Add(2) go func() { defer wg.Done() - helper.ExpectWorkers(t, c1, w1, w2) + helper.ExpectWorkers(t, c1, w2, w3) }() go func() { defer wg.Done() - helper.ExpectWorkers(t, c2, w1, w2) + helper.ExpectWorkers(t, c2, w2, w3) }() wg.Wait() require.NoError(c2.Controller().Shutdown()) - wg.Add(1) - go func() { - defer wg.Done() - helper.ExpectWorkers(t, c1, w1, w2) - }() - wg.Wait() + helper.ExpectWorkers(t, c1, w2, w3) - c2 = c1.AddClusterControllerMember(t, &controller.TestControllerOpts{ - Logger: c1.Config().Logger.ResetNamed("c2"), + c3 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{ + Logger: c1.Config().Logger.ResetNamed("c3"), }) - defer c2.Shutdown() wg.Add(2) go func() { defer wg.Done() - helper.ExpectWorkers(t, c1, w1, w2) + helper.ExpectWorkers(t, c1, w2, w3) }() go func() { defer wg.Done() - helper.ExpectWorkers(t, c2, w1, w2) + helper.ExpectWorkers(t, c3, w2, w3) }() wg.Wait() } @@ -147,8 +134,7 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) { func TestWorkerAppendInitialUpstreams(t *testing.T) { t.Parallel() - ctx := context.Background() - require, assert := require.New(t), assert.New(t) + require := require.New(t) logger := hclog.New(&hclog.LoggerOptions{ Level: hclog.Trace, }) @@ -160,7 +146,6 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) { Config: conf, Logger: logger.Named("c1"), }) - defer c1.Shutdown() helper.ExpectWorkers(t, c1) @@ -171,31 +156,29 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) { Logger: logger.Named("w1"), SuccessfulStatusGracePeriodDuration: 1 
* time.Second, }) - defer w1.Shutdown() // Wait for worker to send status - cancelCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - t.Cleanup(cancel) - for { - select { - case <-time.After(500 * time.Millisecond): - case <-cancelCtx.Done(): - require.FailNow("No worker found after 10 seconds") - } - successSent := w1.Worker().LastStatusSuccess() - if successSent != nil { - break - } - } helper.ExpectWorkers(t, c1, w1) // Upstreams should be equivalent to the controller cluster addr after status updates - assert.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams) + require.Eventually(func() bool { + if w1.Worker().LastStatusSuccess() == nil { + return false + } + return slices.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams) + }, 4*time.Second, 250*time.Millisecond) // Bring down the controller - c1.Shutdown() - time.Sleep(3 * time.Second) // Wait a little longer than the grace period + wg := new(sync.WaitGroup) + wg.Add(1) + go func() { + defer wg.Done() + c1.Shutdown() + }() // Upstreams should now match initial upstreams - assert.True(strutil.EquivalentSlices(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)) + require.Eventually(func() bool { + return slices.Equal(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams) + }, 4*time.Second, 250*time.Millisecond) + wg.Wait() } diff --git a/internal/tests/cluster/parallel/recursive_anon_listing_test.go b/internal/tests/cluster/parallel/recursive_anon_listing_test.go index 9336fa713b..30e825bd7d 100644 --- a/internal/tests/cluster/parallel/recursive_anon_listing_test.go +++ b/internal/tests/cluster/parallel/recursive_anon_listing_test.go @@ -20,7 +20,6 @@ func TestListAnonymousRecursing(t *testing.T) { require := require.New(t) tc := controller.NewTestController(t, nil) - defer tc.Shutdown() client := tc.Client() token := tc.Token() diff --git a/internal/tests/cluster/parallel/unix_listener_test.go b/internal/tests/cluster/parallel/unix_listener_test.go index 4dfe0d9476..8bd587456f 100644 --- a/internal/tests/cluster/parallel/unix_listener_test.go +++ b/internal/tests/cluster/parallel/unix_listener_test.go @@ -9,7 +9,6 @@ import ( "os" "path" "testing" - "time" "github.com/hashicorp/boundary/api" "github.com/hashicorp/boundary/api/scopes" @@ -75,7 +74,6 @@ func TestUnixListener(t *testing.T) { }, }, }) - defer c1.Shutdown() helper.ExpectWorkers(t, c1) @@ -88,19 +86,17 @@ func TestUnixListener(t *testing.T) { InitialUpstreams: c1.ClusterAddrs(), Logger: logger.Named("w1"), }) - defer w1.Shutdown() - time.Sleep(10 * time.Second) helper.ExpectWorkers(t, c1, w1) require.NoError(w1.Worker().Shutdown()) - time.Sleep(10 * time.Second) + helper.ExpectWorkers(t, c1) require.NoError(c1.Controller().Shutdown()) - c1 = controller.NewTestController(t, &controller.TestControllerOpts{ + c2 := controller.NewTestController(t, &controller.TestControllerOpts{ Config: conf, - Logger: logger.Named("c1"), + Logger: logger.Named("c2"), DisableOidcAuthMethodCreation: true, EventerConfig: &event.EventerConfig{ ObservationsEnabled: true, @@ -120,15 +116,13 @@ func TestUnixListener(t *testing.T) { }, }, }) - defer c1.Shutdown() - time.Sleep(10 * time.Second) - helper.ExpectWorkers(t, c1) + helper.ExpectWorkers(t, c2) client, err := api.NewClient(nil) require.NoError(err) - addrs := c1.ApiAddrs() + addrs := c2.ApiAddrs() require.Len(addrs, 1) require.NoError(client.SetAddr(addrs[0])) diff --git a/internal/tests/cluster/sequential/session_cleanup_test.go 
b/internal/tests/cluster/sequential/session_cleanup_test.go index 3dbda6c5ad..16b0fdad99 100644 --- a/internal/tests/cluster/sequential/session_cleanup_test.go +++ b/internal/tests/cluster/sequential/session_cleanup_test.go @@ -6,6 +6,7 @@ package sequential import ( "context" "net" + "sync" "testing" "time" @@ -107,7 +108,7 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin ) require.NoError(err) t.Cleanup(func() { - proxy.Close() + _ = proxy.Close() }) require.NotEmpty(t, proxy.ListenerAddr()) @@ -118,10 +119,6 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin SuccessfulStatusGracePeriodDuration: workerGracePeriod(burdenCase), }) - err = w1.Worker().WaitForNextSuccessfulStatusUpdate() - require.NoError(err) - err = c1.WaitForNextWorkerStatusUpdate(w1.Name()) - require.NoError(err) helper.ExpectWorkers(t, c1, w1) // Use an independent context for test things that take a context so @@ -237,8 +234,18 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing PublicClusterAddr: pl2.Addr().String(), WorkerStatusGracePeriodDuration: controllerGracePeriod(burdenCase), }) - helper.ExpectWorkers(t, c1) - helper.ExpectWorkers(t, c2) + + wg := new(sync.WaitGroup) + wg.Add(2) + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c1) + }() + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c2) + }() + wg.Wait() // ************* // ** Proxy 1 ** @@ -251,7 +258,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing ) require.NoError(err) t.Cleanup(func() { - p1.Close() + _ = p1.Close() }) require.NotEmpty(t, p1.ListenerAddr()) @@ -266,7 +273,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing ) require.NoError(err) t.Cleanup(func() { - p2.Close() + _ = p2.Close() }) require.NotEmpty(t, p2.ListenerAddr()) @@ -282,16 +289,16 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing // Worker needs some extra time to become ready, otherwise for a // currently-unknown reason the next successful status update fails // because it's not sent before the context times out. - time.Sleep(5 * time.Second) - - err = w1.Worker().WaitForNextSuccessfulStatusUpdate() - require.NoError(err) - err = c1.WaitForNextWorkerStatusUpdate(w1.Name()) - require.NoError(err) - err = c2.WaitForNextWorkerStatusUpdate(w1.Name()) - require.NoError(err) - helper.ExpectWorkers(t, c1, w1) - helper.ExpectWorkers(t, c2, w1) + wg.Add(2) + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c1, w1) + }() + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c2, w1) + }() + wg.Wait() // Use an independent context for test things that take a context so // that we aren't tied to any timeouts in the controller, etc. 
This diff --git a/internal/tests/cluster/sequential/worker_bytesupdown_test.go b/internal/tests/cluster/sequential/worker_bytesupdown_test.go index 400653380f..d56a45055f 100644 --- a/internal/tests/cluster/sequential/worker_bytesupdown_test.go +++ b/internal/tests/cluster/sequential/worker_bytesupdown_test.go @@ -106,8 +106,7 @@ func TestWorkerBytesUpDown(t *testing.T) { conn.TestSendRecvAll(t) // Wait for next status and then check DB for bytes up and down - require.NoError(w1.Worker().WaitForNextSuccessfulStatusUpdate()) - require.NoError(c1.WaitForNextWorkerStatusUpdate(w1.Name())) + helper.ExpectWorkers(t, c1, w1) dbConns1, err := c1.ConnectionsRepo().ListConnectionsBySessionId(ctx, sess.SessionId) require.NoError(err) @@ -127,8 +126,7 @@ func TestWorkerBytesUpDown(t *testing.T) { // update and check everything again. conn.TestSendRecvAll(t) require.NoError(conn.Close()) - require.NoError(w1.Worker().WaitForNextSuccessfulStatusUpdate()) - require.NoError(c1.WaitForNextWorkerStatusUpdate(w1.Name())) + helper.ExpectWorkers(t, c1, w1) dbConns2, err := c1.ConnectionsRepo().ListConnectionsBySessionId(ctx, sess.SessionId) require.NoError(err) diff --git a/internal/tests/cluster/sequential/worker_proxy_test.go b/internal/tests/cluster/sequential/worker_proxy_test.go index f4adc84394..b7714fe860 100644 --- a/internal/tests/cluster/sequential/worker_proxy_test.go +++ b/internal/tests/cluster/sequential/worker_proxy_test.go @@ -51,7 +51,6 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) { Logger: logger.Named("c1"), WorkerStatusGracePeriodDuration: helper.DefaultWorkerStatusGracePeriod, }) - t.Cleanup(c1.Shutdown) helper.ExpectWorkers(t, c1) @@ -63,9 +62,7 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) { dawdle.WithWbufSize(256), ) require.NoError(err) - t.Cleanup(func() { - proxy.Close() - }) + t.Cleanup(func() { _ = proxy.Close() }) require.NotEmpty(t, proxy.ListenerAddr()) w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{ @@ -75,7 +72,6 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) { SuccessfulStatusGracePeriodDuration: helper.DefaultWorkerStatusGracePeriod, EnableIPv6: true, }) - t.Cleanup(w1.Shutdown) helper.ExpectWorkers(t, c1, w1) diff --git a/internal/tests/cluster/sequential/worker_tagging_test.go b/internal/tests/cluster/sequential/worker_tagging_test.go index c714e97c09..b0a2a2db77 100644 --- a/internal/tests/cluster/sequential/worker_tagging_test.go +++ b/internal/tests/cluster/sequential/worker_tagging_test.go @@ -33,7 +33,6 @@ func TestWorkerTagging(t *testing.T) { InitialResourcesSuffix: "1234567890", Logger: logger.Named("c1"), }) - defer c1.Shutdown() ctx := c1.Context() @@ -62,9 +61,7 @@ func TestWorkerTagging(t *testing.T) { InitialUpstreams: c1.ClusterAddrs(), Logger: logger.Named("w1"), }) - defer w1.Shutdown() w1Addr := w1.ProxyAddrs()[0] - w1.Worker().WaitForNextSuccessfulStatusUpdate() // Worker 2 conf, err = config.DevWorker() @@ -80,9 +77,7 @@ func TestWorkerTagging(t *testing.T) { InitialUpstreams: c1.ClusterAddrs(), Logger: logger.Named("w2"), }) - defer w2.Shutdown() w2Addr := w2.ProxyAddrs()[0] - w2.Worker().WaitForNextSuccessfulStatusUpdate() // Worker 3 conf, err = config.DevWorker() @@ -98,11 +93,8 @@ func TestWorkerTagging(t *testing.T) { InitialUpstreams: c1.ClusterAddrs(), Logger: logger.Named("w3"), }) - defer w3.Shutdown() w3Addr := w3.ProxyAddrs()[0] - w3.Worker().WaitForNextSuccessfulStatusUpdate() - helper.ExpectWorkers(t, c1, w1, w2, w3) // Ensure we are using the OSS filter, which uses egress only 
for worker selection diff --git a/internal/tests/cluster/sequential/x509_verification_test.go b/internal/tests/cluster/sequential/x509_verification_test.go index cb8297e89b..d31a392fb3 100644 --- a/internal/tests/cluster/sequential/x509_verification_test.go +++ b/internal/tests/cluster/sequential/x509_verification_test.go @@ -15,7 +15,6 @@ import ( "net/http" "sync" "testing" - "time" apiproxy "github.com/hashicorp/boundary/api/proxy" "github.com/hashicorp/boundary/api/targets" @@ -64,10 +63,6 @@ func TestCustomX509Verification_Client(t *testing.T) { }) // Give time for it to connect - time.Sleep(10 * time.Second) - - err = w1.Worker().WaitForNextSuccessfulStatusUpdate() - req.NoError(err) helper.ExpectWorkers(t, c1, w1) // Connect target @@ -233,10 +228,7 @@ func testCustomX509Verification_Server(ec event.TestConfig, certPool *x509.CertP w1.Worker().TestOverrideX509VerifyDnsName = dnsName // Give time for it to connect - time.Sleep(10 * time.Second) - - err = w1.Worker().WaitForNextSuccessfulStatusUpdate() - req.NoError(err) + helper.ExpectWorkers(t, c1, w1) // Connect target client := c1.Client() diff --git a/internal/tests/helper/testing_helper.go b/internal/tests/helper/testing_helper.go index ed916a4421..862723fa48 100644 --- a/internal/tests/helper/testing_helper.go +++ b/internal/tests/helper/testing_helper.go @@ -436,26 +436,52 @@ func NewTestTcpServer(t *testing.T) *TestTcpServer { require.NoError(err) go ts.run() + + require.Eventually(func() bool { + c, err := net.Dial(ts.ln.Addr().Network(), ts.ln.Addr().String()) + if err != nil { + return false + } + _, err = c.Write([]byte{}) + if err != nil { + return false + } + _, err = c.Read([]byte{}) + if err != nil { + return false + } + return true + }, 10*time.Second, 100*time.Millisecond) + return ts } // ExpectWorkers is a blocking call, where the method validates that the expected workers // can be found in the controllers status update. If the provided list of workers is empty, -// this method will sleep for 10 seconds and then validate that the controller worker status -// is empty. +// this method will validate that the controller worker status is empty. 
func ExpectWorkers(t *testing.T, c *controller.TestController, workers ...*worker.TestWorker) { // validate the controller has no reported workers if len(workers) == 0 { - c.Controller().WorkerStatusUpdateTimes().Clear() - time.Sleep(10 * time.Second) - assert.Eventually(t, func() bool { - empty := true - c.Controller().WorkerStatusUpdateTimes().Range(func(k, v any) bool { - empty = false - return false + require.Eventually(t, func() bool { + updateTimes := c.Controller().WorkerStatusUpdateTimes() + workerMap := map[string]*worker.TestWorker{} + for _, w := range workers { + workerMap[w.Name()] = w + } + updateTimes.Range(func(k, v any) bool { + require.NotNil(t, k) + require.NotNil(t, v) + if workerMap[k.(string)] == nil { + // We don't remove from updateTimes currently so if we're not + // expecting it we'll see an out-of-date entry + return true + } + assert.WithinDuration(t, time.Now(), v.(time.Time), 30*time.Second) + delete(workerMap, k.(string)) + return true }) - return empty - }, 30*time.Second, 2*time.Second) + return len(workerMap) == 0 + }, 30*time.Second, 250*time.Millisecond) return } @@ -465,9 +491,11 @@ func ExpectWorkers(t *testing.T, c *controller.TestController, workers ...*worke wg.Add(1) go func() { defer wg.Done() - require.NoError(t, c.WaitForNextWorkerStatusUpdate(w.Name())) - _, ok := c.Controller().WorkerStatusUpdateTimes().Load(w.Name()) - assert.True(t, ok) + require.NoError(t, w.Worker().WaitForNextSuccessfulStatusUpdate()) + require.Eventually(t, func() bool { + _, ok := c.Controller().WorkerStatusUpdateTimes().Load(w.Name()) + return ok + }, 30*time.Second, 100*time.Millisecond) }() } wg.Wait()
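
A minimal, illustrative sketch of how a cluster test exercises the reworked helper.ExpectWorkers in place of fixed sleeps; the test name TestWorkerRegistration and the single-controller/single-worker topology are assumptions for illustration only and are not part of this patch:

package parallel

import (
	"testing"

	"github.com/hashicorp/boundary/internal/daemon/controller"
	"github.com/hashicorp/boundary/internal/daemon/worker"
	"github.com/hashicorp/boundary/internal/tests/helper"
	"github.com/hashicorp/go-hclog"
)

// TestWorkerRegistration (hypothetical) starts a controller with no workers,
// asserts the controller reports an empty worker set, then adds one worker
// and waits for it to appear via status updates rather than sleeping.
func TestWorkerRegistration(t *testing.T) {
	t.Parallel()
	logger := hclog.New(&hclog.LoggerOptions{Level: hclog.Trace})

	c1 := controller.NewTestController(t, &controller.TestControllerOpts{
		Logger: logger.Named("c1"),
	})
	// No workers registered yet; ExpectWorkers polls the controller's
	// worker status map instead of sleeping for a fixed interval.
	helper.ExpectWorkers(t, c1)

	w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
		WorkerAuthKms:    c1.Config().WorkerAuthKms,
		InitialUpstreams: c1.ClusterAddrs(),
		Logger:           logger.Named("w1"),
	})
	// Blocks until w1's status update is visible to c1, or fails the test.
	helper.ExpectWorkers(t, c1, w1)
}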