diff --git a/internal/daemon/controller/testing.go b/internal/daemon/controller/testing.go
index abf2e76759..7803b5ba96 100644
--- a/internal/daemon/controller/testing.go
+++ b/internal/daemon/controller/testing.go
@@ -529,7 +529,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {
 	var err error
 	tc.c, err = New(ctx, conf)
 	if err != nil {
-		tc.Shutdown()
 		t.Fatal(err)
 	}
 
@@ -552,7 +551,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {
 
 	if !opts.DisableAutoStart {
 		if err := tc.c.Start(); err != nil {
-			tc.Shutdown()
 			t.Fatal(err)
 		}
 	}
diff --git a/internal/daemon/worker/testing.go b/internal/daemon/worker/testing.go
index 7083becb7b..0a8646074b 100644
--- a/internal/daemon/worker/testing.go
+++ b/internal/daemon/worker/testing.go
@@ -72,6 +72,9 @@ func (tw *TestWorker) Name() string {
 func (tw *TestWorker) UpstreamAddrs() []string {
 	var addrs []string
 	lastStatus := tw.w.LastStatusSuccess()
+	if lastStatus == nil {
+		return addrs
+	}
 	for _, v := range lastStatus.GetCalculatedUpstreams() {
 		addrs = append(addrs, v.Address)
 	}
@@ -360,7 +363,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {
 
 	tw.w, err = New(ctx, conf)
 	if err != nil {
-		tw.Shutdown()
 		t.Fatal(err)
 	}
 
@@ -387,7 +389,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {
 
 	if !opts.DisableAutoStart {
 		if err := tw.w.Start(); err != nil {
-			tw.Shutdown()
 			t.Fatal(err)
 		}
 	}
diff --git a/internal/tests/cluster/anon_listing_test.go b/internal/tests/cluster/parallel/anon_listing_test.go
similarity index 96%
rename from internal/tests/cluster/anon_listing_test.go
rename to internal/tests/cluster/parallel/anon_listing_test.go
index 71535b5842..95efeda07f 100644
--- a/internal/tests/cluster/anon_listing_test.go
+++ b/internal/tests/cluster/parallel/anon_listing_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
 	"testing"
@@ -15,6 +15,7 @@ import (
 )
 
 func TestAnonListing(t *testing.T) {
+	t.Parallel()
 	require := require.New(t)
 	logger := hclog.New(&hclog.LoggerOptions{
 		Level: hclog.Trace,
@@ -28,7 +29,6 @@ func TestAnonListing(t *testing.T) {
 		InitialResourcesSuffix: "1234567890",
 		Logger:                 logger,
 	})
-	defer c1.Shutdown()
 
 	// Anon user has list and read permissions on scopes by default,
 	// verify that list scopes returns expected scope without setting token
diff --git a/internal/tests/cluster/parallel/doc.go b/internal/tests/cluster/parallel/doc.go
new file mode 100644
index 0000000000..79b1de0e5d
--- /dev/null
+++ b/internal/tests/cluster/parallel/doc.go
@@ -0,0 +1,13 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+/*
+This package includes a set of tests that run in parallel.
+A test should only be added to this package if it can be
+completely isolated from other tests that currently exist
+in this package. If a test is consistently failing due to
+not having an isolated environment, it should be moved to
+the adjacent "sequential" package.
+*/
+
+package parallel
diff --git a/internal/tests/cluster/ipv6_listener_test.go b/internal/tests/cluster/parallel/ipv6_listener_test.go
similarity index 96%
rename from internal/tests/cluster/ipv6_listener_test.go
rename to internal/tests/cluster/parallel/ipv6_listener_test.go
index 9d45422193..8c818e08bc 100644
--- a/internal/tests/cluster/ipv6_listener_test.go
+++ b/internal/tests/cluster/parallel/ipv6_listener_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
 	"context"
@@ -32,13 +32,11 @@ func TestIPv6Listener(t *testing.T) {
 		Config: conf,
 		Logger: logger.Named("c1"),
 	})
-	defer c1.Shutdown()
 
 	c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
 		Config: conf,
 		Logger: c1.Config().Logger.ResetNamed("c2"),
 	})
-	defer c2.Shutdown()
 
 	wg := new(sync.WaitGroup)
 	wg.Add(2)
@@ -61,7 +59,6 @@ func TestIPv6Listener(t *testing.T) {
 		InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
 		Logger:           logger.Named("w1"),
 	})
-	defer w1.Shutdown()
 
 	wg.Add(2)
 	go func() {
diff --git a/internal/tests/cluster/multi_controller_worker_test.go b/internal/tests/cluster/parallel/multi_controller_worker_test.go
similarity index 70%
rename from internal/tests/cluster/multi_controller_worker_test.go
rename to internal/tests/cluster/parallel/multi_controller_worker_test.go
index 9380fda0b9..b80cf42ebc 100644
--- a/internal/tests/cluster/multi_controller_worker_test.go
+++ b/internal/tests/cluster/parallel/multi_controller_worker_test.go
@@ -1,10 +1,10 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
-	"context"
+	"slices"
 	"sync"
 	"testing"
 	"time"
@@ -14,12 +14,11 @@ import (
 	"github.com/hashicorp/boundary/internal/daemon/worker"
 	"github.com/hashicorp/boundary/internal/tests/helper"
 	"github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-secure-stdlib/strutil"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestMultiControllerMultiWorkerConnections(t *testing.T) {
+	t.Parallel()
 	require := require.New(t)
 	logger := hclog.New(&hclog.LoggerOptions{
 		Level: hclog.Trace,
@@ -32,12 +31,10 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
 		Config: conf,
 		Logger: logger.Named("c1"),
 	})
-	defer c1.Shutdown()
 
 	c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
 		Logger: c1.Config().Logger.ResetNamed("c2"),
 	})
-	defer c2.Shutdown()
 
 	wg := new(sync.WaitGroup)
 	wg.Add(2)
@@ -56,7 +53,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
 		InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
 		Logger:           logger.Named("w1"),
 	})
-	defer w1.Shutdown()
 
 	wg.Add(2)
 	go func() {
@@ -72,7 +68,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
 	w2 := w1.AddClusterWorkerMember(t, &worker.TestWorkerOpts{
 		Logger: logger.Named("w2"),
 	})
-	defer w2.Shutdown()
 
 	wg.Add(2)
 	go func() {
@@ -98,53 +93,46 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
 	}()
 	wg.Wait()
 
-	w1 = worker.NewTestWorker(t, &worker.TestWorkerOpts{
+	w3 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
 		WorkerAuthKms:    c1.Config().WorkerAuthKms,
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w1"),
 	})
-	defer w1.Shutdown()
 
 	wg.Add(2)
 	go func() {
 		defer wg.Done()
-		helper.ExpectWorkers(t, c1, w1, w2)
+		helper.ExpectWorkers(t, c1, w2, w3)
 	}()
 	go func() {
 		defer wg.Done()
-		helper.ExpectWorkers(t, c2, w1, w2)
+		helper.ExpectWorkers(t, c2, w2, w3)
 	}()
 	wg.Wait()
 
 	require.NoError(c2.Controller().Shutdown())
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		helper.ExpectWorkers(t, c1, w1, w2)
-	}()
-	wg.Wait()
+	helper.ExpectWorkers(t, c1, w2, w3)
 
-	c2 = c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
-		Logger: c1.Config().Logger.ResetNamed("c2"),
+	c3 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
+		Logger: c1.Config().Logger.ResetNamed("c3"),
 	})
-	defer c2.Shutdown()
 
 	wg.Add(2)
 	go func() {
 		defer wg.Done()
-		helper.ExpectWorkers(t, c1, w1, w2)
+		helper.ExpectWorkers(t, c1, w2, w3)
 	}()
 	go func() {
 		defer wg.Done()
-		helper.ExpectWorkers(t, c2, w1, w2)
+		helper.ExpectWorkers(t, c3, w2, w3)
 	}()
 	wg.Wait()
 }
 
 func TestWorkerAppendInitialUpstreams(t *testing.T) {
-	ctx := context.Background()
-	require, assert := require.New(t), assert.New(t)
+	t.Parallel()
+	require := require.New(t)
 	logger := hclog.New(&hclog.LoggerOptions{
 		Level: hclog.Trace,
 	})
@@ -156,7 +144,7 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
 		Config: conf,
 		Logger: logger.Named("c1"),
 	})
-	defer c1.Shutdown()
+	t.Cleanup(c1.Shutdown)
 
 	helper.ExpectWorkers(t, c1)
 
@@ -167,31 +155,23 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
 		Logger:                              logger.Named("w1"),
 		SuccessfulStatusGracePeriodDuration: 1 * time.Second,
 	})
-	defer w1.Shutdown()
+	t.Cleanup(w1.Shutdown)
 
 	// Wait for worker to send status
-	cancelCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
-	t.Cleanup(cancel)
-	for {
-		select {
-		case <-time.After(500 * time.Millisecond):
-		case <-cancelCtx.Done():
-			require.FailNow("No worker found after 10 seconds")
-		}
-		successSent := w1.Worker().LastStatusSuccess()
-		if successSent != nil {
-			break
-		}
-	}
 	helper.ExpectWorkers(t, c1, w1)
 
 	// Upstreams should be equivalent to the controller cluster addr after status updates
-	assert.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+	require.Eventually(func() bool {
+		if w1.Worker().LastStatusSuccess() == nil {
+			return false
+		}
+		return slices.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+	}, 4*time.Second, 250*time.Millisecond)
 
 	// Bring down the controller
 	c1.Shutdown()
-	time.Sleep(3 * time.Second) // Wait a little longer than the grace period
 
 	// Upstreams should now match initial upstreams
-	assert.True(strutil.EquivalentSlices(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams))
+	require.Eventually(func() bool {
+		return slices.Equal(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+	}, 4*time.Second, 250*time.Millisecond)
 }
diff --git a/internal/tests/cluster/package_registry_test.go b/internal/tests/cluster/parallel/package_registry_test.go
similarity index 93%
rename from internal/tests/cluster/package_registry_test.go
rename to internal/tests/cluster/parallel/package_registry_test.go
index cb736a553a..72d2b96919 100644
--- a/internal/tests/cluster/package_registry_test.go
+++ b/internal/tests/cluster/parallel/package_registry_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
 	// Enable tcp target support.
diff --git a/internal/tests/cluster/recursive_anon_listing_test.go b/internal/tests/cluster/parallel/recursive_anon_listing_test.go
similarity index 98%
rename from internal/tests/cluster/recursive_anon_listing_test.go
rename to internal/tests/cluster/parallel/recursive_anon_listing_test.go
index d749039453..975dca41f5 100644
--- a/internal/tests/cluster/recursive_anon_listing_test.go
+++ b/internal/tests/cluster/parallel/recursive_anon_listing_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
 	"strings"
@@ -16,9 +16,9 @@ import (
 
 // This test validates the fix for ICU-2301
 func TestListAnonymousRecursing(t *testing.T) {
+	t.Parallel()
 	require := require.New(t)
 	tc := controller.NewTestController(t, nil)
-	defer tc.Shutdown()
 	client := tc.Client()
 	token := tc.Token()
 
diff --git a/internal/tests/cluster/unix_listener_test.go b/internal/tests/cluster/parallel/unix_listener_test.go
similarity index 90%
rename from internal/tests/cluster/unix_listener_test.go
rename to internal/tests/cluster/parallel/unix_listener_test.go
index cf475096ad..468d06e95e 100644
--- a/internal/tests/cluster/unix_listener_test.go
+++ b/internal/tests/cluster/parallel/unix_listener_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package parallel
 
 import (
 	"bytes"
@@ -9,7 +9,6 @@ import (
 	"os"
 	"path"
 	"testing"
-	"time"
 
 	"github.com/hashicorp/boundary/api"
 	"github.com/hashicorp/boundary/api/scopes"
@@ -23,6 +22,7 @@ import (
 )
 
 func TestUnixListener(t *testing.T) {
+	t.Parallel()
 	require := require.New(t)
 	buf := new(bytes.Buffer)
 	logger := hclog.New(&hclog.LoggerOptions{
@@ -73,7 +73,6 @@ func TestUnixListener(t *testing.T) {
 			},
 		},
 	})
-	defer c1.Shutdown()
 
 	helper.ExpectWorkers(t, c1)
 
@@ -86,17 +85,14 @@ func TestUnixListener(t *testing.T) {
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w1"),
 	})
-	defer w1.Shutdown()
 
-	time.Sleep(10 * time.Second)
 	helper.ExpectWorkers(t, c1, w1)
 
 	require.NoError(w1.Worker().Shutdown())
-	time.Sleep(10 * time.Second)
 	helper.ExpectWorkers(t, c1)
 
 	require.NoError(c1.Controller().Shutdown())
-	c1 = controller.NewTestController(t, &controller.TestControllerOpts{
+	c2 := controller.NewTestController(t, &controller.TestControllerOpts{
 		Config: conf,
 		Logger: logger.Named("c1"),
 		DisableOidcAuthMethodCreation: true,
@@ -118,15 +114,13 @@ func TestUnixListener(t *testing.T) {
 			},
 		},
 	})
-	defer c1.Shutdown()
 
-	time.Sleep(10 * time.Second)
-	helper.ExpectWorkers(t, c1)
+	helper.ExpectWorkers(t, c2)
 
 	client, err := api.NewClient(nil)
 	require.NoError(err)
 
-	addrs := c1.ApiAddrs()
+	addrs := c2.ApiAddrs()
 	require.Len(addrs, 1)
 	require.NoError(client.SetAddr(addrs[0]))
 
diff --git a/internal/tests/cluster/sequential/doc.go b/internal/tests/cluster/sequential/doc.go
new file mode 100644
index 0000000000..71b91ba7db
--- /dev/null
+++ b/internal/tests/cluster/sequential/doc.go
@@ -0,0 +1,13 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+/*
+This package includes a set of tests that run sequentially.
+A test should be added to this package if it needs to be completely
+isolated from other tests. Newly added tests should not enable the
+testing parallel option. Please include a comment in the unit test
+that explains why the test must be run sequentially. Tests that can
+be run in parallel should be moved to the adjacent "parallel" package.
+*/
+
+package sequential
diff --git a/internal/tests/cluster/sequential/package_registry_test.go b/internal/tests/cluster/sequential/package_registry_test.go
new file mode 100644
index 0000000000..f3313d626c
--- /dev/null
+++ b/internal/tests/cluster/sequential/package_registry_test.go
@@ -0,0 +1,10 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package sequential
+
+import (
+	// Enable tcp target support.
+ _ "github.com/hashicorp/boundary/internal/daemon/controller/handlers/targets/tcp" + _ "github.com/hashicorp/boundary/internal/target/tcp" +) diff --git a/internal/tests/cluster/session_cleanup_test.go b/internal/tests/cluster/sequential/session_cleanup_test.go similarity index 93% rename from internal/tests/cluster/session_cleanup_test.go rename to internal/tests/cluster/sequential/session_cleanup_test.go index 71e3d39707..b336fe7df5 100644 --- a/internal/tests/cluster/session_cleanup_test.go +++ b/internal/tests/cluster/sequential/session_cleanup_test.go @@ -1,11 +1,12 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -package cluster +package sequential import ( "context" "net" + "sync" "testing" "time" @@ -107,7 +108,7 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin ) require.NoError(err) t.Cleanup(func() { - proxy.Close() + _ = proxy.Close() }) require.NotEmpty(t, proxy.ListenerAddr()) @@ -117,11 +118,6 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin Logger: logger.Named("w1"), SuccessfulStatusGracePeriodDuration: workerGracePeriod(burdenCase), }) - - err = w1.Worker().WaitForNextSuccessfulStatusUpdate() - require.NoError(err) - err = c1.WaitForNextWorkerStatusUpdate(w1.Name()) - require.NoError(err) helper.ExpectWorkers(t, c1, w1) // Use an independent context for test things that take a context so @@ -163,7 +159,6 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testin // Wait on worker, then check controller sess.ExpectConnectionStateOnWorker(ctx, t, w1, session.StatusClosed) sess.ExpectConnectionStateOnController(ctx, t, c1.Controller().ConnectionRepoFn, session.StatusConnected) - default: // Should be closed on both worker and controller. Wait on // worker then check controller. @@ -237,8 +232,18 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing PublicClusterAddr: pl2.Addr().String(), WorkerStatusGracePeriodDuration: controllerGracePeriod(burdenCase), }) - helper.ExpectWorkers(t, c1) - helper.ExpectWorkers(t, c2) + + wg := new(sync.WaitGroup) + wg.Add(2) + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c1) + }() + go func() { + defer wg.Done() + helper.ExpectWorkers(t, c2) + }() + wg.Wait() // ************* // ** Proxy 1 ** @@ -251,7 +256,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing ) require.NoError(err) t.Cleanup(func() { - p1.Close() + _ = p1.Close() }) require.NotEmpty(t, p1.ListenerAddr()) @@ -266,7 +271,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing ) require.NoError(err) t.Cleanup(func() { - p2.Close() + _ = p2.Close() }) require.NotEmpty(t, p2.ListenerAddr()) @@ -279,19 +284,17 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing Logger: logger.Named("w1"), SuccessfulStatusGracePeriodDuration: workerGracePeriod(burdenCase), }) - // Worker needs some extra time to become ready, otherwise for a - // currently-unknown reason the next successful status update fails - // because it's not sent before the context times out. 
-	time.Sleep(5 * time.Second)
-	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-	require.NoError(err)
-	err = c1.WaitForNextWorkerStatusUpdate(w1.Name())
-	require.NoError(err)
-	err = c2.WaitForNextWorkerStatusUpdate(w1.Name())
-	require.NoError(err)
-	helper.ExpectWorkers(t, c1, w1)
-	helper.ExpectWorkers(t, c2, w1)
+	wg.Add(2)
+	go func() {
+		defer wg.Done()
+		helper.ExpectWorkers(t, c1, w1)
+	}()
+	go func() {
+		defer wg.Done()
+		helper.ExpectWorkers(t, c2, w1)
+	}()
+	wg.Wait()
 
 	// Use an independent context for test things that take a context so
 	// that we aren't tied to any timeouts in the controller, etc. This
@@ -327,16 +330,14 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing
 	// successful status report to ensure this.
 	event.WriteSysEvent(ctx, op, "pausing link to controller #1")
 	p1.Pause()
-	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-	require.NoError(err)
+	helper.ExpectWorkers(t, c2, w1)
 	sConn.TestSendRecvAll(t)
 
 	// Resume first controller, pause second. This one should work too.
 	event.WriteSysEvent(ctx, op, "pausing link to controller #2, resuming #1")
 	p1.Resume()
 	p2.Pause()
-	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-	require.NoError(err)
+	helper.ExpectWorkers(t, c1, w1)
 	sConn.TestSendRecvAll(t)
 
 	// Kill the first controller connection again. This one should fail
@@ -366,6 +367,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing
 	event.WriteSysEvent(ctx, op, "resuming connections to both controllers")
 	p1.Resume()
 	p2.Resume()
+
 	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
 	require.NoError(err)
 
diff --git a/internal/tests/cluster/worker_bytesupdown_test.go b/internal/tests/cluster/sequential/worker_bytesupdown_test.go
similarity index 99%
rename from internal/tests/cluster/worker_bytesupdown_test.go
rename to internal/tests/cluster/sequential/worker_bytesupdown_test.go
index 77e734587f..400653380f 100644
--- a/internal/tests/cluster/worker_bytesupdown_test.go
+++ b/internal/tests/cluster/sequential/worker_bytesupdown_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package sequential
 
 import (
 	"context"
diff --git a/internal/tests/cluster/worker_proxy_test.go b/internal/tests/cluster/sequential/worker_proxy_test.go
similarity index 97%
rename from internal/tests/cluster/worker_proxy_test.go
rename to internal/tests/cluster/sequential/worker_proxy_test.go
index 7948aebf2c..b7714fe860 100644
--- a/internal/tests/cluster/worker_proxy_test.go
+++ b/internal/tests/cluster/sequential/worker_proxy_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package sequential
 
 import (
 	"context"
@@ -51,7 +51,6 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) {
 		Logger:                          logger.Named("c1"),
 		WorkerStatusGracePeriodDuration: helper.DefaultWorkerStatusGracePeriod,
 	})
-	t.Cleanup(c1.Shutdown)
 
 	helper.ExpectWorkers(t, c1)
 
@@ -63,9 +62,7 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) {
 		dawdle.WithWbufSize(256),
 	)
 	require.NoError(err)
-	t.Cleanup(func() {
-		proxy.Close()
-	})
+	t.Cleanup(func() { _ = proxy.Close() })
 	require.NotEmpty(t, proxy.ListenerAddr())
 
 	w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
@@ -75,7 +72,6 @@ func TestWorkerSessionProxyMultipleConnections(t *testing.T) {
 		SuccessfulStatusGracePeriodDuration: helper.DefaultWorkerStatusGracePeriod,
 		EnableIPv6:                          true,
 	})
-	t.Cleanup(w1.Shutdown)
 
 	helper.ExpectWorkers(t, c1, w1)
 
diff --git a/internal/tests/cluster/worker_tagging_test.go b/internal/tests/cluster/sequential/worker_tagging_test.go
similarity index 98%
rename from internal/tests/cluster/worker_tagging_test.go
rename to internal/tests/cluster/sequential/worker_tagging_test.go
index 1e3e35b77b..5e4a1f0590 100644
--- a/internal/tests/cluster/worker_tagging_test.go
+++ b/internal/tests/cluster/sequential/worker_tagging_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package sequential
 
 import (
 	"testing"
@@ -33,7 +33,6 @@ func TestWorkerTagging(t *testing.T) {
 		InitialResourcesSuffix: "1234567890",
 		Logger:                 logger.Named("c1"),
 	})
-	defer c1.Shutdown()
 
 	ctx := c1.Context()
 
@@ -62,7 +61,6 @@ func TestWorkerTagging(t *testing.T) {
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w1"),
 	})
-	defer w1.Shutdown()
 	w1Addr := w1.ProxyAddrs()[0]
 	w1.Worker().WaitForNextSuccessfulStatusUpdate()
 
@@ -80,7 +78,6 @@ func TestWorkerTagging(t *testing.T) {
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w2"),
 	})
-	defer w2.Shutdown()
 	w2Addr := w2.ProxyAddrs()[0]
 	w2.Worker().WaitForNextSuccessfulStatusUpdate()
 
@@ -98,7 +95,6 @@ func TestWorkerTagging(t *testing.T) {
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w3"),
 	})
-	defer w3.Shutdown()
 	w3Addr := w3.ProxyAddrs()[0]
 	w3.Worker().WaitForNextSuccessfulStatusUpdate()
 
diff --git a/internal/tests/cluster/x509_verification_test.go b/internal/tests/cluster/sequential/x509_verification_test.go
similarity index 96%
rename from internal/tests/cluster/x509_verification_test.go
rename to internal/tests/cluster/sequential/x509_verification_test.go
index d06e7759aa..705c74b9fa 100644
--- a/internal/tests/cluster/x509_verification_test.go
+++ b/internal/tests/cluster/sequential/x509_verification_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: BUSL-1.1
 
-package cluster
+package sequential
 
 import (
 	"bytes"
@@ -15,7 +15,6 @@ import (
 	"net/http"
 	"sync"
 	"testing"
-	"time"
 
 	apiproxy "github.com/hashicorp/boundary/api/proxy"
 	"github.com/hashicorp/boundary/api/targets"
@@ -62,12 +61,6 @@ func TestCustomX509Verification_Client(t *testing.T) {
 		InitialUpstreams: c1.ClusterAddrs(),
 		Logger:           logger.Named("w1"),
 	})
-
-	// Give time for it to connect
-	time.Sleep(10 * time.Second)
-
-	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-	req.NoError(err)
 	helper.ExpectWorkers(t, c1, w1)
 
 	// Connect target
@@ -232,11 +225,7 @@ func testCustomX509Verification_Server(ec event.TestConfig, certPool *x509.CertP
 	w1.Worker().TestOverrideX509VerifyCertPool = certPool
 	w1.Worker().TestOverrideX509VerifyDnsName = dnsName
 
-	// Give time for it to connect
-	time.Sleep(10 * time.Second)
-
-	err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-	req.NoError(err)
+	helper.ExpectWorkers(t, c1, w1)
 
 	// Connect target
 	client := c1.Client()
diff --git a/internal/tests/helper/testing_helper.go b/internal/tests/helper/testing_helper.go
index ed916a4421..786ef708be 100644
--- a/internal/tests/helper/testing_helper.go
+++ b/internal/tests/helper/testing_helper.go
@@ -436,26 +436,49 @@ func NewTestTcpServer(t *testing.T) *TestTcpServer {
 	require.NoError(err)
 
 	go ts.run()
+
+	require.Eventually(func() bool {
+		c, err := net.Dial(ts.ln.Addr().Network(), ts.ln.Addr().String())
+		if err != nil {
+			return false
+		}
+		_, err = c.Write([]byte{})
+		if err != nil {
+			return false
+		}
+		_, err = c.Read([]byte{})
+		if err != nil {
+			return false
+		}
+		return true
+	}, 10*time.Second, 100*time.Millisecond)
+
 	return ts
 }
 
 // ExpectWorkers is a blocking call, where the method validates that the expected workers
 // can be found in the controllers status update. If the provided list of workers is empty,
-// this method will sleep for 10 seconds and then validate that the controller worker status
-// is empty.
+// this method will validate that the controller worker status is empty.
 func ExpectWorkers(t *testing.T, c *controller.TestController, workers ...*worker.TestWorker) {
-	// validate the controller has no reported workers
 	if len(workers) == 0 {
-		c.Controller().WorkerStatusUpdateTimes().Clear()
-		time.Sleep(10 * time.Second)
-		assert.Eventually(t, func() bool {
-			empty := true
-			c.Controller().WorkerStatusUpdateTimes().Range(func(k, v any) bool {
-				empty = false
-				return false
-			})
-			return empty
-		}, 30*time.Second, 2*time.Second)
+		updateTimes := c.Controller().WorkerStatusUpdateTimes()
+		workerMap := map[string]*worker.TestWorker{}
+		for _, w := range workers {
+			workerMap[w.Name()] = w
+		}
+		updateTimes.Range(func(k, v any) bool {
+			require.NotNil(t, k)
+			require.NotNil(t, v)
+			if workerMap[k.(string)] == nil {
+				// We don't remove from updateTimes currently so if we're not
+				// expecting it we'll see an out-of-date entry
+				return true
+			}
+			assert.WithinDuration(t, time.Now(), v.(time.Time), 30*time.Second)
+			delete(workerMap, k.(string))
+			return true
+		})
+		assert.Empty(t, workerMap)
 		return
 	}
 
@@ -465,9 +488,8 @@ func ExpectWorkers(t *testing.T, c *controller.TestController, workers ...*worke
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
+			require.NoError(t, w.Worker().WaitForNextSuccessfulStatusUpdate())
 			require.NoError(t, c.WaitForNextWorkerStatusUpdate(w.Name()))
-			_, ok := c.Controller().WorkerStatusUpdateTimes().Load(w.Name())
-			assert.True(t, ok)
 		}()
 	}
 	wg.Wait()
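
Taken together, the patch replaces defer-based shutdown and time.Sleep synchronization with
cleanup registered inside the test helpers and blocking waits in helper.ExpectWorkers. A
minimal sketch of the pattern a new test in the parallel package would follow is shown below.
It assumes, per the testing.go hunks above, that NewTestController and NewTestWorker now
register their own shutdown via t.Cleanup; the test name is illustrative, not part of the diff.

	package parallel

	import (
		"testing"

		"github.com/hashicorp/boundary/internal/daemon/controller"
		"github.com/hashicorp/boundary/internal/daemon/worker"
		"github.com/hashicorp/boundary/internal/tests/helper"
	)

	func TestExampleClusterPattern(t *testing.T) {
		t.Parallel() // only safe for fully isolated tests; see parallel/doc.go

		// No defer c1.Shutdown(): the test helpers are assumed to register
		// cleanup via t.Cleanup, which also runs if a helper calls t.Fatal.
		c1 := controller.NewTestController(t, nil)

		w1 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
			WorkerAuthKms:    c1.Config().WorkerAuthKms,
			InitialUpstreams: c1.ClusterAddrs(),
		})

		// Blocks until c1 reports a status update from w1, replacing the
		// old time.Sleep / WaitForNextSuccessfulStatusUpdate sequences.
		helper.ExpectWorkers(t, c1, w1)
	}

Registering shutdown through t.Cleanup rather than defer is also what makes it safe to drop
the tc.Shutdown()/tw.Shutdown() calls before t.Fatal in the first two hunks: cleanup
functions still run when a test fails early.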