Skip to content

Commit

Permalink
test(cluster): split tests to improve parallelism
Browse files Browse the repository at this point in the history
  • Loading branch information
ddebko committed Dec 19, 2024
1 parent 090c1df commit 1b0edba
Show file tree
Hide file tree
Showing 17 changed files with 147 additions and 136 deletions.
2 changes: 0 additions & 2 deletions internal/daemon/controller/testing.go
Original file line number Diff line number Diff line change
Expand Up @@ -529,7 +529,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {
var err error
tc.c, err = New(ctx, conf)
if err != nil {
tc.Shutdown()
t.Fatal(err)
}

Expand All @@ -552,7 +551,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {

if !opts.DisableAutoStart {
if err := tc.c.Start(); err != nil {
tc.Shutdown()
t.Fatal(err)
}
}
Expand Down
5 changes: 3 additions & 2 deletions internal/daemon/worker/testing.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,9 @@ func (tw *TestWorker) Name() string {
func (tw *TestWorker) UpstreamAddrs() []string {
var addrs []string
lastStatus := tw.w.LastStatusSuccess()
if lastStatus == nil {
return addrs
}
for _, v := range lastStatus.GetCalculatedUpstreams() {
addrs = append(addrs, v.Address)
}
Expand Down Expand Up @@ -360,7 +363,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {

tw.w, err = New(ctx, conf)
if err != nil {
tw.Shutdown()
t.Fatal(err)
}

Expand All @@ -387,7 +389,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {

if !opts.DisableAutoStart {
if err := tw.w.Start(); err != nil {
tw.Shutdown()
t.Fatal(err)
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
"testing"
Expand All @@ -15,6 +15,7 @@ import (
)

func TestAnonListing(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
Expand All @@ -28,7 +29,6 @@ func TestAnonListing(t *testing.T) {
InitialResourcesSuffix: "1234567890",
Logger: logger,
})
defer c1.Shutdown()

// Anon user has list and read permissions on scopes by default,
// verify that list scopes returns expected scope without setting token
Expand Down
13 changes: 13 additions & 0 deletions internal/tests/cluster/parallel/doc.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

/*
Package parallel includes a set of tests that run in parallel.
A test should only be added to this package if it can be
completely isolated from the other tests that currently exist
in this package. If a test consistently fails because it does
not have an isolated environment, it should be moved to the
adjacent "sequential" package.
*/

package parallel
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
"context"
Expand Down Expand Up @@ -32,13 +32,11 @@ func TestIPv6Listener(t *testing.T) {
Config: conf,
Logger: logger.Named("c1"),
})
defer c1.Shutdown()

c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Config: conf,
Logger: c1.Config().Logger.ResetNamed("c2"),
})
defer c2.Shutdown()

wg := new(sync.WaitGroup)
wg.Add(2)
Expand All @@ -61,7 +59,6 @@ func TestIPv6Listener(t *testing.T) {
InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
Logger: logger.Named("w1"),
})
defer w1.Shutdown()

wg.Add(2)
go func() {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
"context"
"slices"
"sync"
"testing"
"time"
Expand All @@ -14,12 +14,11 @@ import (
"github.com/hashicorp/boundary/internal/daemon/worker"
"github.com/hashicorp/boundary/internal/tests/helper"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestMultiControllerMultiWorkerConnections(t *testing.T) {
t.Parallel()
require := require.New(t)
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
Expand All @@ -32,12 +31,10 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
Config: conf,
Logger: logger.Named("c1"),
})
defer c1.Shutdown()

c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Logger: c1.Config().Logger.ResetNamed("c2"),
})
defer c2.Shutdown()

wg := new(sync.WaitGroup)
wg.Add(2)
Expand All @@ -56,7 +53,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
Logger: logger.Named("w1"),
})
defer w1.Shutdown()

wg.Add(2)
go func() {
Expand All @@ -72,7 +68,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
w2 := w1.AddClusterWorkerMember(t, &worker.TestWorkerOpts{
Logger: logger.Named("w2"),
})
defer w2.Shutdown()

wg.Add(2)
go func() {
Expand All @@ -98,53 +93,46 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
}()
wg.Wait()

w1 = worker.NewTestWorker(t, &worker.TestWorkerOpts{
w3 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
WorkerAuthKms: c1.Config().WorkerAuthKms,
InitialUpstreams: c1.ClusterAddrs(),
Logger: logger.Named("w1"),
})
defer w1.Shutdown()

wg.Add(2)
go func() {
defer wg.Done()
helper.ExpectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c1, w2, w3)
}()
go func() {
defer wg.Done()
helper.ExpectWorkers(t, c2, w1, w2)
helper.ExpectWorkers(t, c2, w2, w3)
}()
wg.Wait()

require.NoError(c2.Controller().Shutdown())

wg.Add(1)
go func() {
defer wg.Done()
helper.ExpectWorkers(t, c1, w1, w2)
}()
wg.Wait()
helper.ExpectWorkers(t, c1, w2, w3)

c2 = c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Logger: c1.Config().Logger.ResetNamed("c2"),
c3 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
Logger: c1.Config().Logger.ResetNamed("c3"),
})
defer c2.Shutdown()

wg.Add(2)
go func() {
defer wg.Done()
helper.ExpectWorkers(t, c1, w1, w2)
helper.ExpectWorkers(t, c1, w2, w3)
}()
go func() {
defer wg.Done()
helper.ExpectWorkers(t, c2, w1, w2)
helper.ExpectWorkers(t, c3, w2, w3)
}()
wg.Wait()
}

func TestWorkerAppendInitialUpstreams(t *testing.T) {
ctx := context.Background()
require, assert := require.New(t), assert.New(t)
t.Parallel()
require := require.New(t)
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
})
Expand All @@ -156,7 +144,7 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
Config: conf,
Logger: logger.Named("c1"),
})
defer c1.Shutdown()
t.Cleanup(c1.Shutdown)

helper.ExpectWorkers(t, c1)

Expand All @@ -167,31 +155,23 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
Logger: logger.Named("w1"),
SuccessfulStatusGracePeriodDuration: 1 * time.Second,
})
defer w1.Shutdown()
t.Cleanup(w1.Shutdown)

// Wait for worker to send status
cancelCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
t.Cleanup(cancel)
for {
select {
case <-time.After(500 * time.Millisecond):
case <-cancelCtx.Done():
require.FailNow("No worker found after 10 seconds")
}
successSent := w1.Worker().LastStatusSuccess()
if successSent != nil {
break
}
}
helper.ExpectWorkers(t, c1, w1)

// Upstreams should be equivalent to the controller cluster addr after status updates
assert.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
require.Eventually(func() bool {
if w1.Worker().LastStatusSuccess() == nil {
return false
}
return slices.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
}, 4*time.Second, 250*time.Millisecond)

// Bring down the controller
c1.Shutdown()
time.Sleep(3 * time.Second) // Wait a little longer than the grace period

// Upstreams should now match initial upstreams
assert.True(strutil.EquivalentSlices(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams))
require.Eventually(func() bool {
return slices.Equal(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
}, 4*time.Second, 250*time.Millisecond)
}
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
// Enable tcp target support.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
"strings"
Expand All @@ -16,9 +16,9 @@ import (

// This test validates the fix for ICU-2301
func TestListAnonymousRecursing(t *testing.T) {
t.Parallel()
require := require.New(t)
tc := controller.NewTestController(t, nil)
defer tc.Shutdown()

client := tc.Client()
token := tc.Token()
Expand Down
Original file line number Diff line number Diff line change
@@ -1,15 +1,14 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cluster
package parallel

import (
"bytes"
"context"
"os"
"path"
"testing"
"time"

"github.com/hashicorp/boundary/api"
"github.com/hashicorp/boundary/api/scopes"
Expand All @@ -23,6 +22,7 @@ import (
)

func TestUnixListener(t *testing.T) {
t.Parallel()
require := require.New(t)
buf := new(bytes.Buffer)
logger := hclog.New(&hclog.LoggerOptions{
Expand Down Expand Up @@ -73,7 +73,6 @@ func TestUnixListener(t *testing.T) {
},
},
})
defer c1.Shutdown()

helper.ExpectWorkers(t, c1)

Expand All @@ -86,17 +85,14 @@ func TestUnixListener(t *testing.T) {
InitialUpstreams: c1.ClusterAddrs(),
Logger: logger.Named("w1"),
})
defer w1.Shutdown()

time.Sleep(10 * time.Second)
helper.ExpectWorkers(t, c1, w1)

require.NoError(w1.Worker().Shutdown())
time.Sleep(10 * time.Second)
helper.ExpectWorkers(t, c1)

require.NoError(c1.Controller().Shutdown())
c1 = controller.NewTestController(t, &controller.TestControllerOpts{
c2 := controller.NewTestController(t, &controller.TestControllerOpts{
Config: conf,
Logger: logger.Named("c1"),
DisableOidcAuthMethodCreation: true,
Expand All @@ -118,15 +114,13 @@ func TestUnixListener(t *testing.T) {
},
},
})
defer c1.Shutdown()

time.Sleep(10 * time.Second)
helper.ExpectWorkers(t, c1)
helper.ExpectWorkers(t, c2)

client, err := api.NewClient(nil)
require.NoError(err)

addrs := c1.ApiAddrs()
addrs := c2.ApiAddrs()
require.Len(addrs, 1)

require.NoError(client.SetAddr(addrs[0]))
Expand Down
13 changes: 13 additions & 0 deletions internal/tests/cluster/sequential/doc.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

/*
Package sequential includes a set of tests that run sequentially.
A test should be added to this package if it needs to be completely
isolated from other tests. Newly added tests should not enable the
parallel testing option. Please include a comment in the unit test
that explains why the test must be run sequentially. Tests that can
be run in parallel should be moved to the adjacent "parallel" package.
*/

package sequential
Loading

0 comments on commit 1b0edba

Please sign in to comment.