test(cluster): improve test runtime
ddebko committed Dec 20, 2024
1 parent dd35ca1 commit b293f60
Showing 13 changed files with 107 additions and 123 deletions.
2 changes: 0 additions & 2 deletions internal/daemon/controller/testing.go
@@ -529,7 +529,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {
     var err error
     tc.c, err = New(ctx, conf)
     if err != nil {
-        tc.Shutdown()
         t.Fatal(err)
     }
 
@@ -552,7 +551,6 @@ func NewTestController(t testing.TB, opts *TestControllerOpts) *TestController {
 
     if !opts.DisableAutoStart {
         if err := tc.c.Start(); err != nil {
-            tc.Shutdown()
             t.Fatal(err)
         }
     }
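
A note on the pattern: removing tc.Shutdown() before t.Fatal only makes sense if shutdown is handled elsewhere, presumably registered once via t.Cleanup, so error paths no longer need manual shutdown bookkeeping. A minimal sketch of that pattern, with the server, Start, and Shutdown names being hypothetical stand-ins:

package example

import "testing"

type server struct{}

func (s *server) Start() error { return nil }
func (s *server) Shutdown()    {}

// newTestServer registers shutdown via t.Cleanup up front, so later
// error paths can call t.Fatal directly; the cleanup still runs.
func newTestServer(t testing.TB) *server {
    s := &server{}
    t.Cleanup(s.Shutdown)
    if err := s.Start(); err != nil {
        t.Fatal(err) // no explicit s.Shutdown() needed before failing
    }
    return s
}
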
5 changes: 3 additions & 2 deletions internal/daemon/worker/testing.go
@@ -72,6 +72,9 @@ func (tw *TestWorker) Name() string {
 func (tw *TestWorker) UpstreamAddrs() []string {
     var addrs []string
     lastStatus := tw.w.LastStatusSuccess()
+    if lastStatus == nil {
+        return addrs
+    }
     for _, v := range lastStatus.GetCalculatedUpstreams() {
         addrs = append(addrs, v.Address)
     }
@@ -360,7 +363,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {
 
     tw.w, err = New(ctx, conf)
     if err != nil {
-        tw.Shutdown()
         t.Fatal(err)
     }
 
@@ -387,7 +389,6 @@ func NewTestWorker(t testing.TB, opts *TestWorkerOpts) *TestWorker {
 
     if !opts.DisableAutoStart {
         if err := tw.w.Start(); err != nil {
-            tw.Shutdown()
             t.Fatal(err)
         }
     }
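
The added nil check guards against calling UpstreamAddrs before the worker has completed its first status report, in which case LastStatusSuccess evidently returns nil. A self-contained sketch of the guarded-accessor shape, with illustrative type names:

package example

// lastStatus stands in for the worker's last successful status report.
type lastStatus struct{ upstreams []string }

type testWorker struct {
    last *lastStatus // nil until the first status report succeeds
}

// UpstreamAddrs returns an empty slice instead of panicking with a
// nil-pointer dereference when no status has been reported yet.
func (w *testWorker) UpstreamAddrs() []string {
    var addrs []string
    if w.last == nil {
        return addrs
    }
    return append(addrs, w.last.upstreams...)
}
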
1 change: 0 additions & 1 deletion internal/tests/cluster/parallel/anon_listing_test.go
@@ -30,7 +30,6 @@ func TestAnonListing(t *testing.T) {
         InitialResourcesSuffix: "1234567890",
         Logger:                 logger,
     })
-    defer c1.Shutdown()
 
     // Anon user has list and read permissions on scopes by default,
     // verify that list scopes returns expected scope without setting token
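
The comment refers to Boundary's default grants for the anonymous user. As a rough sketch of what such a check can look like against a test controller (the assertions and test name here are illustrative, not this test's actual body):

package parallel

import (
    "context"
    "testing"

    "github.com/hashicorp/boundary/api/scopes"
    "github.com/hashicorp/boundary/internal/daemon/controller"
    "github.com/stretchr/testify/require"
)

func TestAnonScopeListSketch(t *testing.T) {
    c1 := controller.NewTestController(t, nil)

    // No token is set on the client, so requests run as the anon user.
    client := c1.Client()
    res, err := scopes.NewClient(client).List(context.Background(), "global")
    require.NoError(t, err)
    require.NotEmpty(t, res.Items) // default grants allow listing scopes
}
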
3 changes: 0 additions & 3 deletions internal/tests/cluster/parallel/ipv6_listener_test.go
@@ -33,13 +33,11 @@ func TestIPv6Listener(t *testing.T) {
         Config: conf,
         Logger: logger.Named("c1"),
     })
-    defer c1.Shutdown()
 
     c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
         Config: conf,
         Logger: c1.Config().Logger.ResetNamed("c2"),
     })
-    defer c2.Shutdown()
 
     wg := new(sync.WaitGroup)
     wg.Add(2)
@@ -62,7 +60,6 @@ func TestIPv6Listener(t *testing.T) {
         InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
         Logger:           logger.Named("w1"),
     })
-    defer w1.Shutdown()
 
     wg.Add(2)
     go func() {
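
The checks against the two controllers run concurrently under a sync.WaitGroup, so the test waits for the slowest poll rather than the sum of both, which is where much of the runtime saving comes from. A generic, self-contained sketch of the fan-out pattern (runConcurrently is an illustrative name):

package example

import "sync"

// runConcurrently runs each check in its own goroutine and waits for
// all of them, mirroring the wg.Add / go func / wg.Wait shape above.
func runConcurrently(checks ...func()) {
    wg := new(sync.WaitGroup)
    wg.Add(len(checks))
    for _, check := range checks {
        go func(c func()) {
            defer wg.Done()
            c()
        }(check)
    }
    wg.Wait()
}

With Go 1.22+ loop-variable semantics the closure could capture check directly; passing it as an argument keeps the sketch safe on older toolchains.
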
71 changes: 27 additions & 44 deletions internal/tests/cluster/parallel/multi_controller_worker_test.go
@@ -4,7 +4,7 @@
 package parallel
 
 import (
-    "context"
+    "slices"
     "sync"
     "testing"
     "time"
@@ -14,8 +14,6 @@ import (
     "github.com/hashicorp/boundary/internal/daemon/worker"
     "github.com/hashicorp/boundary/internal/tests/helper"
     "github.com/hashicorp/go-hclog"
-    "github.com/hashicorp/go-secure-stdlib/strutil"
-    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )

@@ -34,12 +32,10 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
         Config: conf,
         Logger: logger.Named("c1"),
     })
-    defer c1.Shutdown()
 
     c2 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
         Logger: c1.Config().Logger.ResetNamed("c2"),
     })
-    defer c2.Shutdown()
 
     wg := new(sync.WaitGroup)
     wg.Add(2)
@@ -58,7 +54,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
         InitialUpstreams: append(c1.ClusterAddrs(), c2.ClusterAddrs()...),
         Logger:           logger.Named("w1"),
     })
-    defer w1.Shutdown()
 
     wg.Add(2)
     go func() {
@@ -74,7 +69,6 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
     w2 := w1.AddClusterWorkerMember(t, &worker.TestWorkerOpts{
         Logger: logger.Named("w2"),
     })
-    defer w2.Shutdown()
 
     wg.Add(2)
     go func() {
@@ -100,55 +94,47 @@ func TestMultiControllerMultiWorkerConnections(t *testing.T) {
     }()
     wg.Wait()
 
-    w1 = worker.NewTestWorker(t, &worker.TestWorkerOpts{
+    w3 := worker.NewTestWorker(t, &worker.TestWorkerOpts{
         WorkerAuthKms:    c1.Config().WorkerAuthKms,
         InitialUpstreams: c1.ClusterAddrs(),
-        Logger:           logger.Named("w1"),
+        Logger:           logger.Named("w3"),
     })
-    defer w1.Shutdown()
 
     wg.Add(2)
     go func() {
         defer wg.Done()
-        helper.ExpectWorkers(t, c1, w1, w2)
+        helper.ExpectWorkers(t, c1, w2, w3)
     }()
     go func() {
         defer wg.Done()
-        helper.ExpectWorkers(t, c2, w1, w2)
+        helper.ExpectWorkers(t, c2, w2, w3)
     }()
     wg.Wait()
 
     require.NoError(c2.Controller().Shutdown())
 
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
-        helper.ExpectWorkers(t, c1, w1, w2)
-    }()
-    wg.Wait()
+    helper.ExpectWorkers(t, c1, w2, w3)
 
-    c2 = c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
-        Logger: c1.Config().Logger.ResetNamed("c2"),
+    c3 := c1.AddClusterControllerMember(t, &controller.TestControllerOpts{
+        Logger: c1.Config().Logger.ResetNamed("c3"),
     })
-    defer c2.Shutdown()
 
     wg.Add(2)
     go func() {
         defer wg.Done()
-        helper.ExpectWorkers(t, c1, w1, w2)
+        helper.ExpectWorkers(t, c1, w2, w3)
     }()
     go func() {
         defer wg.Done()
-        helper.ExpectWorkers(t, c2, w1, w2)
+        helper.ExpectWorkers(t, c3, w2, w3)
     }()
     wg.Wait()
 }
 
 func TestWorkerAppendInitialUpstreams(t *testing.T) {
     t.Parallel()
 
-    ctx := context.Background()
-    require, assert := require.New(t), assert.New(t)
+    require := require.New(t)
     logger := hclog.New(&hclog.LoggerOptions{
         Level: hclog.Trace,
     })
@@ -160,7 +146,6 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
         Config: conf,
         Logger: logger.Named("c1"),
     })
-    defer c1.Shutdown()
 
     helper.ExpectWorkers(t, c1)
 
@@ -171,31 +156,29 @@ func TestWorkerAppendInitialUpstreams(t *testing.T) {
         Logger:                              logger.Named("w1"),
         SuccessfulStatusGracePeriodDuration: 1 * time.Second,
     })
-    defer w1.Shutdown()
 
-    // Wait for worker to send status
-    cancelCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
-    t.Cleanup(cancel)
-    for {
-        select {
-        case <-time.After(500 * time.Millisecond):
-        case <-cancelCtx.Done():
-            require.FailNow("No worker found after 10 seconds")
-        }
-        successSent := w1.Worker().LastStatusSuccess()
-        if successSent != nil {
-            break
-        }
-    }
+    helper.ExpectWorkers(t, c1, w1)
 
     // Upstreams should be equivalent to the controller cluster addr after status updates
-    assert.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+    require.Eventually(func() bool {
+        if w1.Worker().LastStatusSuccess() == nil {
+            return false
+        }
+        return slices.Equal(c1.ClusterAddrs(), w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+    }, 4*time.Second, 250*time.Millisecond)
 
     // Bring down the controller
-    c1.Shutdown()
-    time.Sleep(3 * time.Second) // Wait a little longer than the grace period
+    wg := new(sync.WaitGroup)
+    wg.Add(1)
+    go func() {
+        defer wg.Done()
+        c1.Shutdown()
+    }()
 
     // Upstreams should now match initial upstreams
-    assert.True(strutil.EquivalentSlices(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams))
+    require.Eventually(func() bool {
+        return slices.Equal(initialUpstreams, w1.Worker().LastStatusSuccess().LastCalculatedUpstreams)
+    }, 4*time.Second, 250*time.Millisecond)
+    wg.Wait()
 }
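
The core change in this test is replacing one-shot assertions plus fixed sleeps with require.Eventually, which polls until the condition holds or a deadline passes; the happy path finishes after one tick instead of always paying the full sleep. A minimal, self-contained illustration of the same testify API:

package example

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

func TestEventuallySketch(t *testing.T) {
    start := time.Now()
    // Polls every 250ms for up to 4s; passes as soon as the
    // condition returns true rather than sleeping unconditionally.
    require.Eventually(t, func() bool {
        return time.Since(start) >= 100*time.Millisecond
    }, 4*time.Second, 250*time.Millisecond)
}

Note the test also swaps strutil.EquivalentSlices for the standard library's slices.Equal; the latter is order-sensitive, which is fine as long as both sides list upstreams in the same order.
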
@@ -20,7 +20,6 @@ func TestListAnonymousRecursing(t *testing.T) {
 
     require := require.New(t)
     tc := controller.NewTestController(t, nil)
-    defer tc.Shutdown()
 
     client := tc.Client()
     token := tc.Token()
16 changes: 5 additions & 11 deletions internal/tests/cluster/parallel/unix_listener_test.go
@@ -9,7 +9,6 @@ import (
     "os"
     "path"
     "testing"
-    "time"
 
     "github.com/hashicorp/boundary/api"
     "github.com/hashicorp/boundary/api/scopes"
@@ -75,7 +74,6 @@ func TestUnixListener(t *testing.T) {
             },
         },
     })
-    defer c1.Shutdown()
 
     helper.ExpectWorkers(t, c1)
 
@@ -88,19 +86,17 @@ func TestUnixListener(t *testing.T) {
         InitialUpstreams: c1.ClusterAddrs(),
         Logger:           logger.Named("w1"),
     })
-    defer w1.Shutdown()
 
-    time.Sleep(10 * time.Second)
+    helper.ExpectWorkers(t, c1, w1)
 
     require.NoError(w1.Worker().Shutdown())
-    time.Sleep(10 * time.Second)
 
     helper.ExpectWorkers(t, c1)
 
     require.NoError(c1.Controller().Shutdown())
-    c1 = controller.NewTestController(t, &controller.TestControllerOpts{
+    c2 := controller.NewTestController(t, &controller.TestControllerOpts{
         Config: conf,
-        Logger: logger.Named("c1"),
+        Logger: logger.Named("c2"),
         DisableOidcAuthMethodCreation: true,
         EventerConfig: &event.EventerConfig{
             ObservationsEnabled: true,
@@ -120,15 +116,13 @@ func TestUnixListener(t *testing.T) {
             },
         },
     })
-    defer c1.Shutdown()
 
-    time.Sleep(10 * time.Second)
-    helper.ExpectWorkers(t, c1)
+    helper.ExpectWorkers(t, c2)
 
     client, err := api.NewClient(nil)
     require.NoError(err)
 
-    addrs := c1.ApiAddrs()
+    addrs := c2.ApiAddrs()
     require.Len(addrs, 1)
 
     require.NoError(client.SetAddr(addrs[0]))
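
Here the fixed 10-second sleeps are replaced by helper.ExpectWorkers, which presumably polls the controller until the expected worker set appears. A rough sketch of such a polling helper (the waitUntil name and tick rate are illustrative, not Boundary's helper API):

package example

import (
    "testing"
    "time"
)

// waitUntil polls cond until it returns true or timeout elapses,
// failing the test otherwise. Unlike a fixed time.Sleep, it returns
// as soon as the cluster reaches the expected state.
func waitUntil(t *testing.T, cond func() bool, timeout time.Duration) {
    t.Helper()
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if cond() {
            return
        }
        time.Sleep(100 * time.Millisecond)
    }
    t.Fatalf("condition not met within %s", timeout)
}
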
45 changes: 26 additions & 19 deletions internal/tests/cluster/sequential/session_cleanup_test.go
@@ -6,6 +6,7 @@ package sequential
 import (
     "context"
     "net"
+    "sync"
     "testing"
     "time"
 
@@ -107,7 +108,7 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testing.T) {
     )
     require.NoError(err)
     t.Cleanup(func() {
-        proxy.Close()
+        _ = proxy.Close()
     })
     require.NotEmpty(t, proxy.ListenerAddr())
 
@@ -118,10 +119,6 @@ func testWorkerSessionCleanupSingle(burdenCase timeoutBurdenType) func(t *testing.T) {
         SuccessfulStatusGracePeriodDuration: workerGracePeriod(burdenCase),
     })
 
-    err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-    require.NoError(err)
-    err = c1.WaitForNextWorkerStatusUpdate(w1.Name())
-    require.NoError(err)
     helper.ExpectWorkers(t, c1, w1)
 
     // Use an independent context for test things that take a context so
@@ -237,8 +234,18 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing.T) {
         PublicClusterAddr:               pl2.Addr().String(),
         WorkerStatusGracePeriodDuration: controllerGracePeriod(burdenCase),
     })
-    helper.ExpectWorkers(t, c1)
-    helper.ExpectWorkers(t, c2)
+
+    wg := new(sync.WaitGroup)
+    wg.Add(2)
+    go func() {
+        defer wg.Done()
+        helper.ExpectWorkers(t, c1)
+    }()
+    go func() {
+        defer wg.Done()
+        helper.ExpectWorkers(t, c2)
+    }()
+    wg.Wait()
 
     // *************
     // ** Proxy 1 **
@@ -251,7 +258,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing.T) {
     )
     require.NoError(err)
     t.Cleanup(func() {
-        p1.Close()
+        _ = p1.Close()
     })
     require.NotEmpty(t, p1.ListenerAddr())
 
@@ -266,7 +273,7 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing.T) {
     )
     require.NoError(err)
     t.Cleanup(func() {
-        p2.Close()
+        _ = p2.Close()
     })
     require.NotEmpty(t, p2.ListenerAddr())
 
@@ -282,16 +289,16 @@ func testWorkerSessionCleanupMulti(burdenCase timeoutBurdenType) func(t *testing.T) {
     // Worker needs some extra time to become ready, otherwise for a
     // currently-unknown reason the next successful status update fails
    // because it's not sent before the context times out.
-    time.Sleep(5 * time.Second)
-
-    err = w1.Worker().WaitForNextSuccessfulStatusUpdate()
-    require.NoError(err)
-    err = c1.WaitForNextWorkerStatusUpdate(w1.Name())
-    require.NoError(err)
-    err = c2.WaitForNextWorkerStatusUpdate(w1.Name())
-    require.NoError(err)
-    helper.ExpectWorkers(t, c1, w1)
-    helper.ExpectWorkers(t, c2, w1)
+    wg.Add(2)
+    go func() {
+        defer wg.Done()
+        helper.ExpectWorkers(t, c1, w1)
+    }()
+    go func() {
+        defer wg.Done()
+        helper.ExpectWorkers(t, c2, w1)
+    }()
+    wg.Wait()
 
     // Use an independent context for test things that take a context so
     // that we aren't tied to any timeouts in the controller, etc. This
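
Two small patterns recur in this file: the Close error inside t.Cleanup is now explicitly discarded with a blank assignment (which satisfies linters such as errcheck without changing behavior), and the paired ExpectWorkers checks are fanned out with a WaitGroup as in the parallel tests. A short, self-contained sketch of the cleanup pattern (newTestListener is an illustrative name):

package example

import (
    "net"
    "testing"
)

// newTestListener mirrors the cleanup pattern above: the Close error
// is deliberately discarded, since the test is already over when
// cleanup runs and there is nothing useful to do with a failure.
func newTestListener(t *testing.T) net.Listener {
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { _ = l.Close() })
    return l
}
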