From ccfb6ce06ddf9b5a9b6a6a3157547d5bc1cd5342 Mon Sep 17 00:00:00 2001 From: Andrew Gillis <11790789+gammazero@users.noreply.github.com> Date: Tue, 30 Jul 2024 09:07:07 -0700 Subject: [PATCH] test: use ipfs/go-test to consolidate code to generate test data (#638) * Use ipfs/go-test to consolidate code to generate test data The go-test package provides functionality for generating common types of test data. Use go-test to generate test data instead of internal utility packages. Random test data can still be generated deterministically by setting an initial seed when particular data must be recreated. Return `rand.Rand` in place of `util.randGen`; the `util.randGen` type existed only to provide a `Read` method as an alternative to `rand.Rand.Read`. --- CHANGELOG.md | 1 + bitswap/benchmarks_test.go | 16 +- .../blockpresencemanager_test.go | 168 +++------- .../messagequeue/donthavetimeoutmgr_test.go | 22 +- .../messagequeue/messagequeue_test.go | 80 ++--- .../internal/peermanager/peermanager_test.go | 45 +-- .../peermanager/peerwantmanager_test.go | 308 ++++++------------ .../providerquerymanager_test.go | 38 +-- .../session/peerresponsetracker_test.go | 10 +- .../session/sentwantblockstracker_test.go | 6 +- .../client/internal/session/session_test.go | 115 +++---- .../internal/session/sessionwants_test.go | 10 +- .../session/sessionwantsender_test.go | 231 +++++-------- .../client/internal/session/wantinfo_test.go | 8 +- .../sessioninterestmanager_test.go | 40 +-- .../sessionmanager/sessionmanager_test.go | 18 +- .../sessionpeermanager_test.go | 20 +- bitswap/internal/testutil/testutil.go | 142 -------- bitswap/internal/testutil/testutil_test.go | 16 - bitswap/network/connecteventmanager_test.go | 8 +- .../decision/blockstoremanager_test.go | 18 +- .../server/internal/decision/engine_test.go | 16 +- .../internal/decision/taskmerger_test.go | 14 +- chunker/buzhash_test.go | 4 +- chunker/rabin_test.go | 6 +- chunker/splitting_test.go | 12 +- examples/go.sum | 2 + go.mod | 1 + go.sum | 2 + ipld/merkledag/merkledag_test.go | 8 +- ipld/unixfs/hamt/hamt_stress_test.go | 15 +- .../unixfs/importer/balanced/balanced_test.go | 4 +- ipld/unixfs/importer/importer_test.go | 12 +- ipld/unixfs/importer/trickle/trickle_test.go | 51 +-- ipld/unixfs/mod/dagmodifier_test.go | 32 +- ipld/unixfs/test/utils.go | 12 +- mfs/mfs_test.go | 4 +- pinning/pinner/dspinner/pin_test.go | 14 +- util/util.go | 25 +- util/util_test.go | 20 +- 40 files changed, 547 insertions(+), 1027 deletions(-) delete mode 100644 bitswap/internal/testutil/testutil.go delete mode 100644 bitswap/internal/testutil/testutil_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index fcb13469c..e1be6d134 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ The following emojis are used to highlight certain changes: - `bitswap/server` minor memory use and performance improvements - `bitswap` unify logger names to use uniform format bitswap/path/pkgname - `gateway` now always returns meaningful cache-control headers for generated HTML listings of UnixFS directories +- generate random test data using `ipfs/go-test` instead of internal util code ### Removed diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 80eb373ab..bd8f342ea 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-test/random" protocol
"github.com/libp2p/go-libp2p/core/protocol" "github.com/ipfs/boxo/bitswap" @@ -169,8 +169,8 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { testinstance.ConnectInstances(instances) // Generate blocks, with a smaller root block - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(bch.blockCount, stdBlockSize) blocks[0] = rootBlock[0] // Run the distribution @@ -300,7 +300,7 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { defer ig.Close() instances := ig.Instances(numnodes) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + blocks := random.BlocksOfSize(numblks, int(blockSize)) runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) } }) @@ -317,8 +317,8 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, b ig := testinstance.NewTestInstanceGenerator(net, nil, nil) instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(numblks, stdBlockSize) blocks[0] = rootBlock[0] runDistribution(b, instances, blocks, bstoreLatency, df, ff) ig.Close() @@ -333,8 +333,8 @@ func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d defer ig.Close() instances := ig.Instances(numnodes) - rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) - blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + rootBlock := random.BlocksOfSize(1, rootBlockSize) + blocks := random.BlocksOfSize(numblks, int(blockSize)) blocks[0] = rootBlock[0] runDistribution(b, instances, blocks, bstoreLatency, df, ff) } diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go index 0a1ba7d80..bde71676a 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -3,9 +3,10 @@ package blockpresencemanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) const ( @@ -18,111 +19,72 @@ const ( func TestBlockPresenceManager(t *testing.T) { bpm := New() - p := testutil.GeneratePeers(1)[0] - cids := testutil.GenerateCids(2) + p := random.Peers(1)[0] + cids := random.Cids(2) c0 := cids[0] c1 := cids[1] // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // HAVE cid0 / DONT_HAVE cid1 bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) // Peer has received HAVE for cid0 - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // Peer has received DONT_HAVE for cid1 - if 
!bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveTrueMsg) - } - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } + require.True(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveTrueMsg) + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) // HAVE cid1 / DONT_HAVE cid0 bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) // HAVE cid1 should over-write earlier DONT_HAVE cid1 - if !bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasTrueMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.True(t, bpm.PeerHasBlock(p, c1), expHasTrueMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) // Remove cid0 bpm.RemoveKeys([]cid.Cid{c0}) // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) // Remove cid1 bpm.RemoveKeys([]cid.Cid{c1}) // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should // return false - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) - if !bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p, c0), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveTrueMsg) + bpm.RemovePeer(p) - if bpm.PeerHasBlock(p, c0) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c0) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p, c0), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c0), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p, c1), expDoesNotHaveFalseMsg) } func TestAddRemoveMulti(t *testing.T) { bpm := New() - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(3) + cids := random.Cids(3) c0 := cids[0] c1 := cids[1] c2 := cids[2] @@ -136,78 +98,46 @@ func TestAddRemoveMulti(t *testing.T) { // - HAVE cid0 // - HAVE cid1 // - DONT_HAVE cid2 - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p0, c0), expHasTrueMsg) + require.True(t, bpm.PeerHasBlock(p0, c1), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p0, c2), expDoesNotHaveTrueMsg) // Peer 1 should end up with // - HAVE cid1 // - HAVE cid2 // - DONT_HAVE cid0 - if 
!bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p1, c1), expHasTrueMsg) + require.True(t, bpm.PeerHasBlock(p1, c2), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p1, c0), expDoesNotHaveTrueMsg) // Remove cid1 and cid2. Should end up with // Peer 0: HAVE cid0 // Peer 1: DONT_HAVE cid0 bpm.RemoveKeys([]cid.Cid{c1, c2}) - if !bpm.PeerHasBlock(p0, c0) { - t.Fatal(expHasTrueMsg) - } - if !bpm.PeerDoesNotHaveBlock(p1, c0) { - t.Fatal(expDoesNotHaveTrueMsg) - } + require.True(t, bpm.PeerHasBlock(p0, c0), expHasTrueMsg) + require.True(t, bpm.PeerDoesNotHaveBlock(p1, c0), expDoesNotHaveTrueMsg) // The other keys should have been cleared, so both HasBlock() and // DoesNotHaveBlock() should return false - if bpm.PeerHasBlock(p0, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p0, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p0, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c1) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c1) { - t.Fatal(expDoesNotHaveFalseMsg) - } - if bpm.PeerHasBlock(p1, c2) { - t.Fatal(expHasFalseMsg) - } - if bpm.PeerDoesNotHaveBlock(p1, c2) { - t.Fatal(expDoesNotHaveFalseMsg) - } + require.False(t, bpm.PeerHasBlock(p0, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p0, c1), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p0, c2), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p0, c2), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p1, c1), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p1, c1), expDoesNotHaveFalseMsg) + require.False(t, bpm.PeerHasBlock(p1, c2), expHasFalseMsg) + require.False(t, bpm.PeerDoesNotHaveBlock(p1, c2), expDoesNotHaveFalseMsg) } func TestAllPeersDoNotHaveBlock(t *testing.T) { bpm := New() - peers := testutil.GeneratePeers(3) + peers := random.Peers(3) p0 := peers[0] p1 := peers[1] p2 := peers[2] - cids := testutil.GenerateCids(3) + cids := random.Cids(3) c0 := cids[0] c1 := cids[1] c2 := cids[2] @@ -248,11 +178,7 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) { } for i, tc := range testcases { - if !testutil.MatchKeysIgnoreOrder( - bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), - tc.exp, - ) { - t.Fatalf("test case %d failed: expected matching keys", i) - } + require.ElementsMatchf(t, bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), tc.exp, + "test case %d failed: expected matching keys", i) } } diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go index 038213c25..ee478e605 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr_test.go @@ -8,8 +8,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) @@ -73,8 +73,8 @@ func (tr *timeoutRecorder) clear() { } func TestDontHaveTimeoutMgrTimeout(t *testing.T) { - firstks := testutil.GenerateCids(2) - secondks := append(firstks, testutil.GenerateCids(3)...) + firstks := random.Cids(2) + secondks := append(firstks, random.Cids(3)...) 
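// A minimal sketch of the ipfs/go-test/random helpers this patch switches
// to, using only the names that appear in the changed lines (nothing else
// about the package's API is assumed):
//
//	cids := random.Cids(5)            // five random CIDs
//	peers := random.Peers(3)          // three random peer.IDs
//	blks := random.BlocksOfSize(4, 8) // four random blocks of 8 bytes each
//	id := random.SequenceNext()       // next value in a shared sequence (used for session IDs)
//
// Per the commit message, generation can also be seeded so the same data
// can be recreated deterministically when a test requires it.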
latency := time.Millisecond * 20 latMultiplier := 2 expProcessTime := 5 * time.Millisecond @@ -129,7 +129,7 @@ func TestDontHaveTimeoutMgrTimeout(t *testing.T) { } func TestDontHaveTimeoutMgrCancel(t *testing.T) { - ks := testutil.GenerateCids(3) + ks := random.Cids(3) latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -165,7 +165,7 @@ func TestDontHaveTimeoutMgrCancel(t *testing.T) { } func TestDontHaveTimeoutWantCancelWant(t *testing.T) { - ks := testutil.GenerateCids(3) + ks := random.Cids(3) latency := time.Millisecond * 20 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -218,7 +218,7 @@ func TestDontHaveTimeoutWantCancelWant(t *testing.T) { } func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { - ks := testutil.GenerateCids(10) + ks := random.Cids(10) latency := time.Millisecond * 5 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -251,7 +251,7 @@ func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { } func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 40 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -300,7 +300,7 @@ func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { } func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) clock := clock.NewMock() pinged := make(chan struct{}) pc := &mockPeerConn{latency: time.Second, clock: clock, pinged: pinged} @@ -333,7 +333,7 @@ func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { } func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 1 latMultiplier := 2 expProcessTime := 2 * time.Millisecond @@ -374,7 +374,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { } func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 200 latMultiplier := 1 expProcessTime := time.Duration(0) @@ -414,7 +414,7 @@ func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { } func TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { - ks := testutil.GenerateCids(2) + ks := random.Cids(2) latency := time.Millisecond * 10 latMultiplier := 1 expProcessTime := time.Duration(0) diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index e9b8f7c54..3a9c21309 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -10,11 +10,11 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/bitswap/internal/testutil" bsmsg "github.com/ipfs/boxo/bitswap/message" pb "github.com/ipfs/boxo/bitswap/message/pb" bsnet "github.com/ipfs/boxo/bitswap/network" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) @@ -167,9 +167,9 @@ func TestStartupAndShutdown(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - bcstwh := testutil.GenerateCids(10) + bcstwh := random.Cids(10) 
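// bcstwh holds the random CIDs that are added as broadcast want-haves once the queue starts.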
messageQueue.Startup() messageQueue.AddBroadcastWantHaves(bcstwh) @@ -205,10 +205,10 @@ func TestSendingMessagesDeduped(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) messageQueue.Startup() messageQueue.AddWants(wantBlocks, wantHaves) @@ -226,10 +226,10 @@ func TestSendingMessagesPartialDupe(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) messageQueue.Startup() messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) @@ -247,13 +247,13 @@ func TestSendingMessagesPriority(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves1 := testutil.GenerateCids(5) - wantHaves2 := testutil.GenerateCids(5) + wantHaves1 := random.Cids(5) + wantHaves2 := random.Cids(5) wantHaves := append(wantHaves1, wantHaves2...) - wantBlocks1 := testutil.GenerateCids(5) - wantBlocks2 := testutil.GenerateCids(5) + wantBlocks1 := random.Cids(5) + wantBlocks2 := random.Cids(5) wantBlocks := append(wantBlocks1, wantBlocks2...) 
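// Two batches each of five want-haves and five want-blocks, set up for the priority checks that follow.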
messageQueue.Startup() @@ -314,11 +314,11 @@ func TestCancelOverridesPendingWants(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - wantHaves := testutil.GenerateCids(2) - wantBlocks := testutil.GenerateCids(2) + wantHaves := random.Cids(2) + wantBlocks := random.Cids(2) cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} messageQueue.Startup() @@ -364,10 +364,10 @@ func TestWantOverridesPendingCancels(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) - cids := testutil.GenerateCids(3) + cids := random.Cids(3) wantBlocks := cids[:1] wantHaves := cids[1:] @@ -410,14 +410,14 @@ func TestWantlistRebroadcast(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} clock := clock.NewMock() events := make(chan messageEvent) messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) - bcstwh := testutil.GenerateCids(10) - wantHaves := testutil.GenerateCids(10) - wantBlocks := testutil.GenerateCids(10) + bcstwh := random.Cids(10) + wantHaves := random.Cids(10) + wantBlocks := random.Cids(10) // Add some broadcast want-haves messageQueue.Startup() @@ -519,9 +519,9 @@ func TestSendingLargeMessages(t *testing.T) { fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] - wantBlocks := testutil.GenerateCids(10) + wantBlocks := random.Cids(10) entrySize := 44 maxMsgSize := entrySize * 3 // 3 wants messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) @@ -547,7 +547,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) messageQueue.Startup() @@ -558,7 +558,7 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { // - broadcast want-haves should be sent as want-blocks // Check broadcast want-haves - bcwh := testutil.GenerateCids(10) + bcwh := random.Cids(10) messageQueue.AddBroadcastWantHaves(bcwh) messages := collectMessages(ctx, t, messagesSent, collectTimeout) @@ -576,8 +576,8 @@ func TestSendToPeerThatDoesntSupportHave(t *testing.T) { } // Check regular want-haves and want-blocks - wbs := testutil.GenerateCids(10) - whs := testutil.GenerateCids(10) + wbs := random.Cids(10) + whs := random.Cids(10) messageQueue.AddWants(wbs, whs) messages = collectMessages(ctx, t, messagesSent, collectTimeout) @@ -601,13 +601,13 @@ func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { resetChan := 
make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() - wbs := testutil.GenerateCids(10) + wbs := random.Cids(10) messageQueue.AddWants(wbs, nil) collectMessages(ctx, t, messagesSent, collectTimeout) @@ -632,7 +632,7 @@ func TestResponseReceived(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} clock := clock.NewMock() @@ -640,7 +640,7 @@ func TestResponseReceived(t *testing.T) { messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock, events) messageQueue.Startup() - cids := testutil.GenerateCids(10) + cids := random.Cids(10) // Add some wants messageQueue.AddWants(cids[:5], nil) @@ -681,13 +681,13 @@ func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] dhtm := &fakeDontHaveTimeoutMgr{} messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() - cids := testutil.GenerateCids(2) + cids := random.Cids(2) // Add some wants and wait messageQueue.AddWants(cids, nil) @@ -725,7 +725,7 @@ func TestResponseReceivedDiscardsOutliers(t *testing.T) { resetChan := make(chan struct{}, 1) fakeSender := newFakeMessageSender(resetChan, messagesSent, false) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] maxValLatency := 30 * time.Millisecond dhtm := &fakeDontHaveTimeoutMgr{} @@ -734,7 +734,7 @@ func TestResponseReceivedDiscardsOutliers(t *testing.T) { messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm, clock, events) messageQueue.Startup() - cids := testutil.GenerateCids(4) + cids := random.Cids(4) // Add some wants and wait 20ms messageQueue.AddWants(cids[:2], nil) @@ -796,7 +796,7 @@ func BenchmarkMessageQueue(b *testing.B) { fakeSender := newFakeMessageSender(resetChan, messagesSent, true) fakenet := &fakeMessageNetwork{nil, nil, fakeSender} dhtm := &fakeDontHaveTimeoutMgr{} - peerID := testutil.GeneratePeers(1)[0] + peerID := random.Peers(1)[0] messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm, clock.New(), nil) messageQueue.Startup() @@ -832,10 +832,10 @@ func BenchmarkMessageQueue(b *testing.B) { // Alternately add either a few wants or a lot of broadcast wants if rand.Intn(2) == 0 { - wants := testutil.GenerateCids(10) + wants := random.Cids(10) qs[i].AddWants(wants[:2], wants[2:]) } else { - wants := testutil.GenerateCids(60) + wants := random.Cids(60) qs[i].AddBroadcastWantHaves(wants) } } diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go index d3c712704..b778c46e3 100644 --- 
a/bitswap/client/internal/peermanager/peermanager_test.go +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -3,11 +3,12 @@ package peermanager import ( "context" "math/rand" + "slices" "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" ) @@ -83,7 +84,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(6) + tp := random.Peers(6) self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] peerManager := New(ctx, peerQueueFactory, self) @@ -93,14 +94,14 @@ func TestAddingAndRemovingPeers(t *testing.T) { connectedPeers := peerManager.ConnectedPeers() - if !testutil.ContainsPeer(connectedPeers, peer1) || - !testutil.ContainsPeer(connectedPeers, peer2) || - !testutil.ContainsPeer(connectedPeers, peer3) { + if !slices.Contains(connectedPeers, peer1) || + !slices.Contains(connectedPeers, peer2) || + !slices.Contains(connectedPeers, peer3) { t.Fatal("Peers not connected that should be connected") } - if testutil.ContainsPeer(connectedPeers, peer4) || - testutil.ContainsPeer(connectedPeers, peer5) { + if slices.Contains(connectedPeers, peer4) || + slices.Contains(connectedPeers, peer5) { t.Fatal("Peers connected that shouldn't be connected") } @@ -108,7 +109,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Disconnected(peer1) connectedPeers = peerManager.ConnectedPeers() - if testutil.ContainsPeer(connectedPeers, peer1) { + if slices.Contains(connectedPeers, peer1) { t.Fatal("Peer should have been disconnected but was not") } @@ -116,7 +117,7 @@ func TestAddingAndRemovingPeers(t *testing.T) { peerManager.Connected(peer1) connectedPeers = peerManager.ConnectedPeers() - if !testutil.ContainsPeer(connectedPeers, peer1) { + if !slices.Contains(connectedPeers, peer1) { t.Fatal("Peer should have been connected but was not") } } @@ -126,11 +127,11 @@ func TestBroadcastOnConnect(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) + tp := random.Peers(2) self, peer1 := tp[0], tp[1] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(2) + cids := random.Cids(2) peerManager.BroadcastWantHaves(ctx, cids) // Connect with two broadcast wants for first peer @@ -147,11 +148,11 @@ func TestBroadcastWantHaves(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, peer1, peer2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(3) + cids := random.Cids(3) // Broadcast the first two. 
peerManager.BroadcastWantHaves(ctx, cids[:2]) @@ -188,10 +189,10 @@ func TestSendWants(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(2) + tp := random.Peers(2) self, peer1 := tp[0], tp[1] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) + cids := random.Cids(4) peerManager.Connected(peer1) peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) @@ -222,10 +223,10 @@ func TestSendCancels(t *testing.T) { defer cancel() msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, peer1, peer2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) - cids := testutil.GenerateCids(4) + cids := random.Cids(4) // Connect to peer1 and peer2 peerManager.Connected(peer1) @@ -283,7 +284,7 @@ func TestSessionRegistration(t *testing.T) { msgs := make(chan msg, 16) peerQueueFactory := makePeerQueueFactory(msgs) - tp := testutil.GeneratePeers(3) + tp := random.Peers(3) self, p1, p2 := tp[0], tp[1], tp[2] peerManager := New(ctx, peerQueueFactory, self) @@ -343,8 +344,8 @@ func BenchmarkPeerManager(b *testing.B) { return &benchPeerQueue{} } - self := testutil.GeneratePeers(1)[0] - peers := testutil.GeneratePeers(500) + self := random.Peers(1)[0] + peers := random.Peers(500) peerManager := New(ctx, peerQueueFactory, self) // Create a bunch of connections @@ -364,11 +365,11 @@ func BenchmarkPeerManager(b *testing.B) { // Alternately add either a few wants or many broadcast wants r := rand.Intn(8) if r == 0 { - wants := testutil.GenerateCids(10) + wants := random.Cids(10) peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:]) wanted = append(wanted, wants...) } else if r == 1 { - wants := testutil.GenerateCids(30) + wants := random.Cids(30) peerManager.BroadcastWantHaves(ctx, wants) wanted = append(wanted, wants...) 
} else { diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go index 505fbea1a..bfe0c626d 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -3,9 +3,10 @@ package peermanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type gauge struct { @@ -61,43 +62,32 @@ func clearSent(pqs map[peer.ID]PeerQueue) { func TestEmpty(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - - if len(pwm.getWantBlocks()) > 0 { - t.Fatal("Expected GetWantBlocks() to have length 0") - } - if len(pwm.getWantHaves()) > 0 { - t.Fatal("Expected GetWantHaves() to have length 0") - } + require.Empty(t, pwm.getWantBlocks()) + require.Empty(t, pwm.getWantHaves()) } func TestPWMBroadcastWantHaves(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(3) - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) - cids3 := testutil.GenerateCids(2) + peers := random.Peers(3) + cids := random.Cids(2) + cids2 := random.Cids(2) + cids3 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) for _, p := range peers[:2] { pq := &mockPQ{} peerQueues[p] = pq pwm.addPeer(pq, p) - if len(pq.bcst) > 0 { - t.Errorf("expected no broadcast wants") - } + require.Empty(t, pq.bcst, "expected no broadcast wants") } // Broadcast 2 cids to 2 peers pwm.broadcastWantHaves(cids) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { - t.Fatal("Expected all cids to be broadcast") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq.bcst, cids, "Expected all cids to be broadcast") } // Broadcasting same cids should have no effect @@ -105,9 +95,7 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.Len(t, pq.bcst, 0, "Expected 0 want-haves") } // Broadcast 2 other cids @@ -115,12 +103,8 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids2) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { - t.Fatal("Expected all new cids to be broadcast") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq.bcst, cids2, "Expected all new cids to be broadcast") } // Broadcast mix of old and new cids @@ -128,18 +112,14 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(append(cids, cids3...)) for _, pqi := range peerQueues { pq := pqi.(*mockPQ) - if len(pq.bcst) != 2 { - t.Fatal("Expected 2 want-haves") - } + require.Len(t, pq.bcst, 2, "Expected 2 want-haves") // Only new cids should be broadcast - if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { - t.Fatal("Expected all new cids to be broadcast") - } + require.ElementsMatch(t, pq.bcst, cids3, "Expected all new cids to be broadcast") } // Sending want-block for a cid should prevent broadcast to that peer clearSent(peerQueues) - cids4 := testutil.GenerateCids(4) + cids4 := random.Cids(4) wantBlocks := 
[]cid.Cid{cids4[0], cids4[2]} p0 := peers[0] p1 := peers[1] @@ -147,19 +127,13 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pwm.broadcastWantHaves(cids4) pq0 := peerQueues[p0].(*mockPQ) - if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves - t.Fatal("Expected 2 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { - t.Fatalf("Expected unsent cids to be broadcast") - } + // only broadcast 2 / 4 want-haves + require.Len(t, pq0.bcst, 2, "Expected 2 want-haves") + require.ElementsMatch(t, pq0.bcst, []cid.Cid{cids4[1], cids4[3]}, "Expected unsent cids to be broadcast") pq1 := peerQueues[p1].(*mockPQ) - if len(pq1.bcst) != 4 { // broadcast all 4 want-haves - t.Fatal("Expected 4 want-haves") - } - if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { - t.Fatal("Expected all cids to be broadcast") - } + // broadcast all 4 want-haves + require.Len(t, pq1.bcst, 4, "Expected 4 want-haves") + require.ElementsMatch(t, pq1.bcst, cids4, "Expected all cids to be broadcast") allCids := cids allCids = append(allCids, cids2...) @@ -171,25 +145,21 @@ func TestPWMBroadcastWantHaves(t *testing.T) { pq2 := &mockPQ{} peerQueues[peer2] = pq2 pwm.addPeer(pq2, peer2) - if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { - t.Fatalf("Expected all cids to be broadcast.") - } + require.ElementsMatch(t, pq2.bcst, allCids, "Expected all cids to be broadcast") clearSent(peerQueues) pwm.broadcastWantHaves(allCids) - if len(pq2.bcst) != 0 { - t.Errorf("did not expect to have CIDs to broadcast") - } + require.Empty(t, pq2.bcst, "did not expect to have CIDs to broadcast") } func TestPWMSendWants(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) for _, p := range peers[:2] { @@ -203,75 +173,55 @@ func TestPWMSendWants(t *testing.T) { // Send 2 want-blocks and 2 want-haves to p0 clearSent(peerQueues) pwm.sendWants(p0, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq0.wbs, cids, "Expected 2 want-blocks") + require.ElementsMatch(t, pq0.whs, cids2, "Expected 2 want-haves") // Send to p0 // - 1 old want-block and 2 new want-blocks // - 1 old want-have and 2 new want-haves clearSent(peerQueues) - cids3 := testutil.GenerateCids(2) - cids4 := testutil.GenerateCids(2) + cids3 := random.Cids(2) + cids4 := random.Cids(2) pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq0.wbs, cids3, "Expected 2 want-blocks") + require.ElementsMatch(t, pq0.whs, cids4, "Expected 2 want-haves") // Send to p0 as want-blocks: 1 new want-block, 1 old want-have clearSent(peerQueues) - cids5 := testutil.GenerateCids(1) + cids5 := random.Cids(1) newWantBlockOldWantHave := append(cids5, cids2[0]) pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) // If a want was sent as a want-have, it should be ok to now send it as a // want-block - if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { - t.Fatal("Expected 2 
want-blocks") - } - if len(pq0.whs) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.ElementsMatch(t, pq0.wbs, newWantBlockOldWantHave, "Expected 2 want-blocks") + require.Empty(t, pq0.whs, "Expected 0 want-haves") // Send to p0 as want-haves: 1 new want-have, 1 old want-block clearSent(peerQueues) - cids6 := testutil.GenerateCids(1) + cids6 := random.Cids(1) newWantHaveOldWantBlock := append(cids6, cids[0]) pwm.sendWants(p0, []cid.Cid{}, newWantHaveOldWantBlock) // If a want was previously sent as a want-block, it should not be // possible to now send it as a want-have - if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { - t.Fatal("Expected 1 want-have") - } - if len(pq0.wbs) != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.ElementsMatch(t, pq0.whs, cids6, "Expected 1 want-have") + require.Empty(t, pq0.wbs, "Expected 0 want-blocks") // Send 2 want-blocks and 2 want-haves to p1 pwm.sendWants(p1, cids, cids2) - if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { - t.Fatal("Expected 2 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { - t.Fatal("Expected 2 want-haves") - } + require.ElementsMatch(t, pq1.wbs, cids, "Expected 2 want-blocks") + require.ElementsMatch(t, pq1.whs, cids2, "Expected 2 want-haves") } func TestPWMSendCancels(t *testing.T) { pwm := newPeerWantManager(&gauge{}, &gauge{}) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - wb1 := testutil.GenerateCids(2) - wh1 := testutil.GenerateCids(2) - wb2 := testutil.GenerateCids(2) - wh2 := testutil.GenerateCids(2) + wb1 := random.Cids(2) + wh1 := random.Cids(2) + wb2 := random.Cids(2) + wh2 := random.Cids(2) allwb := append(wb1, wb2...) allwh := append(wh1, wh2...) @@ -290,54 +240,32 @@ func TestPWMSendCancels(t *testing.T) { // (1 overlapping want-block / want-have with p0) pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { - t.Fatal("Expected 4 cids to be wanted") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { - t.Fatal("Expected 4 cids to be wanted") - } + require.ElementsMatch(t, pwm.getWantBlocks(), allwb, "Expected 4 cids to be wanted") + require.ElementsMatch(t, pwm.getWantHaves(), allwh, "Expected 4 cids to be wanted") // Cancel 1 want-block and 1 want-have that were sent to p0 clearSent(peerQueues) pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) // Should cancel the want-block and want-have - if len(pq1.cancels) != 0 { - t.Fatal("Expected no cancels sent to p1") - } - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { - t.Fatal("Expected 2 cids to be cancelled") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { - t.Fatal("Expected 3 want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { - t.Fatal("Expected 3 want-haves") - } + require.Empty(t, pq1.cancels, "Expected no cancels sent to p1") + require.ElementsMatch(t, pq0.cancels, []cid.Cid{wb1[0], wh1[0]}, "Expected 2 cids to be cancelled") + require.ElementsMatch(t, pwm.getWantBlocks(), append(wb2, wb1[1]), "Expected 3 want-blocks") + require.ElementsMatch(t, pwm.getWantHaves(), append(wh2, wh1[1]), "Expected 3 want-haves") // Cancel everything clearSent(peerQueues) allCids := append(allwb, allwh...) 
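// Cancelling every outstanding CID should leave no tracked want-blocks or want-haves for either peer.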
pwm.sendCancels(allCids) // Should cancel the remaining want-blocks and want-haves for p0 - if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } + require.ElementsMatch(t, pq0.cancels, []cid.Cid{wb1[1], wh1[1]}, "Expected un-cancelled cids to be cancelled") // Should cancel the remaining want-blocks and want-haves for p1 remainingP1 := append(wb2, wh2...) remainingP1 = append(remainingP1, wb1[1], wh1[1]) - if len(pq1.cancels) != len(remainingP1) { - t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) - } - if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { - t.Fatal("Expected un-cancelled cids to be cancelled") - } - if len(pwm.getWantBlocks()) != 0 { - t.Fatal("Expected 0 want-blocks") - } - if len(pwm.getWantHaves()) != 0 { - t.Fatal("Expected 0 want-haves") - } + require.Equal(t, len(pq1.cancels), len(remainingP1), "mismatch", len(pq1.cancels), len(remainingP1)) + require.ElementsMatch(t, pq1.cancels, remainingP1, "Expected un-cancelled cids to be cancelled") + require.Empty(t, pwm.getWantBlocks(), "Expected 0 want-blocks") + require.Empty(t, pwm.getWantHaves(), "Expected 0 want-haves") } func TestStats(t *testing.T) { @@ -345,11 +273,11 @@ func TestStats(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) peerQueues := make(map[peer.ID]PeerQueue) pq := &mockPQ{} @@ -359,86 +287,54 @@ func TestStats(t *testing.T) { // Send 2 want-blocks and 2 want-haves to p0 pwm.sendWants(p0, cids, cids2) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") // Send 1 old want-block and 2 new want-blocks to p0 - cids3 := testutil.GenerateCids(2) + cids3 := random.Cids(2) pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) - if g.count != 6 { - t.Fatal("Expected 6 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 6, g.count, "Expected 6 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Broadcast 1 old want-have and 2 new want-haves - cids4 := testutil.GenerateCids(2) + cids4 := random.Cids(2) pwm.broadcastWantHaves(append(cids4, cids2[0])) - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 8, g.count, "Expected 8 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Add a second peer pwm.addPeer(pq, p1) - if g.count != 8 { - t.Fatal("Expected 8 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 8, g.count, "Expected 8 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Cancel 1 want-block that was sent to p0 // and 1 want-block that was not sent - cids5 := testutil.GenerateCids(1) + cids5 := random.Cids(1) pwm.sendCancels(append(cids5, cids[0])) - if g.count != 7 { - t.Fatal("Expected 7 wants") - } - if wbg.count != 3 { - t.Fatal("Expected 3 want-blocks") - } + require.Equal(t, 7, g.count, "Expected 7 wants") + require.Equal(t, 3, wbg.count, "Expected 3 want-blocks") // Remove first peer pwm.removePeer(p0) // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 
wants") - } - if wbg.count != 0 { - t.Fatal("Expected all want-blocks to be removed") - } + require.Equal(t, 3, g.count, "Expected 3 wants") + require.Zero(t, wbg.count, "Expected all want-blocks to be removed") // Remove second peer pwm.removePeer(p1) // Should still have 3 broadcast wants - if g.count != 3 { - t.Fatal("Expected 3 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.Equal(t, 3, g.count, "Expected 3 wants") + require.Zero(t, wbg.count, "Expected 0 want-blocks") // Cancel one remaining broadcast want-have pwm.sendCancels(cids2[:1]) - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 0 { - t.Fatal("Expected 0 want-blocks") - } + require.Equal(t, 2, g.count, "Expected 2 wants") + require.Zero(t, wbg.count, "Expected 0 want-blocks") } func TestStatsOverlappingWantBlockWantHave(t *testing.T) { @@ -446,11 +342,11 @@ func TestStatsOverlappingWantBlockWantHave(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) pwm.addPeer(&mockPQ{}, p0) pwm.addPeer(&mockPQ{}, p1) @@ -462,22 +358,14 @@ func TestStatsOverlappingWantBlockWantHave(t *testing.T) { // 2 want-haves and 2 want-blocks to p1 pwm.sendWants(p1, cids2, cids) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Cancel 1 of each group of cids pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) - if g.count != 2 { - t.Fatal("Expected 2 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 2, g.count, "Expected 2 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") } func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { @@ -485,11 +373,11 @@ func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { wbg := &gauge{} pwm := newPeerWantManager(g, wbg) - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) p0 := peers[0] p1 := peers[1] - cids := testutil.GenerateCids(2) - cids2 := testutil.GenerateCids(2) + cids := random.Cids(2) + cids2 := random.Cids(2) pwm.addPeer(&mockPQ{}, p0) pwm.addPeer(&mockPQ{}, p1) @@ -501,20 +389,12 @@ func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { // 2 want-haves and 2 want-blocks to p1 pwm.sendWants(p1, cids2, cids) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 4 { - t.Fatal("Expected 4 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 4, wbg.count, "Expected 4 want-blocks") // Remove p0 pwm.removePeer(p0) - if g.count != 4 { - t.Fatal("Expected 4 wants") - } - if wbg.count != 2 { - t.Fatal("Expected 2 want-blocks") - } + require.Equal(t, 4, g.count, "Expected 4 wants") + require.Equal(t, 2, wbg.count, "Expected 2 want-blocks") } diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index 52447e2c1..9deb77f99 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid 
"github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" ) @@ -58,7 +58,7 @@ func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Ci } func TestNormalSimultaneousFetch(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -66,7 +66,7 @@ func TestNormalSimultaneousFetch(t *testing.T) { ctx := context.Background() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - keys := testutil.GenerateCids(2) + keys := random.Cids(2) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -95,7 +95,7 @@ func TestNormalSimultaneousFetch(t *testing.T) { } func TestDedupingProviderRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -103,7 +103,7 @@ func TestDedupingProviderRequests(t *testing.T) { ctx := context.Background() providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -135,7 +135,7 @@ func TestDedupingProviderRequests(t *testing.T) { } func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -144,7 +144,7 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] // first session will cancel before done firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) @@ -179,7 +179,7 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { } func TestCancelManagerExitsGracefully(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -190,7 +190,7 @@ func TestCancelManagerExitsGracefully(t *testing.T) { providerQueryManager := New(managerCtx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() @@ -214,7 +214,7 @@ func TestCancelManagerExitsGracefully(t *testing.T) { } func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, connectError: errors.New("not able to connect"), @@ -224,7 +224,7 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - key := testutil.GenerateCids(1)[0] + key := random.Cids(1)[0] sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() @@ -247,7 +247,7 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { } func TestRateLimitingRequests(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 5 * time.Millisecond, @@ -258,7 +258,7 @@ func TestRateLimitingRequests(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() - keys := testutil.GenerateCids(maxInProcessRequests + 1) + 
keys := random.Cids(maxInProcessRequests + 1) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var requestChannels []<-chan peer.ID @@ -286,7 +286,7 @@ func TestRateLimitingRequests(t *testing.T) { } func TestFindProviderTimeout(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 10 * time.Millisecond, @@ -295,7 +295,7 @@ func TestFindProviderTimeout(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -310,7 +310,7 @@ func TestFindProviderTimeout(t *testing.T) { } func TestFindProviderPreCanceled(t *testing.T) { - peers := testutil.GeneratePeers(10) + peers := random.Peers(10) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -319,7 +319,7 @@ func TestFindProviderPreCanceled(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithCancel(ctx) cancel() @@ -335,7 +335,7 @@ func TestFindProviderPreCanceled(t *testing.T) { } func TestCancelFindProvidersAfterCompletion(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpn := &fakeProviderNetwork{ peersFound: peers, delay: 1 * time.Millisecond, @@ -344,7 +344,7 @@ func TestCancelFindProvidersAfterCompletion(t *testing.T) { providerQueryManager := New(ctx, fpn) providerQueryManager.Startup() providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) - keys := testutil.GenerateCids(1) + keys := random.Cids(1) sessionCtx, cancel := context.WithCancel(ctx) firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) diff --git a/bitswap/client/internal/session/peerresponsetracker_test.go b/bitswap/client/internal/session/peerresponsetracker_test.go index bdcd80daa..0bf5fe3fe 100644 --- a/bitswap/client/internal/session/peerresponsetracker_test.go +++ b/bitswap/client/internal/session/peerresponsetracker_test.go @@ -4,12 +4,12 @@ import ( "math" "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" ) func TestPeerResponseTrackerInit(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) prt := newPeerResponseTracker() if prt.choose([]peer.ID{}) != "" { @@ -25,7 +25,7 @@ func TestPeerResponseTrackerInit(t *testing.T) { } func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { - peers := testutil.GeneratePeers(4) + peers := random.Peers(4) prt := newPeerResponseTracker() choices := []int{0, 0, 0, 0} @@ -54,7 +54,7 @@ func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { } func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) prt := newPeerResponseTracker() prt.receivedBlockFrom(peers[0]) @@ -79,7 +79,7 @@ func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { } func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { - peers := testutil.GeneratePeers(3) + peers := random.Peers(3) prt := newPeerResponseTracker() probabilities := []float64{0.1, 0.6, 0.3} diff --git 
a/bitswap/client/internal/session/sentwantblockstracker_test.go b/bitswap/client/internal/session/sentwantblockstracker_test.go index c74e8c5f8..7e4435fd8 100644 --- a/bitswap/client/internal/session/sentwantblockstracker_test.go +++ b/bitswap/client/internal/session/sentwantblockstracker_test.go @@ -3,12 +3,12 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" ) func TestSendWantBlocksTracker(t *testing.T) { - peers := testutil.GeneratePeers(2) - cids := testutil.GenerateCids(2) + peers := random.Peers(2) + cids := random.Cids(2) swbt := newSentWantBlocksTracker() if swbt.haveSentWantBlockTo(peers[0], cids[0]) { diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index 2eb166f90..1c40b64e1 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -2,6 +2,7 @@ package session import ( "context" + "slices" "sync" "testing" "time" @@ -11,12 +12,14 @@ import ( bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" "github.com/ipfs/boxo/internal/test" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" blocksutil "github.com/ipfs/go-ipfs-blocksutil" delay "github.com/ipfs/go-ipfs-delay" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type mockSessionMgr struct { @@ -159,7 +162,7 @@ func TestSessionGetBlocks(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -179,34 +182,29 @@ func TestSessionGetBlocks(t *testing.T) { // Should have registered session's interest in blocks intSes := sim.FilterSessionInterested(id, cids) - if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { - t.Fatal("did not register session interest in blocks") - } + require.ElementsMatch(t, intSes[0], cids, "did not register session interest in blocks") // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, broadcastLiveWantsLimit, "did not enqueue correct initial number of wants") // Simulate receiving HAVEs from several peers - peers := testutil.GeneratePeers(5) + peers := random.Peers(5) for i, p := range peers { - blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] + blkIndex := slices.IndexFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == receivedWantReq.cids[i] + }) + blk := blks[blkIndex] session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) } time.Sleep(10 * time.Millisecond) // Verify new peers were recorded - if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { - t.Fatal("peers not recorded by the peer manager") - } + require.ElementsMatch(t, fspm.Peers(), peers, "peers not recorded by the peer manager") // Verify session still wants received blocks _, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should 
still be wanted") - } + require.Empty(t, unwanted, "all blocks should still be wanted") // Simulate receiving DONT_HAVE for a CID session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) @@ -215,9 +213,7 @@ func TestSessionGetBlocks(t *testing.T) { // Verify session still wants received blocks _, unwanted = sim.SplitWantedUnwanted(blks) - if len(unwanted) > 0 { - t.Fatal("all blocks should still be wanted") - } + require.Empty(t, unwanted, "all blocks should still be wanted") // Simulate receiving block for a CID session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) @@ -226,12 +222,9 @@ func TestSessionGetBlocks(t *testing.T) { // Verify session no longer wants received block wanted, unwanted := sim.SplitWantedUnwanted(blks) - if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { - t.Fatal("session wants block that has already been received") - } - if len(wanted) != len(blks)-1 { - t.Fatal("session wants incorrect number of blocks") - } + require.Len(t, unwanted, 1) + require.True(t, unwanted[0].Cid().Equals(blks[0].Cid()), "session wants block that has already been received") + require.Len(t, wanted, len(blks)-1, "session wants incorrect number of blocks") // Shut down session cancel() @@ -239,9 +232,7 @@ func TestSessionGetBlocks(t *testing.T) { time.Sleep(10 * time.Millisecond) // Verify session was removed - if !sm.removeSessionCalled() { - t.Fatal("expected session to be removed") - } + require.True(t, sm.removeSessionCalled(), "expected session to be removed") } func TestSessionFindMorePeers(t *testing.T) { @@ -254,7 +245,7 @@ func TestSessionFindMorePeers(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") session.SetBaseTickDelay(200 * time.Microsecond) @@ -265,9 +256,7 @@ func TestSessionFindMorePeers(t *testing.T) { cids = append(cids, block.Cid()) } _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // The session should initially broadcast want-haves select { @@ -280,7 +269,7 @@ func TestSessionFindMorePeers(t *testing.T) { time.Sleep(20 * time.Millisecond) // need to make sure some latency registers // or there will be no tick set -- time precision on Windows in go is in the // millisecond range - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] blk := blks[0] session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) @@ -302,9 +291,7 @@ func TestSessionFindMorePeers(t *testing.T) { // Make sure the first block is not included because it has already // been received for _, c := range receivedWantReq.cids { - if c.Equals(cids[0]) { - t.Fatal("should not braodcast block that was already received") - } + require.False(t, c.Equals(cids[0]), "should not broadcast block that was already received") } case <-ctx.Done(): t.Fatal("Never rebroadcast want list") @@ -329,7 +316,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -339,17 +326,13 @@ func 
TestSessionOnPeersExhausted(t *testing.T) { cids = append(cids, block.Cid()) } _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Wait for initial want request receivedWantReq := <-fpm.wantReqs // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != broadcastLiveWantsLimit { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, broadcastLiveWantsLimit, "did not enqueue correct initial number of wants") // Signal that all peers have send DONT_HAVE for two of the wants session.onPeersExhausted(cids[len(cids)-2:]) @@ -358,9 +341,7 @@ func TestSessionOnPeersExhausted(t *testing.T) { receivedWantReq = <-fpm.wantReqs // Should have sent out broadcast request for wants - if len(receivedWantReq.cids) != 2 { - t.Fatal("did not enqueue correct initial number of wants") - } + require.Len(t, receivedWantReq.cids, 2, "did not enqueue correct initial number of wants") } func TestSessionFailingToGetFirstBlock(t *testing.T) { @@ -375,7 +356,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -386,9 +367,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { } startTick := time.Now() _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // The session should initially broadcast want-haves select { @@ -410,7 +389,9 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for a request to find more peers to occur select { case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { + if !slices.ContainsFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == k + }) { t.Fatal("did not rebroadcast an active want") } case <-ctx.Done(): @@ -472,7 +453,9 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { // Wait for rebroadcast to occur select { case k := <-fpf.findMorePeersRequested: - if testutil.IndexOf(blks, k) == -1 { + if !slices.ContainsFunc(blks, func(blk blocks.Block) bool { + return blk.Cid() == k + }) { t.Fatal("did not rebroadcast an active want") } case <-ctx.Done(): @@ -488,7 +471,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() // Create a new session with its own context @@ -505,9 +488,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { defer getcancel() getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Cancel the session context sesscancel() @@ -515,9 +496,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { // Expect the GetBlocks() channel to be closed select { case _, ok := <-getBlocksCh: - if ok { - t.Fatal("expected channel to be closed but was not closed") - } + require.False(t, ok, "expected channel to be closed but was not closed") case <-timerCtx.Done(): t.Fatal("expected channel to be closed before 
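The session tests above also replace testutil.IndexOf with the standard library's slices package (Go 1.21+). A small sketch of the equivalent helpers, assuming only go-block-format and go-cid:

    // Finding a block by CID with slices.IndexFunc / slices.ContainsFunc,
    // mirroring the deleted testutil.IndexOf and testutil.ContainsBlock.
    package main

    import (
        "fmt"
        "slices"

        blocks "github.com/ipfs/go-block-format"
        cid "github.com/ipfs/go-cid"
    )

    func indexOf(blks []blocks.Block, c cid.Cid) int {
        return slices.IndexFunc(blks, func(b blocks.Block) bool { return b.Cid() == c })
    }

    func containsBlock(blks []blocks.Block, c cid.Cid) bool {
        return slices.ContainsFunc(blks, func(b blocks.Block) bool { return b.Cid() == c })
    }

    func main() {
        blk := blocks.NewBlock([]byte("hello"))
        fmt.Println(indexOf([]blocks.Block{blk}, blk.Cid()), containsBlock(nil, blk.Cid()))
    }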
timeout") } @@ -525,9 +504,7 @@ func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } + require.True(t, sm.removeSessionCalled(), "expected onShutdown to be called") } func TestSessionOnShutdownCalled(t *testing.T) { @@ -538,7 +515,7 @@ func TestSessionOnShutdownCalled(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() // Create a new session with its own context @@ -552,9 +529,7 @@ func TestSessionOnShutdownCalled(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect RemoveSession to be called - if !sm.removeSessionCalled() { - t.Fatal("expected onShutdown to be called") - } + require.True(t, sm.removeSessionCalled(), "expected onShutdown to be called") } func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { @@ -567,7 +542,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { bpm := bsbpm.New() notif := notifications.New() defer notif.Shutdown() - id := testutil.GenerateSessionID() + id := random.SequenceNext() sm := newMockSessionMgr() session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") blockGenerator := blocksutil.NewBlockGenerator() @@ -575,9 +550,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Wait for initial want request <-fpm.wantReqs @@ -586,7 +559,7 @@ func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { cancelCtx() // Simulate receiving block for a CID - peer := testutil.GeneratePeers(1)[0] + peer := random.Peers(1)[0] session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) time.Sleep(5 * time.Millisecond) diff --git a/bitswap/client/internal/session/sessionwants_test.go b/bitswap/client/internal/session/sessionwants_test.go index f940ac14f..77430135f 100644 --- a/bitswap/client/internal/session/sessionwants_test.go +++ b/bitswap/client/internal/session/sessionwants_test.go @@ -3,8 +3,8 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" ) func TestEmptySessionWants(t *testing.T) { @@ -30,8 +30,8 @@ func TestEmptySessionWants(t *testing.T) { func TestSessionWants(t *testing.T) { sw := newSessionWants(5) - cids := testutil.GenerateCids(10) - others := testutil.GenerateCids(1) + cids := random.Cids(10) + others := random.Cids(1) // Add 10 new wants // toFetch Live @@ -111,7 +111,7 @@ func TestSessionWants(t *testing.T) { func TestPrepareBroadcast(t *testing.T) { sw := newSessionWants(3) - cids := testutil.GenerateCids(10) + cids := random.Cids(10) // Add 6 new wants // toFetch Live @@ -171,7 +171,7 @@ func TestPrepareBroadcast(t *testing.T) { // Test that even after GC broadcast returns correct wants func TestPrepareBroadcastAfterGC(t *testing.T) { sw := newSessionWants(5) - cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2) + cids := random.Cids(liveWantsOrderGCLimit * 2) sw.BlocksRequested(cids) diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go index 86a930f61..ac094ac06 100644 --- 
a/bitswap/client/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -9,9 +9,10 @@ import ( bsbpm "github.com/ipfs/boxo/bitswap/client/internal/blockpresencemanager" bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type sentWants struct { @@ -98,7 +99,8 @@ func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { pm.lk.Lock() defer pm.lk.Unlock() - nw := make(map[peer.ID]*sentWants) + + nw := make(map[peer.ID]*sentWants, len(pm.peerSends)) for p, sentWants := range pm.peerSends { nw[p] = sentWants } @@ -108,10 +110,7 @@ func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { func (pm *mockPeerManager) clearWants() { pm.lk.Lock() defer pm.lk.Unlock() - - for p := range pm.peerSends { - delete(pm.peerSends, p) - } + clear(pm.peerSends) } type exhaustedPeers struct { @@ -141,10 +140,10 @@ func (ep *exhaustedPeers) exhausted() []cid.Cid { } func TestSendWants(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(1) + cids := random.Cids(4) + peers := random.Peers(1) peerA := peers[0] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -168,23 +167,17 @@ func TestSendWants(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } - if len(sw.wantHavesKeys()) > 0 { - t.Fatal("Expecting no want-haves") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), blkCids0, "Wrong keys") + require.Empty(t, sw.wantHavesKeys(), "Expecting no want-haves") } func TestSendsWantBlockToOnePeerOnly(t *testing.T) { - cids := testutil.GenerateCids(4) - peers := testutil.GeneratePeers(2) + cids := random.Cids(4) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -208,12 +201,8 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), blkCids0, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -228,23 +217,17 @@ func TestSendsWantBlockToOnePeerOnly(t *testing.T) { // peerB. 
Should have sent // peerB: want-have cid0, cid1 sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if sw.wantBlocks.Len() > 0 { - t.Fatal("Expecting no want-blocks") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.Zero(t, sw.wantBlocks.Len(), "Expecting no want-blocks") + require.ElementsMatch(t, sw.wantHavesKeys(), blkCids0, "Wrong keys") } func TestReceiveBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -267,12 +250,8 @@ func TestReceiveBlock(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -292,9 +271,7 @@ func TestReceiveBlock(t *testing.T) { // (should not have sent want-block for cid0 because block0 has already // been received) sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } + require.True(t, ok, "Nothing sent to peer") wb := sw.wantBlocksKeys() if len(wb) != 1 || !wb[0].Equals(cids[1]) { t.Fatal("Wrong keys", wb) @@ -302,8 +279,8 @@ func TestReceiveBlock(t *testing.T) { } func TestCancelWants(t *testing.T) { - cids := testutil.GenerateCids(4) - sid := uint64(1) + cids := random.Cids(4) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -330,17 +307,15 @@ func TestCancelWants(t *testing.T) { // Should have sent cancels for cid0, cid2 sent := swc.cancelled() - if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, sent, cancelCids, "Wrong keys") } func TestRegisterSessionWithPeerManager(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -359,9 +334,7 @@ func TestRegisterSessionWithPeerManager(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect session to have been registered with PeerManager - if !pm.has(peerA, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } + require.True(t, pm.has(peerA, sid), "Expected HAVE to register session with PeerManager") // peerB: block cid1 spm.Update(peerB, cids[1:], nil, nil) @@ -370,18 +343,16 @@ func TestRegisterSessionWithPeerManager(t *testing.T) { time.Sleep(10 * time.Millisecond) // Expect session to have been registered with PeerManager - if !pm.has(peerB, sid) { - t.Fatal("Expected HAVE to register session with PeerManager") - } + require.True(t, pm.has(peerB, sid), "Expected HAVE to register session with PeerManager") } func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(3) + cids := random.Cids(2) + peers := random.Peers(3) peerA := peers[0] peerB := peers[1] 
peerC := peers[2] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpt := newFakePeerTagger() fpm := bsspm.New(1, fpt) @@ -432,11 +403,11 @@ func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { } func TestPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -459,12 +430,8 @@ func TestPeerUnavailable(t *testing.T) { // Should have sent // peerA: want-block cid0, cid1 sw, ok := peerSends[peerA] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") // Clear wants (makes keeping track of what's been sent easier) pm.clearWants() @@ -490,20 +457,16 @@ func TestPeerUnavailable(t *testing.T) { // Should now have sent want-block cid0, cid1 to peerB sw, ok = peerSends[peerB] - if !ok { - t.Fatal("Nothing sent to peer") - } - if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { - t.Fatal("Wrong keys") - } + require.True(t, ok, "Nothing sent to peer") + require.ElementsMatch(t, sw.wantBlocksKeys(), cids, "Wrong keys") } func TestPeersExhausted(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) + cids := random.Cids(3) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -531,9 +494,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1]}, "Wrong keys") // Clear exhausted cids ep.clear() @@ -566,9 +527,7 @@ func TestPeersExhausted(t *testing.T) { // All available peers (peer A and peer B) have sent us a DONT_HAVE for // cid2, so expect that onPeersExhausted() will be called with cid2 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[2]}, "Wrong keys") } // Tests that when @@ -576,11 +535,11 @@ func TestPeersExhausted(t *testing.T) { // - the remaining peer becomes unavailable // onPeersExhausted should be sent for that CID func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { - cids := testutil.GenerateCids(2) - peers := testutil.GeneratePeers(2) + cids := random.Cids(2) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -617,19 +576,17 @@ func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, // so expect that onPeersExhausted() will be called with cid1 - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1]}, "Wrong keys") } // Tests that when all the peers are removed from the session // 
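Most of the assertion rewrites above follow one pattern: the hand-rolled testutil.MatchKeysIgnoreOrder becomes testify's require.ElementsMatch, which compares slices ignoring order, accounts for duplicate elements (the old membership check did not), and reports the differing elements on failure. The same file also swaps the manual map-emptying loop for Go 1.21's clear builtin. A short sketch of the assertion:

    // Order-insensitive comparison with testify, replacing MatchKeysIgnoreOrder.
    package main

    import (
        "testing"

        cid "github.com/ipfs/go-cid"
        "github.com/ipfs/go-test/random"
        "github.com/stretchr/testify/require"
    )

    func TestKeysMatchIgnoreOrder(t *testing.T) {
        cids := random.Cids(3)
        reversed := []cid.Cid{cids[2], cids[1], cids[0]}
        require.ElementsMatch(t, cids, reversed, "same elements, any order")
    }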
onPeersExhausted should be called with all outstanding CIDs func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { - cids := testutil.GenerateCids(3) - peers := testutil.GeneratePeers(2) + cids := random.Cids(3) + peers := random.Peers(2) peerA := peers[0] peerB := peers[1] - sid := uint64(1) + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -661,15 +618,13 @@ func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { // Expect that onPeersExhausted() will be called with all cids for blocks // that have not been received - if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) { - t.Fatal("Wrong keys") - } + require.ElementsMatch(t, ep.exhausted(), []cid.Cid{cids[1], cids[2]}, "Wrong keys") } func TestConsecutiveDontHaveLimit(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -691,9 +646,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that do not exceed limit for _, c := range cids[1:peerDontHaveLimit] { @@ -705,9 +658,7 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(20 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[peerDontHaveLimit:] { @@ -719,15 +670,13 @@ func TestConsecutiveDontHaveLimit(t *testing.T) { time.Sleep(20 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") } func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -749,9 +698,7 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, // where consecutive DONT_HAVEs would have exceeded limit @@ -776,15 +723,13 @@ func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") } func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() 
fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -806,9 +751,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(5 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[1 : peerDontHaveLimit+2] { @@ -820,9 +763,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") // Receive a HAVE from peer (adds it back into the session) bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) @@ -832,11 +773,9 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") - cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) + cids2 := random.Cids(peerDontHaveLimit + 10) // Receive DONT_HAVEs from peer that don't exceed limit for _, c := range cids2[1:peerDontHaveLimit] { @@ -848,9 +787,7 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids2[peerDontHaveLimit:] { @@ -862,15 +799,13 @@ func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { time.Sleep(10 * time.Millisecond) // Session should remove peer - if has := fpm.HasPeer(p); has { - t.Fatal("Expected peer not to be available") - } + require.False(t, fpm.HasPeer(p), "Expected peer not to be available") } func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { - cids := testutil.GenerateCids(peerDontHaveLimit + 10) - p := testutil.GeneratePeers(1)[0] - sid := uint64(1) + cids := random.Cids(peerDontHaveLimit + 10) + p := random.Peers(1)[0] + const sid = uint64(1) pm := newMockPeerManager() fpm := newFakeSessionPeerManager() swc := newMockSessionMgr() @@ -893,9 +828,7 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { time.Sleep(10 * time.Millisecond) // Peer should be available - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") // Receive DONT_HAVEs from peer that exceed limit for _, c := range cids[1 : peerDontHaveLimit+5] { @@ -908,7 +841,5 @@ func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { // Peer should still be available because it has a block that we want. 
// (We received a HAVE for cid 0 but didn't yet receive the block) - if has := fpm.HasPeer(p); !has { - t.Fatal("Expected peer to be available") - } + require.True(t, fpm.HasPeer(p), "Expected peer to be available") } diff --git a/bitswap/client/internal/session/wantinfo_test.go b/bitswap/client/internal/session/wantinfo_test.go index ad42b174d..a21670d8d 100644 --- a/bitswap/client/internal/session/wantinfo_test.go +++ b/bitswap/client/internal/session/wantinfo_test.go @@ -3,7 +3,7 @@ package session import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" ) func TestEmptyWantInfo(t *testing.T) { @@ -15,7 +15,7 @@ func TestEmptyWantInfo(t *testing.T) { } func TestSetPeerBlockPresence(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPUnknown) @@ -35,7 +35,7 @@ func TestSetPeerBlockPresence(t *testing.T) { } func TestSetPeerBlockPresenceBestLower(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPHave) @@ -55,7 +55,7 @@ func TestSetPeerBlockPresenceBestLower(t *testing.T) { } func TestRemoveThenSetDontHave(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) wp := newWantInfo(newPeerResponseTracker()) wp.setPeerBlockPresence(peers[0], BPUnknown) diff --git a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go index f2b4d8aa0..a9779e297 100644 --- a/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go +++ b/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -3,15 +3,15 @@ package sessioninterestmanager import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-test/random" ) func TestEmpty(t *testing.T) { sim := New() - ses := uint64(1) - cids := testutil.GenerateCids(2) + const ses = 1 + cids := random.Cids(2) res := sim.FilterSessionInterested(ses, cids) if len(res) != 1 || len(res[0]) > 0 { t.Fatal("Expected no interest") @@ -24,10 +24,10 @@ func TestEmpty(t *testing.T) { func TestBasic(t *testing.T) { sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = 1 + const ses2 = 2 + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) res := sim.FilterSessionInterested(ses1, cids1) @@ -59,8 +59,8 @@ func TestBasic(t *testing.T) { func TestInterestedSessions(t *testing.T) { sim := New() - ses := uint64(1) - cids := testutil.GenerateCids(3) + const ses = 1 + cids := random.Cids(3) sim.RecordSessionInterest(ses, cids[0:2]) if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { @@ -86,10 +86,10 @@ func TestInterestedSessions(t *testing.T) { func TestRemoveSession(t *testing.T) { sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = 1 + const ses2 = 2 + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) sim.RecordSessionInterest(ses2, cids2) sim.RemoveSession(ses1) @@ -114,10 +114,10 @@ func TestRemoveSession(t *testing.T) { func TestRemoveSessionInterested(t 
*testing.T) { sim := New() - ses1 := uint64(1) - ses2 := uint64(2) - cids1 := testutil.GenerateCids(2) - cids2 := append(testutil.GenerateCids(1), cids1[1]) + const ses1 = uint64(1) + const ses2 = uint64(2) + cids1 := random.Cids(2) + cids2 := append(random.Cids(1), cids1[1]) sim.RecordSessionInterest(ses1, cids1) sim.RecordSessionInterest(ses2, cids2) @@ -148,10 +148,10 @@ func TestRemoveSessionInterested(t *testing.T) { } func TestSplitWantedUnwanted(t *testing.T) { - blks := testutil.GenerateBlocksOfSize(3, 1024) + blks := random.BlocksOfSize(3, 1024) sim := New() - ses1 := uint64(1) - ses2 := uint64(2) + const ses1 = 1 + const ses2 = 2 var cids []cid.Cid for _, b := range blks { diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go index 8b6c4207c..e8259b1d8 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -12,11 +12,11 @@ import ( bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bssession "github.com/ipfs/boxo/bitswap/client/internal/session" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" - "github.com/ipfs/boxo/bitswap/internal/testutil" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) type fakeSession struct { @@ -153,9 +153,7 @@ func TestReceiveFrom(t *testing.T) { t.Fatal("should have received want-haves but didn't") } - if len(pm.cancelled()) != 1 { - t.Fatal("should have sent cancel for received blocks") - } + require.Len(t, pm.cancelled(), 1, "should have sent cancel for received blocks") } func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { @@ -246,19 +244,13 @@ func TestShutdown(t *testing.T) { sim.RecordSessionInterest(firstSession.ID(), cids) sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) - if !bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be added to block presence manager") - } + require.True(t, bpm.HasKey(block.Cid()), "expected cid to be added to block presence manager") sm.Shutdown() // wait for cleanup time.Sleep(10 * time.Millisecond) - if bpm.HasKey(block.Cid()) { - t.Fatal("expected cid to be removed from block presence manager") - } - if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { - t.Fatal("expected cancels to be sent") - } + require.False(t, bpm.HasKey(block.Cid()), "expected cid to be removed from block presence manager") + require.ElementsMatch(t, pm.cancelled(), cids, "expected cancels to be sent") } diff --git a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go index 0d9275579..ce7a24872 100644 --- a/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go +++ b/bitswap/client/internal/sessionpeermanager/sessionpeermanager_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" peer "github.com/libp2p/go-libp2p/core/peer" ) @@ -78,7 +78,7 @@ func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { } func TestAddPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) isNew := spm.AddPeer(peers[0]) @@ -98,7 +98,7 @@ func TestAddPeers(t *testing.T) { } func TestRemovePeers(t *testing.T) { - peers := 
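One stylistic note on the constant rewrites above: `const ses1 = 1` declares an untyped constant, which converts implicitly to uint64 at each call site, while the `uint64(1)` form pins the type explicitly. Both are interchangeable here, as this sketch shows:

    // Untyped vs. typed constants, as in the sessioninterestmanager hunks.
    package main

    import "fmt"

    func show(session uint64) { fmt.Println(session) }

    func main() {
        const ses1 = 1         // untyped: adapts to uint64 at the call site
        const ses2 = uint64(2) // typed: already uint64
        show(ses1)
        show(ses2)
    }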
testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) existed := spm.RemovePeer(peers[0]) @@ -124,7 +124,7 @@ func TestRemovePeers(t *testing.T) { } func TestHasPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.HasPeers() { @@ -153,7 +153,7 @@ func TestHasPeers(t *testing.T) { } func TestHasPeer(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.HasPeer(peers[0]) { @@ -181,7 +181,7 @@ func TestHasPeer(t *testing.T) { } func TestPeers(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if len(spm.Peers()) > 0 { @@ -205,7 +205,7 @@ func TestPeers(t *testing.T) { } func TestPeersDiscovered(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) spm := New(1, &fakePeerTagger{}) if spm.PeersDiscovered() { @@ -224,7 +224,7 @@ func TestPeersDiscovered(t *testing.T) { } func TestPeerTagging(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpt := &fakePeerTagger{} spm := New(1, fpt) @@ -250,7 +250,7 @@ func TestPeerTagging(t *testing.T) { } func TestProtectConnection(t *testing.T) { - peers := testutil.GeneratePeers(1) + peers := random.Peers(1) peerA := peers[0] fpt := newFakePeerTagger() spm := New(1, fpt) @@ -276,7 +276,7 @@ func TestProtectConnection(t *testing.T) { } func TestShutdown(t *testing.T) { - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) fpt := newFakePeerTagger() spm := New(1, fpt) diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go deleted file mode 100644 index 3fc2a8e0a..000000000 --- a/bitswap/internal/testutil/testutil.go +++ /dev/null @@ -1,142 +0,0 @@ -package testutil - -import ( - "crypto/rand" - "strconv" - - "github.com/ipfs/boxo/bitswap/client/wantlist" - bsmsg "github.com/ipfs/boxo/bitswap/message" - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var ( - blockGenerator = blocksutil.NewBlockGenerator() - prioritySeq int32 -) - -// GenerateBlocksOfSize generates a series of blocks of the given byte size -func GenerateBlocksOfSize(n int, size int64) []blocks.Block { - generatedBlocks := make([]blocks.Block, 0, n) - for i := 0; i < n; i++ { - // rand.Read never errors - buf := make([]byte, size) - rand.Read(buf) - b := blocks.NewBlock(buf) - generatedBlocks = append(generatedBlocks, b) - - } - return generatedBlocks -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -// GenerateMessageEntries makes fake bitswap message entries. -func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { - bsmsgs := make([]bsmsg.Entry, 0, n) - for i := 0; i < n; i++ { - prioritySeq++ - msg := bsmsg.Entry{ - Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), - Cancel: isCancel, - } - bsmsgs = append(bsmsgs, msg) - } - return bsmsgs -} - -var peerSeq int - -// GeneratePeers creates n peer ids. 
-func GeneratePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(strconv.Itoa(peerSeq)) - peerIds = append(peerIds, p) - } - return peerIds -} - -var nextSession uint64 - -// GenerateSessionID make a unit session identifier. -func GenerateSessionID() uint64 { - nextSession++ - return uint64(nextSession) -} - -// ContainsPeer returns true if a peer is found n a list of peers. -func ContainsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - -// IndexOf returns the index of a given cid in an array of blocks -func IndexOf(blks []blocks.Block, c cid.Cid) int { - for i, n := range blks { - if n.Cid() == c { - return i - } - } - return -1 -} - -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - return IndexOf(blks, block.Cid()) != -1 -} - -// ContainsKey returns true if a key is found n a list of CIDs. -func ContainsKey(ks []cid.Cid, c cid.Cid) bool { - for _, k := range ks { - if c == k { - return true - } - } - return false -} - -// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if -// they're in a different order) -func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { - if len(ks1) != len(ks2) { - return false - } - - for _, k := range ks1 { - if !ContainsKey(ks2, k) { - return false - } - } - return true -} - -// MatchPeersIgnoreOrder returns true if the lists of peers match (even if -// they're in a different order) -func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { - if len(ps1) != len(ps2) { - return false - } - - for _, p := range ps1 { - if !ContainsPeer(ps2, p) { - return false - } - } - return true -} diff --git a/bitswap/internal/testutil/testutil_test.go b/bitswap/internal/testutil/testutil_test.go deleted file mode 100644 index c4dc1af15..000000000 --- a/bitswap/internal/testutil/testutil_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package testutil - -import ( - "testing" - - blocks "github.com/ipfs/go-block-format" -) - -func TestGenerateBlocksOfSize(t *testing.T) { - for _, b1 := range GenerateBlocksOfSize(10, 100) { - b2 := blocks.NewBlock(b1.RawData()) - if b2.Cid() != b1.Cid() { - t.Fatal("block CIDs mismatch") - } - } -} diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index bb3c52266..3107efbcf 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" + "github.com/ipfs/go-test/random" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) @@ -46,7 +46,7 @@ func wait(t *testing.T, c *connectEventManager) { func TestConnectEventManagerConnectDisconnect(t *testing.T) { connListener := newMockConnListener() - peers := testutil.GeneratePeers(2) + peers := random.Peers(2) cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) @@ -85,7 +85,7 @@ func TestConnectEventManagerConnectDisconnect(t *testing.T) { func TestConnectEventManagerMarkUnresponsive(t *testing.T) { connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) @@ -134,7 +134,7 @@ func TestConnectEventManagerMarkUnresponsive(t *testing.T) { func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t 
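With bitswap/internal/testutil deleted above, the helper-to-replacement mapping used across this patch is, in summary (the last line is an assumption; those helpers' call sites are not shown in these hunks):

    GenerateBlocksOfSize(n, size) -> random.BlocksOfSize(n, size)
    GenerateCids(n)               -> random.Cids(n)
    GeneratePeers(n)              -> random.Peers(n)
    GenerateSessionID()           -> random.SequenceNext()
    IndexOf / ContainsBlock       -> slices.IndexFunc / slices.ContainsFunc
    MatchKeysIgnoreOrder / MatchPeersIgnoreOrder -> require.ElementsMatch
    ContainsPeer / ContainsKey    -> slices.Contains (assumed; not exercised in these hunks)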
*testing.T) { connListener := newMockConnListener() - p := testutil.GeneratePeers(1)[0] + p := random.Peers(1)[0] cem := newConnectEventManager(connListener) cem.Start() t.Cleanup(cem.Stop) diff --git a/bitswap/server/internal/decision/blockstoremanager_test.go b/bitswap/server/internal/decision/blockstoremanager_test.go index 1192873f3..f65c88e83 100644 --- a/bitswap/server/internal/decision/blockstoremanager_test.go +++ b/bitswap/server/internal/decision/blockstoremanager_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/ipfs/boxo/bitswap/internal/testutil" blockstore "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -16,6 +15,7 @@ import ( ds_sync "github.com/ipfs/go-datastore/sync" delay "github.com/ipfs/go-ipfs-delay" "github.com/ipfs/go-metrics-interface" + "github.com/ipfs/go-test/random" ) func newBlockstoreManagerForTesting( @@ -40,7 +40,7 @@ func TestBlockstoreManagerNotFoundKey(t *testing.T) { bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 5) - cids := testutil.GenerateCids(4) + cids := random.Cids(4) sizes, err := bsm.getBlockSizes(ctx, cids) if err != nil { t.Fatal(err) @@ -158,11 +158,11 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) - workerCount := 5 + const workerCount = 5 bsm := newBlockstoreManagerForTesting(t, ctx, bstore, workerCount) - blkSize := int64(8 * 1024) - blks := testutil.GenerateBlocksOfSize(32, blkSize) + const blkSize = 8 * 1024 + blks := random.BlocksOfSize(32, blkSize) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -195,14 +195,14 @@ func TestBlockstoreManagerConcurrency(t *testing.T) { func TestBlockstoreManagerClose(t *testing.T) { ctx := context.Background() - delayTime := 20 * time.Millisecond + const delayTime = 20 * time.Millisecond bsdelay := delay.Fixed(delayTime) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - blks := testutil.GenerateBlocksOfSize(10, 1024) + blks := random.BlocksOfSize(10, 1024) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) @@ -227,7 +227,7 @@ func TestBlockstoreManagerClose(t *testing.T) { } func TestBlockstoreManagerCtxDone(t *testing.T) { - delayTime := 20 * time.Millisecond + const delayTime = 20 * time.Millisecond bsdelay := delay.Fixed(delayTime) underlyingDstore := ds_sync.MutexWrap(ds.NewMapDatastore()) @@ -238,7 +238,7 @@ func TestBlockstoreManagerCtxDone(t *testing.T) { ctx := context.Background() bsm := newBlockstoreManagerForTesting(t, ctx, bstore, 3) - blks := testutil.GenerateBlocksOfSize(100, 128) + blks := random.BlocksOfSize(100, 128) var ks []cid.Cid for _, b := range blks { ks = append(ks, b.Cid()) diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index b83342302..593bbde0f 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -15,7 +15,6 @@ import ( "github.com/benbjohnson/clock" wl "github.com/ipfs/boxo/bitswap/client/wantlist" - "github.com/ipfs/boxo/bitswap/internal/testutil" message "github.com/ipfs/boxo/bitswap/message" pb "github.com/ipfs/boxo/bitswap/message/pb" blockstore "github.com/ipfs/boxo/blockstore" @@ -23,6 +22,7 @@ import ( "github.com/ipfs/go-cid" ds 
"github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" + "github.com/ipfs/go-test/random" process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p/core/peer" libp2ptest "github.com/libp2p/go-libp2p/core/test" @@ -222,8 +222,8 @@ func TestOutboxClosedWhenEngineClosed(t *testing.T) { } func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" - vowels := "aeiou" + const alphabet = "abcdefghijklmnopqrstuvwxyz" + const vowels = "aeiou" bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { @@ -562,7 +562,7 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { } func TestPartnerWantHaveWantBlockActive(t *testing.T) { - alphabet := "abcdefghijklmnopqrstuvwxyz" + const alphabet = "abcdefghijklmnopqrstuvwxyz" bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) for _, letter := range strings.Split(alphabet, "") { @@ -904,7 +904,7 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) @@ -950,7 +950,7 @@ func TestSendDontHave(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) @@ -1016,7 +1016,7 @@ func TestWantlistForPeer(t *testing.T) { e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) - blks := testutil.GenerateBlocksOfSize(4, 8*1024) + blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false) msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) @@ -1455,7 +1455,7 @@ func TestTaggingPeers(t *testing.T) { } func TestTaggingUseful(t *testing.T) { - peerSampleIntervalHalf := 10 * time.Millisecond + const peerSampleIntervalHalf = 10 * time.Millisecond ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() diff --git a/bitswap/server/internal/decision/taskmerger_test.go b/bitswap/server/internal/decision/taskmerger_test.go index ae3b0384d..e1b58d056 100644 --- a/bitswap/server/internal/decision/taskmerger_test.go +++ b/bitswap/server/internal/decision/taskmerger_test.go @@ -3,13 +3,13 @@ package decision import ( "testing" - "github.com/ipfs/boxo/bitswap/internal/testutil" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-test/random" ) func TestPushHaveVsBlock(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantHave := 
peertask.Task{ Topic: "1", @@ -61,7 +61,7 @@ func TestPushHaveVsBlock(t *testing.T) { } func TestPushSizeInfo(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlockBlockSize := 10 wantBlockDontHaveBlockSize := 0 @@ -131,8 +131,8 @@ func TestPushSizeInfo(t *testing.T) { } } - isWantBlock := true - isWantHave := false + const isWantBlock = true + const isWantHave = false // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) @@ -173,7 +173,7 @@ func TestPushSizeInfo(t *testing.T) { } func TestPushHaveVsBlockActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlock := peertask.Task{ Topic: "1", @@ -227,7 +227,7 @@ func TestPushHaveVsBlockActive(t *testing.T) { } func TestPushSizeInfoActive(t *testing.T) { - partner := testutil.GeneratePeers(1)[0] + partner := random.Peers(1)[0] wantBlock := peertask.Task{ Topic: "1", diff --git a/chunker/buzhash_test.go b/chunker/buzhash_test.go index 2eaf5ae32..0c334d54f 100644 --- a/chunker/buzhash_test.go +++ b/chunker/buzhash_test.go @@ -5,13 +5,13 @@ import ( "io" "testing" - util "github.com/ipfs/boxo/util" + random "github.com/ipfs/go-test/random" ) func testBuzhashChunking(t *testing.T, buf []byte) (chunkCount int) { t.Parallel() - n, err := util.NewTimeSeededRand().Read(buf) + n, err := random.NewRand().Read(buf) if n < len(buf) { t.Fatalf("expected %d bytes, got %d", len(buf), n) } diff --git a/chunker/rabin_test.go b/chunker/rabin_test.go index 31f3464ee..98939d36d 100644 --- a/chunker/rabin_test.go +++ b/chunker/rabin_test.go @@ -6,15 +6,15 @@ import ( "io" "testing" - util "github.com/ipfs/boxo/util" blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-test/random" ) func TestRabinChunking(t *testing.T) { t.Parallel() data := make([]byte, 1024*1024*16) - n, err := util.NewTimeSeededRand().Read(data) + n, err := random.NewRand().Read(data) if n < len(data) { t.Fatalf("expected %d bytes, got %d", len(data), n) } @@ -72,7 +72,7 @@ func testReuse(t *testing.T, cr newSplitter) { t.Parallel() data := make([]byte, 1024*1024*16) - n, err := util.NewTimeSeededRand().Read(data) + n, err := random.NewRand().Read(data) if n < len(data) { t.Fatalf("expected %d bytes, got %d", len(data), n) } diff --git a/chunker/splitting_test.go b/chunker/splitting_test.go index c6712446a..23170ee37 100644 --- a/chunker/splitting_test.go +++ b/chunker/splitting_test.go @@ -5,12 +5,12 @@ import ( "io" "testing" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ) func randBuf(t *testing.T, size int) []byte { buf := make([]byte, size) - if _, err := u.NewTimeSeededRand().Read(buf); err != nil { + if _, err := random.NewRand().Read(buf); err != nil { t.Fatal("failed to read enough randomness") } return buf @@ -25,7 +25,7 @@ func copyBuf(buf []byte) []byte { func TestSizeSplitterOverAllocate(t *testing.T) { t.Parallel() - max := 1000 + const max = 1000 r := bytes.NewReader(randBuf(t, max)) chunksize := int64(1024 * 256) splitter := NewSizeSplitter(r, chunksize) @@ -80,10 +80,10 @@ func TestSizeSplitterFillsChunks(t *testing.T) { } t.Parallel() - max := 10000000 + const max = 10000000 b := randBuf(t, max) r := &clipReader{r: bytes.NewReader(b), size: 4000} - chunksize := int64(1024 * 256) + const chunksize = 1024 * 256 c, _ := Chan(NewSizeSplitter(r, chunksize)) sofar := 0 @@ -98,7 +98,7 @@ func 
TestSizeSplitterFillsChunks(t *testing.T) { copy(whole[sofar:], chunk) sofar += len(chunk) - if sofar != max && len(chunk) < int(chunksize) { + if sofar != max && len(chunk) < chunksize { t.Fatal("sizesplitter split at a smaller size") } } diff --git a/examples/go.sum b/examples/go.sum index 0846f18ba..d988c58c3 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -215,6 +215,8 @@ github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fG github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= +github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= +github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= diff --git a/go.mod b/go.mod index f40b0b66e..a777c7fba 100644 --- a/go.mod +++ b/go.mod @@ -30,6 +30,7 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-peertaskqueue v0.8.1 + github.com/ipfs/go-test v0.0.4 github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipld/go-car v0.6.2 github.com/ipld/go-car/v2 v2.13.1 diff --git a/go.sum b/go.sum index f8ca0bf1f..2ca728eb4 100644 --- a/go.sum +++ b/go.sum @@ -222,6 +222,8 @@ github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fG github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= +github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= +github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= diff --git a/ipld/merkledag/merkledag_test.go b/ipld/merkledag/merkledag_test.go index ffe4946ca..004d462aa 100644 --- a/ipld/merkledag/merkledag_test.go +++ b/ipld/merkledag/merkledag_test.go @@ -23,10 +23,10 @@ import ( bserv "github.com/ipfs/boxo/blockservice" bstest "github.com/ipfs/boxo/blockservice/test" offline "github.com/ipfs/boxo/exchange/offline" - u "github.com/ipfs/boxo/util" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" prime "github.com/ipld/go-ipld-prime" mh "github.com/multiformats/go-multihash" ) @@ -353,7 +353,7 @@ func (devZero) Read(b []byte) (int, error) { } func TestBatchFetch(t *testing.T) { - read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + read := io.LimitReader(random.NewRand(), 1024*32) runBatchFetchTest(t, read) } @@ -513,7 +513,7 @@ func TestFetchGraph(t *testing.T) { dservs = append(dservs, NewDAGService(bsi)) } - read := io.LimitReader(u.NewTimeSeededRand(), 1024*32) + read := io.LimitReader(random.NewRand(), 1024*32) root := makeTestDAG(t, read, dservs[0]) err := 
FetchGraph(context.TODO(), root.Cid(), dservs[1]) @@ -595,7 +595,7 @@ func TestWalk(t *testing.T) { bsi := bstest.Mocks(1) ds := NewDAGService(bsi[0]) - read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024) + read := io.LimitReader(random.NewRand(), 1024*1024) root := makeTestDAG(t, read, ds) set := cid.NewSet() diff --git a/ipld/unixfs/hamt/hamt_stress_test.go b/ipld/unixfs/hamt/hamt_stress_test.go index 89c3c69b4..de8575d22 100644 --- a/ipld/unixfs/hamt/hamt_stress_test.go +++ b/ipld/unixfs/hamt/hamt_stress_test.go @@ -33,14 +33,6 @@ type testOp struct { Val string } -func stringArrToSet(arr []string) map[string]bool { - out := make(map[string]bool) - for _, s := range arr { - out[s] = true - } - return out -} - // generate two different random sets of operations to result in the same // ending directory (same set of entries at the end) and execute each of them // in turn, then compare to ensure the output is the same on each. @@ -147,7 +139,10 @@ func executeOpSet(t *testing.T, ds ipld.DAGService, width int, ops []testOp) (*S } func genOpSet(seed int64, keep, temp []string) []testOp { - tempset := stringArrToSet(temp) + tempset := make(map[string]struct{}, len(temp)) + for _, s := range temp { + tempset[s] = struct{}{} + } allnames := append(keep, temp...) shuffle(seed, allnames) @@ -172,7 +167,7 @@ func genOpSet(seed int64, keep, temp []string) []testOp { Val: next, }) - if tempset[next] { + if _, ok := tempset[next]; ok { todel = append(todel, next) } } else { diff --git a/ipld/unixfs/importer/balanced/balanced_test.go b/ipld/unixfs/importer/balanced/balanced_test.go index 17afbb232..5a5dcf9ad 100644 --- a/ipld/unixfs/importer/balanced/balanced_test.go +++ b/ipld/unixfs/importer/balanced/balanced_test.go @@ -14,8 +14,8 @@ import ( chunker "github.com/ipfs/boxo/chunker" dag "github.com/ipfs/boxo/ipld/merkledag" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) // TODO: extract these tests and more as a generic layout test suite @@ -41,7 +41,7 @@ func buildTestDag(ds ipld.DAGService, spl chunker.Splitter) (*dag.ProtoNode, err func getTestDag(t *testing.T, ds ipld.DAGService, size int64, blksize int64) (*dag.ProtoNode, []byte) { data := make([]byte, size) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) r := bytes.NewReader(data) nd, err := buildTestDag(ds, chunker.NewSizeSplitter(r, blksize)) diff --git a/ipld/unixfs/importer/importer_test.go b/ipld/unixfs/importer/importer_test.go index 85028257d..1d20525ea 100644 --- a/ipld/unixfs/importer/importer_test.go +++ b/ipld/unixfs/importer/importer_test.go @@ -10,14 +10,14 @@ import ( chunker "github.com/ipfs/boxo/chunker" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) func getBalancedDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { ds := mdtest.Mock() - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) nd, err := BuildDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) @@ -27,7 +27,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DA func getTrickleDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAGService) { ds := mdtest.Mock() - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) nd, err 
:= BuildTrickleDagFromReader(ds, chunker.NewSizeSplitter(r, blksize)) if err != nil { t.Fatal(err) @@ -38,7 +38,7 @@ func getTrickleDag(t testing.TB, size int64, blksize int64) (ipld.Node, ipld.DAG func TestStableCid(t *testing.T) { ds := mdtest.Mock() buf := make([]byte, 10*1024*1024) - u.NewSeededRand(0xdeadbeef).Read(buf) + random.NewSeededRand(0xdeadbeef).Read(buf) r := bytes.NewReader(buf) nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) @@ -46,7 +46,7 @@ func TestStableCid(t *testing.T) { t.Fatal(err) } - expected, err := cid.Decode("QmZN1qquw84zhV4j6vT56tCcmFxaDaySL1ezTXFvMdNmrK") + expected, err := cid.Decode("QmPu94p2EkpSpgKdyz8eWomA7edAQN6maztoBycMZFixyz") if err != nil { t.Fatal(err) } @@ -72,7 +72,7 @@ func TestStableCid(t *testing.T) { func TestBalancedDag(t *testing.T) { ds := mdtest.Mock() buf := make([]byte, 10000) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) r := bytes.NewReader(buf) nd, err := BuildDagFromReader(ds, chunker.DefaultSplitter(r)) diff --git a/ipld/unixfs/importer/trickle/trickle_test.go b/ipld/unixfs/importer/trickle/trickle_test.go index e525cd9e8..9078fdc02 100644 --- a/ipld/unixfs/importer/trickle/trickle_test.go +++ b/ipld/unixfs/importer/trickle/trickle_test.go @@ -15,8 +15,8 @@ import ( chunker "github.com/ipfs/boxo/chunker" merkledag "github.com/ipfs/boxo/ipld/merkledag" mdtest "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" ) type UseRawLeaves bool @@ -90,7 +90,7 @@ func dup(b []byte) []byte { func testFileConsistency(t *testing.T, bs chunker.SplitterGen, nbytes int, rawLeaves UseRawLeaves) { should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -120,9 +120,10 @@ func TestBuilderConsistency(t *testing.T) { } func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := 100000 + const nbytes = 100000 buf := new(bytes.Buffer) - io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes)) + io.CopyN(buf, random.NewRand(), int64(nbytes)) + should := dup(buf.Bytes()) dagserv := mdtest.Mock() nd, err := buildTestDag(dagserv, chunker.DefaultSplitter(buf), rawLeaves) @@ -163,9 +164,9 @@ func TestIndirectBlocks(t *testing.T) { func testIndirectBlocks(t *testing.T, rawLeaves UseRawLeaves) { splitter := chunker.SizeSplitterGen(512) - nbytes := 1024 * 1024 + const nbytes = 1024 * 1024 buf := make([]byte, nbytes) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) read := bytes.NewReader(buf) @@ -195,9 +196,9 @@ func TestSeekingBasic(t *testing.T) { } func testSeekingBasic(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -236,9 +237,9 @@ func TestSeekToBegin(t *testing.T) { } func testSeekToBegin(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -284,9 +285,9 @@ func TestSeekToAlmostBegin(t *testing.T) { } func testSeekToAlmostBegin(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(10 * 1024) + const nbytes = 10 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + 
random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -332,9 +333,9 @@ func TestSeekEnd(t *testing.T) { } func testSeekEnd(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(50 * 1024) + const nbytes = 50 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -362,9 +363,9 @@ func TestSeekEndSingleBlockFile(t *testing.T) { } func testSeekEndSingleBlockFile(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(100) + const nbytes = 100 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -392,9 +393,9 @@ func TestSeekingStress(t *testing.T) { } func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(1024 * 1024) + const nbytes = 1024 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -440,9 +441,9 @@ func TestSeekingConsistency(t *testing.T) { } func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(128 * 1024) + const nbytes = 128 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() @@ -458,7 +459,7 @@ func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { out := make([]byte, nbytes) - for coff := nbytes - 4096; coff >= 0; coff -= 4096 { + for coff := int64(nbytes - 4096); coff >= 0; coff -= 4096 { t.Log(coff) n, err := rs.Seek(coff, io.SeekStart) if err != nil { @@ -487,9 +488,9 @@ func TestAppend(t *testing.T) { } func testAppend(t *testing.T, rawLeaves UseRawLeaves) { - nbytes := int64(128 * 1024) + const nbytes = 128 * 1024 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) // Reader for half the bytes read := bytes.NewReader(should[:nbytes/2]) @@ -554,9 +555,9 @@ func testMultipleAppends(t *testing.T, rawLeaves UseRawLeaves) { ds := mdtest.Mock() // TODO: fix small size appends and make this number bigger - nbytes := int64(1000) + const nbytes = 1000 should := make([]byte, nbytes) - u.NewTimeSeededRand().Read(should) + random.NewRand().Read(should) read := bytes.NewReader(nil) nd, err := buildTestDag(ds, chunker.NewSizeSplitter(read, 500), rawLeaves) diff --git a/ipld/unixfs/mod/dagmodifier_test.go b/ipld/unixfs/mod/dagmodifier_test.go index fab7a125b..ebbc98b10 100644 --- a/ipld/unixfs/mod/dagmodifier_test.go +++ b/ipld/unixfs/mod/dagmodifier_test.go @@ -7,19 +7,17 @@ import ( "testing" dag "github.com/ipfs/boxo/ipld/merkledag" + "github.com/ipfs/boxo/ipld/unixfs" h "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" trickle "github.com/ipfs/boxo/ipld/unixfs/importer/trickle" uio "github.com/ipfs/boxo/ipld/unixfs/io" testu "github.com/ipfs/boxo/ipld/unixfs/test" - - "github.com/ipfs/boxo/ipld/unixfs" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ) func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, opts testu.NodeOpts) []byte { newdata := make([]byte, size) - r := u.NewTimeSeededRand() - r.Read(newdata) + random.NewRand().Read(newdata) if size+beg > uint64(len(orig)) { orig = append(orig, make([]byte, (size+beg)-uint64(len(orig)))...) 
@@ -135,7 +133,7 @@ func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { t.Fatal(err) } - expected := uint64(50000 + 3500 + 3000) + const expected = uint64(50000 + 3500 + 3000) if size != expected { t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected) } @@ -161,7 +159,7 @@ func testMultiWrite(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 4000) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[i:i+1], int64(i)) @@ -205,7 +203,7 @@ func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 20) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[i:i+1], int64(i)) @@ -244,7 +242,7 @@ func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { } towrite := make([]byte, 2000) - u.NewTimeSeededRand().Read(towrite) + random.NewRand().Read(towrite) nw, err := dagmod.Write(towrite) if err != nil { @@ -277,7 +275,7 @@ func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { } data := make([]byte, 1000) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < len(data); i++ { n, err := dagmod.WriteAt(data[:i+1], 0) @@ -313,11 +311,11 @@ func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { dagmod.RawLeaves = true } - wrsize := 1000 - datasize := 10000000 + const wrsize = 1000 + const datasize = 10000000 data := make([]byte, datasize) - u.NewTimeSeededRand().Read(data) + random.NewRand().Read(data) for i := 0; i < datasize/wrsize; i++ { n, err := dagmod.WriteAt(data[i*wrsize:(i+1)*wrsize], int64(i*wrsize)) @@ -532,7 +530,7 @@ func testSparseWrite(t *testing.T, opts testu.NodeOpts) { } buf := make([]byte, 5000) - u.NewTimeSeededRand().Read(buf[2500:]) + random.NewRand().Read(buf[2500:]) wrote, err := dagmod.WriteAt(buf[2500:], 2500) if err != nil { @@ -577,7 +575,7 @@ func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) { } buf := make([]byte, 5000) - u.NewTimeSeededRand().Read(buf[2500:]) + random.NewRand().Read(buf[2500:]) nseek, err := dagmod.Seek(2500, io.SeekStart) if err != nil { @@ -841,7 +839,7 @@ func BenchmarkDagmodWrite(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wrsize := 4096 + const wrsize = 4096 dagmod, err := NewDagModifier(ctx, n, dserv, testu.SizeSplitterGen(512)) if err != nil { @@ -849,7 +847,7 @@ func BenchmarkDagmodWrite(b *testing.B) { } buf := make([]byte, b.N*wrsize) - u.NewTimeSeededRand().Read(buf) + random.NewRand().Read(buf) b.StartTimer() b.SetBytes(int64(wrsize)) for i := 0; i < b.N; i++ { diff --git a/ipld/unixfs/test/utils.go b/ipld/unixfs/test/utils.go index 4df8c1675..465808ca2 100644 --- a/ipld/unixfs/test/utils.go +++ b/ipld/unixfs/test/utils.go @@ -14,9 +14,9 @@ import ( chunker "github.com/ipfs/boxo/chunker" mdag "github.com/ipfs/boxo/ipld/merkledag" mdagmock "github.com/ipfs/boxo/ipld/merkledag/test" - u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-test/random" mh "github.com/multiformats/go-multihash" ) @@ -85,14 +85,8 @@ func GetEmptyNode(t testing.TB, dserv ipld.DAGService, opts NodeOpts) ipld.Node // GetRandomNode returns a random unixfs file node. 
func GetRandomNode(t testing.TB, dserv ipld.DAGService, size int64, opts NodeOpts) ([]byte, ipld.Node) { - in := io.LimitReader(u.NewTimeSeededRand(), size) - buf, err := io.ReadAll(in) - if err != nil { - t.Fatal(err) - } - - node := GetNode(t, dserv, buf, opts) - return buf, node + buf := random.Bytes(int(size)) + return buf, GetNode(t, dserv, buf, opts) } // ArrComp checks if two byte slices are the same. diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index 2375c4f05..eb5585a64 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -25,7 +25,7 @@ import ( ft "github.com/ipfs/boxo/ipld/unixfs" importer "github.com/ipfs/boxo/ipld/unixfs/importer" uio "github.com/ipfs/boxo/ipld/unixfs/io" - u "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -46,7 +46,7 @@ func getDagserv(t testing.TB) ipld.DAGService { } func getRandFile(t *testing.T, ds ipld.DAGService, size int64) ipld.Node { - r := io.LimitReader(u.NewTimeSeededRand(), size) + r := io.LimitReader(random.NewRand(), size) return fileNodeFromReader(t, ds, r) } diff --git a/pinning/pinner/dspinner/pin_test.go b/pinning/pinner/dspinner/pin_test.go index 54e308cba..cbc1e6c34 100644 --- a/pinning/pinner/dspinner/pin_test.go +++ b/pinning/pinner/dspinner/pin_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "io" "path" "testing" "time" @@ -22,13 +21,11 @@ import ( blockstore "github.com/ipfs/boxo/blockstore" offline "github.com/ipfs/boxo/exchange/offline" - util "github.com/ipfs/boxo/util" + "github.com/ipfs/go-test/random" ipfspin "github.com/ipfs/boxo/pinning/pinner" ) -var rand = util.NewTimeSeededRand() - type fakeLogger struct { logging.StandardLogger lastError error @@ -44,13 +41,8 @@ func (f *fakeLogger) Errorf(format string, args ...interface{}) { func randNode() (*mdag.ProtoNode, cid.Cid) { nd := new(mdag.ProtoNode) - nd.SetData(make([]byte, 32)) - _, err := io.ReadFull(rand, nd.Data()) - if err != nil { - panic(err) - } - k := nd.Cid() - return nd, k + nd.SetData(random.Bytes(32)) + return nd, nd.Cid() } func assertPinned(t *testing.T, p ipfspin.Pinner, c cid.Cid, failmsg string) { diff --git a/util/util.go b/util/util.go index 7a96ae393..3a76dd238 100644 --- a/util/util.go +++ b/util/util.go @@ -54,33 +54,20 @@ func ExpandPathnames(paths []string) ([]string, error) { return out, nil } -type randGen struct { - rand.Rand -} - // NewTimeSeededRand returns a random bytes reader // which has been initialized with the current time. +// +// Deprecated: use github.com/ipfs/go-test/random instead. func NewTimeSeededRand() io.Reader { - src := rand.NewSource(time.Now().UnixNano()) - return &randGen{ - Rand: *rand.New(src), - } + return NewSeededRand(time.Now().UnixNano()) } // NewSeededRand returns a random bytes reader // initialized with the given seed. +// +// Deprecated: use github.com/ipfs/go-test/random instead. 
func NewSeededRand(seed int64) io.Reader { - src := rand.NewSource(seed) - return &randGen{ - Rand: *rand.New(src), - } -} - -func (r *randGen) Read(p []byte) (n int, err error) { - for i := 0; i < len(p); i++ { - p[i] = byte(r.Rand.Intn(255)) - } - return len(p), nil + return rand.New(rand.NewSource(seed)) } // GetenvBool is the way to check an env var as a boolean diff --git a/util/util_test.go b/util/util_test.go index 70747ad90..c884d4614 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -3,6 +3,8 @@ package util import ( "bytes" "testing" + + "github.com/ipfs/go-test/random" ) func TestXOR(t *testing.T) { @@ -33,9 +35,9 @@ func TestXOR(t *testing.T) { } func BenchmarkHash256K(b *testing.B) { - buf := make([]byte, 256*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(256 * 1024)) + const size = 256 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf) @@ -43,9 +45,9 @@ func BenchmarkHash256K(b *testing.B) { } func BenchmarkHash512K(b *testing.B) { - buf := make([]byte, 512*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(512 * 1024)) + const size = 512 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf) @@ -53,9 +55,9 @@ func BenchmarkHash512K(b *testing.B) { } func BenchmarkHash1M(b *testing.B) { - buf := make([]byte, 1024*1024) - NewTimeSeededRand().Read(buf) - b.SetBytes(int64(1024 * 1024)) + const size = 1024 * 1024 + buf := random.Bytes(size) + b.SetBytes(size) b.ResetTimer() for i := 0; i < b.N; i++ { Hash(buf)
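
---

Reviewer note (not part of the patch): the call sites above all migrate to a small, uniform API from github.com/ipfs/go-test/random. Below is a minimal sketch using only the functions that actually appear in this diff — NewRand, NewSeededRand, and Bytes; the package path is real, but the test itself is hypothetical and exists only to show the calls side by side.

    package example_test

    import (
        "io"
        "testing"

        "github.com/ipfs/go-test/random"
    )

    func TestRandomHelpersSketch(t *testing.T) {
        // Seeded source: the same seed always yields the same byte stream,
        // so tests that need reproducible data (e.g. TestStableCid) can pin
        // expected values.
        r := random.NewSeededRand(0xdeadbeef)
        buf := make([]byte, 1024)
        if _, err := io.ReadFull(r, buf); err != nil {
            t.Fatal(err)
        }

        // Time-seeded source, the drop-in replacement for the deprecated
        // util.NewTimeSeededRand(); Read fills the whole slice.
        random.NewRand().Read(buf)

        // Convenience generator replacing hand-rolled make-then-Read code.
        _ = random.Bytes(32) // 32 random bytes
    }

Two observations that may help review. First, the expected CID in TestStableCid changes because the deleted util.randGen.Read generated each byte with rand.Intn(255) (a biased stream that could never produce 0xFF), while the reworked NewSeededRand returns a plain math/rand source whose Read yields a different byte stream for the same seed, so the DAG built from seed 0xdeadbeef hashes differently. Second, since the readers are still io.Readers, every io.LimitReader(...) and .Read(...) call site migrates by swapping the constructor and nothing else, which is why most hunks in this patch are one-line substitutions.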