diff --git a/go.mod b/go.mod
index 6b350a4008..00cee1a99a 100644
--- a/go.mod
+++ b/go.mod
@@ -50,6 +50,8 @@ require (
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
 )
 
+require github.com/google/go-querystring v1.1.0 // indirect
+
 require (
 	github.com/DataDog/zstd v1.4.5 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -110,6 +112,7 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/flatbuffers v1.12.1 // indirect
+	github.com/google/go-github/v62 v62.0.0
 	github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/graph-gophers/graphql-go v1.3.0 // indirect
diff --git a/go.sum b/go.sum
index 1b63dfb496..8676c270c4 100644
--- a/go.sum
+++ b/go.sum
@@ -350,6 +350,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -357,7 +358,11 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4=
+github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go
index 54046edf15..b4dafbff15 100644
--- a/system_tests/block_validator_test.go
+++ b/system_tests/block_validator_test.go
@@ -27,6 +27,7 @@ import (
 	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
 	"github.com/offchainlabs/nitro/util/arbmath"
 	"github.com/offchainlabs/nitro/util/redisutil"
+	"github.com/offchainlabs/nitro/util/testhelpers/github"
 	"github.com/offchainlabs/nitro/validator/client/redis"
 )
 
@@ -39,24 +40,34 @@ const (
 	upgradeArbOs
 )
 
-func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops int, workload workloadType, arbitrator bool, useRedisStreams bool) {
+type Options struct {
+	dasModeString   string
+	workloadLoops   int
+	workload        workloadType
+	arbitrator      bool
+	useRedisStreams bool
+	wasmRootDir     string
+}
+
+func testBlockValidatorSimple(t *testing.T, opts Options) {
 	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeString)
+	chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, opts.dasModeString)
 	defer lifecycleManager.StopAndWaitUntil(time.Second)
-	if workload == upgradeArbOs {
+	if opts.workload == upgradeArbOs {
 		chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10
 	}
 
 	var delayEvery int
-	if workloadLoops > 1 {
+	if opts.workloadLoops > 1 {
 		l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500
-		delayEvery = workloadLoops / 3
+		delayEvery = opts.workloadLoops / 3
 	}
 
 	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder = builder.WithWasmRootDir(opts.wasmRootDir)
 	builder.nodeConfig = l1NodeConfigA
 	builder.chainConfig = chainConfig
 	builder.L2Info = nil
@@ -70,7 +81,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 	validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability
 	validatorConfig.DataAvailability.RPCAggregator.Enable = false
 	redisURL := ""
-	if useRedisStreams {
+	if opts.useRedisStreams {
 		redisURL = redisutil.CreateTestRedis(ctx, t)
 		validatorConfig.BlockValidator.RedisValidationClientConfig = redis.TestValidationClientConfig
 		validatorConfig.BlockValidator.RedisValidationClientConfig.RedisURL = redisURL
@@ -78,7 +89,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 		validatorConfig.BlockValidator.RedisValidationClientConfig = redis.ValidationClientConfig{}
 	}
 
-	AddDefaultValNode(t, ctx, validatorConfig, !arbitrator, redisURL)
+	AddDefaultValNode(t, ctx, validatorConfig, !opts.arbitrator, redisURL, opts.wasmRootDir)
 
 	testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig})
 	defer cleanupB()
@@ -87,17 +98,17 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 	perTransfer := big.NewInt(1e12)
 
 	var simple *mocksgen.Simple
-	if workload != upgradeArbOs {
-		for i := 0; i < workloadLoops; i++ {
+	if opts.workload != upgradeArbOs {
+		for i := 0; i < opts.workloadLoops; i++ {
 			var tx *types.Transaction
 
-			if workload == ethSend {
+			if opts.workload == ethSend {
 				tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, perTransfer, nil)
 			} else {
 				var contractCode []byte
 				var gas uint64
 
-				if workload == smallContract {
+				if opts.workload == smallContract {
 					contractCode = []byte{byte(vm.PUSH0)}
 					contractCode = append(contractCode, byte(vm.PUSH0))
 					contractCode = append(contractCode, byte(vm.PUSH1))
@@ -130,7 +141,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 			err := builder.L2.Client.SendTransaction(ctx, tx)
 			Require(t, err)
 			_, err = builder.L2.EnsureTxSucceeded(tx)
-			if workload != depleteGas {
+			if opts.workload != depleteGas {
 				Require(t, err)
 			}
 			if delayEvery > 0 && i%delayEvery == (delayEvery-1) {
@@ -184,7 +195,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 		Require(t, err)
 	}
 
-	if workload != depleteGas {
+	if opts.workload != depleteGas {
 		delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 30002, perTransfer, nil)
 		builder.L1.SendWaitTestTransactions(t, []*types.Transaction{
 			WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000),
@@ -203,11 +214,11 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 		Require(t, err)
 	}
 
-	if workload == ethSend {
+	if opts.workload == ethSend {
 		l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
 		Require(t, err)
 
-		expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(workloadLoops+1)))
+		expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(opts.workloadLoops+1)))
 		if l2balance.Cmp(expectedBalance) != 0 {
 			Fatal(t, "Unexpected balance:", l2balance)
 		}
@@ -251,21 +262,65 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops
 }
 
 func TestBlockValidatorSimpleOnchainUpgradeArbOs(t *testing.T) {
-	testBlockValidatorSimple(t, "onchain", 1, upgradeArbOs, true, false)
+	opts := Options{
+		dasModeString: "onchain",
+		workloadLoops: 1,
+		workload:      upgradeArbOs,
+		arbitrator:    true,
+	}
+	testBlockValidatorSimple(t, opts)
 }
 
 func TestBlockValidatorSimpleOnchain(t *testing.T) {
-	testBlockValidatorSimple(t, "onchain", 1, ethSend, true, false)
+	opts := Options{
+		dasModeString: "onchain",
+		workloadLoops: 1,
+		workload:      ethSend,
+		arbitrator:    true,
+	}
+	testBlockValidatorSimple(t, opts)
+}
+
+func TestBlockValidatorSimpleOnchainWithPublishedMachine(t *testing.T) {
+	cr, err := github.LatestConsensusRelease(context.Background())
+	Require(t, err)
+	machPath := populateMachineDir(t, cr)
+	opts := Options{
+		dasModeString: "onchain",
+		workloadLoops: 1,
+		workload:      ethSend,
+		arbitrator:    true,
+		wasmRootDir:   machPath,
+	}
+	testBlockValidatorSimple(t, opts)
 }
 
 func TestBlockValidatorSimpleOnchainWithRedisStreams(t *testing.T) {
-	testBlockValidatorSimple(t, "onchain", 1, ethSend, true, true)
+	opts := Options{
+		dasModeString:   "onchain",
+		workloadLoops:   1,
+		workload:        ethSend,
+		arbitrator:      true,
+		useRedisStreams: true,
+	}
+	testBlockValidatorSimple(t, opts)
 }
 
 func TestBlockValidatorSimpleLocalDAS(t *testing.T) {
-	testBlockValidatorSimple(t, "files", 1, ethSend, true, false)
+	opts := Options{
+		dasModeString: "files",
+		workloadLoops: 1,
+		workload:      ethSend,
+		arbitrator:    true,
+	}
+	testBlockValidatorSimple(t, opts)
 }
 
 func TestBlockValidatorSimpleJITOnchain(t *testing.T) {
-	testBlockValidatorSimple(t, "files", 8, smallContract, false, false)
+	opts := Options{
+		dasModeString: "files",
+		workloadLoops: 8,
+		workload:      smallContract,
+	}
+	testBlockValidatorSimple(t, opts)
 }
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 1c45802b54..16d6b2f131 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -11,6 +11,7 @@ import (
 	"io"
 	"math/big"
 	"net"
+	"net/http"
 	"os"
 	"strconv"
 	"strings"
@@ -74,6 +75,7 @@ import (
 	"github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen"
 	"github.com/offchainlabs/nitro/statetransfer"
 	"github.com/offchainlabs/nitro/util/testhelpers"
+	"github.com/offchainlabs/nitro/util/testhelpers/github"
 	"golang.org/x/exp/slog"
 )
 
@@ -156,6 +158,7 @@ type NodeBuilder struct {
 	execConfig    *gethexec.Config
 	l1StackConfig *node.Config
 	l2StackConfig *node.Config
+	valnodeConfig *valnode.Config
 
 	L1Info info
 	L2Info info
@@ -190,6 +193,8 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder {
 	b.dataDir = t.TempDir()
 	b.l1StackConfig = createStackConfigForTest(b.dataDir)
 	b.l2StackConfig = createStackConfigForTest(b.dataDir)
+	cp := valnode.TestValidationConfig
+	b.valnodeConfig = &cp
 	b.execConfig = gethexec.ConfigDefaultTest()
 	return b
 }
@@ -201,6 +206,11 @@ func (b *NodeBuilder) WithArbOSVersion(arbosVersion uint64) *NodeBuilder {
 	return b
 }
 
+func (b *NodeBuilder) WithWasmRootDir(wasmRootDir string) *NodeBuilder {
+	b.valnodeConfig.Wasm.RootPath = wasmRootDir
+	return b
+}
+
 func (b *NodeBuilder) Build(t *testing.T) func() {
 	if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth {
 		if b.execConfig.Caching.Archive {
@@ -212,13 +222,13 @@ func (b *NodeBuilder) Build(t *testing.T) func() {
 	if b.withL1 {
 		l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx)
 		b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack =
-			createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info)
+			createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.valnodeConfig, b.L2Info)
 		b.L1, b.L2 = l1, l2
 		b.L1.cleanup = func() { requireClose(t, b.L1.Stack) }
 	} else {
 		l2 := NewTestClient(b.ctx)
 		b.L2Info, l2.ConsensusNode, l2.Client =
-			createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.takeOwnership)
+			createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.valnodeConfig, b.takeOwnership)
 		b.L2 = l2
 	}
 	b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode)
@@ -265,7 +275,7 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes
 
 	l2 := NewTestClient(b.ctx)
 	l2.Client, l2.ConsensusNode =
-		Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig)
+		Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, b.valnodeConfig)
 	l2.ExecNode = getExecNode(t, l2.ConsensusNode)
 	l2.cleanup = func() { l2.ConsensusNode.StopAndWait() }
 	return l2, func() { l2.cleanup() }
@@ -605,12 +615,13 @@ func currentRootModule(t *testing.T) common.Hash {
 	return locator.LatestWasmModuleRoot()
 }
 
-func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool, redisURL string) {
+func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool, redisURL string, wasmRootDir string) {
 	if !nodeConfig.ValidatorRequired() {
 		return
 	}
 	conf := valnode.TestValidationConfig
 	conf.UseJit = useJit
+	conf.Wasm.RootPath = wasmRootDir
 	// Enable redis streams when URL is specified
 	if redisURL != "" {
 		conf.Arbitrator.RedisValidationServerConfig = rediscons.DefaultValidationServerConfig
@@ -708,7 +719,7 @@ func getInitMessage(ctx context.Context, t *testing.T, l1client client, addresse
 }
 
 func DeployOnTestL1(
-	t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig,
+	t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig, wasmModuleRoot common.Hash,
 ) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) {
 	l1info.GenerateAccount("RollupOwner")
 	l1info.GenerateAccount("Sequencer")
@@ -722,8 +733,6 @@ func DeployOnTestL1(
 		l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil)})
 
 	l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx)
-	locator, err := server_common.NewMachineLocator("")
-	Require(t, err)
 	serializedChainConfig, err := json.Marshal(chainConfig)
 	Require(t, err)
 
@@ -742,7 +751,7 @@ func DeployOnTestL1(
 		[]common.Address{l1info.GetAddress("Sequencer")},
 		l1info.GetAddress("RollupOwner"),
 		0,
-		arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
+		arbnode.GenerateRollupConfig(false, wasmModuleRoot, l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
 		nativeToken,
 		maxDataSize,
 		false,
@@ -819,6 +828,7 @@ func createTestNodeWithL1(
 	execConfig *gethexec.Config,
 	chainConfig *params.ChainConfig,
 	stackConfig *node.Config,
+	valnodeConfig *valnode.Config,
 	l2info_in info,
 ) (
 	l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node,
@@ -842,7 +852,9 @@ func createTestNodeWithL1(
 	if l2info == nil {
 		l2info = NewArbTestInfo(t, chainConfig.ChainID)
 	}
-	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig)
+	locator, err := server_common.NewMachineLocator(valnodeConfig.Wasm.RootPath)
+	Require(t, err)
+	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot())
 	_, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &execConfig.Caching)
 	var sequencerTxOptsPtr *bind.TransactOpts
 	var dataSigner signature.DataSignerFunc
@@ -865,7 +877,7 @@ func createTestNodeWithL1(
 		validatorTxOptsPtr = &validatorTxOpts
 	}
 
-	AddDefaultValNode(t, ctx, nodeConfig, true, "")
+	AddDefaultValNode(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
 
 	Require(t, execConfig.Validate())
 	execConfigFetcher := func() *gethexec.Config { return execConfig }
@@ -889,7 +901,7 @@ func createTestNodeWithL1(
 // L2 -Only. Enough for tests that needs no interface to L1
 // Requires precompiles.AllowDebugPrecompiles = true
 func createTestNode(
-	t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, takeOwnership bool,
+	t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, valnodeConfig *valnode.Config, takeOwnership bool,
 ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) {
 	if nodeConfig == nil {
 		nodeConfig = arbnode.ConfigDefaultL2Test()
@@ -900,7 +912,7 @@ func createTestNode(
 
 	feedErrChan := make(chan error, 10)
 
-	AddDefaultValNode(t, ctx, nodeConfig, true, "")
+	AddDefaultValNode(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
 
 	l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", chainConfig, &execConfig.Caching)
 
@@ -972,6 +984,7 @@ func Create2ndNodeWithConfig(
 	nodeConfig *arbnode.Config,
 	execConfig *gethexec.Config,
 	stackConfig *node.Config,
+	valnodeConfig *valnode.Config,
 ) (*ethclient.Client, *arbnode.Node) {
 	if nodeConfig == nil {
 		nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
@@ -1011,7 +1024,7 @@ func Create2ndNodeWithConfig(
 	l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0)
 	Require(t, err)
 
-	AddDefaultValNode(t, ctx, nodeConfig, true, "")
+	AddDefaultValNode(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath)
 
 	Require(t, execConfig.Validate())
 	Require(t, nodeConfig.Validate())
@@ -1287,3 +1300,31 @@ func logParser[T any](t *testing.T, source string, name string) func(*types.Log)
 		return event
 	}
 }
+
+func populateMachineDir(t *testing.T, cr *github.ConsensusRelease) string {
+	baseDir := t.TempDir()
+	machineDir := baseDir + "/machines"
+	err := os.Mkdir(machineDir, 0755)
+	Require(t, err)
+	err = os.Mkdir(machineDir+"/latest", 0755)
+	Require(t, err)
+	mrFile, err := os.Create(machineDir + "/latest/module-root.txt")
+	Require(t, err)
+	_, err = mrFile.WriteString(cr.WavmModuleRoot)
+	Require(t, err)
+	machResp, err := http.Get(cr.MachineWavmURL.String())
+	Require(t, err)
+	defer machResp.Body.Close()
+	machineFile, err := os.Create(machineDir + "/latest/machine.wavm.br")
+	Require(t, err)
+	_, err = io.Copy(machineFile, machResp.Body)
+	Require(t, err)
+	replayResp, err := http.Get(cr.ReplayWasmURL.String())
+	Require(t, err)
+	defer replayResp.Body.Close()
+	replayFile, err := os.Create(machineDir + "/latest/replay.wasm")
+	Require(t, err)
+	_, err = io.Copy(replayFile, replayResp.Body)
+	Require(t, err)
+	return machineDir
+}
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index fc0f17cecc..2332f4ee9e 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -33,6 +33,8 @@ import (
 	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
 	"github.com/offchainlabs/nitro/util/headerreader"
 	"github.com/offchainlabs/nitro/util/signature"
+	"github.com/offchainlabs/nitro/validator/server_common"
+	"github.com/offchainlabs/nitro/validator/valnode"
 	"golang.org/x/exp/slog"
 )
 
@@ -110,7 +112,9 @@ func TestDASRekey(t *testing.T) {
 	l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil)
 	defer requireClose(t, l1stack)
 	feedErrChan := make(chan error, 10)
-	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig)
+	locator, err := server_common.NewMachineLocator("")
+	Require(t, err)
+	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot())
 
 	// Setup DAS servers
 	dasDataDir := t.TempDir()
@@ -152,13 +156,13 @@ func TestDASRekey(t *testing.T) {
 
 		l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none"
 
-		l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil)
+		l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig)
 		checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB)
 		nodeA.StopAndWait()
 		nodeB.StopAndWait()
 	}
 
-	err := dasRpcServerA.Shutdown(ctx)
+	err = dasRpcServerA.Shutdown(ctx)
 	Require(t, err)
 	dasRpcServerB, pubkeyB, backendConfigB, _, _ := startLocalDASServer(t, ctx, dasDataDir, l1client, addresses.SequencerInbox)
 	defer func() {
@@ -191,7 +195,7 @@ func TestDASRekey(t *testing.T) {
 		Require(t, nodeA.Start(ctx))
 		l2clientA := ClientForStack(t, l2stackA)
 
-		l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil)
+		l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig)
 		checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(2e12), l2clientB)
 
 		nodeA.StopAndWait()
@@ -244,8 +248,11 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) {
 	Require(t, err)
 	l1Reader.Start(ctx)
 	defer l1Reader.StopAndWait()
+
 	feedErrChan := make(chan error, 10)
-	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig)
+	locator, err := server_common.NewMachineLocator("")
+	Require(t, err)
+	addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot())
 
 	keyDir, fileDataDir, dbDataDir := t.TempDir(), t.TempDir(), t.TempDir()
 	pubkey, _, err := das.GenerateAndStoreKeys(keyDir)
@@ -341,7 +348,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) {
 	l1NodeConfigB.DataAvailability.RestAggregator.Enable = true
 	l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()}
 	l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none"
-	l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil)
+	l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig)
 
 	checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB)
 
diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go
index e4c49c3227..b466e2db23 100644
--- a/system_tests/full_challenge_impl_test.go
+++ b/system_tests/full_challenge_impl_test.go
@@ -248,7 +248,7 @@ func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chai
 	return consensusNode, execNode
 }
 
-func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64) {
+func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64, wasmRootDir string) {
 	glogger := log.NewGlogHandler(
 		log.NewTerminalHandler(io.Writer(os.Stderr), false))
 	glogger.Verbosity(log.LvlInfo)
@@ -273,15 +273,19 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 
 	var valStack *node.Node
 	var mockSpawn *mockSpawner
+	valNodeConfig := &valnode.TestValidationConfig
+	valNodeConfig.Wasm.RootPath = wasmRootDir
 	if useStubs {
-		mockSpawn, valStack = createMockValidationNode(t, ctx, &valnode.TestValidationConfig.Arbitrator)
+		mockSpawn, valStack = createMockValidationNode(t, ctx, &valNodeConfig.Arbitrator)
 	} else {
-		_, valStack = createTestValidationNode(t, ctx, &valnode.TestValidationConfig)
+		_, valStack = createTestValidationNode(t, ctx, valNodeConfig)
 	}
 	configByValidationNode(conf, valStack)
 
 	fatalErrChan := make(chan error, 10)
-	asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig)
+	locator, err := server_common.NewMachineLocator(wasmRootDir)
+	Require(t, err)
+	asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, locator.LatestWasmModuleRoot())
 	deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx)
 	sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx)
 
@@ -295,7 +299,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 	asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr
 	asserterL2Info := NewArbTestInfo(t, chainConfig.ChainID)
 	asserterL2, asserterExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, asserterL2Info, asserterRollupAddresses, initMessage, nil, nil, fatalErrChan)
-	err := asserterL2.Start(ctx)
+	err = asserterL2.Start(ctx)
 	Require(t, err)
 
 	challengerRollupAddresses := *asserterRollupAddresses
@@ -335,10 +339,6 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 	}
 
 	ospEntry := DeployOneStepProofEntry(t, ctx, &deployerTxOpts, l1Backend)
-	locator, err := server_common.NewMachineLocator("")
-	if err != nil {
-		Fatal(t, err)
-	}
 	var wasmModuleRoot common.Hash
 	if useStubs {
 		wasmModuleRoot = mockWasmModuleRoots[0]
diff --git a/system_tests/full_challenge_mock_test.go b/system_tests/full_challenge_mock_test.go
index d32c2b40ab..82f57dd7ad 100644
--- a/system_tests/full_challenge_mock_test.go
+++ b/system_tests/full_challenge_mock_test.go
@@ -8,14 +8,16 @@ import "testing"
 
 func TestMockChallengeManagerAsserterIncorrect(t *testing.T) {
 	t.Parallel()
+	defaultWasmRootDir := ""
 	for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ {
-		RunChallengeTest(t, false, true, i)
+		RunChallengeTest(t, false, true, i, defaultWasmRootDir)
 	}
 }
 
 func TestMockChallengeManagerAsserterCorrect(t *testing.T) {
 	t.Parallel()
+	defaultWasmRootDir := ""
 	for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ {
-		RunChallengeTest(t, true, true, i)
+		RunChallengeTest(t, true, true, i, defaultWasmRootDir)
 	}
 }
diff --git a/system_tests/full_challenge_test.go b/system_tests/full_challenge_test.go
index d15ee83d1d..96c82848d2 100644
--- a/system_tests/full_challenge_test.go
+++ b/system_tests/full_challenge_test.go
@@ -6,14 +6,37 @@
 
 package arbtest
 
-import "testing"
+import (
+	"context"
+	"testing"
+
+	"github.com/offchainlabs/nitro/util/testhelpers/github"
+)
 
 func TestChallengeManagerFullAsserterIncorrect(t *testing.T) {
 	t.Parallel()
-	RunChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1)
+	defaultWasmRootDir := ""
+	RunChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, defaultWasmRootDir)
+}
+
+func TestChallengeManagerFullAsserterIncorrectWithPublishedMachine(t *testing.T) {
+	t.Parallel()
+	cr, err := github.LatestConsensusRelease(context.Background())
+	Require(t, err)
+	machPath := populateMachineDir(t, cr)
+	RunChallengeTest(t, false, true, makeBatch_MsgsPerBatch+1, machPath)
 }
 
 func TestChallengeManagerFullAsserterCorrect(t *testing.T) {
 	t.Parallel()
-	RunChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2)
+	defaultWasmRootDir := ""
+	RunChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, defaultWasmRootDir)
+}
+
+func TestChallengeManagerFullAsserterCorrectWithPublishedMachine(t *testing.T) {
+	t.Parallel()
+	cr, err := github.LatestConsensusRelease(context.Background())
+	Require(t, err)
+	machPath := populateMachineDir(t, cr)
+	RunChallengeTest(t, true, true, makeBatch_MsgsPerBatch+2, machPath)
 }
diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go
index b692af6e30..4eace8d514 100644
--- a/system_tests/nodeinterface_test.go
+++ b/system_tests/nodeinterface_test.go
@@ -19,6 +19,7 @@ import (
 	"github.com/offchainlabs/nitro/arbnode"
 	"github.com/offchainlabs/nitro/arbos/util"
 	"github.com/offchainlabs/nitro/solgen/go/node_interfacegen"
+	"github.com/offchainlabs/nitro/validator/server_common"
 )
 
 func TestFindBatch(t *testing.T) {
@@ -39,7 +40,9 @@ func TestFindBatch(t *testing.T) {
 
 	chainConfig := params.ArbitrumDevTestChainConfig()
 	fatalErrChan := make(chan error, 10)
-	rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig)
+	locator, err := server_common.NewMachineLocator("")
+	Require(t, err)
+	rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, locator.LatestWasmModuleRoot())
 
 	bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig)
 
@@ -49,7 +52,7 @@ func TestFindBatch(t *testing.T) {
 	rollupAddresses.SequencerInbox = seqInboxAddr
 	l2Info := NewArbTestInfo(t, chainConfig.ChainID)
 	consensus, _ := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, rollupAddresses, initMsg, nil, nil, fatalErrChan)
-	err := consensus.Start(ctx)
+	err = consensus.Start(ctx)
 	Require(t, err)
 
 	l2Client := ClientForStack(t, consensus.Stack)
diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go
index d4cab510d3..245d62fc0c 100644
--- a/system_tests/program_recursive_test.go
+++ b/system_tests/program_recursive_test.go
@@ -134,7 +134,9 @@ func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit boo
 	// set-up validator
 	validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest()
 	validatorConfig.BlockValidator.Enable = true
-	AddDefaultValNode(t, ctx, validatorConfig, jit, "")
+	emptyRedisURL := ""
+	defaultWasmRootPath := ""
+	AddDefaultValNode(t, ctx, validatorConfig, jit, emptyRedisURL, defaultWasmRootPath)
 	valClient, valCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig})
 	defer valCleanup()
 
diff --git a/util/testhelpers/github/releases.go b/util/testhelpers/github/releases.go
new file mode 100644
index 0000000000..59f591d92c
--- /dev/null
+++ b/util/testhelpers/github/releases.go
@@ -0,0 +1,83 @@
+package github
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/google/go-github/v62/github"
+)
+
+var wasmRootExp = regexp.MustCompile(`\*\*WAVM Module Root\*\*: (0x[a-f0-9]{64})`)
+
+type ConsensusRelease struct {
+	WavmModuleRoot string
+	MachineWavmURL url.URL
+	ReplayWasmURL  url.URL
+}
+
+// NitroReleases returns the most recent 50 releases of the Nitro repository.
+func NitroReleases(ctx context.Context) ([]*github.RepositoryRelease, error) {
+	client := github.NewClient(nil)
+	opts := &github.ListOptions{
+		PerPage: 50,
+	}
+	releases, _, err := client.Repositories.ListReleases(ctx, "OffchainLabs", "nitro", opts)
+	return releases, err
+}
+
+// LatestConsensusRelease returns data about the latest consensus release.
+func LatestConsensusRelease(ctx context.Context) (*ConsensusRelease, error) {
+	releases, err := NitroReleases(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var found *ConsensusRelease
+	for _, release := range releases {
+		if strings.HasPrefix(release.GetTagName(), "consensus") {
+			if found, err = fromRelease(release); err != nil {
+				return nil, err
+			}
+			break
+		}
+	}
+	if found == nil {
+		return nil, fmt.Errorf("no consensus release found")
+	}
+	return found, nil
+}
+
+func fromRelease(release *github.RepositoryRelease) (*ConsensusRelease, error) {
+	// TODO(eljobe): Consider making the module-root.txt a release asset.
+	// This is currently brittle because it relies on the release body format.
+	matches := wasmRootExp.FindStringSubmatch(release.GetBody())
+	if len(matches) != 2 {
+		return nil, fmt.Errorf("no WAVM module root found in release body")
+	}
+	wavmModuleRoot := matches[1]
+	var machineWavmURL url.URL
+	var replayWasmURL url.URL
+	for _, asset := range release.Assets {
+		if asset.GetName() == "machine.wavm.br" {
+			wURL, err := url.Parse(asset.GetBrowserDownloadURL())
+			if err != nil {
+				return nil, err
+			}
+			machineWavmURL = *wURL
+		}
+		if asset.GetName() == "replay.wasm" {
+			rURL, err := url.Parse(asset.GetBrowserDownloadURL())
+			if err != nil {
+				return nil, err
+			}
+			replayWasmURL = *rURL
+		}
+	}
+	return &ConsensusRelease{
+		WavmModuleRoot: wavmModuleRoot,
+		MachineWavmURL: machineWavmURL,
+		ReplayWasmURL:  replayWasmURL,
+	}, nil
+}
diff --git a/util/testhelpers/github/releases_test.go b/util/testhelpers/github/releases_test.go
new file mode 100644
index 0000000000..a25d68c543
--- /dev/null
+++ b/util/testhelpers/github/releases_test.go
@@ -0,0 +1,38 @@
+package github
+
+import (
+	"context"
+	"testing"
+)
+
+func TestReleases(t *testing.T) {
+	rels, err := NitroReleases(context.Background())
+	if err != nil {
+		t.Error(err)
+	}
+	if len(rels) == 0 {
+		t.Error("No releases found")
+	}
+	if len(rels) != 50 {
+		t.Errorf("Expected 50 releases, got %d", len(rels))
+	}
+}
+
+func TestLatestConsensusRelease(t *testing.T) {
+	rel, err := LatestConsensusRelease(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if rel == nil {
+		t.Fatal("No consensus release found")
+	}
+	if rel.WavmModuleRoot == "" {
+		t.Error("Unexpected empty WAVM module root.")
+	}
+	if rel.MachineWavmURL.String() == "" {
+		t.Error("Unexpected empty machine WAVM URL.")
+	}
+	if rel.ReplayWasmURL.String() == "" {
+		t.Error("Unexpected empty replay WASM URL.")
+	}
+}