From cee7d1fb02cf17d06a71e71ef070c139be3ed406 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 8 Apr 2024 16:41:30 -0500 Subject: [PATCH 01/75] Improve blocks re-execution and make it compatible with --init.then-quit --- blocks_reexecutor/blocks_reexecutor.go | 51 ++++++++++++++++++-------- cmd/nitro/nitro.go | 26 ++++++++++--- system_tests/blocks_reexecutor_test.go | 26 +++++-------- 3 files changed, 65 insertions(+), 38 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index bb6de00cad..bedea37776 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -42,10 +42,9 @@ func (c *Config) Validate() error { } var DefaultConfig = Config{ - Enable: false, - Mode: "random", - Room: runtime.NumCPU(), - BlocksPerThread: 10000, + Enable: false, + Mode: "random", + Room: runtime.NumCPU(), } var TestConfig = Config{ @@ -84,25 +83,38 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block start = chainStart end = chainEnd } - if start < chainStart { - log.Warn("state reexecutor's start block number is lower than genesis, resetting to genesis") + if start < chainStart || start > chainEnd { + log.Warn("invalid state reexecutor's start block number, resetting to genesis", "start", start, "genesis", chainStart) start = chainStart } - if end > chainEnd { - log.Warn("state reexecutor's end block number is greater than latest, resetting to latest") + if end > chainEnd || end < chainStart { + log.Warn("invalid state reexecutor's end block number, resetting to latest", "end", end, "latest", chainEnd) end = chainEnd } if c.Mode == "random" && end != start { - if c.BlocksPerThread > end-start { - c.BlocksPerThread = end - start + // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly + rng := uint64(10000) + if c.BlocksPerThread != 0 { + rng = c.BlocksPerThread + } + if rng > end-start { + rng = end - start } - start += uint64(rand.Intn(int(end - start - c.BlocksPerThread + 1))) - end = start + c.BlocksPerThread + start += uint64(rand.Intn(int(end - start - rng + 1))) + end = start + rng } - // inclusive of block reexecution [start, end] + // Inclusive of block reexecution [start, end] if start > 0 { start-- } + // Divide work equally among available threads + if c.BlocksPerThread == 0 { + c.BlocksPerThread = 10000 + work := (end - start) / uint64(c.Room) + if work > 0 { + c.BlocksPerThread = work + } + } return &BlocksReExecutor{ config: c, blockchain: blockchain, @@ -125,11 +137,13 @@ func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentB } // we don't use state release pattern here // TODO do we want to use release pattern here? 
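// A worked example of the range selection above, with illustrative numbers
// (not taken from the patch): suppose chainStart=0, chainEnd=100000, Room=4
// and BlocksPerThread is unset. Random mode uses rng=10000, draws
// rand.Intn(int(end-start-rng+1)) == rand.Intn(90001), i.e. an offset in
// [0, 90000], and sets end = start+rng. After the inclusive-range decrement,
// the thread-sizing branch computes work = (end-start)/uint64(Room) == 2500,
// so four workers each re-execute roughly 2500 blocks of the chosen window.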
- startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + startState, startHeader, release, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock } + // NoOp + defer release() start = startHeader.Number.Uint64() s.LaunchThread(func(ctx context.Context) { _, err := arbitrum.AdvanceStateUpToBlock(ctx, s.blockchain, startState, s.blockchain.GetHeaderByNumber(currentBlock), startHeader, nil) @@ -169,9 +183,14 @@ func (s *BlocksReExecutor) Impl(ctx context.Context) { log.Info("BlocksReExecutor successfully completed re-execution of blocks against historic state", "stateAt", s.startBlock, "startBlock", s.startBlock+1, "endBlock", end) } -func (s *BlocksReExecutor) Start(ctx context.Context) { +func (s *BlocksReExecutor) Start(ctx context.Context, done chan struct{}) { s.StopWaiter.Start(ctx, s) - s.LaunchThread(s.Impl) + s.LaunchThread(func(ctx context.Context) { + s.Impl(ctx) + if done != nil { + close(done) + } + }) } func (s *BlocksReExecutor) StopAndWait() { diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 997adf9369..59241204f1 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -494,6 +494,25 @@ func mainImpl() int { return 1 } + fatalErrChan := make(chan error, 10) + + var blocksReExecutor *blocksreexecutor.BlocksReExecutor + if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { + blocksReExecutor = blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) + if nodeConfig.Init.ThenQuit { + success := make(chan struct{}) + blocksReExecutor.Start(ctx, success) + deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) + select { + case err := <-fatalErrChan: + log.Error("shutting down due to fatal error", "err", err) + defer log.Error("shut down due to fatal error", "err", err) + return 1 + case <-success: + } + } + } + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -514,8 +533,6 @@ func mainImpl() int { return 1 } - fatalErrChan := make(chan error, 10) - var valNode *valnode.ValidationNode if sameProcessValidationNodeEnabled { valNode, err = valnode.CreateValidationNode( @@ -644,9 +661,8 @@ func mainImpl() int { // remove previous deferFuncs, StopAndWait closes database and blockchain. 
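// A minimal, hedged sketch of the done-channel contract that the new
// Start(ctx, done) establishes above (names as in this patch; logging and
// deferFuncs bookkeeping elided):
//
//	success := make(chan struct{})
//	blocksReExecutor.Start(ctx, success) // Impl runs on a thread; done is closed when it finishes
//	select {
//	case err := <-fatalErrChan:
//		_ = err // a re-execution error is fatal: report it and exit non-zero
//	case <-success:
//		// all ranges re-executed; with --init.then-quit the process can exit 0
//	}
//
// Passing done == nil (the long-running node path) simply skips the close.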
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } - if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { - blocksReExecutor := blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) - blocksReExecutor.Start(ctx) + if blocksReExecutor != nil && !nodeConfig.Init.ThenQuit { + blocksReExecutor.Start(ctx, nil) deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) } diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go index c2941ddcc4..66690d1427 100644 --- a/system_tests/blocks_reexecutor_test.go +++ b/system_tests/blocks_reexecutor_test.go @@ -45,16 +45,11 @@ func TestBlocksReExecutorModes(t *testing.T) { } } + // Reexecute blocks at mode full success := make(chan struct{}) + executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) + executorFull.Start(ctx, success) - // Reexecute blocks at mode full - go func() { - executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) - executorFull.StopWaiter.Start(ctx, executorFull) - executorFull.Impl(ctx) - executorFull.StopAndWait() - success <- struct{}{} - }() select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) @@ -66,15 +61,12 @@ func TestBlocksReExecutorModes(t *testing.T) { } // Reexecute blocks at mode random - go func() { - c := &blocksreexecutor.TestConfig - c.Mode = "random" - executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) - executorRandom.StopWaiter.Start(ctx, executorRandom) - executorRandom.Impl(ctx) - executorRandom.StopAndWait() - success <- struct{}{} - }() + success = make(chan struct{}) + c := &blocksreexecutor.TestConfig + c.Mode = "random" + executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) + executorRandom.Start(ctx, success) + select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) From f9055c93622bd32fe2f5049bd89600236c4ee689 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Sat, 20 Apr 2024 02:26:52 +0200 Subject: [PATCH 02/75] add pebble extra options --- arbnode/dataposter/storage_test.go | 3 +- cmd/conf/database.go | 123 +++++++++++++++++++++++++++-- cmd/nitro/init.go | 12 +-- cmd/nitro/nitro.go | 5 +- cmd/pruning/pruning.go | 8 +- execution/gethexec/node.go | 2 +- go-ethereum | 2 +- system_tests/common_test.go | 11 ++- system_tests/das_test.go | 6 +- system_tests/pruning_test.go | 6 +- system_tests/staterecovery_test.go | 4 +- 11 files changed, 152 insertions(+), 30 deletions(-) diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index f98c120f38..343efac3c7 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/redis" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/signature" @@ -44,7 +45,7 @@ func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.St func newPebbleDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage { t.Helper() - db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true) + db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true, 
conf.PersistentConfigDefault.Pebble.ExtraOptions()) if err != nil { t.Fatalf("NewPebbleDBDatabase() unexpected error: %v", err) } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index b049375d66..be0c630fa9 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -8,17 +8,21 @@ import ( "os" "path" "path/filepath" + "runtime" + "time" + "github.com/ethereum/go-ethereum/ethdb/pebble" flag "github.com/spf13/pflag" ) type PersistentConfig struct { - GlobalConfig string `koanf:"global-config"` - Chain string `koanf:"chain"` - LogDir string `koanf:"log-dir"` - Handles int `koanf:"handles"` - Ancient string `koanf:"ancient"` - DBEngine string `koanf:"db-engine"` + GlobalConfig string `koanf:"global-config"` + Chain string `koanf:"chain"` + LogDir string `koanf:"log-dir"` + Handles int `koanf:"handles"` + Ancient string `koanf:"ancient"` + DBEngine string `koanf:"db-engine"` + Pebble PebbleConfig `koanf:"pebble"` } var PersistentConfigDefault = PersistentConfig{ @@ -28,6 +32,7 @@ var PersistentConfigDefault = PersistentConfig{ Handles: 512, Ancient: "", DBEngine: "leveldb", + Pebble: PebbleConfigDefault, } func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -37,6 +42,7 @@ func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".handles", PersistentConfigDefault.Handles, "number of file descriptor handles to use for the database") f.String(prefix+".ancient", PersistentConfigDefault.Ancient, "directory of ancient where the chain freezer can be opened") f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use ('leveldb' or 'pebble')") + PebbleConfigAddOptions(prefix+".pebble", f) } func (c *PersistentConfig) ResolveDirectoryNames() error { @@ -96,3 +102,108 @@ func (c *PersistentConfig) Validate() error { } return nil } + +type PebbleConfig struct { + BytesPerSync int `koanf:"bytes-per-sync"` + L0CompactionFileThreshold int `koanf:"l0-compaction-file-threshold"` + L0CompactionThreshold int `koanf:"l0-compaction-threshold"` + L0StopWritesThreshold int `koanf:"l0-stop-writes-threshold"` + LBaseMaxBytes int64 `koanf:"l-base-max-bytes"` + MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` + DisableAutomaticCompactions bool `koanf:"disable-automatic-compactions"` + WALBytesPerSync int `koanf:"wal-bytes-per-sync"` + WALDir string `koanf:"wal-dir"` + WALMinSyncInterval int `koanf:"wal-min-sync-interval"` + TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` + Experimental PebbleExperimentalConfig `koaf:"experimental"` +} + +var PebbleConfigDefault = PebbleConfig{ + BytesPerSync: 0, // pebble default will be used + L0CompactionFileThreshold: 0, // pebble default will be used + L0CompactionThreshold: 0, // pebble default will be used + L0StopWritesThreshold: 0, // pebble default will be used + LBaseMaxBytes: 0, // pebble default will be used + MaxConcurrentCompactions: runtime.NumCPU(), + DisableAutomaticCompactions: false, + WALBytesPerSync: 0, // pebble default will be used + WALDir: "", // default will use same dir as for sstables + WALMinSyncInterval: 0, // pebble default will be used + TargetByteDeletionRate: 0, // pebble default will be used + Experimental: PebbleExperimentalConfigDefault, +} + +func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".bytes-per-sync", PebbleConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background (0 = pebble default)") + f.Int(prefix+".l0-compaction-file-threshold", 
PebbleConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction (0 = pebble default)") + f.Int(prefix+".l0-compaction-threshold", PebbleConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction (0 = pebble default)") + f.Int(prefix+".l0-stop-writes-threshold", PebbleConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") + f.Int64(prefix+".l-base-max-bytes", PebbleConfigDefault.LBaseMaxBytes, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") + f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions (0 = pebble default)") + f.Bool(prefix+".disable-automatic-compactions", PebbleConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") + f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud (0 = pebble default)") + f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") + f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") + PebbleExperimentalConfigAddOptions(".experimental", f) +} + +type PebbleExperimentalConfig struct { + L0CompactionConcurrency int `koanf:"l0-compaction-concurrency"` + CompactionDebtConcurrency uint64 `koanf:"compaction-debt-concurrency"` + ReadCompactionRate int64 `koanf:"read-compaction-rate"` + ReadSamplingMultiplier int64 `koanf:"read-sampling-multiplier"` + MaxWriterConcurrency int `koanf:"max-writer-concurrency"` + ForceWriterParallelism bool `koanf:"force-writer-parallelism"` +} + +var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ + L0CompactionConcurrency: 0, + CompactionDebtConcurrency: 0, + ReadCompactionRate: 0, + ReadSamplingMultiplier: -1, + MaxWriterConcurrency: 0, + ForceWriterParallelism: false, +} + +func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions. (0 = pebble default)") + f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen. 
(0 = pebble default)") + f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate") + f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB). (0 = pebble default)") + f.Int(prefix+".max-writer-concurrency", PebbleExperimentalConfigDefault.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.") + f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.") +} + +func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { + var maxConcurrentCompactions func() int + if c.MaxConcurrentCompactions > 0 { + maxConcurrentCompactions = func() int { return c.MaxConcurrentCompactions } + } + var walMinSyncInterval func() time.Duration + if c.WALMinSyncInterval > 0 { + walMinSyncInterval = func() time.Duration { + return time.Microsecond * time.Duration(c.WALMinSyncInterval) + } + } + return &pebble.ExtraOptions{ + BytesPerSync: c.BytesPerSync, + L0CompactionFileThreshold: c.L0CompactionFileThreshold, + L0CompactionThreshold: c.L0CompactionThreshold, + L0StopWritesThreshold: c.L0StopWritesThreshold, + LBaseMaxBytes: c.LBaseMaxBytes, + MaxConcurrentCompactions: maxConcurrentCompactions, + DisableAutomaticCompactions: c.DisableAutomaticCompactions, + WALBytesPerSync: c.WALBytesPerSync, + WALDir: c.WALDir, + WALMinSyncInterval: walMinSyncInterval, + TargetByteDeletionRate: c.TargetByteDeletionRate, + Experimental: pebble.ExtraOptionsExperimental{ + L0CompactionConcurrency: c.Experimental.L0CompactionConcurrency, + CompactionDebtConcurrency: c.Experimental.CompactionDebtConcurrency, + ReadCompactionRate: c.Experimental.ReadCompactionRate, + ReadSamplingMultiplier: c.Experimental.ReadSamplingMultiplier, + MaxWriterConcurrency: c.Experimental.MaxWriterConcurrency, + ForceWriterParallelism: c.Experimental.ForceWriterParallelism, + }, + } +} diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 6ebfec3bb1..2bae2d9e11 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -159,19 +159,19 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo return nil } -func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { +func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, 
rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { - if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "l2chaindata/", true); err == nil { + if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, "", "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions()); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() if !arbmath.BigEquals(chainConfig.ChainID, chainId) { return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) } - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions()) if err != nil { return chainDb, nil, err } - err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, persistentConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } @@ -219,7 +219,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions()) if err != nil { return chainDb, nil, err } @@ -367,7 +367,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return chainDb, l2BlockChain, err } - err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, persistentConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 79ecd51ac2..f70d16a25a 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -177,6 +177,7 @@ func mainImpl() int { nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) nodeConfig.GraphQL.Apply(&stackConf) + if nodeConfig.WS.ExposeAll { stackConf.WSModules = append(stackConf.WSModules, "personal") } @@ -476,7 +477,7 @@ func mainImpl() int { } } - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), l1Client, rollupAddrs) + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), &nodeConfig.Persistent, l1Client, rollupAddrs) if l2BlockChain != nil 
{ deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } @@ -487,7 +488,7 @@ func mainImpl() int { return 1 } - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, nodeConfig.Persistent.Pebble.ExtraOptions()) deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { log.Error("failed to open database", "err", err) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index c483526aa1..363126a49f 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -80,12 +80,12 @@ func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$") // Finds important roots to retain while proving -func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { +func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { chainConfig := gethexec.TryReadStoredChainConfig(chainDb) if chainConfig == nil { return nil, errors.New("database doesn't have a chain config (was this node initialized?)") } - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", true) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", true, persistentConfig.Pebble.ExtraOptions()) if err != nil { return nil, err } @@ -232,11 +232,11 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return roots.roots, nil } -func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { +func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { if initConfig.Prune == "" { return pruner.RecoverPruning(stack.InstanceDir(), chainDb) } - root, err := findImportantRoots(ctx, chainDb, stack, initConfig, cacheConfig, l1Client, rollupAddrs, validatorRequired) + root, err := findImportantRoots(ctx, chainDb, stack, initConfig, cacheConfig, persistentConfig, l1Client, rollupAddrs, validatorRequired) if err != nil { return fmt.Errorf("failed to find root to retain for pruning: %w", err) } diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 54f9ed6fe1..284934245b 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -216,7 +216,7 @@ func CreateExecutionNode( var classicOutbox *ClassicOutboxRetriever if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { - classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true) + classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true) // TODO can we skip using ExtraOptions here? 
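// Apart from this classic-msg database, every database open touched by this
// patch follows one pattern; a hedged sketch with placeholder arguments
// (cache, handles, ancient, readonly are illustrative):
//
//	opts := persistentConfig.Pebble.ExtraOptions() // *pebble.ExtraOptions built in cmd/conf/database.go
//	chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", cache, handles, ancient, "l2chaindata/", readonly, opts)
//	arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", readonly, opts)
//
// The TODO above flags the one call left on the plain OpenDatabase API.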
if err != nil { log.Warn("Classic Msg Database not found", "err", err) classicOutbox = nil diff --git a/go-ethereum b/go-ethereum index daccadb06c..935cb21640 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit daccadb06c7bd9ad7e86c74f33ea39d897f0ece4 +Subproject commit 935cb216402c9693faf86d75a7fbb045109ed4a3 diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 7f9f4844fd..4bcf1349e2 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -21,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/deploy" @@ -718,9 +719,10 @@ func createL2BlockChainWithStackConfig( stack, err = node.New(stackConfig) Require(t, err) - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData) @@ -922,9 +924,10 @@ func Create2ndNodeWithConfig( l2stack, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs + l2chainDb, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) - l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(l2InitData) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index c4a3c453d8..be0ef9c957 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -25,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution/gethexec" @@ -175,10 +176,11 @@ func TestDASRekey(t *testing.T) { l2stackA, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stackA.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + // TODO get pebble.ExtraOptions from conf.PersistentConfig + l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) - l2arbDb, err := l2stackA.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + l2arbDb, err := l2stackA.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) l2blockchain, err := 
gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index 8efc8653e6..e83c350804 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -65,7 +65,8 @@ func TestPruning(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + // TODO get pebble.ExtraOptions from conf.PersistentConfig + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) defer chainDb.Close() chainDbEntriesBeforePruning := countStateEntries(chainDb) @@ -89,7 +90,8 @@ func TestPruning(t *testing.T) { initConfig := conf.InitConfigDefault initConfig.Prune = "full" coreCacheConfig := gethexec.DefaultCacheConfigFor(stack, &builder.execConfig.Caching) - err = pruning.PruneChainDb(ctx, chainDb, stack, &initConfig, coreCacheConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false) + persistentConfig := conf.PersistentConfigDefault + err = pruning.PruneChainDb(ctx, chainDb, stack, &initConfig, coreCacheConfig, &persistentConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false) Require(t, err) for _, key := range testKeys { diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 632e748da8..9dc1081a7b 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/execution/gethexec" ) @@ -49,7 +50,8 @@ func TestRectreateMissingStates(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + // TODO get pebble.ExtraOptions from conf.PersistentConfig + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) Require(t, err) defer chainDb.Close() cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) From 43d6e82020468299029f96426f20dd98635380b2 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 22 Apr 2024 16:20:57 +0200 Subject: [PATCH 03/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 935cb21640..9e62e652e2 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 935cb216402c9693faf86d75a7fbb045109ed4a3 +Subproject commit 9e62e652e211a47ad1c71a428b4a7ea6b96ae710 From 8981880ff6d341a9961dcaa0ee4466ee872de339 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 22 Apr 2024 16:47:21 +0200 Subject: [PATCH 04/75] fix koanf prefix --- cmd/conf/database.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index be0c630fa9..8e3759ee73 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -115,7 +115,7 @@ type PebbleConfig struct { WALDir string `koanf:"wal-dir"` WALMinSyncInterval int `koanf:"wal-min-sync-interval"` TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` - Experimental 
PebbleExperimentalConfig `koaf:"experimental"` + Experimental PebbleExperimentalConfig `koanf:"experimental"` } var PebbleConfigDefault = PebbleConfig{ @@ -144,7 +144,7 @@ func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud (0 = pebble default)") f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") - PebbleExperimentalConfigAddOptions(".experimental", f) + PebbleExperimentalConfigAddOptions(prefix+".experimental", f) } type PebbleExperimentalConfig struct { From 9d128ea332bbd987b739add12f6d142953595645 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 23 Apr 2024 15:30:03 +0200 Subject: [PATCH 05/75] add missing koanf pebble flag --- cmd/conf/database.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 8e3759ee73..9264baa843 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -144,6 +144,7 @@ func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud (0 = pebble default)") f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. 
If WAL syncs are requested faster than this interval, they will be artificially delayed.") + f.Int(prefix+".target-byte-deletion-rate", PebbleConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") PebbleExperimentalConfigAddOptions(prefix+".experimental", f) } From 7be2e34314ac234149e7408663781ce160817bcf Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 23 Apr 2024 19:40:19 +0200 Subject: [PATCH 06/75] add pebble layers config --- cmd/conf/database.go | 19 +++++++++++++++++++ go-ethereum | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 9264baa843..fdf8eed565 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -116,6 +116,8 @@ type PebbleConfig struct { WALMinSyncInterval int `koanf:"wal-min-sync-interval"` TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` Experimental PebbleExperimentalConfig `koanf:"experimental"` + TargetFileSize int64 `koanf:"target-file-size"` + TargetFileSizeEqualLayers bool `koanf:"target-file-size-equal-layers"` } var PebbleConfigDefault = PebbleConfig{ @@ -131,6 +133,8 @@ var PebbleConfigDefault = PebbleConfig{ WALMinSyncInterval: 0, // pebble default will be used TargetByteDeletionRate: 0, // pebble default will be used Experimental: PebbleExperimentalConfigDefault, + TargetFileSize: 2 * 1024 * 1024, + TargetFileSizeEqualLayers: true, } func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -146,6 +150,8 @@ func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") f.Int(prefix+".target-byte-deletion-rate", PebbleConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") PebbleExperimentalConfigAddOptions(prefix+".experimental", f) + f.Int64(prefix+".target-file-size", PebbleConfigDefault.TargetFileSize, "target file size for the level 0") + f.Bool(prefix+".target-file-size-equal-layers", PebbleConfigDefault.TargetFileSizeEqualLayers, "if true same target-file-size will be uses for all layers, otherwise target size for layer n = 2 * target size for layer n - 1") } type PebbleExperimentalConfig struct { @@ -186,6 +192,18 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { return time.Microsecond * time.Duration(c.WALMinSyncInterval) } } + var levels []pebble.ExtraLevelOptions + if c.TargetFileSize > 0 { + if c.TargetFileSizeEqualLayers { + for i := 0; i < 7; i++ { + levels = append(levels, pebble.ExtraLevelOptions{TargetFileSize: c.TargetFileSize}) + } + } else { + for i := 0; i < 7; i++ { + levels = append(levels, pebble.ExtraLevelOptions{TargetFileSize: c.TargetFileSize << i}) + } + } + } return &pebble.ExtraOptions{ BytesPerSync: c.BytesPerSync, L0CompactionFileThreshold: c.L0CompactionFileThreshold, @@ -206,5 +224,6 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { MaxWriterConcurrency: c.Experimental.MaxWriterConcurrency, ForceWriterParallelism: c.Experimental.ForceWriterParallelism, }, + Levels: levels, } } diff --git a/go-ethereum b/go-ethereum index 9e62e652e2..d6428a6842 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 9e62e652e211a47ad1c71a428b4a7ea6b96ae710 +Subproject commit 
d6428a6842a8c7d39821e74662fe3e0af34babd7 From 95422f94ecfd17e2a5e3be49efe1d3fd605d51d6 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 24 Apr 2024 15:15:58 +0200 Subject: [PATCH 07/75] add pebble block size and index block size options --- cmd/conf/database.go | 40 +++++++++++++++++++++++++++------------- go-ethereum | 2 +- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index fdf8eed565..bdaf8c1b73 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -5,6 +5,7 @@ package conf import ( "fmt" + "math" "os" "path" "path/filepath" @@ -116,8 +117,12 @@ type PebbleConfig struct { WALMinSyncInterval int `koanf:"wal-min-sync-interval"` TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` Experimental PebbleExperimentalConfig `koanf:"experimental"` - TargetFileSize int64 `koanf:"target-file-size"` - TargetFileSizeEqualLayers bool `koanf:"target-file-size-equal-layers"` + + // level specific + BlockSize int `koanf:"block-size"` + IndexBlockSize int `koanf:"index-block-size"` + TargetFileSize int64 `koanf:"target-file-size"` + TargetFileSizeEqualLevels bool `koanf:"target-file-size-equal-levels"` } var PebbleConfigDefault = PebbleConfig{ @@ -133,8 +138,10 @@ var PebbleConfigDefault = PebbleConfig{ WALMinSyncInterval: 0, // pebble default will be used TargetByteDeletionRate: 0, // pebble default will be used Experimental: PebbleExperimentalConfigDefault, + BlockSize: 4096, + IndexBlockSize: 4096, TargetFileSize: 2 * 1024 * 1024, - TargetFileSizeEqualLayers: true, + TargetFileSizeEqualLevels: true, } func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -149,9 +156,11 @@ func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") f.Int(prefix+".target-byte-deletion-rate", PebbleConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") + f.Int(prefix+".block-size", PebbleConfigDefault.BlockSize, "target uncompressed size in bytes of each table block") + f.Int(prefix+".index-block-size", PebbleConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. 
Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32)) PebbleExperimentalConfigAddOptions(prefix+".experimental", f) f.Int64(prefix+".target-file-size", PebbleConfigDefault.TargetFileSize, "target file size for the level 0") - f.Bool(prefix+".target-file-size-equal-layers", PebbleConfigDefault.TargetFileSizeEqualLayers, "if true same target-file-size will be uses for all layers, otherwise target size for layer n = 2 * target size for layer n - 1") + f.Bool(prefix+".target-file-size-equal-levels", PebbleConfigDefault.TargetFileSizeEqualLevels, "if true same target-file-size will be uses for all levels, otherwise target size for layer n = 2 * target size for layer n - 1") } type PebbleExperimentalConfig struct { @@ -193,16 +202,16 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { } } var levels []pebble.ExtraLevelOptions - if c.TargetFileSize > 0 { - if c.TargetFileSizeEqualLayers { - for i := 0; i < 7; i++ { - levels = append(levels, pebble.ExtraLevelOptions{TargetFileSize: c.TargetFileSize}) - } - } else { - for i := 0; i < 7; i++ { - levels = append(levels, pebble.ExtraLevelOptions{TargetFileSize: c.TargetFileSize << i}) - } + for i := 0; i < 7; i++ { + targetFileSize := c.TargetFileSize + if !c.TargetFileSizeEqualLevels { + targetFileSize = targetFileSize << i } + levels = append(levels, pebble.ExtraLevelOptions{ + BlockSize: c.BlockSize, + IndexBlockSize: c.IndexBlockSize, + TargetFileSize: targetFileSize, + }) } return &pebble.ExtraOptions{ BytesPerSync: c.BytesPerSync, @@ -227,3 +236,8 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { Levels: levels, } } + +func (c *PebbleConfig) Validate() error { + // TODO + return nil +} diff --git a/go-ethereum b/go-ethereum index d6428a6842..509f1114ed 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit d6428a6842a8c7d39821e74662fe3e0af34babd7 +Subproject commit 509f1114edd9d4e367cedfe4011ceed5766e3f07 From 69d65fedd619a44e186be83de134e6ae3681d63c Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 24 Apr 2024 15:38:25 +0200 Subject: [PATCH 08/75] add MemTableStopWritesThreshold pebble option --- cmd/conf/database.go | 4 ++++ go-ethereum | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index bdaf8c1b73..59a7cafd51 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -110,6 +110,7 @@ type PebbleConfig struct { L0CompactionThreshold int `koanf:"l0-compaction-threshold"` L0StopWritesThreshold int `koanf:"l0-stop-writes-threshold"` LBaseMaxBytes int64 `koanf:"l-base-max-bytes"` + MemTableStopWritesThreshold int `koanf:"mem-table-stop-writes-threshold"` MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` DisableAutomaticCompactions bool `koanf:"disable-automatic-compactions"` WALBytesPerSync int `koanf:"wal-bytes-per-sync"` @@ -131,6 +132,7 @@ var PebbleConfigDefault = PebbleConfig{ L0CompactionThreshold: 0, // pebble default will be used L0StopWritesThreshold: 0, // pebble default will be used LBaseMaxBytes: 0, // pebble default will be used + MemTableStopWritesThreshold: 2, MaxConcurrentCompactions: runtime.NumCPU(), DisableAutomaticCompactions: false, WALBytesPerSync: 0, // pebble default will be used @@ -150,6 +152,7 @@ func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".l0-compaction-threshold", PebbleConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction (0 = 
pebble default)") f.Int(prefix+".l0-stop-writes-threshold", PebbleConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") f.Int64(prefix+".l-base-max-bytes", PebbleConfigDefault.LBaseMaxBytes, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") + f.Int(prefix+".mem-table-stop-writes-threshold", PebbleConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions (0 = pebble default)") f.Bool(prefix+".disable-automatic-compactions", PebbleConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud (0 = pebble default)") @@ -219,6 +222,7 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { L0CompactionThreshold: c.L0CompactionThreshold, L0StopWritesThreshold: c.L0StopWritesThreshold, LBaseMaxBytes: c.LBaseMaxBytes, + MemTableStopWritesThreshold: c.MemTableStopWritesThreshold, MaxConcurrentCompactions: maxConcurrentCompactions, DisableAutomaticCompactions: c.DisableAutomaticCompactions, WALBytesPerSync: c.WALBytesPerSync, diff --git a/go-ethereum b/go-ethereum index 509f1114ed..040c6f7870 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 509f1114edd9d4e367cedfe4011ceed5766e3f07 +Subproject commit 040c6f787056826112340ce0b4e5b8d43503f20a From c835fea2a840120adfc3459933a7118ae5219265 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 24 Apr 2024 16:02:57 +0200 Subject: [PATCH 09/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 040c6f7870..5e8d11c191 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 040c6f787056826112340ce0b4e5b8d43503f20a +Subproject commit 5e8d11c191c4b88e53ca53e69b7854efe89487fd From 04b16998573bcb0ae4bf10c6dd316e7eda004000 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 25 Apr 2024 00:48:59 +0200 Subject: [PATCH 10/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 5e8d11c191..07d08fede3 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 5e8d11c191c4b88e53ca53e69b7854efe89487fd +Subproject commit 07d08fede3e5e8bbfbdb3797fad08d94f8c7699a From 08ece6f85c9bf9c2ef393ab7d1cdcf5d53f7cac7 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 25 Apr 2024 01:01:52 +0200 Subject: [PATCH 11/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 07d08fede3..31dcc54970 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 07d08fede3e5e8bbfbdb3797fad08d94f8c7699a +Subproject commit 31dcc54970876a09e13820a4a7334f39af38157d From 028fd31cc45f3948420ea6cd76e40251a177edd6 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 25 Apr 2024 02:29:22 +0200 Subject: [PATCH 12/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 31dcc54970..a67aac7029 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ 
-Subproject commit 31dcc54970876a09e13820a4a7334f39af38157d +Subproject commit a67aac7029db022dd0e078783809e2fedf20de53 From 6d5343d5e2882a30b2fcae7d17f02591289c0f26 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Fri, 26 Apr 2024 23:23:01 +0200 Subject: [PATCH 13/75] update pebble options descriptions --- cmd/conf/database.go | 20 ++++++++++---------- go-ethereum | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 59a7cafd51..1c8b673dd3 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -147,15 +147,15 @@ var PebbleConfigDefault = PebbleConfig{ } func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".bytes-per-sync", PebbleConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background (0 = pebble default)") - f.Int(prefix+".l0-compaction-file-threshold", PebbleConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction (0 = pebble default)") - f.Int(prefix+".l0-compaction-threshold", PebbleConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction (0 = pebble default)") - f.Int(prefix+".l0-stop-writes-threshold", PebbleConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") - f.Int64(prefix+".l-base-max-bytes", PebbleConfigDefault.LBaseMaxBytes, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached (0 = pebble default)") + f.Int(prefix+".bytes-per-sync", PebbleConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background") + f.Int(prefix+".l0-compaction-file-threshold", PebbleConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction") + f.Int(prefix+".l0-compaction-threshold", PebbleConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction") + f.Int(prefix+".l0-stop-writes-threshold", PebbleConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached") + f.Int64(prefix+".l-base-max-bytes", PebbleConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. 
When the maximum number of bytes for a level is exceeded, compaction is requested.") f.Int(prefix+".mem-table-stop-writes-threshold", PebbleConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") - f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions (0 = pebble default)") + f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions") f.Bool(prefix+".disable-automatic-compactions", PebbleConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") - f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud (0 = pebble default)") + f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud") f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") f.Int(prefix+".target-byte-deletion-rate", PebbleConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") @@ -185,10 +185,10 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ } func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions. (0 = pebble default)") - f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen. (0 = pebble default)") + f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.") + f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. 
This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.") f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate") - f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB). (0 = pebble default)") + f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4. which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).") f.Int(prefix+".max-writer-concurrency", PebbleExperimentalConfigDefault.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism, to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.") f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests. Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.") } diff --git a/go-ethereum b/go-ethereum index a67aac7029..9f39f194d0 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit a67aac7029db022dd0e078783809e2fedf20de53 +Subproject commit 9f39f194d0a5b1ab1a47b1d4f83cd112f18dc4b3 From 6a1f54d59117ef600bd140fd1b50b1b99b1ddc8d Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 30 Apr 2024 18:44:59 +0100 Subject: [PATCH 14/75] Add option for websocket message size limit This plumbs through the websocket message size limit option for all rpc clients. 
--- cmd/conf/chain.go | 2 ++ util/rpcclient/rpcclient.go | 29 +++++++++++++++++------------ 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 531945b4d6..8ad853e7aa 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -25,6 +25,8 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ Timeout: time.Minute, ConnectionWait: time.Minute, ArgLogLimit: 2048, + // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } var L1ConfigDefault = ParentChainConfig{ diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 02b41cf15d..cc6f11c986 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -21,14 +21,15 @@ import ( ) type ClientConfig struct { - URL string `json:"url,omitempty" koanf:"url"` - JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` - Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` - Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` - ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` - ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` - RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` - RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + URL string `json:"url,omitempty" koanf:"url"` + JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` + Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` + Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` + ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` + ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` + RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` + RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + WebsocketMessageSizeLimit int64 `json:"websocket-message-size-limit,omitempty" koanf:"websocket-message-size-limit"` retryErrors *regexp.Regexp } @@ -46,8 +47,9 @@ func (c *ClientConfig) Validate() error { type ClientConfigFetcher func() *ClientConfig var TestClientConfig = ClientConfig{ - URL: "self", - JWTSecret: "", + URL: "self", + JWTSecret: "", + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } var DefaultClientConfig = ClientConfig{ @@ -56,6 +58,8 @@ var DefaultClientConfig = ClientConfig{ Retries: 3, RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", ArgLogLimit: 2048, + // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go + WebsocketMessageSizeLimit: 32 * 1024 * 1024, } func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientConfig) { @@ -67,6 +71,7 @@ func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientCo f.Uint(prefix+".retries", defaultConfig.Retries, "number of retries in case of failure(0 mean one attempt)") f.String(prefix+".retry-errors", defaultConfig.RetryErrors, "Errors matching this regular expression are automatically retried") f.Duration(prefix+".retry-delay", defaultConfig.RetryDelay, "delay between retries") + f.Int64(prefix+".websocket-message-size-limit", defaultConfig.WebsocketMessageSizeLimit, "websocket message size limit used by the RPC client. 
0 means no limit") } type RpcClient struct { @@ -256,9 +261,9 @@ func (c *RpcClient) Start(ctx_in context.Context) error { var err error var client *rpc.Client if jwt == nil { - client, err = rpc.DialContext(ctx, url) + client, err = rpc.DialOptions(ctx, url, rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } else { - client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt)))) + client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt))), rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } cancelCtx() if err == nil { From 3919a6e8175588c78b8ae7b8bd5f2e3e6ae84253 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 2 May 2024 17:39:12 +0200 Subject: [PATCH 15/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 9f39f194d0..3ecb5979ae 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 9f39f194d0a5b1ab1a47b1d4f83cd112f18dc4b3 +Subproject commit 3ecb5979ae489902c97d7146209c35071d167be6 From 991f07d2e3e6c8d1d74368977f463b21d73dc59e Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 2 May 2024 18:05:05 +0200 Subject: [PATCH 16/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 3ecb5979ae..1aaeef7598 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 3ecb5979ae489902c97d7146209c35071d167be6 +Subproject commit 1aaeef75987a3d4379cf7d876cdf1526d8701884 From 39d33c7a88e01a1e5ca77f2c2ff45d06ede45498 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Sat, 4 May 2024 00:27:13 +0200 Subject: [PATCH 17/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 1aaeef7598..ac85a19d5f 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 1aaeef75987a3d4379cf7d876cdf1526d8701884 +Subproject commit ac85a19d5f56231076d5bab95504d666b084fa3b From 04b9b373b6fb4e529c6a0b27d6fc847de97ee35d Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 7 May 2024 16:01:27 -0500 Subject: [PATCH 18/75] Block reexecutor should not try to reexecute genesis block --- blocks_reexecutor/blocks_reexecutor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index bedea37776..0ad4337e0f 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -104,7 +104,8 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block end = start + rng } // Inclusive of block reexecution [start, end] - if start > 0 { + // Do not reexecute genesis block i,e chainStart + if start > 0 && start != chainStart { start-- } // Divide work equally among available threads From dc706bf5f48307951a5172205d6fe058dd5f9db4 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 9 May 2024 17:21:29 -0700 Subject: [PATCH 19/75] Increase default websocket size limit --- cmd/conf/chain.go | 13 ++++++------- util/rpcclient/rpcclient.go | 15 +++++++-------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 8ad853e7aa..ab9a713287 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -20,13 +20,12 @@ type ParentChainConfig struct { } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ - URL: "", - Retries: 2, - Timeout: time.Minute, - 
ConnectionWait: time.Minute, - ArgLogLimit: 2048, - // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + URL: "", + Retries: 2, + Timeout: time.Minute, + ConnectionWait: time.Minute, + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var L1ConfigDefault = ParentChainConfig{ diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index cc6f11c986..56aebef396 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -49,17 +49,16 @@ type ClientConfigFetcher func() *ClientConfig var TestClientConfig = ClientConfig{ URL: "self", JWTSecret: "", - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var DefaultClientConfig = ClientConfig{ - URL: "self-auth", - JWTSecret: "", - Retries: 3, - RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", - ArgLogLimit: 2048, - // Use geth's unexported wsDefaultReadLimit from rpc/websocket.go - WebsocketMessageSizeLimit: 32 * 1024 * 1024, + URL: "self-auth", + JWTSecret: "", + Retries: 3, + RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientConfig) { From 90374dc51277dd99c6fce14737f0cd17e4406f29 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 14 May 2024 12:32:23 +0200 Subject: [PATCH 20/75] add subdirectory to wal-dir to avoid filename collision between dbs, move most pebble options to experimental section --- arbnode/dataposter/storage_test.go | 2 +- cmd/conf/database.go | 171 +++++++++++++++++------------ cmd/nitro/init.go | 6 +- cmd/nitro/nitro.go | 2 +- cmd/pruning/pruning.go | 2 +- system_tests/common_test.go | 8 +- system_tests/das_test.go | 4 +- system_tests/pruning_test.go | 2 +- system_tests/staterecovery_test.go | 2 +- 9 files changed, 113 insertions(+), 86 deletions(-) diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index 343efac3c7..e2aa321e0d 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -45,7 +45,7 @@ func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.St func newPebbleDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage { t.Helper() - db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true, conf.PersistentConfigDefault.Pebble.ExtraOptions("pebble")) if err != nil { t.Fatalf("NewPebbleDBDatabase() unexpected error: %v", err) } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 1c8b673dd3..d60ee51c5b 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -101,81 +101,85 @@ func (c *PersistentConfig) Validate() error { if c.DBEngine != "leveldb" && c.DBEngine != "pebble" { return fmt.Errorf(`invalid .db-engine choice: %q, allowed "leveldb" or "pebble"`, c.DBEngine) } + if c.DBEngine == "pebble" { + if err := c.Pebble.Validate(); err != nil { + return err + } + } return nil } type PebbleConfig struct { - BytesPerSync int `koanf:"bytes-per-sync"` - L0CompactionFileThreshold int `koanf:"l0-compaction-file-threshold"` - L0CompactionThreshold int `koanf:"l0-compaction-threshold"` 
- L0StopWritesThreshold int `koanf:"l0-stop-writes-threshold"` - LBaseMaxBytes int64 `koanf:"l-base-max-bytes"` - MemTableStopWritesThreshold int `koanf:"mem-table-stop-writes-threshold"` - MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` - DisableAutomaticCompactions bool `koanf:"disable-automatic-compactions"` - WALBytesPerSync int `koanf:"wal-bytes-per-sync"` - WALDir string `koanf:"wal-dir"` - WALMinSyncInterval int `koanf:"wal-min-sync-interval"` - TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` - Experimental PebbleExperimentalConfig `koanf:"experimental"` + MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` + Experimental PebbleExperimentalConfig `koanf:"experimental"` +} + +var PebbleConfigDefault = PebbleConfig{ + MaxConcurrentCompactions: runtime.NumCPU(), + Experimental: PebbleExperimentalConfigDefault, +} + +func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions") + PebbleExperimentalConfigAddOptions(prefix+".experimental", f) +} + +func (c *PebbleConfig) Validate() error { + if c.MaxConcurrentCompactions < 1 { + return fmt.Errorf("invalid .max-concurrent-compactions value: %d, has to be greater than 0", c.MaxConcurrentCompactions) + } + if err := c.Experimental.Validate(); err != nil { + return err + } + return nil +} + +type PebbleExperimentalConfig struct { + BytesPerSync int `koanf:"bytes-per-sync"` + L0CompactionFileThreshold int `koanf:"l0-compaction-file-threshold"` + L0CompactionThreshold int `koanf:"l0-compaction-threshold"` + L0StopWritesThreshold int `koanf:"l0-stop-writes-threshold"` + LBaseMaxBytes int64 `koanf:"l-base-max-bytes"` + MemTableStopWritesThreshold int `koanf:"mem-table-stop-writes-threshold"` + DisableAutomaticCompactions bool `koanf:"disable-automatic-compactions"` + WALBytesPerSync int `koanf:"wal-bytes-per-sync"` + WALDir string `koanf:"wal-dir"` + WALMinSyncInterval int `koanf:"wal-min-sync-interval"` + TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` // level specific BlockSize int `koanf:"block-size"` IndexBlockSize int `koanf:"index-block-size"` TargetFileSize int64 `koanf:"target-file-size"` TargetFileSizeEqualLevels bool `koanf:"target-file-size-equal-levels"` + + // pebble experimental + L0CompactionConcurrency int `koanf:"l0-compaction-concurrency"` + CompactionDebtConcurrency uint64 `koanf:"compaction-debt-concurrency"` + ReadCompactionRate int64 `koanf:"read-compaction-rate"` + ReadSamplingMultiplier int64 `koanf:"read-sampling-multiplier"` + MaxWriterConcurrency int `koanf:"max-writer-concurrency"` + ForceWriterParallelism bool `koanf:"force-writer-parallelism"` } -var PebbleConfigDefault = PebbleConfig{ +var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ BytesPerSync: 0, // pebble default will be used L0CompactionFileThreshold: 0, // pebble default will be used L0CompactionThreshold: 0, // pebble default will be used L0StopWritesThreshold: 0, // pebble default will be used LBaseMaxBytes: 0, // pebble default will be used MemTableStopWritesThreshold: 2, - MaxConcurrentCompactions: runtime.NumCPU(), DisableAutomaticCompactions: false, WALBytesPerSync: 0, // pebble default will be used WALDir: "", // default will use same dir as for sstables WALMinSyncInterval: 0, // pebble default will be used TargetByteDeletionRate: 0, // pebble default will be used - Experimental: PebbleExperimentalConfigDefault, - BlockSize: 4096, -
IndexBlockSize: 4096, - TargetFileSize: 2 * 1024 * 1024, - TargetFileSizeEqualLevels: true, -} -func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".bytes-per-sync", PebbleConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background") - f.Int(prefix+".l0-compaction-file-threshold", PebbleConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction") - f.Int(prefix+".l0-compaction-threshold", PebbleConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction") - f.Int(prefix+".l0-stop-writes-threshold", PebbleConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached") - f.Int64(prefix+".l-base-max-bytes", PebbleConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.") - f.Int(prefix+".mem-table-stop-writes-threshold", PebbleConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") - f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions") - f.Bool(prefix+".disable-automatic-compactions", PebbleConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") - f.Int(prefix+".wal-bytes-per-sync", PebbleConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud") - f.String(prefix+".wal-dir", PebbleConfigDefault.WALDir, "directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") - f.Int(prefix+".wal-min-sync-interval", PebbleConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") - f.Int(prefix+".target-byte-deletion-rate", PebbleConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") - f.Int(prefix+".block-size", PebbleConfigDefault.BlockSize, "target uncompressed size in bytes of each table block") - f.Int(prefix+".index-block-size", PebbleConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. 
Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32)) - PebbleExperimentalConfigAddOptions(prefix+".experimental", f) - f.Int64(prefix+".target-file-size", PebbleConfigDefault.TargetFileSize, "target file size for the level 0") - f.Bool(prefix+".target-file-size-equal-levels", PebbleConfigDefault.TargetFileSizeEqualLevels, "if true same target-file-size will be uses for all levels, otherwise target size for layer n = 2 * target size for layer n - 1") -} - -type PebbleExperimentalConfig struct { - L0CompactionConcurrency int `koanf:"l0-compaction-concurrency"` - CompactionDebtConcurrency uint64 `koanf:"compaction-debt-concurrency"` - ReadCompactionRate int64 `koanf:"read-compaction-rate"` - ReadSamplingMultiplier int64 `koanf:"read-sampling-multiplier"` - MaxWriterConcurrency int `koanf:"max-writer-concurrency"` - ForceWriterParallelism bool `koanf:"force-writer-parallelism"` -} + BlockSize: 4096, + IndexBlockSize: 4096, + TargetFileSize: 2 * 1024 * 1024, + TargetFileSizeEqualLevels: true, -var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ L0CompactionConcurrency: 0, CompactionDebtConcurrency: 0, ReadCompactionRate: 0, @@ -185,6 +189,22 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ } func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".bytes-per-sync", PebbleExperimentalConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background") + f.Int(prefix+".l0-compaction-file-threshold", PebbleExperimentalConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction") + f.Int(prefix+".l0-compaction-threshold", PebbleExperimentalConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction") + f.Int(prefix+".l0-stop-writes-threshold", PebbleExperimentalConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached") + f.Int64(prefix+".l-base-max-bytes", PebbleExperimentalConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.") + f.Int(prefix+".mem-table-stop-writes-threshold", PebbleExperimentalConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") + f.Bool(prefix+".disable-automatic-compactions", PebbleExperimentalConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") + f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud") + f.String(prefix+".wal-dir", PebbleExperimentalConfigDefault.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") + f.Int(prefix+".wal-min-sync-interval", PebbleExperimentalConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. 
If WAL syncs are requested faster than this interval, they will be artificially delayed.") + f.Int(prefix+".target-byte-deletion-rate", PebbleExperimentalConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") + f.Int(prefix+".block-size", PebbleExperimentalConfigDefault.BlockSize, "target uncompressed size in bytes of each table block") + f.Int(prefix+".index-block-size", PebbleExperimentalConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32)) + f.Int64(prefix+".target-file-size", PebbleExperimentalConfigDefault.TargetFileSize, "target file size for level 0") + f.Bool(prefix+".target-file-size-equal-levels", PebbleExperimentalConfigDefault.TargetFileSizeEqualLevels, "if true same target-file-size will be used for all levels, otherwise target size for layer n = 2 * target size for layer n - 1") + f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.") f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.") f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate") @@ -193,42 +213,54 @@ func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests.
Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.") } -func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { +func (c *PebbleExperimentalConfig) Validate() error { + if !filepath.IsAbs(c.WALDir) { + return fmt.Errorf("invalid .wal-dir directory (%s) - has to be an absolute path", c.WALDir) + } + // TODO + return nil +} + +func (c *PebbleConfig) ExtraOptions(namespace string) *pebble.ExtraOptions { var maxConcurrentCompactions func() int if c.MaxConcurrentCompactions > 0 { maxConcurrentCompactions = func() int { return c.MaxConcurrentCompactions } } var walMinSyncInterval func() time.Duration - if c.WALMinSyncInterval > 0 { + if c.Experimental.WALMinSyncInterval > 0 { walMinSyncInterval = func() time.Duration { - return time.Microsecond * time.Duration(c.WALMinSyncInterval) + return time.Microsecond * time.Duration(c.Experimental.WALMinSyncInterval) } } var levels []pebble.ExtraLevelOptions for i := 0; i < 7; i++ { - targetFileSize := c.TargetFileSize - if !c.TargetFileSizeEqualLevels { + targetFileSize := c.Experimental.TargetFileSize + if !c.Experimental.TargetFileSizeEqualLevels { targetFileSize = targetFileSize << i } levels = append(levels, pebble.ExtraLevelOptions{ - BlockSize: c.BlockSize, - IndexBlockSize: c.IndexBlockSize, + BlockSize: c.Experimental.BlockSize, + IndexBlockSize: c.Experimental.IndexBlockSize, TargetFileSize: targetFileSize, }) } + walDir := c.Experimental.WALDir + if walDir != "" { + walDir = path.Join(walDir, namespace) + } return &pebble.ExtraOptions{ - BytesPerSync: c.BytesPerSync, - L0CompactionFileThreshold: c.L0CompactionFileThreshold, - L0CompactionThreshold: c.L0CompactionThreshold, - L0StopWritesThreshold: c.L0StopWritesThreshold, - LBaseMaxBytes: c.LBaseMaxBytes, - MemTableStopWritesThreshold: c.MemTableStopWritesThreshold, + BytesPerSync: c.Experimental.BytesPerSync, + L0CompactionFileThreshold: c.Experimental.L0CompactionFileThreshold, + L0CompactionThreshold: c.Experimental.L0CompactionThreshold, + L0StopWritesThreshold: c.Experimental.L0StopWritesThreshold, + LBaseMaxBytes: c.Experimental.LBaseMaxBytes, + MemTableStopWritesThreshold: c.Experimental.MemTableStopWritesThreshold, MaxConcurrentCompactions: maxConcurrentCompactions, - DisableAutomaticCompactions: c.DisableAutomaticCompactions, - WALBytesPerSync: c.WALBytesPerSync, - WALDir: c.WALDir, + DisableAutomaticCompactions: c.Experimental.DisableAutomaticCompactions, + WALBytesPerSync: c.Experimental.WALBytesPerSync, + WALDir: walDir, WALMinSyncInterval: walMinSyncInterval, - TargetByteDeletionRate: c.TargetByteDeletionRate, + TargetByteDeletionRate: c.Experimental.TargetByteDeletionRate, Experimental: pebble.ExtraOptionsExperimental{ L0CompactionConcurrency: c.Experimental.L0CompactionConcurrency, CompactionDebtConcurrency: c.Experimental.CompactionDebtConcurrency, @@ -240,8 +272,3 @@ func (c *PebbleConfig) ExtraOptions() *pebble.ExtraOptions { Levels: levels, } } - -func (c *PebbleConfig) Validate() error { - // TODO - return nil -} diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 9362154ec0..31ce4b91ea 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -172,13 +172,13 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, 
rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { - if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, "", "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions()); err == nil { + if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, "", "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions("l2chaindata")); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() if !arbmath.BigEquals(chainConfig.ChainID, chainId) { return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) } - chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions()) + chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions("l2chaindata")) if err != nil { return chainDb, nil, err } @@ -230,7 +230,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions()) + chainDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions("l2chaindata")) if err != nil { return chainDb, nil, err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 277afa302a..4ee042d477 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -489,7 +489,7 @@ func mainImpl() int { return 1 } - arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, nodeConfig.Persistent.Pebble.ExtraOptions()) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, nodeConfig.Persistent.Pebble.ExtraOptions("arbitrumdata")) deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { log.Error("failed to open database", "err", err) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 363126a49f..72e7d2c516 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -85,7 +85,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node if chainConfig == nil { return nil, errors.New("database doesn't have a chain config (was this node initialized?)") } - arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", true, persistentConfig.Pebble.ExtraOptions()) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", true, persistentConfig.Pebble.ExtraOptions("arbitrumdata")) if err != nil { return nil, err } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index fd63eb9431..4aa8581bd0 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -774,9 +774,9 @@ func createL2BlockChainWithStackConfig( 
Require(t, err) // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs - chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) - arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData) @@ -979,9 +979,9 @@ func Create2ndNodeWithConfig( Require(t, err) // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs - l2chainDb, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + l2chainDb, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) - l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(l2InitData) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 7495b9a13e..2febadb3d2 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -179,10 +179,10 @@ func TestDASRekey(t *testing.T) { Require(t, err) // TODO get pebble.ExtraOptions from conf.PersistentConfig - l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) - l2arbDb, err := l2stackA.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + l2arbDb, err := l2stackA.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) l2blockchain, err := gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index e83c350804..d2453887ee 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -66,7 +66,7 @@ func TestPruning(t *testing.T) { Require(t, err) defer stack.Close() // TODO get pebble.ExtraOptions from conf.PersistentConfig - chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() chainDbEntriesBeforePruning := countStateEntries(chainDb) diff --git 
a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 9dc1081a7b..459a6e3ee8 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -51,7 +51,7 @@ func TestRectreateMissingStates(t *testing.T) { Require(t, err) defer stack.Close() // TODO get pebble.ExtraOptions from conf.PersistentConfig - chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions()) + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) From d949c071bee0a9969955dc97834bdd168cabcd95 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 14 May 2024 12:41:53 +0200 Subject: [PATCH 21/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index ac85a19d5f..6d23a7b7e6 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit ac85a19d5f56231076d5bab95504d666b084fa3b +Subproject commit 6d23a7b7e6a99701adf1f69701ad367dec61c08c From cb72afd4ba1977b41bc4587901bea052a31e9f54 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 14 May 2024 17:16:28 +0200 Subject: [PATCH 22/75] set PebbleConfig defaults to geth / pebble defaults --- cmd/conf/database.go | 34 +++++++++++++++++----------------- cmd/nitro/nitro.go | 1 - 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/cmd/conf/database.go b/cmd/conf/database.go index d60ee51c5b..57674ba7f1 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -163,27 +163,27 @@ type PebbleExperimentalConfig struct { } var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ - BytesPerSync: 0, // pebble default will be used - L0CompactionFileThreshold: 0, // pebble default will be used - L0CompactionThreshold: 0, // pebble default will be used - L0StopWritesThreshold: 0, // pebble default will be used - LBaseMaxBytes: 0, // pebble default will be used + BytesPerSync: 512 << 10, // 512 KB + L0CompactionFileThreshold: 500, + L0CompactionThreshold: 4, + L0StopWritesThreshold: 12, + LBaseMaxBytes: 64 << 20, // 64 MB MemTableStopWritesThreshold: 2, DisableAutomaticCompactions: false, - WALBytesPerSync: 0, // pebble default will be used - WALDir: "", // default will use same dir as for sstables - WALMinSyncInterval: 0, // pebble default will be used - TargetByteDeletionRate: 0, // pebble default will be used + WALBytesPerSync: 0, // no background syncing + WALDir: "", // use same dir as for sstables + WALMinSyncInterval: 0, // no artificial delay + TargetByteDeletionRate: 0, // deletion pacing disabled - BlockSize: 4096, - IndexBlockSize: 4096, - TargetFileSize: 2 * 1024 * 1024, + BlockSize: 4 << 10, // 4 KB + IndexBlockSize: 4 << 10, // 4 KB + TargetFileSize: 2 << 20, // 2 MB TargetFileSizeEqualLevels: true, - L0CompactionConcurrency: 0, - CompactionDebtConcurrency: 0, - ReadCompactionRate: 0, - ReadSamplingMultiplier: -1, + L0CompactionConcurrency: 10, + CompactionDebtConcurrency: 1 << 30, // 1GB + ReadCompactionRate: 16000, // see ReadSamplingMultiplier comment + ReadSamplingMultiplier: -1, // geth default, disables read sampling and disables read triggered compaction MaxWriterConcurrency: 0, ForceWriterParallelism: false, } @@ -196,7 +196,7 @@ func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) 
{ f.Int64(prefix+".l-base-max-bytes", PebbleExperimentalConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.") f.Int(prefix+".mem-table-stop-writes-threshold", PebbleExperimentalConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") f.Bool(prefix+".disable-automatic-compactions", PebbleExperimentalConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") - f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the backgroud") + f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background") f.String(prefix+".wal-dir", PebbleExperimentalConfigDefault.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") f.Int(prefix+".wal-min-sync-interval", PebbleExperimentalConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. If WAL syncs are requested faster than this interval, they will be artificially delayed.") f.Int(prefix+".target-byte-deletion-rate", PebbleExperimentalConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 434f36eeb2..9cf2a1a136 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -177,7 +177,6 @@ func mainImpl() int { nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) nodeConfig.GraphQL.Apply(&stackConf) - if nodeConfig.WS.ExposeAll { stackConf.WSModules = append(stackConf.WSModules, "personal") } From 03ee1dc52e2f163b569b17b36edb96c65a04d9c2 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 14 May 2024 10:38:30 -0500 Subject: [PATCH 23/75] address PR comments --- blocks_reexecutor/blocks_reexecutor.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index 0ad4337e0f..a03b29fefd 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -25,6 +25,8 @@ type Config struct { EndBlock uint64 `koanf:"end-block"` Room int `koanf:"room"` BlocksPerThread uint64 `koanf:"blocks-per-thread"` + + blocksPerThread uint64 } func (c *Config) Validate() error { @@ -35,8 +37,13 @@ func (c *Config) Validate() error { if c.EndBlock < c.StartBlock { return errors.New("invalid block range for blocks re-execution") } - if c.Room == 0 { - return errors.New("room for blocks re-execution cannot be zero") + if c.Room < 0 { + return errors.New("room for blocks re-execution should be greater than 0") + } + if c.BlocksPerThread != 0 { + c.blocksPerThread = c.BlocksPerThread + } else { + c.blocksPerThread = 10000 } return nil } @@ -52,6 +59,7 @@ var TestConfig = Config{ Mode: "full", Room: runtime.NumCPU(), BlocksPerThread: 10, + blocksPerThread: 10, } func ConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -93,10 +101,7 
@@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block } if c.Mode == "random" && end != start { // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly - rng := uint64(10000) - if c.BlocksPerThread != 0 { - rng = c.BlocksPerThread - } + rng := c.blocksPerThread if rng > end-start { rng = end - start } @@ -108,12 +113,11 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block if start > 0 && start != chainStart { start-- } - // Divide work equally among available threads + // Divide work equally among available threads when BlocksPerThread is zero if c.BlocksPerThread == 0 { - c.BlocksPerThread = 10000 work := (end - start) / uint64(c.Room) if work > 0 { - c.BlocksPerThread = work + c.blocksPerThread = work } } return &BlocksReExecutor{ @@ -132,12 +136,10 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block // LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { - start := arbmath.SaturatingUSub(currentBlock, s.config.BlocksPerThread) + start := arbmath.SaturatingUSub(currentBlock, s.config.blocksPerThread) if start < s.startBlock { start = s.startBlock } - // we don't use state release pattern here - // TODO do we want to use release pattern here? startState, startHeader, release, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) From 761e8e2d8328dda454632f1d8a9a997505a42e56 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 15 May 2024 13:57:28 +0200 Subject: [PATCH 24/75] Create streams in redis client, poll on it in redis-server --- pubsub/common.go | 17 ++++++++ staker/block_validator.go | 2 +- validator/client/redis/producer.go | 9 +++- validator/server_common/machine_locator.go | 2 +- validator/valnode/redis/consumer.go | 49 ++++++++++++++++++++++ 5 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 pubsub/common.go diff --git a/pubsub/common.go b/pubsub/common.go new file mode 100644 index 0000000000..2aefa02c84 --- /dev/null +++ b/pubsub/common.go @@ -0,0 +1,17 @@ +package pubsub + +import ( + "context" + + "github.com/go-redis/redis/v8" +) + +// CreateStream tries to create stream with given name, if it already exists +// does not return an error. +func CreateStream(ctx context.Context, streamName string, client redis.UniversalClient) error { + _, err := client.XGroupCreateMkStream(ctx, streamName, streamName, "$").Result() + if err == nil || err.Error() == "BUSYGROUP Consumer Group name already exists" { + return nil + } + return err +} diff --git a/staker/block_validator.go b/staker/block_validator.go index e494b3da10..d9126a27f8 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1088,7 +1088,7 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { } // First spawner is always RedisValidationClient if RedisStreams are enabled. 
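An aside on the CreateStream helper introduced in pubsub/common.go above: XGROUP CREATE with MKSTREAM creates the stream as a side effect of creating the consumer group, and a pre-existing group surfaces as a BUSYGROUP error that is safe to swallow, which is what makes the call idempotent. A standalone sketch of the same pattern with go-redis v8 (the address and stream name are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func ensureStream(ctx context.Context, client redis.UniversalClient, stream string) error {
	// "$" starts the consumer group at the current end of the stream.
	_, err := client.XGroupCreateMkStream(ctx, stream, stream, "$").Result()
	if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
		return fmt.Errorf("creating stream %s: %w", stream, err)
	}
	return nil
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	if err := ensureStream(context.Background(), client, "example-stream"); err != nil {
		fmt.Println("ensure stream:", err)
	}
}
```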
if v.redisValidator != nil { - err := v.redisValidator.Initialize(moduleRoots) + err := v.redisValidator.Initialize(ctx, moduleRoots) if err != nil { return err } diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index 1055d93968..c971664bd3 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -23,6 +23,7 @@ type ValidationClientConfig struct { Room int32 `koanf:"room"` RedisURL string `koanf:"redis-url"` ProducerConfig pubsub.ProducerConfig `koanf:"producer-config"` + CreateStreams bool `koanf:"create-streams"` } func (c ValidationClientConfig) Enabled() bool { @@ -34,6 +35,7 @@ var DefaultValidationClientConfig = ValidationClientConfig{ Room: 2, RedisURL: "", ProducerConfig: pubsub.DefaultProducerConfig, + CreateStreams: true, } var TestValidationClientConfig = ValidationClientConfig{ @@ -41,12 +43,14 @@ var TestValidationClientConfig = ValidationClientConfig{ Room: 2, RedisURL: "", ProducerConfig: pubsub.TestProducerConfig, + CreateStreams: true, } func ValidationClientConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".name", DefaultValidationClientConfig.Name, "validation client name") f.Int32(prefix+".room", DefaultValidationClientConfig.Room, "validation client room") pubsub.ProducerAddConfigAddOptions(prefix+".producer-config", f) + f.Bool(prefix+".create-streams", DefaultValidationClientConfig.CreateStreams, "create redis streams if it does not exist") } // ValidationClient implements validation client through redis streams. @@ -78,8 +82,11 @@ func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) }, nil } -func (c *ValidationClient) Initialize(moduleRoots []common.Hash) error { +func (c *ValidationClient) Initialize(ctx context.Context, moduleRoots []common.Hash) error { for _, mr := range moduleRoots { + if err := pubsub.CreateStream(ctx, server_api.RedisStreamForRoot(mr), c.redisClient); err != nil { + return fmt.Errorf("creating redis stream: %w", err) + } if _, exists := c.producers[mr]; exists { log.Warn("Producer already existsw for module root", "hash", mr) continue diff --git a/validator/server_common/machine_locator.go b/validator/server_common/machine_locator.go index 28093c30f0..71f6af60b6 100644 --- a/validator/server_common/machine_locator.go +++ b/validator/server_common/machine_locator.go @@ -58,7 +58,7 @@ func NewMachineLocator(rootPath string) (*MachineLocator, error) { for _, dir := range dirs { fInfo, err := os.Stat(dir) if err != nil { - log.Warn("Getting file info", "error", err) + log.Warn("Getting file info", "dir", dir, "error", err) continue } if !fInfo.IsDir() { diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 1cadaf7c9a..95d45589f3 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -3,10 +3,13 @@ package redis import ( "context" "fmt" + "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/pubsub" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -42,12 +45,56 @@ func NewValidationServer(cfg *ValidationServerConfig, spawner validator.Validati } consumers[mr] = c } + var ( + wg sync.WaitGroup + initialized atomic.Bool + ) + initialized.Store(true) + for i := 0; i < len(cfg.ModuleRoots); i++ { + mr := cfg.ModuleRoots[i] + wg.Add(1) + go func() { + defer wg.Done() + done := 
waitForStream(redisClient, mr) + select { + case <-time.After(cfg.StreamTimeout): + initialized.Store(false) + return + case <-done: + return + } + }() + } + wg.Wait() + if !initialized.Load() { + return nil, fmt.Errorf("waiting for streams to be created: timed out") + } return &ValidationServer{ consumers: consumers, spawner: spawner, }, nil } +func streamExists(client redis.UniversalClient, streamName string) bool { + groups, err := client.XInfoStream(context.TODO(), streamName).Result() + if err != nil { + log.Error("Reading redis streams", "error", err) + return false + } + return groups.Groups > 0 +} + +func waitForStream(client redis.UniversalClient, streamName string) chan struct{} { + var ret chan struct{} + go func() { + if streamExists(client, streamName) { + ret <- struct{}{} + } + time.Sleep(time.Millisecond * 100) + }() + return ret +} + func (s *ValidationServer) Start(ctx_in context.Context) { s.StopWaiter.Start(ctx_in, s) for moduleRoot, c := range s.consumers { @@ -83,6 +130,8 @@ type ValidationServerConfig struct { ConsumerConfig pubsub.ConsumerConfig `koanf:"consumer-config"` // Supported wasm module roots. ModuleRoots []string `koanf:"module-roots"` + // Timeout on polling for existence of each redis stream. + StreamTimeout time.Duration `koanf:"stream-timeout"` } var DefaultValidationServerConfig = ValidationServerConfig{ From 0c84ac6fe1638dc66fcf8ac5051ea457127063ae Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 15 May 2024 15:22:41 +0200 Subject: [PATCH 25/75] Implement tracing and CPU profiling of long running block creations --- execution/gethexec/sequencer.go | 70 ++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 23340594c4..da816c212f 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -9,13 +9,17 @@ import ( "fmt" "math" "math/big" + "os" "runtime/debug" + "runtime/pprof" + "runtime/trace" "strconv" "strings" "sync" "sync/atomic" "time" + "github.com/google/uuid" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" @@ -76,6 +80,7 @@ type SequencerConfig struct { NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` ExpectedSurplusSoftThreshold string `koanf:"expected-surplus-soft-threshold" reload:"hot"` ExpectedSurplusHardThreshold string `koanf:"expected-surplus-hard-threshold" reload:"hot"` + EnableProfiling bool `koanf:"enable-profiling"` expectedSurplusSoftThreshold int expectedSurplusHardThreshold int } @@ -125,6 +130,7 @@ var DefaultSequencerConfig = SequencerConfig{ NonceFailureCacheExpiry: time.Second, ExpectedSurplusSoftThreshold: "default", ExpectedSurplusHardThreshold: "default", + EnableProfiling: true, } var TestSequencerConfig = SequencerConfig{ @@ -142,6 +148,7 @@ var TestSequencerConfig = SequencerConfig{ NonceFailureCacheExpiry: time.Second, ExpectedSurplusSoftThreshold: "default", ExpectedSurplusHardThreshold: "default", + EnableProfiling: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -159,6 +166,7 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") f.String(prefix+".expected-surplus-soft-threshold", 
DefaultSequencerConfig.ExpectedSurplusSoftThreshold, "if expected surplus is lower than this value, warnings are posted") f.String(prefix+".expected-surplus-hard-threshold", DefaultSequencerConfig.ExpectedSurplusHardThreshold, "if expected surplus is lower than this value, new incoming transactions will be denied") + f.Bool(prefix+".enable-profiling", DefaultSequencerConfig.EnableProfiling, "enable CPU profiling and tracing") } type txQueueItem struct { @@ -327,6 +335,7 @@ type Sequencer struct { expectedSurplusMutex sync.RWMutex expectedSurplus int64 expectedSurplusUpdated bool + enableProfiling bool } func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderReader, configFetcher SequencerConfigFetcher) (*Sequencer, error) { @@ -353,6 +362,7 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead l1Timestamp: 0, pauseChan: nil, onForwarderSet: make(chan struct{}, 1), + enableProfiling: config.EnableProfiling, } s.nonceFailures = &nonceFailureCache{ containers.NewLruCacheWithOnEvict(config.NonceCacheSize, s.onNonceFailureEvict), @@ -758,6 +768,59 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { return outputQueueItems } +func deleteFiles(files ...*os.File) { + for _, f := range files { + if err := os.Remove(f.Name()); err != nil { + log.Error("Error removing file", "name", f.Name()) + } + } +} + +func closeFiles(files ...*os.File) { + for _, f := range files { + if err := os.Remove(f.Name()); err != nil { + log.Error("Error closing file", "name", f.Name()) + } + } +} + +// createBlockWithProfiling runs create block with tracing and CPU profiling +// enabled. If the block creation takes longer than 5 seconds, it keeps both +// and prints out filenames in an error log line. +func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { + id := uuid.NewString() + pprofFile, err := os.CreateTemp("", id+".pprof") + if err != nil { + log.Error("Creating temporary file for profiling CPU", "error", err) + return false + } + traceFile, err := os.CreateTemp("", id+".trace") + if err != nil { + log.Error("Creating temporary file for tracing", "error", err) + return false + } + if err := pprof.StartCPUProfile(pprofFile); err != nil { + log.Error("Starting CPU profiling", "error", err) + deleteFiles(pprofFile, traceFile) + return false + } + if err := trace.Start(traceFile); err != nil { + log.Error("Starting tracing", "error", err) + } + start := time.Now() + res := s.createBlock(ctx) + elapsed := time.Since(start) + pprof.StopCPUProfile() + trace.Stop() + closeFiles(pprofFile, traceFile) + if elapsed > 5*time.Second { + log.Error("Block creation took longer than 5 seconds", "pprof", pprofFile.Name()) + return res + } + deleteFiles(pprofFile, traceFile) + return res +} + func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { var queueItems []txQueueItem var totalBatchSize int @@ -1088,7 +1151,12 @@ func (s *Sequencer) Start(ctxIn context.Context) error { s.CallIteratively(func(ctx context.Context) time.Duration { nextBlock := time.Now().Add(s.config().MaxBlockSpeed) - madeBlock := s.createBlock(ctx) + var madeBlock bool + if s.enableProfiling { + s.createBlockWithProfiling(ctx) + } else { + madeBlock = s.createBlock(ctx) + } if madeBlock { // Note: this may return a negative duration, but timers are fine with that (they treat negative durations as 0). 
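One detail worth flagging in the profiling helpers above: closeFiles calls os.Remove rather than Close, so the handles are never closed, its "Error closing file" log message is misleading, and a pprof file kept for a slow block has already been deleted by the time its name is logged. A corrected sketch of what the helper presumably intends:

```go
package profiling

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

// closeFiles closes (rather than removes) the temporary profiling files, so
// that a file kept for a slow block actually survives on disk.
func closeFiles(files ...*os.File) {
	for _, f := range files {
		if err := f.Close(); err != nil {
			log.Error("Error closing file", "name", f.Name(), "err", err)
		}
	}
}
```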
return time.Until(nextBlock) From a1403d0698b534cbd441d1d75496db95e9eb2bdd Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 15 May 2024 15:27:16 +0200 Subject: [PATCH 26/75] Don't abort block creation if profiling fails --- execution/gethexec/sequencer.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index da816c212f..2724095154 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -792,19 +792,17 @@ func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { pprofFile, err := os.CreateTemp("", id+".pprof") if err != nil { log.Error("Creating temporary file for profiling CPU", "error", err) - return false } traceFile, err := os.CreateTemp("", id+".trace") if err != nil { log.Error("Creating temporary file for tracing", "error", err) - return false } if err := pprof.StartCPUProfile(pprofFile); err != nil { log.Error("Starting CPU profiling", "error", err) - deleteFiles(pprofFile, traceFile) - return false + deleteFiles(pprofFile) } if err := trace.Start(traceFile); err != nil { + deleteFiles(traceFile) log.Error("Starting tracing", "error", err) } start := time.Now() From 3b33bc4f74356e95bdf98d51ebd3e3df09fe92df Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 15 May 2024 15:28:02 +0200 Subject: [PATCH 27/75] Set madeblock correctly --- execution/gethexec/sequencer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 2724095154..4247556905 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -1151,7 +1151,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { nextBlock := time.Now().Add(s.config().MaxBlockSpeed) var madeBlock bool if s.enableProfiling { - s.createBlockWithProfiling(ctx) + madeBlock = s.createBlockWithProfiling(ctx) } else { madeBlock = s.createBlock(ctx) } From e35c1c0d43cdc45a467767acdac326b1c8adf2ed Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 15 May 2024 21:02:31 +0200 Subject: [PATCH 28/75] clean up TODOs --- system_tests/common_test.go | 2 -- system_tests/das_test.go | 1 - system_tests/pruning_test.go | 1 - system_tests/staterecovery_test.go | 1 - 4 files changed, 5 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 7a5296516e..f8ba4c8b77 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -773,7 +773,6 @@ func createL2BlockChainWithStackConfig( stack, err = node.New(stackConfig) Require(t, err) - // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) @@ -978,7 +977,6 @@ func Create2ndNodeWithConfig( l2stack, err := node.New(stackConfig) Require(t, err) - // TODO get pebble.ExtraOptions from conf.PersistentConfig when opening the DBs l2chainDb, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, 
conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 2febadb3d2..11d887315a 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -178,7 +178,6 @@ func TestDASRekey(t *testing.T) { l2stackA, err := node.New(stackConfig) Require(t, err) - // TODO get pebble.ExtraOptions from conf.PersistentConfig l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index d2453887ee..041781ac48 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -65,7 +65,6 @@ func TestPruning(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - // TODO get pebble.ExtraOptions from conf.PersistentConfig chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 459a6e3ee8..a20cffc787 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -50,7 +50,6 @@ func TestRectreateMissingStates(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - // TODO get pebble.ExtraOptions from conf.PersistentConfig chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() From b426fdd5ec0cd6ade88de0eadd17caf7202083cb Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 15 May 2024 21:26:21 +0200 Subject: [PATCH 29/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 6d23a7b7e6..5b7b36a339 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 6d23a7b7e6a99701adf1f69701ad367dec61c08c +Subproject commit 5b7b36a339ac28d708bca072dc5ec8189ceadac2 From 5e42b9b24c9d99bca2fe713505efea76e23940ff Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 17:15:40 +0200 Subject: [PATCH 30/75] Launch stream creation threads on Validation server start --- pubsub/common.go | 12 +++ pubsub/consumer.go | 8 ++ validator/valnode/redis/consumer.go | 125 +++++++++++++--------------- 3 files changed, 76 insertions(+), 69 deletions(-) diff --git a/pubsub/common.go b/pubsub/common.go index 2aefa02c84..bc0ab1035b 100644 --- a/pubsub/common.go +++ b/pubsub/common.go @@ -3,6 +3,7 @@ package pubsub import ( "context" + "github.com/ethereum/go-ethereum/log" "github.com/go-redis/redis/v8" ) @@ -15,3 +16,14 @@ func CreateStream(ctx context.Context, streamName string, client redis.Universal } return err } + +// StreamExists returns whether there are any consumer group for specified +// redis stream. 
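+// It returns false both when the stream has no consumer groups yet and when
+// the XINFO STREAM call fails (e.g. the stream does not exist), so callers
+// can poll it until the stream is created.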
+func StreamExists(ctx context.Context, client redis.UniversalClient, streamName string) bool { + groups, err := client.XInfoStream(ctx, streamName).Result() + if err != nil { + log.Error("Reading redis streams", "error", err) + return false + } + return groups.Groups > 0 +} diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 7a5078ee00..d7809b5f1b 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -86,6 +86,14 @@ func heartBeatKey(id string) string { return fmt.Sprintf("consumer:%s:heartbeat", id) } +func (c *Consumer[Request, Response]) RedisClient() redis.UniversalClient { + return c.client +} + +func (c *Consumer[Request, Response]) StreamName() string { + return c.redisStream +} + func (c *Consumer[Request, Response]) heartBeatKey() string { return heartBeatKey(c.id) } diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 95d45589f3..bc1cd289e7 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -3,13 +3,10 @@ package redis import ( "context" "fmt" - "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/pubsub" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -25,7 +22,8 @@ type ValidationServer struct { spawner validator.ValidationSpawner // consumers stores moduleRoot to consumer mapping. - consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState] + consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState] + streamTimeout time.Duration } func NewValidationServer(cfg *ValidationServerConfig, spawner validator.ValidationSpawner) (*ValidationServer, error) { @@ -45,84 +43,73 @@ func NewValidationServer(cfg *ValidationServerConfig, spawner validator.Validati } consumers[mr] = c } - var ( - wg sync.WaitGroup - initialized atomic.Bool - ) - initialized.Store(true) - for i := 0; i < len(cfg.ModuleRoots); i++ { - mr := cfg.ModuleRoots[i] - wg.Add(1) - go func() { - defer wg.Done() - done := waitForStream(redisClient, mr) - select { - case <-time.After(cfg.StreamTimeout): - initialized.Store(false) - return - case <-done: - return - } - }() - } - wg.Wait() - if !initialized.Load() { - return nil, fmt.Errorf("waiting for streams to be created: timed out") - } return &ValidationServer{ - consumers: consumers, - spawner: spawner, + consumers: consumers, + spawner: spawner, + streamTimeout: cfg.StreamTimeout, }, nil } -func streamExists(client redis.UniversalClient, streamName string) bool { - groups, err := client.XInfoStream(context.TODO(), streamName).Result() - if err != nil { - log.Error("Reading redis streams", "error", err) - return false - } - return groups.Groups > 0 -} - -func waitForStream(client redis.UniversalClient, streamName string) chan struct{} { - var ret chan struct{} - go func() { - if streamExists(client, streamName) { - ret <- struct{}{} - } - time.Sleep(time.Millisecond * 100) - }() - return ret -} - func (s *ValidationServer) Start(ctx_in context.Context) { s.StopWaiter.Start(ctx_in, s) + // Channel that all consumers use to indicate their readiness. 
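+	// Buffered to the number of consumers so that pollers that become ready
+	// after Start has already returned never block on the send.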
+	readyStreams := make(chan struct{}, len(s.consumers))
 	for moduleRoot, c := range s.consumers {
 		c := c
+		moduleRoot := moduleRoot
 		c.Start(ctx_in)
-		s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration {
-			req, err := c.Consume(ctx)
-			if err != nil {
-				log.Error("Consuming request", "error", err)
-				return 0
+		// Channel for a single consumer; once readiness is signaled here, the
+		// consumer starts consuming iteratively.
+		ready := make(chan struct{}, 1)
+		s.StopWaiter.LaunchThread(func(ctx context.Context) {
+			for {
+				if pubsub.StreamExists(ctx, c.RedisClient(), c.StreamName()) {
+					ready <- struct{}{}
+					readyStreams <- struct{}{}
+					return
+				}
+				time.Sleep(time.Millisecond * 100)
 			}
-			if req == nil {
-				// There's nothing in the queue.
+		})
+		s.StopWaiter.LaunchThread(func(ctx context.Context) {
+			<-ready // Wait until the stream exists and start consuming iteratively.
+			s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration {
+				req, err := c.Consume(ctx)
+				if err != nil {
+					log.Error("Consuming request", "error", err)
+					return 0
+				}
+				if req == nil {
+					// There's nothing in the queue.
+					return time.Second
+				}
+				valRun := s.spawner.Launch(req.Value, moduleRoot)
+				res, err := valRun.Await(ctx)
+				if err != nil {
+					log.Error("Error validating", "request value", req.Value, "error", err)
+					return 0
+				}
+				if err := c.SetResult(ctx, req.ID, res); err != nil {
+					log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err)
+					return 0
+				}
 				return time.Second
-			}
-			valRun := s.spawner.Launch(req.Value, moduleRoot)
-			res, err := valRun.Await(ctx)
-			if err != nil {
-				log.Error("Error validating", "request value", req.Value, "error", err)
-				return 0
-			}
-			if err := c.SetResult(ctx, req.ID, res); err != nil {
-				log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err)
-				return 0
-			}
-			return time.Second
+			})
 		})
 	}
+
+	for {
+		select {
+		case <-readyStreams:
+			log.Trace("At least one stream is ready")
+			return // Don't block Start if at least one of the streams is ready.
+		case <-time.After(s.streamTimeout):
+			log.Error("Waiting for redis streams timed out")
+		case <-ctx_in.Done():
+			log.Error("Context expired, failed to start")
+			return
+		}
+	}
 }
 
 type ValidationServerConfig struct {
From 459106163d80806e556f2c23c896cc74acd0c1d3 Mon Sep 17 00:00:00 2001
From: Nodar Ambroladze
Date: Thu, 16 May 2024 21:34:04 +0200
Subject: [PATCH 31/75] Gracefully shutdown consumer on interrupts

---
 pubsub/consumer.go    | 48 ++++++++++++++++++++++++++++++++++++++++---
 pubsub/pubsub_test.go |  8 +++++++-
 2 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/pubsub/consumer.go b/pubsub/consumer.go
index 7a5078ee00..af1345f059 100644
--- a/pubsub/consumer.go
+++ b/pubsub/consumer.go
@@ -5,6 +5,10 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"os"
+	"os/signal"
+	"sync/atomic"
+	"syscall"
 	"time"
 
 	"github.com/ethereum/go-ethereum/log"
@@ -46,6 +50,10 @@ type Consumer[Request any, Response any] struct {
 	redisStream string
 	redisGroup  string
 	cfg         *ConsumerConfig
+	// terminating indicates whether an interrupt was received, in which case
+	// the consumer should clean up for a graceful shutdown.
+	terminating atomic.Bool
+	signals     chan os.Signal
 }
 
 type Message[Request any] struct {
@@ -57,29 +65,51 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream
 	if streamName == "" {
 		return nil, fmt.Errorf("redis stream name cannot be empty")
 	}
-	consumer := &Consumer[Request, Response]{
+	return &Consumer[Request, Response]{
 		id:          uuid.NewString(),
 		client:      client,
 		redisStream: streamName,
 		redisGroup:  streamName, // There is 1-1 mapping of redis stream and consumer group.
 		cfg:         cfg,
-	}
-	return consumer, nil
+		terminating: atomic.Bool{},
+		signals:     make(chan os.Signal, 1),
+	}, nil
 }
 
 // Start starts the consumer to iteratively perform heartbeat in configured intervals.
 func (c *Consumer[Request, Response]) Start(ctx context.Context) {
 	c.StopWaiter.Start(ctx, c)
+	c.listenForInterrupt()
 	c.StopWaiter.CallIteratively(
 		func(ctx context.Context) time.Duration {
+			if c.terminating.Load() {
+				log.Trace("Consumer is terminating, stopping heartbeat update")
+				return time.Hour
+			}
 			c.heartBeat(ctx)
 			return c.cfg.KeepAliveTimeout / 10
 		},
 	)
 }
 
+// listenForInterrupt launches a thread that notifies the channel when interrupt
+// is received.
+func (c *Consumer[Request, Response]) listenForInterrupt() {
+	signal.Notify(c.signals, syscall.SIGINT, syscall.SIGTERM)
+	c.StopWaiter.LaunchThread(func(ctx context.Context) {
+		select {
+		case sig := <-c.signals:
+			log.Info("Received interrupt", "signal", sig.String())
+		case <-ctx.Done():
+			log.Info("Context is done", "error", ctx.Err())
+		}
+		c.deleteHeartBeat(ctx)
+	})
+}
+
 func (c *Consumer[Request, Response]) StopAndWait() {
 	c.StopWaiter.StopAndWait()
+	c.deleteHeartBeat(c.GetContext())
 }
 
 func heartBeatKey(id string) string {
@@ -90,6 +120,18 @@ func (c *Consumer[Request, Response]) heartBeatKey() string {
 	return heartBeatKey(c.id)
 }
 
+// deleteHeartBeat deletes the heartbeat to indicate it is being shut down.
+func (c *Consumer[Request, Response]) deleteHeartBeat(ctx context.Context) {
+	c.terminating.Store(true)
+	if err := c.client.Del(ctx, c.heartBeatKey()).Err(); err != nil {
+		l := log.Info
+		if ctx.Err() != nil {
+			l = log.Error
+		}
+		l("Deleting heartbeat", "consumer", c.id, "error", err)
+	}
+}
+
 // heartBeat updates the heartBeat key indicating aliveness.
 func (c *Consumer[Request, Response]) heartBeat(ctx context.Context) {
 	if err := c.client.Set(ctx, c.heartBeatKey(), time.Now().UnixMilli(), 2*c.cfg.KeepAliveTimeout).Err(); err != nil {
diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go
index 31f6d9e20a..11407e686f 100644
--- a/pubsub/pubsub_test.go
+++ b/pubsub/pubsub_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"os"
 	"sort"
 	"testing"
 
@@ -232,7 +233,12 @@ func TestRedisProduce(t *testing.T) {
 		if _, err := consumers[i].Consume(ctx); err != nil {
 			t.Errorf("Error consuming message: %v", err)
 		}
-		consumers[i].StopAndWait()
+		// Terminate half of the consumers, send interrupt to others.
+ if i%2 == 0 { + consumers[i].StopAndWait() + } else { + consumers[i].signals <- os.Interrupt + } } } From f18419b0fb92bf523668d1272c1bd9764c5015ed Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 22:48:37 +0200 Subject: [PATCH 32/75] Delete heartbeat before stopAndWait --- pubsub/consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index af1345f059..d74d4ef1b2 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -108,8 +108,8 @@ func (c *Consumer[Request, Response]) listenForInterrupt() { } func (c *Consumer[Request, Response]) StopAndWait() { - c.StopWaiter.StopAndWait() c.deleteHeartBeat(c.GetContext()) + c.StopWaiter.StopAndWait() } func heartBeatKey(id string) string { From 3373c4a5b84010a7cd5f3b47a7f43de5cf46f476 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 23:18:47 +0200 Subject: [PATCH 33/75] Fix test --- pubsub/consumer.go | 1 - pubsub/pubsub_test.go | 7 +++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index d74d4ef1b2..97ab004764 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -108,7 +108,6 @@ func (c *Consumer[Request, Response]) listenForInterrupt() { } func (c *Consumer[Request, Response]) StopAndWait() { - c.deleteHeartBeat(c.GetContext()) c.StopWaiter.StopAndWait() } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 11407e686f..9111c5cf66 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -7,6 +7,7 @@ import ( "os" "sort" "testing" + "time" "github.com/ethereum/go-ethereum/log" "github.com/go-redis/redis/v8" @@ -202,6 +203,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques } func TestRedisProduce(t *testing.T) { + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) t.Parallel() for _, tc := range []struct { name string @@ -213,7 +215,7 @@ func TestRedisProduce(t *testing.T) { }, { name: "some consumers killed, others should take over their work", - killConsumers: false, + killConsumers: true, }, } { t.Run(tc.name, func(t *testing.T) { @@ -233,7 +235,7 @@ func TestRedisProduce(t *testing.T) { if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } - // Terminate half of the consumers, send interrupt to others. + //Terminate half of the consumers, send interrupt to others. if i%2 == 0 { consumers[i].StopAndWait() } else { @@ -242,6 +244,7 @@ func TestRedisProduce(t *testing.T) { } } + time.Sleep(time.Second) gotMessages, wantResponses := consume(ctx, t, consumers) gotResponses, err := awaitResponses(ctx, promises) if err != nil { From 3fd412e78e725b0bc850e800741b16effab491b3 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 16 May 2024 23:46:56 +0200 Subject: [PATCH 34/75] Fix lint --- pubsub/pubsub_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 9111c5cf66..85314dc29a 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -235,7 +235,7 @@ func TestRedisProduce(t *testing.T) { if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } - //Terminate half of the consumers, send interrupt to others. + // Terminate half of the consumers, send interrupt to others. 
if i%2 == 0 { consumers[i].StopAndWait() } else { From 6884188d20b089f9320b6fc26bad6d049583364f Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 17 May 2024 13:21:12 -0500 Subject: [PATCH 35/75] address PR comments --- blocks_reexecutor/blocks_reexecutor.go | 46 ++++++++++++-------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index a03b29fefd..f58e0ce00f 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -25,8 +25,6 @@ type Config struct { EndBlock uint64 `koanf:"end-block"` Room int `koanf:"room"` BlocksPerThread uint64 `koanf:"blocks-per-thread"` - - blocksPerThread uint64 } func (c *Config) Validate() error { @@ -40,11 +38,6 @@ func (c *Config) Validate() error { if c.Room < 0 { return errors.New("room for blocks re-execution should be greater than 0") } - if c.BlocksPerThread != 0 { - c.blocksPerThread = c.BlocksPerThread - } else { - c.blocksPerThread = 10000 - } return nil } @@ -59,7 +52,6 @@ var TestConfig = Config{ Mode: "full", Room: runtime.NumCPU(), BlocksPerThread: 10, - blocksPerThread: 10, } func ConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -73,13 +65,14 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { type BlocksReExecutor struct { stopwaiter.StopWaiter - config *Config - blockchain *core.BlockChain - stateFor arbitrum.StateForHeaderFunction - done chan struct{} - fatalErrChan chan error - startBlock uint64 - currentBlock uint64 + config *Config + blockchain *core.BlockChain + stateFor arbitrum.StateForHeaderFunction + done chan struct{} + fatalErrChan chan error + startBlock uint64 + currentBlock uint64 + blocksPerThread uint64 } func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *BlocksReExecutor { @@ -99,9 +92,13 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block log.Warn("invalid state reexecutor's end block number, resetting to latest", "end", end, "latest", chainEnd) end = chainEnd } + blocksPerThread := uint64(10000) + if c.BlocksPerThread != 0 { + blocksPerThread = c.BlocksPerThread + } if c.Mode == "random" && end != start { // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly - rng := c.blocksPerThread + rng := blocksPerThread if rng > end-start { rng = end - start } @@ -117,16 +114,17 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block if c.BlocksPerThread == 0 { work := (end - start) / uint64(c.Room) if work > 0 { - c.blocksPerThread = work + blocksPerThread = work } } return &BlocksReExecutor{ - config: c, - blockchain: blockchain, - currentBlock: end, - startBlock: start, - done: make(chan struct{}, c.Room), - fatalErrChan: fatalErrChan, + config: c, + blockchain: blockchain, + currentBlock: end, + startBlock: start, + blocksPerThread: blocksPerThread, + done: make(chan struct{}, c.Room), + fatalErrChan: fatalErrChan, stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { state, err := blockchain.StateAt(header.Root) return state, arbitrum.NoopStateRelease, err @@ -136,7 +134,7 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block // LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, 
currentBlock uint64) uint64 { - start := arbmath.SaturatingUSub(currentBlock, s.config.blocksPerThread) + start := arbmath.SaturatingUSub(currentBlock, s.blocksPerThread) if start < s.startBlock { start = s.startBlock } From b5b12e89049de4c334035d979f7734d67e3d36d3 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 15:11:19 -0600 Subject: [PATCH 36/75] Add support for configurable lru cache resize on nitro init --- arbitrator/stylus/src/cache.rs | 4 ++++ arbitrator/stylus/src/lib.rs | 6 ++++++ arbnode/inbox_test.go | 1 + arbos/programs/native.go | 4 ++++ execution/gethexec/blockchain.go | 18 ++++++++++++++++++ execution/gethexec/executionengine.go | 10 ++++++++++ execution/gethexec/node.go | 3 +++ system_tests/recreatestate_rpc_test.go | 2 +- system_tests/staterecovery_test.go | 2 +- 9 files changed, 48 insertions(+), 2 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 2b83c6152f..6a9e677be5 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -66,6 +66,10 @@ impl InitCache { } } + pub fn set_lru_size(size: u32) { + cache!().lru.resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) + } + /// Retrieves a cached value, updating items as necessary. pub fn get(module_hash: Bytes32, version: u16, debug: bool) -> Option<(Module, Store)> { let mut cache = cache!(); diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 7abfb98bf5..9ccc9829ca 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -212,6 +212,12 @@ pub unsafe extern "C" fn stylus_call( status } +/// resize lru +#[no_mangle] +pub extern "C" fn stylus_cache_lru_resize(size: u32) { + InitCache::set_lru_size(size); +} + /// Caches an activated user program. 
/// /// # Safety diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 5c879743a4..594e0cedb5 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -65,6 +65,7 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* if err != nil { Fail(t, err) } + execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache) execSeq := &execClientWrapper{execEngine, t} inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher) if err != nil { diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 7a6c16d866..17068371b1 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -253,6 +253,10 @@ func init() { } } +func ResizeWasmLruCache(size uint32) { + C.stylus_cache_lru_resize(u32(size)) +} + func (value bytes32) toHash() common.Hash { hash := common.Hash{} for index, b := range value.bytes { diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 2a20c3da26..1d5060ca8a 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -37,6 +37,7 @@ type CachingConfig struct { SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` + StylusLRUCache uint32 `koanf:"stylus-lru-cache"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -51,6 +52,7 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues") f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues") + f.Uint32(prefix+".stylus-lru-cache", DefaultCachingConfig.StylusLRUCache, "initialized stylus programs to keep in LRU cache") } var DefaultCachingConfig = CachingConfig{ @@ -65,6 +67,22 @@ var DefaultCachingConfig = CachingConfig{ SnapshotRestoreGasLimit: 300_000_000_000, MaxNumberOfBlocksToSkipStateSaving: 0, MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 256, +} + +var TestCachingConfig = CachingConfig{ + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 0, + MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 0, } // TODO remove stack from parameters as it is no longer needed here diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 38569f44ab..b3ebe80f37 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -1,6 +1,9 @@ // Copyright 2022-2024, Offchain Labs, Inc. 
// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +//go:build !wasm +// +build !wasm + package gethexec /* @@ -28,6 +31,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" @@ -72,6 +76,12 @@ func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) { }, nil } +func (n *ExecutionEngine) Initialize(rustCacheSize uint32) { + if rustCacheSize != 0 { + programs.ResizeWasmLruCache(rustCacheSize) + } +} + func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) { if s.Started() { panic("trying to set recorder after start") diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index ae76b88530..b7fe1c6e14 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -107,6 +107,7 @@ var ConfigDefault = Config{ func ConfigDefaultNonSequencerTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.ParentChainReader = headerreader.TestConfig config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig @@ -119,6 +120,7 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.Sequencer = TestSequencerConfig config.ParentChainReader = headerreader.TestConfig config.ForwardingTarget = "null" @@ -280,6 +282,7 @@ func (n *ExecutionNode) GetL1GasPriceEstimate() (uint64, error) { } func (n *ExecutionNode) Initialize(ctx context.Context) error { + n.ExecEngine.Initialize(n.ConfigFetcher().Caching.StylusLRUCache) n.ArbInterface.Initialize(n) err := n.Backend.Start() if err != nil { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 777ed17961..bf321808de 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -449,7 +449,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { - cacheConfig := gethexec.DefaultCachingConfig + cacheConfig := gethexec.TestCachingConfig cacheConfig.Archive = true cacheConfig.SnapshotCache = 0 // disable snapshots cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 632e748da8..02c2623cfa 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -52,7 +52,7 @@ func TestRectreateMissingStates(t *testing.T) { chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) Require(t, err) defer chainDb.Close() - cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) + cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.TestCachingConfig) bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) Require(t, err) err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig, 1) From ced4a07273a2de581bc57580468b2fc58e8922b5 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:04:11 -0600 Subject: [PATCH 37/75] add tags when creating wasm-wrapped database --- 
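Note: WrapDatabaseWithWasm now takes a uint32 cache tag as its final argument.
In this series, tag 1 is the long-term ArbOS tag (the Rust-side InitCache in
the next patch pins entries under it), while tag 0 is reserved and never
modifies long-term cache state, which is why the system tests below pass 0.
A minimal sketch of the intended call shapes, assuming only the signature
shown in this diff (variable names are illustrative):

    // A node's real database: tag 1 permits long-term (ArbOS) caching
    // of activated Stylus programs.
    chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)

    // Test or throwaway instances: tag 0 confines programs to the
    // short-lived LRU cache.
    chainDb = rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 0)
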
 cmd/nitro/init.go           | 4 ++--
 system_tests/common_test.go | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go
index c52c87732c..0b36fcfdaf 100644
--- a/cmd/nitro/init.go
+++ b/cmd/nitro/init.go
@@ -186,7 +186,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
 	if err != nil {
 		return nil, nil, err
 	}
-	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb)
+	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)
 	err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired())
 	if err != nil {
 		return chainDb, nil, fmt.Errorf("error pruning: %w", err)
 	}
@@ -243,7 +243,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo
 	if err != nil {
 		return nil, nil, err
 	}
-	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb)
+	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)
 
 	if config.Init.ImportFile != "" {
 		initDataReader, err = statetransfer.NewJsonInitDataReader(config.Init.ImportFile)
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index f6bfde2108..edc16ffec4 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -777,7 +777,7 @@ func createL2BlockChainWithStackConfig(
 	Require(t, err)
 	wasmData, err := stack.OpenDatabase("wasm", 0, 0, "wasm/", false)
 	Require(t, err)
-	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData)
+	chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0)
 
 	arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false)
 	Require(t, err)
@@ -984,7 +984,7 @@ func Create2ndNodeWithConfig(
 	Require(t, err)
 	wasmData, err := l2stack.OpenDatabase("wasm", 0, 0, "wasm/", false)
 	Require(t, err)
-	l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData)
+	l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData, 0)
 
 	l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false)
 	Require(t, err)
From 0f30f9f4e6cbd05cc76c6710cefbb24929b75eb9 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Fri, 17 May 2024 16:05:41 -0600
Subject: [PATCH 38/75] arbitrator: add and use long_term tag

---
 arbitrator/stylus/src/cache.rs  | 63 +++++++++++++++++++--------------
 arbitrator/stylus/src/lib.rs    | 16 +++++----
 arbitrator/stylus/src/native.rs |  9 ++---
 3 files changed, 52 insertions(+), 36 deletions(-)

diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs
index 6a9e677be5..3a15bc5d6a 100644
--- a/arbitrator/stylus/src/cache.rs
+++ b/arbitrator/stylus/src/cache.rs
@@ -21,7 +21,7 @@ macro_rules! cache {
 }
 
 pub struct InitCache {
-    arbos: HashMap<CacheKey, CacheItem>,
+    long_term: HashMap<CacheKey, CacheItem>,
     lru: LruCache<CacheKey, CacheItem>,
 }
 
@@ -59,9 +59,14 @@ impl CacheItem {
 }
 
 impl InitCache {
+    // current implementation only has one tag that stores to the long_term
+    // future implementations might have more, but 0 is a reserved tag
+    // that will never modify long_term state
+    const ARBOS_TAG: u32 = 1;
+
     fn new(size: usize) -> Self {
         Self {
-            arbos: HashMap::new(),
+            long_term: HashMap::new(),
             lru: LruCache::new(NonZeroUsize::new(size).unwrap()),
         }
     }
@@ -76,7 +81,7 @@ impl InitCache {
         let key = CacheKey::new(module_hash, version, debug);
 
         // See if the item is in the long term cache
-        if let Some(item) = cache.arbos.get(&key) {
+        if let Some(item) = cache.long_term.get(&key) {
             return Some(item.data());
         }
 
@@ -88,18 +93,27 @@ impl InitCache {
     }
 
     /// Inserts an item into the long term cache, cloning from the LRU cache if able.
+ /// If long_term_tag is 0 will only insert to LRU pub fn insert( module_hash: Bytes32, module: &[u8], version: u16, + long_term_tag: u32, debug: bool, ) -> Result<(Module, Store)> { let key = CacheKey::new(module_hash, version, debug); // if in LRU, add to ArbOS let mut cache = cache!(); + if let Some(item) = cache.long_term.get(&key) { + return Ok(item.data()) + } if let Some(item) = cache.lru.peek(&key).cloned() { - cache.arbos.insert(key, item.clone()); + if long_term_tag == Self::ARBOS_TAG { + cache.long_term.insert(key, item.clone()); + } else { + cache.lru.promote(&key) + } return Ok(item.data()); } drop(cache); @@ -109,37 +123,34 @@ impl InitCache { let item = CacheItem::new(module, engine); let data = item.data(); - cache!().arbos.insert(key, item); + let mut cache = cache!(); + if long_term_tag != Self::ARBOS_TAG { + cache.lru.put(key, item); + } else { + cache.long_term.insert(key, item); + } Ok(data) } - /// Inserts an item into the short-lived LRU cache. - pub fn insert_lru( - module_hash: Bytes32, - module: &[u8], - version: u16, - debug: bool, - ) -> Result<(Module, Store)> { - let engine = CompileConfig::version(version, debug).engine(); - let module = unsafe { Module::deserialize_unchecked(&engine, module)? }; - - let key = CacheKey::new(module_hash, version, debug); - let item = CacheItem::new(module, engine); - cache!().lru.put(key, item.clone()); - Ok(item.data()) - } - /// Evicts an item in the long-term cache. - pub fn evict(module_hash: Bytes32, version: u16, debug: bool) { + pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) { + if long_term_tag != Self::ARBOS_TAG { + return + } let key = CacheKey::new(module_hash, version, debug); - cache!().arbos.remove(&key); + let mut cache = cache!(); + if let Some(item) = cache.long_term.remove(&key) { + cache.lru.put(key, item); + } } - /// Modifies the cache for reorg, dropping the long-term cache. - pub fn reorg(_block: u64) { + pub fn clear_long_term(long_term_tag: u32) { + if long_term_tag != Self::ARBOS_TAG { + return + } let mut cache = cache!(); let cache = &mut *cache; - for (key, item) in cache.arbos.drain() { + for (key, item) in cache.long_term.drain() { cache.lru.put(key, item); // not all will fit, just a heuristic } } diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 9ccc9829ca..6133b6ac34 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -183,6 +183,7 @@ pub unsafe extern "C" fn stylus_call( debug_chain: bool, output: *mut RustBytes, gas: *mut u64, + long_term_tag: u32, ) -> UserOutcomeKind { let module = module.slice(); let calldata = calldata.slice().to_vec(); @@ -193,7 +194,7 @@ pub unsafe extern "C" fn stylus_call( // Safety: module came from compile_user_wasm and we've paid for memory expansion let instance = unsafe { - NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, debug_chain) + NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, long_term_tag, debug_chain) }; let mut instance = match instance { Ok(instance) => instance, @@ -223,28 +224,31 @@ pub extern "C" fn stylus_cache_lru_resize(size: u32) { /// # Safety /// /// `module` must represent a valid module produced from `stylus_activate`. +/// arbos_tag: a tag for arbos cache. 
0 won't affect real caching
+/// currently only if tag==1 caching will be affected
 #[no_mangle]
 pub unsafe extern "C" fn stylus_cache_module(
     module: GoSliceData,
     module_hash: Bytes32,
     version: u16,
+    arbos_tag: u32,
     debug: bool,
 ) {
-    if let Err(error) = InitCache::insert(module_hash, module.slice(), version, debug) {
+    if let Err(error) = InitCache::insert(module_hash, module.slice(), version, arbos_tag, debug) {
         panic!("tried to cache invalid asm!: {error}");
     }
 }
 
 /// Evicts an activated user program from the init cache.
 #[no_mangle]
-pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, debug: bool) {
-    InitCache::evict(module_hash, version, debug);
+pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, arbos_tag: u32, debug: bool) {
+    InitCache::evict(module_hash, version, arbos_tag, debug);
 }
 
 /// Reorgs the init cache. This will likely never happen.
 #[no_mangle]
-pub extern "C" fn stylus_reorg_vm(block: u64) {
-    InitCache::reorg(block);
+pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) {
+    InitCache::clear_long_term(arbos_tag);
 }
 
 /// Frees the vector. Does nothing when the vector is null.
diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs
index 6d5e4cd2e9..38155818c0 100644
--- a/arbitrator/stylus/src/native.rs
+++ b/arbitrator/stylus/src/native.rs
@@ -113,6 +113,7 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
         version: u16,
         evm: E,
         evm_data: EvmData,
+        mut long_term_tag: u32,
         debug: bool,
     ) -> Result<Self> {
         let compile = CompileConfig::version(version, debug);
@@ -122,10 +123,10 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
         if let Some((module, store)) = InitCache::get(module_hash, version, debug) {
             return Self::from_module(module, store, env);
         }
-        let (module, store) = match env.evm_data.cached {
-            true => InitCache::insert(module_hash, module, version, debug)?,
-            false => InitCache::insert_lru(module_hash, module, version, debug)?,
-        };
+        if !env.evm_data.cached {
+            long_term_tag = 0;
+        }
+        let (module, store) = InitCache::insert(module_hash, module, version, long_term_tag, debug)?;
         Self::from_module(module, store, env)
     }
 
From 72f8b9da72a8fb6821e06b970ff85573d565f3f2 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Fri, 17 May 2024 16:08:59 -0600
Subject: [PATCH 39/75] nitro: use tag for stylus calls

---
 arbos/programs/native.go              | 20 ++++++++++++--------
 arbos/programs/programs.go            |  7 ++++++-
 arbos/programs/wasm.go                |  1 +
 arbos/tx_processor.go                 |  1 +
 execution/gethexec/executionengine.go |  3 ++-
 5 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/arbos/programs/native.go b/arbos/programs/native.go
index 17068371b1..f24dcac64d 100644
--- a/arbos/programs/native.go
+++ b/arbos/programs/native.go
@@ -172,6 +172,7 @@ func callProgram(
Debug: debug}) } } @@ -237,19 +240,20 @@ func cacheProgram(db vm.StateDB, module common.Hash, program Program, params *St // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode, forever bool) { if runMode == core.MessageCommitMode { - state.EvictWasmRust(module, version, debug) + tag := db.Database().WasmCacheTag() + state.EvictWasmRust(module, version, tag, debug) if !forever { - db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Debug: debug}) + db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Tag: tag, Debug: debug}) } } } func init() { - state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, debug bool) { - C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } - state.EvictWasmRust = func(moduleHash common.Hash, version uint16, debug bool) { - C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.EvictWasmRust = func(moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 9d51172986..f27d5834bf 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -166,6 +166,7 @@ func (p Programs) CallProgram( tracingInfo *util.TracingInfo, calldata []byte, reentrant bool, + runmode core.MessageRunMode, ) ([]byte, error) { evm := interpreter.Evm() contract := scope.Contract @@ -237,7 +238,11 @@ func (p Programs) CallProgram( if contract.CodeAddr != nil { address = *contract.CodeAddr } - return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model) + var arbos_tag uint32 + if runmode == core.MessageCommitMode { + arbos_tag = statedb.Database().WasmCacheTag() + } + return callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) } func getWasm(statedb vm.StateDB, program common.Address) ([]byte, error) { diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 95f30e83b6..4bc978a2b6 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -143,6 +143,7 @@ func callProgram( evmData *EvmData, params *ProgParams, memoryModel *MemoryModel, + _arbos_tag uint32, ) ([]byte, error) { reqHandler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) gasLeft, retData, err := CallProgramLoop(moduleHash, calldata, scope.Contract.Gas, evmData, params, reqHandler) diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index b5fb64f695..65762fd2d1 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -127,6 +127,7 @@ func (p *TxProcessor) ExecuteWASM(scope *vm.ScopeContext, input []byte, interpre tracingInfo, input, reentrant, + p.RunMode(), ) } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index b3ebe80f37..00218c9291 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -147,8 +147,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return nil, nil } + 
tag := s.bc.StateCache().WasmCacheTag() // reorg Rust-side VM state - C.stylus_reorg_vm(C.uint64_t(blockNum)) + C.stylus_reorg_vm(C.uint64_t(blockNum), C.uint32_t(tag)) err := s.bc.ReorgToOldBlock(targetBlock) if err != nil { From cd03bf07ed3d7065d8b5a243ac4562f62370774f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 16:09:20 -0600 Subject: [PATCH 40/75] geth: udate pin to support arbos tags --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 8048ac4bed..940fbe020e 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8048ac4bed2eda18284e3c022ea5ee4cce771134 +Subproject commit 940fbe020e03707365da09de939058944d9e1f5d From 1ed090dcda3ac03c0c46321cb4a309b59dcb87c8 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Fri, 17 May 2024 17:23:44 -0600 Subject: [PATCH 41/75] cargo fmt --- arbitrator/stylus/src/cache.rs | 12 +++++++----- arbitrator/stylus/src/lib.rs | 16 ++++++++++++++-- arbitrator/stylus/src/native.rs | 3 ++- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 3a15bc5d6a..06739f2219 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -72,7 +72,9 @@ impl InitCache { } pub fn set_lru_size(size: u32) { - cache!().lru.resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) + cache!() + .lru + .resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) } /// Retrieves a cached value, updating items as necessary. @@ -106,7 +108,7 @@ impl InitCache { // if in LRU, add to ArbOS let mut cache = cache!(); if let Some(item) = cache.long_term.get(&key) { - return Ok(item.data()) + return Ok(item.data()); } if let Some(item) = cache.lru.peek(&key).cloned() { if long_term_tag == Self::ARBOS_TAG { @@ -135,7 +137,7 @@ impl InitCache { /// Evicts an item in the long-term cache. pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) { if long_term_tag != Self::ARBOS_TAG { - return + return; } let key = CacheKey::new(module_hash, version, debug); let mut cache = cache!(); @@ -146,8 +148,8 @@ impl InitCache { pub fn clear_long_term(long_term_tag: u32) { if long_term_tag != Self::ARBOS_TAG { - return - } + return; + } let mut cache = cache!(); let cache = &mut *cache; for (key, item) in cache.long_term.drain() { diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 6133b6ac34..3c53359f8b 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -194,7 +194,14 @@ pub unsafe extern "C" fn stylus_call( // Safety: module came from compile_user_wasm and we've paid for memory expansion let instance = unsafe { - NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, long_term_tag, debug_chain) + NativeInstance::deserialize_cached( + module, + config.version, + evm_api, + evm_data, + long_term_tag, + debug_chain, + ) }; let mut instance = match instance { Ok(instance) => instance, @@ -241,7 +248,12 @@ pub unsafe extern "C" fn stylus_cache_module( /// Evicts an activated user program from the init cache. 
 #[no_mangle]
-pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, arbos_tag: u32, debug: bool) {
+pub extern "C" fn stylus_evict_module(
+    module_hash: Bytes32,
+    version: u16,
+    arbos_tag: u32,
+    debug: bool,
+) {
     InitCache::evict(module_hash, version, arbos_tag, debug);
 }
 
diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs
index 38155818c0..2858d59fdc 100644
--- a/arbitrator/stylus/src/native.rs
+++ b/arbitrator/stylus/src/native.rs
@@ -126,7 +126,8 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> {
         if !env.evm_data.cached {
             long_term_tag = 0;
         }
-        let (module, store) = InitCache::insert(module_hash, module, version, long_term_tag, debug)?;
+        let (module, store) =
+            InitCache::insert(module_hash, module, version, long_term_tag, debug)?;
         Self::from_module(module, store, env)
     }
 
From 019581e7e733139c331751ae6485ffbd153f8dd5 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Mon, 20 May 2024 14:18:26 -0600
Subject: [PATCH 42/75] allowed-wasm-module-roots: accept paths as well

---
 cmd/nitro/nitro.go           | 17 ++++++++++++++++-
 validator/valnode/valnode.go |  2 +-
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 9280c3af02..473df21811 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -6,6 +6,7 @@ package main
 import (
 	"context"
 	"crypto/ecdsa"
+	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
@@ -452,7 +453,21 @@ func mainImpl() int {
 	if len(allowedWasmModuleRoots) > 0 {
 		moduleRootMatched := false
 		for _, root := range allowedWasmModuleRoots {
-			if common.HexToHash(root) == moduleRoot {
+			bytes, err := hex.DecodeString(root)
+			if err == nil {
+				if common.BytesToHash(bytes) == moduleRoot {
+					moduleRootMatched = true
+					break
+				}
+				continue
+			}
+			locator, locatorErr := server_common.NewMachineLocator(root)
+			if err != nil {
+				log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err)
+				continue
+			}
+			path := locator.GetMachinePath(moduleRoot)
+			if _, err := os.Stat(path); err == nil {
 				moduleRootMatched = true
 				break
 			}
diff --git a/validator/valnode/valnode.go b/validator/valnode/valnode.go
index 93a5b37238..972e11189d 100644
--- a/validator/valnode/valnode.go
+++ b/validator/valnode/valnode.go
@@ -25,7 +25,7 @@ type WasmConfig struct {
 func WasmConfigAddOptions(prefix string, f *pflag.FlagSet) {
 	f.String(prefix+".root-path", DefaultWasmConfig.RootPath, "path to machine folders, each containing wasm files (machine.wavm.br, replay.wasm)")
 	f.Bool(prefix+".enable-wasmroots-check", DefaultWasmConfig.EnableWasmrootsCheck, "enable check for compatibility of on-chain WASM module root with node")
-	f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots to check if the on-chain WASM module root belongs to on node startup")
+	f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots or machine base paths to match against on-chain WasmModuleRoot")
 }
 
 var DefaultWasmConfig = WasmConfig{
From 458669ad9a2e003c3c8ec920b24798a378b080c2 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Mon, 20 May 2024 17:59:45 -0600
Subject: [PATCH 43/75] dockerfile: sort split-validator support

nitro has legacy machines and config to check for these wasmModuleRoots
nitro-validator has split-validation on
nitro-dev is based off validator and has latest as well
---
 Dockerfile                 | 37 +++++++++++++++++++------------------
 scripts/split-val-entry.sh |  2 +-
 2
files changed, 20 insertions(+), 19 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5c56b60cc0..f7e26ec084 100644 --- a/Dockerfile +++ b/Dockerfile @@ -203,6 +203,7 @@ COPY ./scripts/download-machine.sh . #RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 +RUN ./download-machine.sh consensus-v30-rc.1 0x8805d035d5fdb8bb4450f306d9ab82633e2b6316260529cdcaf1b3702afbd5d5 FROM golang:1.21-bookworm as node-builder WORKDIR /workspace @@ -268,11 +269,15 @@ USER user WORKDIR /home/user/ ENTRYPOINT [ "/usr/local/bin/nitro" ] +FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy + FROM nitro-node-slim as nitro-node USER root COPY --from=prover-export /bin/jit /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/daserver /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/datool /usr/local/bin/ +COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines +RUN rm -rf /workspace/target/legacy-machines/latest RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y \ @@ -282,10 +287,23 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version +ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/workspace/machines"] USER user -FROM nitro-node as nitro-node-dev-base +FROM nitro-node as nitro-node-validator +USER root +COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val +COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y xxd netcat-traditional && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* +COPY scripts/split-val-entry.sh /usr/local/bin +ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] +USER user + +FROM nitro-node-validator as nitro-node-dev USER root # Copy in latest WASM module root RUN rm -f /home/user/target/machines/latest @@ -309,22 +327,5 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ USER user -FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy - -FROM nitro-node-dev-base as nitro-node-dev -USER root - -RUN export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get install -y xxd netcat-traditional && \ - rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* -COPY scripts/split-val-entry.sh /usr/local/bin -COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines -RUN rm -rf /workspace/target/legacy-machines/latest -COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val -COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit -ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] -USER user - FROM nitro-node as nitro-node-default # Just to ensure nitro-node-dist is default diff --git a/scripts/split-val-entry.sh 
b/scripts/split-val-entry.sh
index 6f56a8ec46..1f640f9763 100755
--- a/scripts/split-val-entry.sh
+++ b/scripts/split-val-entry.sh
@@ -16,4 +16,4 @@ for port in 52000 52001; do
   done
 done
 echo launching nitro-node
-/usr/local/bin/nitro --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@"
+/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/workspace/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@"
From b3693be5a21f45d5e9ec63c681068844995d3dbd Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Mon, 20 May 2024 18:41:22 -0600
Subject: [PATCH 44/75] log when choosing validator

---
 staker/block_validator.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/staker/block_validator.go b/staker/block_validator.go
index e494b3da10..50ccac0471 100644
--- a/staker/block_validator.go
+++ b/staker/block_validator.go
@@ -1097,13 +1097,18 @@ func (v *BlockValidator) Initialize(ctx context.Context) error {
 	for _, root := range moduleRoots {
 		if v.redisValidator != nil && validator.SpawnerSupportsModule(v.redisValidator, root) {
 			v.chosenValidator[root] = v.redisValidator
+			log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis")
 		} else {
 			for _, spawner := range v.execSpawners {
 				if validator.SpawnerSupportsModule(spawner, root) {
 					v.chosenValidator[root] = spawner
+					log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name())
 					break
 				}
 			}
+			if v.chosenValidator[root] == nil {
+				log.Error("validator not found", "WasmModuleRoot", root)
+			}
 		}
 	}
 	return nil
From c79b98d6bbb3b36a43e089e2fa622c676ae5c1b5 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Mon, 20 May 2024 20:30:54 -0600
Subject: [PATCH 45/75] fix moduleRoots condition

---
 cmd/nitro/nitro.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 473df21811..815257cf7a 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -462,7 +462,7 @@ func mainImpl() int {
 				continue
 			}
 			locator, locatorErr := server_common.NewMachineLocator(root)
-			if err != nil {
+			if locatorErr != nil {
 				log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err)
 				continue
 			}
 			path := locator.GetMachinePath(moduleRoot)
From dc7e874065523970eae4d4f6c1b20f991c2c228b Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Mon, 20 May 2024 20:41:22 -0600
Subject: [PATCH 46/75] Dockerfile: use consensus 30-rc.2

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index f7e26ec084..e5718868fa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -203,7 +203,7 @@ COPY ./scripts/download-machine.sh .
#RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 -RUN ./download-machine.sh consensus-v30-rc.1 0x8805d035d5fdb8bb4450f306d9ab82633e2b6316260529cdcaf1b3702afbd5d5 +RUN ./download-machine.sh consensus-v30-rc.2 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b FROM golang:1.21-bookworm as node-builder WORKDIR /workspace From 6ebcf740c5705b925ac2edc162b7ca3af249c422 Mon Sep 17 00:00:00 2001 From: Emiliano Bonassi Date: Mon, 20 May 2024 22:26:41 -0700 Subject: [PATCH 47/75] feat(das): add support to configure http body limit --- cmd/daserver/daserver.go | 13 ++++++++----- cmd/genericconf/server.go | 2 ++ das/dasRpcServer.go | 9 ++++++--- das/rpc_test.go | 2 +- system_tests/common_test.go | 2 +- system_tests/das_test.go | 4 ++-- 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 8036487d26..48cc5546de 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -31,10 +31,11 @@ import ( ) type DAServerConfig struct { - EnableRPC bool `koanf:"enable-rpc"` - RPCAddr string `koanf:"rpc-addr"` - RPCPort uint64 `koanf:"rpc-port"` - RPCServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rpc-server-timeouts"` + EnableRPC bool `koanf:"enable-rpc"` + RPCAddr string `koanf:"rpc-addr"` + RPCPort uint64 `koanf:"rpc-port"` + RPCServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rpc-server-timeouts"` + RPCServerBodyLimit int `koanf:"rpc-server-body-limit"` EnableREST bool `koanf:"enable-rest"` RESTAddr string `koanf:"rest-addr"` @@ -58,6 +59,7 @@ var DefaultDAServerConfig = DAServerConfig{ RPCAddr: "localhost", RPCPort: 9876, RPCServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, + RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault, EnableREST: false, RESTAddr: "localhost", RESTPort: 9877, @@ -88,6 +90,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { f.Bool("enable-rpc", DefaultDAServerConfig.EnableRPC, "enable the HTTP-RPC server listening on rpc-addr and rpc-port") f.String("rpc-addr", DefaultDAServerConfig.RPCAddr, "HTTP-RPC server listening interface") f.Uint64("rpc-port", DefaultDAServerConfig.RPCPort, "HTTP-RPC server listening port") + f.Int("rpc-server-body-limit", DefaultDAServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes") genericconf.HTTPServerTimeoutConfigAddOptions("rpc-server-timeouts", f) f.Bool("enable-rest", DefaultDAServerConfig.EnableREST, "enable the REST server listening on rest-addr and rest-port") @@ -250,7 +253,7 @@ func startup() error { if serverConfig.EnableRPC { log.Info("Starting HTTP-RPC server", "addr", serverConfig.RPCAddr, "port", serverConfig.RPCPort, "revision", vcsRevision, "vcs.time", vcsTime) - rpcServer, err = das.StartDASRPCServer(ctx, serverConfig.RPCAddr, serverConfig.RPCPort, serverConfig.RPCServerTimeouts, daReader, daWriter, daHealthChecker) + rpcServer, err = das.StartDASRPCServer(ctx, serverConfig.RPCAddr, serverConfig.RPCPort, serverConfig.RPCServerTimeouts, serverConfig.RPCServerBodyLimit, daReader, daWriter, daHealthChecker) if err != nil { return err } diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 7550791d6d..18f13dd204 100644 --- a/cmd/genericconf/server.go +++ 
b/cmd/genericconf/server.go @@ -48,6 +48,8 @@ var HTTPServerTimeoutConfigDefault = HTTPServerTimeoutConfig{ IdleTimeout: 120 * time.Second, } +var HTTPServerBodyLimitDefault = 0 // Use default from go-ethereum + func (c HTTPConfig) Apply(stackConf *node.Config) { stackConf.HTTPHost = c.Addr stackConf.HTTPPort = c.Port diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index 2f1fc1fd42..8bab8f0b6d 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -36,19 +36,22 @@ type DASRPCServer struct { daHealthChecker DataAvailabilityServiceHealthChecker } -func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, RPCServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, portNum)) if err != nil { return nil, err } - return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, daReader, daWriter, daHealthChecker) + return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, RPCServerBodyLimit, daReader, daWriter, daHealthChecker) } -func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, RPCServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { if daWriter == nil { return nil, errors.New("No writer backend was configured for DAS RPC server. 
Has the BLS signing key been set up (--data-availability.key.key-dir or --data-availability.key.priv-key options)?") } rpcServer := rpc.NewServer() + if RPCServerBodyLimit > 0 { + rpcServer.SetHTTPBodyLimit(RPCServerBodyLimit) + } err := rpcServer.RegisterName("das", &DASRPCServer{ daReader: daReader, daWriter: daWriter, diff --git a/das/rpc_test.go b/das/rpc_test.go index 044ba597be..658592cc0b 100644 --- a/das/rpc_test.go +++ b/das/rpc_test.go @@ -55,7 +55,7 @@ func TestRPC(t *testing.T) { testhelpers.RequireImpl(t, err) localDas, err := NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, "") testhelpers.RequireImpl(t, err) - dasServer, err := StartDASRPCServerOnListener(ctx, lis, genericconf.HTTPServerTimeoutConfigDefault, storageService, localDas, storageService) + dasServer, err := StartDASRPCServerOnListener(ctx, lis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, storageService, localDas, storageService) defer func() { if err := dasServer.Shutdown(ctx); err != nil { panic(err) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index f6bfde2108..04b91d6a18 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -1127,7 +1127,7 @@ func setupConfigWithDAS( Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daWriter, daHealthChecker) + _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, daReader, daWriter, daHealthChecker) Require(t, err) _, err = das.NewRestfulDasServerOnListener(restLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daHealthChecker) Require(t, err) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index bb09cc9880..a5ce02d87b 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -74,7 +74,7 @@ func startLocalDASServer( Require(t, err) rpcLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - rpcServer, err := das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, storageService, daWriter, storageService) + rpcServer, err := das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, storageService, daWriter, storageService) Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) @@ -283,7 +283,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { defer lifecycleManager.StopAndWaitUntil(time.Second) rpcLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daWriter, daHealthChecker) + _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, daReader, daWriter, daHealthChecker) Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) From 71c9da7c8cf98933c8d306cf198fcbc269727917 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 08:45:33 -0600 Subject: [PATCH 48/75] Dockerfile: fix path --- Dockerfile | 2 +- scripts/split-val-entry.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index e5718868fa..58976fc6e1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -287,7 +287,7 @@ RUN export 
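# The runtime image keeps its machine directories under /home/user (it runs as
# USER user); /workspace appears only as a build-stage working directory, which
# is what the ENTRYPOINT path fix below corrects.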
DEBIAN_FRONTEND=noninteractive && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version -ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/workspace/machines"] +ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/home/user/target/machines"] USER user diff --git a/scripts/split-val-entry.sh b/scripts/split-val-entry.sh index 1f640f9763..8e1be0f6cc 100755 --- a/scripts/split-val-entry.sh +++ b/scripts/split-val-entry.sh @@ -16,4 +16,4 @@ for port in 52000 52001; do done done echo launching nitro-node -/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/workspace/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" +/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/home/user/target/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" From 153ffa76ca5226b2068d01acef643fee3f0e0fa9 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 21 May 2024 13:32:07 -0300 Subject: [PATCH 49/75] add apt-get update to wasm-libs-builder --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5c56b60cc0..19a0b46ebd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -41,7 +41,8 @@ RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base as wasm-libs-builder # clang / lld used by soft-float wasm -RUN apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt +RUN apt-get update && \ + apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt # pinned rust 1.75.0 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.75.0 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ From de58296c1a41da7cf9b4fce82ab5687b4925bd47 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 12:28:32 -0600 Subject: [PATCH 50/75] geth: update --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 940fbe020e..b8d4ced531 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 940fbe020e03707365da09de939058944d9e1f5d +Subproject commit b8d4ced5316c987d095ef1fc3ecb5e8ae0df094d From e8685b359cd82771ec9b5c30900c32e4a142834a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Tue, 21 May 2024 16:52:49 -0600 Subject: [PATCH 51/75] fix typo --- go-ethereum | 2 +- staker/block_validator.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go-ethereum b/go-ethereum index 8048ac4bed..b8d4ced531 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8048ac4bed2eda18284e3c022ea5ee4cce771134 +Subproject commit b8d4ced5316c987d095ef1fc3ecb5e8ae0df094d diff --git a/staker/block_validator.go b/staker/block_validator.go index 50ccac0471..027ee78248 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1097,17 +1097,17 @@ func (v *BlockValidator) 
Initialize(ctx context.Context) error { for _, root := range moduleRoots { if v.redisValidator != nil && validator.SpawnerSupportsModule(v.redisValidator, root) { v.chosenValidator[root] = v.redisValidator - log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", "redis") + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis") } else { for _, spawner := range v.execSpawners { if validator.SpawnerSupportsModule(spawner, root) { v.chosenValidator[root] = spawner - log.Info("validator chosen", "WasmMosuleRoot", root, "chosen", spawner.Name()) + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name()) break } } if v.chosenValidator[root] == nil { - log.Error("validator not found", "WasmMosuleRoot", root) + log.Error("validator not found", "WasmModuleRoot", root) } } } From aaf4d1c8ce1baa12d14b3becaf51510fb687d654 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:03:29 -0500 Subject: [PATCH 52/75] Fix off-by-one in data poster nonce check --- arbnode/dataposter/data_poster.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index fb35ac3c8d..34ca9e1483 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -857,24 +857,23 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed - var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { - latestBlockNumber, err = p.client.BlockNumber(ctx) + latestBlockNumber, err := p.client.BlockNumber(ctx) if err != nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantTxCount, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - if precedingTx.FullTx.Nonce() > reorgResistantNonce { - log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) + if newTx.FullTx.Nonce() > reorgResistantTxCount { + log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) return nil } } else { - log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", 
prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) + log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) } } } From 345e828b430efff7b66d401abe21759cc0af3abc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:41:53 -0500 Subject: [PATCH 53/75] Always log when sending previously unsent tx --- arbnode/dataposter/data_poster.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 34ca9e1483..399bc19dbd 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -857,13 +857,14 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed + var latestBlockNumber, prevBlockNumber, reorgResistantTxCount uint64 if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { - latestBlockNumber, err := p.client.BlockNumber(ctx) + latestBlockNumber, err = p.client.BlockNumber(ctx) if err != nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantTxCount, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantTxCount, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } @@ -872,9 +873,8 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) return nil } - } else { - log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) } + log.Debug("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) } } From 8737d5ccca5c252797af89906f1c5840df93d6ee Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 21 May 2024 21:45:39 -0500 Subject: [PATCH 54/75] Improve previouslySent check --- arbnode/dataposter/data_poster.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 399bc19dbd..5aaef959d8 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -851,7 +851,8 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx 
*storage.QueuedTransacti // different type with a lower nonce. // If we decide not to send this tx yet, just leave it queued and with Sent set to false. // The resending/repricing loop in DataPoster.Start will keep trying. - if !newTx.Sent && newTx.FullTx.Nonce() > 0 { + previouslySent := newTx.Sent || (prevTx != nil && prevTx.Sent) // if we've previously sent this nonce + if !previouslySent && newTx.FullTx.Nonce() > 0 { precedingTx, err := p.queue.Get(ctx, arbmath.SaturatingUSub(newTx.FullTx.Nonce(), 1)) if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) From 65f8bb569adeb743f645412df0e4c80346920c39 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Wed, 22 May 2024 13:53:13 +0200 Subject: [PATCH 55/75] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 5b7b36a339..07f6d7a8c1 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 5b7b36a339ac28d708bca072dc5ec8189ceadac2 +Subproject commit 07f6d7a8c149f8752aa8deef4598cfd184a37e94 From 5570fb3e57c574b2004aca9ecd3ad1831bd5ee4c Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 22 May 2024 14:18:09 +0200 Subject: [PATCH 56/75] Drop listenForInterrupt, since stopAndWait is already called on sigint --- pubsub/consumer.go | 22 +--------------------- pubsub/pubsub_test.go | 10 +++------- 2 files changed, 4 insertions(+), 28 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 97ab004764..0288c19e45 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -5,10 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "os" - "os/signal" "sync/atomic" - "syscall" "time" "github.com/ethereum/go-ethereum/log" @@ -53,7 +50,6 @@ type Consumer[Request any, Response any] struct { // terminating indicates whether interrupt was received, in which case // consumer should clean up for graceful shutdown. terminating atomic.Bool - signals chan os.Signal } type Message[Request any] struct { @@ -72,14 +68,12 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. cfg: cfg, terminating: atomic.Bool{}, - signals: make(chan os.Signal, 1), }, nil } // Start starts the consumer to iteratively perform heartbeat in configured intervals. func (c *Consumer[Request, Response]) Start(ctx context.Context) { c.StopWaiter.Start(ctx, c) - c.listenForInterrupt() c.StopWaiter.CallIteratively( func(ctx context.Context) time.Duration { if !c.terminating.Load() { @@ -92,22 +86,8 @@ func (c *Consumer[Request, Response]) Start(ctx context.Context) { ) } -// listenForInterrupt launches a thread that notifies the channel when interrupt -// is received. 
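// The listener removed below duplicated process-level signal handling: per the
// commit message, StopAndWait is already called on SIGINT, so heartbeat
// cleanup can move into StopAndWait itself rather than hang off a second,
// consumer-local signal channel.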
-func (c *Consumer[Request, Response]) listenForInterrupt() { - signal.Notify(c.signals, syscall.SIGINT, syscall.SIGTERM) - c.StopWaiter.LaunchThread(func(ctx context.Context) { - select { - case sig := <-c.signals: - log.Info("Received interrup", "signal", sig.String()) - case <-ctx.Done(): - log.Info("Context is done", "error", ctx.Err()) - } - c.deleteHeartBeat(ctx) - }) -} - func (c *Consumer[Request, Response]) StopAndWait() { + c.deleteHeartBeat(c.GetParentContext()) c.StopWaiter.StopAndWait() } diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 85314dc29a..cdf5fa1ef6 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -232,15 +232,11 @@ func TestRedisProduce(t *testing.T) { // Consumer messages in every third consumer but don't ack them to check // that other consumers will claim ownership on those messages. for i := 0; i < len(consumers); i += 3 { + consumers[i].Start(ctx) if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } - // Terminate half of the consumers, send interrupt to others. - if i%2 == 0 { - consumers[i].StopAndWait() - } else { - consumers[i].signals <- os.Interrupt - } + consumers[i].StopAndWait() } } @@ -252,7 +248,7 @@ func TestRedisProduce(t *testing.T) { } producer.StopAndWait() for _, c := range consumers { - c.StopWaiter.StopAndWait() + c.StopAndWait() } got, err := mergeValues(gotMessages) if err != nil { From 9a866114c9ea15e6efd0bf4452dfa0ec67cdb3b8 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 22 May 2024 14:55:52 +0200 Subject: [PATCH 57/75] Fix test --- pubsub/pubsub_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index cdf5fa1ef6..72504602e3 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -285,6 +285,7 @@ func TestRedisReproduceDisabled(t *testing.T) { // Consumer messages in every third consumer but don't ack them to check // that other consumers will claim ownership on those messages. for i := 0; i < len(consumers); i += 3 { + consumers[i].Start(ctx) if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } From 7d67d8b9263deb7d9492f5cd7b08595b6c99f2b4 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 22 May 2024 16:41:05 +0200 Subject: [PATCH 58/75] Use in memory buffer and dump on disk only if needed --- execution/gethexec/sequencer.go | 53 +++++++++++++-------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 4247556905..c40669495c 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -4,12 +4,14 @@ package gethexec import ( + "bytes" "context" "errors" "fmt" "math" "math/big" "os" + "path" "runtime/debug" "runtime/pprof" "runtime/trace" @@ -768,41 +770,15 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { return outputQueueItems } -func deleteFiles(files ...*os.File) { - for _, f := range files { - if err := os.Remove(f.Name()); err != nil { - log.Error("Error removing file", "name", f.Name()) - } - } -} - -func closeFiles(files ...*os.File) { - for _, f := range files { - if err := os.Remove(f.Name()); err != nil { - log.Error("Error closing file", "name", f.Name()) - } - } -} - // createBlockWithProfiling runs create block with tracing and CPU profiling // enabled. If the block creation takes longer than 5 seconds, it keeps both // and prints out filenames in an error log line. 
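// The rewrite below profiles into in-memory bytes.Buffers and writes them out
// as temp files only when block creation exceeds the threshold, so the common
// fast path no longer creates and deletes files on every block.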
func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { - id := uuid.NewString() - pprofFile, err := os.CreateTemp("", id+".pprof") - if err != nil { - log.Error("Creating temporary file for profiling CPU", "error", err) - } - traceFile, err := os.CreateTemp("", id+".trace") - if err != nil { - log.Error("Creating temporary file for tracing", "error", err) - } - if err := pprof.StartCPUProfile(pprofFile); err != nil { + pprofBuf, traceBuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + if err := pprof.StartCPUProfile(pprofBuf); err != nil { log.Error("Starting CPU profiling", "error", err) - deleteFiles(pprofFile) } - if err := trace.Start(traceFile); err != nil { - deleteFiles(traceFile) + if err := trace.Start(traceBuf); err != nil { log.Error("Starting tracing", "error", err) } start := time.Now() @@ -810,15 +786,28 @@ func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { elapsed := time.Since(start) pprof.StopCPUProfile() trace.Stop() - closeFiles(pprofFile, traceFile) if elapsed > 5*time.Second { - log.Error("Block creation took longer than 5 seconds", "pprof", pprofFile.Name()) + writeAndLog(pprofBuf, traceBuf) return res } - deleteFiles(pprofFile, traceFile) return res } +func writeAndLog(pprof, trace *bytes.Buffer) { + id := uuid.NewString() + pprofFile := path.Join(os.TempDir(), id+".pprof") + if err := os.WriteFile(pprofFile, pprof.Bytes(), 0o644); err != nil { + log.Error("Creating temporary file for pprof", "fileName", pprofFile, "error", err) + return + } + traceFile := path.Join(os.TempDir(), id+".trace") + if err := os.WriteFile(traceFile, trace.Bytes(), 0o644); err != nil { + log.Error("Creating temporary file for trace", "fileName", traceFile, "error", err) + return + } + log.Debug("Block creation took longer than 5 seconds, created pprof and trace files", "pprof", pprofFile, "traceFile", traceFile) +} + func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { var queueItems []txQueueItem var totalBatchSize int From 101c339d6776af808ae269cdb44b838f1377d1fd Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 22 May 2024 16:48:44 +0200 Subject: [PATCH 59/75] Fix gosec linter error --- execution/gethexec/sequencer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index c40669495c..9a94e35f61 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -796,12 +796,12 @@ func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { func writeAndLog(pprof, trace *bytes.Buffer) { id := uuid.NewString() pprofFile := path.Join(os.TempDir(), id+".pprof") - if err := os.WriteFile(pprofFile, pprof.Bytes(), 0o644); err != nil { + if err := os.WriteFile(pprofFile, pprof.Bytes(), 0o600); err != nil { log.Error("Creating temporary file for pprof", "fileName", pprofFile, "error", err) return } traceFile := path.Join(os.TempDir(), id+".trace") - if err := os.WriteFile(traceFile, trace.Bytes(), 0o644); err != nil { + if err := os.WriteFile(traceFile, trace.Bytes(), 0o600); err != nil { log.Error("Creating temporary file for trace", "fileName", traceFile, "error", err) return } From 0ce93785e406c3375cb1931297b4e9580e4faf4f Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 22 May 2024 08:47:57 -0600 Subject: [PATCH 60/75] block_validator: fail but dont segfault if no validator --- staker/block_validator.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/staker/block_validator.go 
b/staker/block_validator.go index 027ee78248..5a511920f2 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -791,8 +791,9 @@ validationsLoop: } for _, moduleRoot := range wasmRoots { if v.chosenValidator[moduleRoot] == nil { - v.possiblyFatal(fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot)) - continue + notFoundErr := fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot) + v.possiblyFatal(notFoundErr) + return nil, notFoundErr } if v.chosenValidator[moduleRoot].Room() == 0 { log.Trace("advanceValidations: no more room", "moduleRoot", moduleRoot) @@ -1107,7 +1108,7 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { } } if v.chosenValidator[root] == nil { - log.Error("validator not found", "WasmModuleRoot", root) + return fmt.Errorf("cannot validate WasmModuleRoot %v", root) } } } From 16c95d730f4614625c9d5a10b88984d06aaac645 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 17:28:11 +0200 Subject: [PATCH 61/75] Delete heartbeat after stopAndWait --- pubsub/consumer.go | 2 +- system_tests/block_validator_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 0288c19e45..4b51d24f2d 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -87,8 +87,8 @@ func (c *Consumer[Request, Response]) Start(ctx context.Context) { } func (c *Consumer[Request, Response]) StopAndWait() { - c.deleteHeartBeat(c.GetParentContext()) c.StopWaiter.StopAndWait() + c.deleteHeartBeat(c.GetParentContext()) } func heartBeatKey(id string) string { diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index dfd892a079..debd6d4c7c 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -72,7 +72,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops redisURL := "" if useRedisStreams { redisURL = redisutil.CreateTestRedis(ctx, t) - validatorConfig.BlockValidator.RedisValidationClientConfig = redis.DefaultValidationClientConfig + validatorConfig.BlockValidator.RedisValidationClientConfig = redis.TestValidationClientConfig validatorConfig.BlockValidator.RedisValidationClientConfig.RedisURL = redisURL } From 8504c5c0ba8303fdf18ce8efc0f94b1e81b47f00 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 23 May 2024 08:54:02 -0700 Subject: [PATCH 62/75] Update blocks_reexecutor/blocks_reexecutor.go Co-authored-by: Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com> --- blocks_reexecutor/blocks_reexecutor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index f58e0ce00f..1e4a06fe90 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -35,7 +35,7 @@ func (c *Config) Validate() error { if c.EndBlock < c.StartBlock { return errors.New("invalid block range for blocks re-execution") } - if c.Room < 0 { + if c.Room <= 0 { return errors.New("room for blocks re-execution should be greater than 0") } return nil From 56fc8d4b867351b9d2ed7714360389dd5d5b76ee Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 20:50:19 +0200 Subject: [PATCH 63/75] Drop terminating atomic bool --- pubsub/consumer.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 4b51d24f2d..c9590de8e6 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -5,7 +5,6 @@ 
import ( "encoding/json" "errors" "fmt" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/log" @@ -47,9 +46,6 @@ type Consumer[Request any, Response any] struct { redisStream string redisGroup string cfg *ConsumerConfig - // terminating indicates whether interrupt was received, in which case - // consumer should clean up for graceful shutdown. - terminating atomic.Bool } type Message[Request any] struct { @@ -67,7 +63,6 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream redisStream: streamName, redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. cfg: cfg, - terminating: atomic.Bool{}, }, nil } @@ -76,10 +71,6 @@ func (c *Consumer[Request, Response]) Start(ctx context.Context) { c.StopWaiter.Start(ctx, c) c.StopWaiter.CallIteratively( func(ctx context.Context) time.Duration { - if !c.terminating.Load() { - log.Trace("Consumer is terminating, stopping heartbeat update") - return time.Hour - } c.heartBeat(ctx) return c.cfg.KeepAliveTimeout / 10 }, @@ -101,7 +92,6 @@ func (c *Consumer[Request, Response]) heartBeatKey() string { // deleteHeartBeat deletes the heartbeat to indicate it is being shut down. func (c *Consumer[Request, Response]) deleteHeartBeat(ctx context.Context) { - c.terminating.Store(true) if err := c.client.Del(ctx, c.heartBeatKey()).Err(); err != nil { l := log.Info if ctx.Err() != nil { From 14c661636c1040d84b3ba162fe63ce74322dd4ec Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 21:10:47 +0200 Subject: [PATCH 64/75] Address comments --- pubsub/common.go | 8 ++++---- validator/client/redis/producer.go | 10 +++++++--- validator/valnode/redis/consumer.go | 9 +++++++-- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/pubsub/common.go b/pubsub/common.go index bc0ab1035b..e1dc22c909 100644 --- a/pubsub/common.go +++ b/pubsub/common.go @@ -11,15 +11,15 @@ import ( // does not return an error. func CreateStream(ctx context.Context, streamName string, client redis.UniversalClient) error { _, err := client.XGroupCreateMkStream(ctx, streamName, streamName, "$").Result() - if err == nil || err.Error() == "BUSYGROUP Consumer Group name already exists" { - return nil + if err != nil && !StreamExists(ctx, streamName, client) { + return err } - return err + return nil } // StreamExists returns whether there are any consumer group for specified // redis stream. 
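// Two changes land in this hunk: CreateStream now fails only if the stream
// still does not exist after the error (which covers the BUSYGROUP "already
// exists" reply without string matching), and StreamExists switches to the
// (ctx, streamName, client) argument order used by CreateStream.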
-func StreamExists(ctx context.Context, client redis.UniversalClient, streamName string) bool { +func StreamExists(ctx context.Context, streamName string, client redis.UniversalClient) bool { groups, err := client.XInfoStream(ctx, streamName).Result() if err != nil { log.Error("Reading redis streams", "error", err) diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index c971664bd3..41ae100954 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -43,7 +43,7 @@ var TestValidationClientConfig = ValidationClientConfig{ Room: 2, RedisURL: "", ProducerConfig: pubsub.TestProducerConfig, - CreateStreams: true, + CreateStreams: false, } func ValidationClientConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -63,6 +63,7 @@ type ValidationClient struct { producerConfig pubsub.ProducerConfig redisClient redis.UniversalClient moduleRoots []common.Hash + createStreams bool } func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) { @@ -79,13 +80,16 @@ func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) producers: make(map[common.Hash]*pubsub.Producer[*validator.ValidationInput, validator.GoGlobalState]), producerConfig: cfg.ProducerConfig, redisClient: redisClient, + createStreams: cfg.CreateStreams, }, nil } func (c *ValidationClient) Initialize(ctx context.Context, moduleRoots []common.Hash) error { for _, mr := range moduleRoots { - if err := pubsub.CreateStream(ctx, server_api.RedisStreamForRoot(mr), c.redisClient); err != nil { - return fmt.Errorf("creating redis stream: %w", err) + if c.createStreams { + if err := pubsub.CreateStream(ctx, server_api.RedisStreamForRoot(mr), c.redisClient); err != nil { + return fmt.Errorf("creating redis stream: %w", err) + } } if _, exists := c.producers[mr]; exists { log.Warn("Producer already existsw for module root", "hash", mr) diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index bc1cd289e7..2fa25ef3c5 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -63,7 +63,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { ready := make(chan struct{}, 1) s.StopWaiter.LaunchThread(func(ctx context.Context) { for { - if pubsub.StreamExists(ctx, c.RedisClient(), c.StreamName()) { + if pubsub.StreamExists(ctx, c.StreamName(), c.RedisClient()) { ready <- struct{}{} readyStreams <- struct{}{} return @@ -72,7 +72,12 @@ func (s *ValidationServer) Start(ctx_in context.Context) { } }) s.StopWaiter.LaunchThread(func(ctx context.Context) { - <-ready // Wait until the stream exists and start consuming iteratively. + select { + case <-ctx.Done(): + log.Error("Context done", "error", ctx.Err().Error()) + return + case <-ready: // Wait until the stream exists and start consuming iteratively. 
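// Selecting on ctx.Done() alongside the ready channel keeps this launched
// thread from blocking forever if the node shuts down before the stream
// ever appears.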
+ } s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration { req, err := c.Consume(ctx) if err != nil { From ab6fa4cae57f99b7dae10ad07033f53216f05ad8 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 21:14:41 +0200 Subject: [PATCH 65/75] Switch threshold from 5 to 2 seconds --- execution/gethexec/sequencer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 9a94e35f61..dd84c352a4 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -786,7 +786,7 @@ func (s *Sequencer) createBlockWithProfiling(ctx context.Context) bool { elapsed := time.Since(start) pprof.StopCPUProfile() trace.Stop() - if elapsed > 5*time.Second { + if elapsed > 2*time.Second { writeAndLog(pprofBuf, traceBuf) return res } From f48c25658b2d82f1adaae84400abae7c87df1483 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 21:45:25 +0200 Subject: [PATCH 66/75] Address comments --- validator/valnode/redis/consumer.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 2fa25ef3c5..52c8728681 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -68,13 +68,18 @@ func (s *ValidationServer) Start(ctx_in context.Context) { readyStreams <- struct{}{} return } - time.Sleep(time.Millisecond * 100) + select { + case <-ctx.Done(): + log.Info("Context done", "error", ctx.Err().Error()) + return + case <-time.After(time.Millisecond * 100): + } } }) s.StopWaiter.LaunchThread(func(ctx context.Context) { select { case <-ctx.Done(): - log.Error("Context done", "error", ctx.Err().Error()) + log.Info("Context done", "error", ctx.Err().Error()) return case <-ready: // Wait until the stream exists and start consuming iteratively. 
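// Replacing time.Sleep with a select on time.After makes each 100ms poll
// interruptible by context cancellation, so shutdown no longer waits out an
// in-flight sleep.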
} @@ -111,7 +116,7 @@ func (s *ValidationServer) Start(ctx_in context.Context) { case <-time.After(s.streamTimeout): log.Error("Waiting for redis streams timed out") case <-ctx_in.Done(): - log.Error(("Context expired, failed to start")) + log.Info(("Context expired, failed to start")) return } } @@ -130,17 +135,20 @@ var DefaultValidationServerConfig = ValidationServerConfig{ RedisURL: "", ConsumerConfig: pubsub.DefaultConsumerConfig, ModuleRoots: []string{}, + StreamTimeout: 10 * time.Minute, } var TestValidationServerConfig = ValidationServerConfig{ RedisURL: "", ConsumerConfig: pubsub.TestConsumerConfig, ModuleRoots: []string{}, + StreamTimeout: time.Minute, } func ValidationServerConfigAddOptions(prefix string, f *pflag.FlagSet) { pubsub.ConsumerConfigAddOptions(prefix+".consumer-config", f) f.StringSlice(prefix+".module-roots", nil, "Supported module root hashes") + f.Duration(prefix+"stream-timeout", DefaultValidationServerConfig.StreamTimeout, "Timeout on polling for existence of redis streams") } func (cfg *ValidationServerConfig) Enabled() bool { From 3c08b790f34637b532dfb34904e9414ca368622e Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 23 May 2024 21:55:56 +0200 Subject: [PATCH 67/75] Fix flag --- validator/valnode/redis/consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 52c8728681..26c44fc5e0 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -148,7 +148,7 @@ var TestValidationServerConfig = ValidationServerConfig{ func ValidationServerConfigAddOptions(prefix string, f *pflag.FlagSet) { pubsub.ConsumerConfigAddOptions(prefix+".consumer-config", f) f.StringSlice(prefix+".module-roots", nil, "Supported module root hashes") - f.Duration(prefix+"stream-timeout", DefaultValidationServerConfig.StreamTimeout, "Timeout on polling for existence of redis streams") + f.Duration(prefix+".stream-timeout", DefaultValidationServerConfig.StreamTimeout, "Timeout on polling for existence of redis streams") } func (cfg *ValidationServerConfig) Enabled() bool { From e44cfafd05066c250a98c1960240ba88a1087a11 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 23 May 2024 16:54:43 -0600 Subject: [PATCH 68/75] testnode: update pin --- nitro-testnode | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nitro-testnode b/nitro-testnode index e530842e58..c334820b2d 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit e530842e583e2f3543f97a71c3a7cb53f8a10814 +Subproject commit c334820b2dba6dfa4078f81ed242afbbccc19c91 From 570c31d51d692610607096b4f4a4e92ffa5538d0 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 23 May 2024 16:20:07 -0700 Subject: [PATCH 69/75] update geth pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 07f6d7a8c1..f45f6d7560 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 07f6d7a8c149f8752aa8deef4598cfd184a37e94 +Subproject commit f45f6d75601626daf108aa62ea6cb1549d91c528 From dd27ef17d584d607e0972ac7cd12c734ebf1462d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 23 May 2024 23:31:18 -0500 Subject: [PATCH 70/75] Allow 0x prefix for allowed-wasm-module-roots flag --- cmd/nitro/nitro.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 26aedfbfb7..427974b34f 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go 
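With the change below, an entry in allowed-wasm-module-roots may be a filesystem path or a hex hash, now with or without a 0x prefix. A condensed sketch of the matching logic as it stands after this patch, stitched together from the diff's own lines:

    // root is one entry from --validation.wasm.allowed-wasm-module-roots.
    bytes, err := hex.DecodeString(strings.TrimPrefix(root, "0x"))
    if err == nil {
        if common.HexToHash(root) == common.BytesToHash(bytes) {
            moduleRootMatched = true // accepted as an allowed module root hash
        }
    }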
@@ -453,7 +453,7 @@ func mainImpl() int { if len(allowedWasmModuleRoots) > 0 { moduleRootMatched := false for _, root := range allowedWasmModuleRoots { - bytes, err := hex.DecodeString(root) + bytes, err := hex.DecodeString(strings.TrimPrefix(root, "0x")) if err == nil { if common.HexToHash(root) == common.BytesToHash(bytes) { moduleRootMatched = true From 71ce50f6553b0cbec0624178c4cb3cad26904ba6 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Fri, 24 May 2024 11:11:37 +0200 Subject: [PATCH 71/75] Don't block on consumers start --- validator/valnode/redis/consumer.go | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 26c44fc5e0..3569e78b5c 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -107,19 +107,20 @@ func (s *ValidationServer) Start(ctx_in context.Context) { }) }) } - - for { - select { - case <-readyStreams: - log.Trace("At least one stream is ready") - return // Don't block Start if at least one of the stream is ready. - case <-time.After(s.streamTimeout): - log.Error("Waiting for redis streams timed out") - case <-ctx_in.Done(): - log.Info(("Context expired, failed to start")) - return + s.StopWaiter.LaunchThread(func(ctx context.Context) { + for { + select { + case <-readyStreams: + log.Trace("At least one stream is ready") + return // Don't block Start if at least one of the stream is ready. + case <-time.After(s.streamTimeout): + log.Error("Waiting for redis streams timed out") + case <-ctx.Done(): + log.Info(("Context expired, failed to start")) + return + } } - } + }) } type ValidationServerConfig struct { From 36da838f734209419719eec7e8500bc988794089 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Fri, 24 May 2024 17:57:49 +0200 Subject: [PATCH 72/75] Fix test --- pubsub/common.go | 4 ++-- system_tests/block_validator_test.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pubsub/common.go b/pubsub/common.go index e1dc22c909..9f05304e46 100644 --- a/pubsub/common.go +++ b/pubsub/common.go @@ -20,10 +20,10 @@ func CreateStream(ctx context.Context, streamName string, client redis.Universal // StreamExists returns whether there are any consumer group for specified // redis stream. 
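// After this hunk the probe issues a raw XINFO STREAM call and treats any
// error or nil reply as "stream does not exist"; note the check is now for
// the stream itself, not for consumer groups as the older comment above says.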
func StreamExists(ctx context.Context, streamName string, client redis.UniversalClient) bool { - groups, err := client.XInfoStream(ctx, streamName).Result() + got, err := client.Do(ctx, "XINFO", "STREAM", streamName).Result() if err != nil { log.Error("Reading redis streams", "error", err) return false } - return groups.Groups > 0 + return got != nil } diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index debd6d4c7c..54046edf15 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -74,6 +74,8 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops redisURL = redisutil.CreateTestRedis(ctx, t) validatorConfig.BlockValidator.RedisValidationClientConfig = redis.TestValidationClientConfig validatorConfig.BlockValidator.RedisValidationClientConfig.RedisURL = redisURL + } else { + validatorConfig.BlockValidator.RedisValidationClientConfig = redis.ValidationClientConfig{} } AddDefaultValNode(t, ctx, validatorConfig, !arbitrator, redisURL) From 751ff4a3d8bf4d79d4523d5dca4c382d0a60a668 Mon Sep 17 00:00:00 2001 From: Emiliano Bonassi Date: Sat, 25 May 2024 09:52:56 -0700 Subject: [PATCH 73/75] Update daserver.go Co-authored-by: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> --- cmd/daserver/daserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 48cc5546de..1a3fd435b8 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -90,7 +90,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { f.Bool("enable-rpc", DefaultDAServerConfig.EnableRPC, "enable the HTTP-RPC server listening on rpc-addr and rpc-port") f.String("rpc-addr", DefaultDAServerConfig.RPCAddr, "HTTP-RPC server listening interface") f.Uint64("rpc-port", DefaultDAServerConfig.RPCPort, "HTTP-RPC server listening port") - f.Int("rpc-server-body-limit", DefaultDAServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes") + f.Int("rpc-server-body-limit", DefaultDAServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") genericconf.HTTPServerTimeoutConfigAddOptions("rpc-server-timeouts", f) f.Bool("enable-rest", DefaultDAServerConfig.EnableREST, "enable the REST server listening on rest-addr and rest-port") From 1d088e5f882eb2fe270c7ef61e84e41deb44554a Mon Sep 17 00:00:00 2001 From: Emiliano Bonassi Date: Sat, 25 May 2024 09:53:12 -0700 Subject: [PATCH 74/75] Update server.go Co-authored-by: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> --- cmd/genericconf/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 18f13dd204..9b8acd5f71 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -48,7 +48,7 @@ var HTTPServerTimeoutConfigDefault = HTTPServerTimeoutConfig{ IdleTimeout: 120 * time.Second, } -var HTTPServerBodyLimitDefault = 0 // Use default from go-ethereum +const HTTPServerBodyLimitDefault = 0 // Use default from go-ethereum func (c HTTPConfig) Apply(stackConf *node.Config) { stackConf.HTTPHost = c.Addr From 5931051f80e6fb1acedd9bf341a0cba6335a084d Mon Sep 17 00:00:00 2001 From: Emiliano Bonassi Date: Sat, 25 May 2024 09:56:31 -0700 Subject: [PATCH 75/75] nits dasRpcServer.go --- das/dasRpcServer.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/das/dasRpcServer.go 
b/das/dasRpcServer.go index 8bab8f0b6d..03f755b90e 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -36,21 +36,21 @@ type DASRPCServer struct { daHealthChecker DataAvailabilityServiceHealthChecker } -func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, RPCServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, rpcServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, portNum)) if err != nil { return nil, err } - return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, RPCServerBodyLimit, daReader, daWriter, daHealthChecker) + return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, rpcServerBodyLimit, daReader, daWriter, daHealthChecker) } -func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, RPCServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, rpcServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { if daWriter == nil { return nil, errors.New("No writer backend was configured for DAS RPC server. Has the BLS signing key been set up (--data-availability.key.key-dir or --data-availability.key.priv-key options)?") } rpcServer := rpc.NewServer() - if RPCServerBodyLimit > 0 { - rpcServer.SetHTTPBodyLimit(RPCServerBodyLimit) + if rpcServerBodyLimit > 0 { + rpcServer.SetHTTPBodyLimit(rpcServerBodyLimit) } err := rpcServer.RegisterName("das", &DASRPCServer{ daReader: daReader,
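Taken together, the body-limit plumbing in this series runs from the CLI flag into geth's RPC server. A condensed sketch assembled from the patches above (not a standalone program; SetHTTPBodyLimit comes from the pinned go-ethereum fork):

    // daserver flag; the 0 default keeps go-ethereum's built-in 5MB limit
    f.Int("rpc-server-body-limit", DefaultDAServerConfig.RPCServerBodyLimit,
        "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit")

    // applied in StartDASRPCServerOnListener only when explicitly set
    rpcServer := rpc.NewServer()
    if rpcServerBodyLimit > 0 {
        rpcServer.SetHTTPBodyLimit(rpcServerBodyLimit)
    }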