diff --git a/Dockerfile b/Dockerfile index 0b3e13371..6e26a1622 100644 --- a/Dockerfile +++ b/Dockerfile @@ -202,6 +202,7 @@ COPY ./scripts/download-machine.sh . #RUN ./download-machine.sh consensus-v10.3 0xf559b6d4fa869472dabce70fe1c15221bdda837533dfd891916836975b434dec #RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 +#RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 RUN mkdir 0x965a35130f4e34b7b2339eac03b2eacc659e2dafe850d213ea6a7cdf9edfa99f && \ ln -sfT 0x965a35130f4e34b7b2339eac03b2eacc659e2dafe850d213ea6a7cdf9edfa99f latest && \ diff --git a/arbitrator/Cargo.lock b/arbitrator/Cargo.lock index ae3af8d38..1962b231a 100644 --- a/arbitrator/Cargo.lock +++ b/arbitrator/Cargo.lock @@ -28,6 +28,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.2" @@ -37,6 +49,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "ansi_term" version = "0.12.1" @@ -735,7 +753,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.8", ] [[package]] @@ -743,6 +761,10 @@ name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash 0.8.6", + "allocator-api2", +] [[package]] name = "heck" @@ -870,6 +892,7 @@ dependencies = [ "prover", "rand", "rand_pcg", + "sha2 0.9.9", "sha3 0.9.1", "structopt", "stylus", @@ -964,6 +987,15 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "mach" version = "0.3.2" @@ -1300,6 +1332,7 @@ dependencies = [ "itertools", "lazy_static", "libc", + "lru", "nom", "nom-leb128", "num", @@ -2440,6 +2473,26 @@ dependencies = [ "tap", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] + [[package]] name = "zeroize" version = "1.7.0" diff --git a/arbitrator/jit/Cargo.toml b/arbitrator/jit/Cargo.toml index 2864c92ab..fb49b871b 100644 --- 
a/arbitrator/jit/Cargo.toml +++ b/arbitrator/jit/Cargo.toml @@ -21,6 +21,7 @@ hex = "0.4.3" structopt = "0.3.26" sha3 = "0.9.1" libc = "0.2.132" +sha2 = "0.9.9" [features] llvm = ["dep:wasmer-compiler-llvm"] diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs index ec36e4150..062d18d8e 100644 --- a/arbitrator/jit/src/wavmio.rs +++ b/arbitrator/jit/src/wavmio.rs @@ -8,6 +8,8 @@ use crate::{ }; use arbutil::{Color, PreimageType}; use caller_env::{GuestPtr, MemAccess}; +use sha2::Sha256; +use sha3::{Digest, Keccak256}; use std::{ io, io::{BufReader, BufWriter, ErrorKind}, @@ -168,6 +170,21 @@ pub fn resolve_preimage_impl( error!("Missing requested preimage for hash {hash_hex} in {name}") }; + // Check if preimage rehashes to the provided hash. Exclude blob preimages + let calculated_hash: [u8; 32] = match preimage_type { + PreimageType::Keccak256 => Keccak256::digest(preimage).into(), + PreimageType::Sha2_256 => Sha256::digest(preimage).into(), + PreimageType::EthVersionedHash => *hash, + }; + if calculated_hash != *hash { + error!( + "Calculated hash {} of preimage {} does not match provided hash {}", + hex::encode(calculated_hash), + hex::encode(preimage), + hex::encode(*hash) + ); + } + if offset % 32 != 0 { error!("bad offset {offset} in {name}") }; diff --git a/arbitrator/prover/Cargo.toml b/arbitrator/prover/Cargo.toml index bc4e0f1c5..1fabc4d56 100644 --- a/arbitrator/prover/Cargo.toml +++ b/arbitrator/prover/Cargo.toml @@ -38,6 +38,7 @@ num-derive = "0.4.1" num-traits = "0.2.17" c-kzg = { version = "0.4.0", optional = true } # TODO: look into switching to rust-kzg (no crates.io release or hosted rustdoc yet) sha2 = "0.9.9" +lru = "0.12.3" [lib] name = "prover" diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index b5a92baaa..d2a8a4349 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -25,6 +25,7 @@ pub use machine::Machine; use arbutil::{Bytes32, PreimageType}; use eyre::{Report, Result}; +use lru::LruCache; use machine::{ argument_data_to_inbox, get_empty_preimage_resolver, GlobalState, MachineStatus, PreimageResolver, @@ -32,16 +33,21 @@ use machine::{ use static_assertions::const_assert_eq; use std::{ ffi::CStr, + num::NonZeroUsize, os::raw::{c_char, c_int}, path::Path, ptr, slice, sync::{ atomic::{self, AtomicU8}, - Arc, + Arc, Mutex, }, }; use utils::CBytes; +lazy_static::lazy_static! { + static ref BLOBHASH_PREIMAGE_CACHE: Mutex<LruCache<Bytes32, CBytes>> = Mutex::new(LruCache::new(NonZeroUsize::new(12).unwrap())); +} + #[repr(C)] #[derive(Clone, Copy)] pub struct CByteArray { @@ -326,32 +332,50 @@ pub struct ResolvedPreimage { pub len: isize, // negative if not found } +macro_rules!
handle_preimage_resolution { + ($context:expr, $ty:expr, $hash:expr, $resolver:expr) => {{ + let res = $resolver($context, $ty.into(), $hash.as_ptr()); + if res.len < 0 { + return None; + } + let data = CBytes::from_raw_parts(res.ptr, res.len as usize); + // Check if preimage rehashes to the provided hash + match crate::utils::hash_preimage(&data, $ty) { + Ok(have_hash) if have_hash.as_slice() == *$hash => {} + Ok(got_hash) => panic!( + "Resolved incorrect data for hash {} (rehashed to {})", + $hash, + Bytes32(got_hash), + ), + Err(err) => panic!( + "Failed to hash preimage from resolver (expecting hash {}): {}", + $hash, err, + ), + } + Some(data) + }}; +} + #[no_mangle] +#[cfg(feature = "native")] pub unsafe extern "C" fn arbitrator_set_preimage_resolver( mach: *mut Machine, resolver: unsafe extern "C" fn(u64, u8, *const u8) -> ResolvedPreimage, ) { (*mach).set_preimage_resolver(Arc::new( move |context: u64, ty: PreimageType, hash: Bytes32| -> Option<CBytes> { - let res = resolver(context, ty.into(), hash.as_ptr()); - if res.len < 0 { + if let PreimageType::EthVersionedHash = ty { + let mut cache = BLOBHASH_PREIMAGE_CACHE.lock().unwrap(); + if cache.contains(&hash) { + return cache.get(&hash).cloned(); + } + if let Some(data) = handle_preimage_resolution!(context, ty, hash, resolver) { + cache.put(hash, data.clone()); + return Some(data); + } return None; } - let data = CBytes::from_raw_parts(res.ptr, res.len as usize); - #[cfg(debug_assertions)] - match crate::utils::hash_preimage(&data, ty) { - Ok(have_hash) if have_hash.as_slice() == *hash => {} - Ok(got_hash) => panic!( - "Resolved incorrect data for hash {} (rehashed to {})", - hash, - Bytes32(got_hash), - ), - Err(err) => panic!( - "Failed to hash preimage from resolver (expecting hash {}): {}", - hash, err, - ), - } - Some(data) + handle_preimage_resolution!(context, ty, hash, resolver) }, ) as PreimageResolver); } diff --git a/arbitrator/wasm-libraries/Cargo.lock b/arbitrator/wasm-libraries/Cargo.lock index 3692d9d7c..58b63b59a 100644 --- a/arbitrator/wasm-libraries/Cargo.lock +++ b/arbitrator/wasm-libraries/Cargo.lock @@ -13,6 +13,24 @@ dependencies = [ "version_check", ] +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "ansi_term" version = "0.12.1" @@ -447,7 +465,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.8", ] [[package]] @@ -455,6 +473,10 @@ name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash 0.8.11", + "allocator-api2", +] [[package]] name = "heck" @@ -582,6 +604,15 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "memchr"
version = "2.7.1" @@ -842,6 +873,7 @@ dependencies = [ "itertools", "lazy_static", "libc", + "lru", "nom", "nom-leb128", "num", @@ -1544,3 +1576,23 @@ checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] + +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.52", +] diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index e09775ea4..32b617510 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -18,6 +18,7 @@ import ( "github.com/andybalholm/brotli" "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -52,8 +53,18 @@ import ( ) var ( - batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) - batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/eth", nil) + batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/eth", nil) + baseFeeGauge = metrics.NewRegisteredGauge("arb/batchposter/basefee", nil) + blobFeeGauge = metrics.NewRegisteredGauge("arb/batchposter/blobfee", nil) + l1GasPriceGauge = metrics.NewRegisteredGauge("arb/batchposter/l1gasprice", nil) + l1GasPriceEstimateGauge = metrics.NewRegisteredGauge("arb/batchposter/l1gasprice/estimate", nil) + latestBatchSurplusGauge = metrics.NewRegisteredGauge("arb/batchposter/latestbatchsurplus", nil) + blockGasUsedGauge = metrics.NewRegisteredGauge("arb/batchposter/blockgas/used", nil) + blockGasLimitGauge = metrics.NewRegisteredGauge("arb/batchposter/blockgas/limit", nil) + blobGasUsedGauge = metrics.NewRegisteredGauge("arb/batchposter/blobgas/used", nil) + blobGasLimitGauge = metrics.NewRegisteredGauge("arb/batchposter/blobgas/limit", nil) + suggestedTipCapGauge = metrics.NewRegisteredGauge("arb/batchposter/suggestedtipcap", nil) usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) @@ -91,6 +102,7 @@ type BatchPoster struct { dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] + non4844BatchCount int // Count of consecutive non-4844 batches posted // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. @@ -129,20 +141,21 @@ type BatchPosterConfig struct { // Batch post polling interval. PollInterval time.Duration `koanf:"poll-interval" reload:"hot"` // Batch posting error delay. 
- ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` - CompressionLevel int `koanf:"compression-level" reload:"hot"` - DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` - GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` - DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` - RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` - ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` - Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` - IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"` - ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` - L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` - L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` - UseAccessLists bool `koanf:"use-access-lists" reload:"hot"` + ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` + CompressionLevel int `koanf:"compression-level" reload:"hot"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` + GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` + IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` + L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` + UseAccessLists bool `koanf:"use-access-lists" reload:"hot"` + GasEstimateBaseFeeMultipleBips arbmath.Bips `koanf:"gas-estimate-base-fee-multiple-bips"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -193,6 +206,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") f.Bool(prefix+".use-access-lists", DefaultBatchPosterConfig.UseAccessLists, "post batches with access lists to reduce gas usage (disabled for L3s)") + f.Uint64(prefix+".gas-estimate-base-fee-multiple-bips", uint64(DefaultBatchPosterConfig.GasEstimateBaseFeeMultipleBips), "for gas estimation, use this multiple of the basefee (measured in basis points) as the max fee per gas") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) @@ -204,23 +218,24 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxSize: 100000, // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? 
- Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, - PollInterval: time.Second * 10, - ErrorDelay: time.Second * 10, - MaxDelay: time.Hour, - WaitForMaxDelay: false, - CompressionLevel: brotli.BestCompression, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 50_000, - Post4844Blobs: false, - IgnoreBlobPrice: false, - DataPoster: dataposter.DefaultDataPosterConfig, - ParentChainWallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, - UseAccessLists: true, - RedisLock: redislock.DefaultCfg, + Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 1000, + PollInterval: time.Second * 10, + ErrorDelay: time.Second * 10, + MaxDelay: time.Hour, + WaitForMaxDelay: false, + CompressionLevel: brotli.BestCompression, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 50_000, + Post4844Blobs: false, + IgnoreBlobPrice: false, + DataPoster: dataposter.DefaultDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, + UseAccessLists: true, + RedisLock: redislock.DefaultCfg, + GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2, } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ @@ -232,24 +247,25 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ } var TestBatchPosterConfig = BatchPosterConfig{ - Enable: true, - MaxSize: 100000, - Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, - PollInterval: time.Millisecond * 10, - ErrorDelay: time.Millisecond * 10, - MaxDelay: 0, - WaitForMaxDelay: false, - CompressionLevel: 2, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 10_000, - Post4844Blobs: true, - IgnoreBlobPrice: false, - DataPoster: dataposter.TestDataPosterConfig, - ParentChainWallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, - UseAccessLists: true, + Enable: true, + MaxSize: 100000, + Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, + PollInterval: time.Millisecond * 10, + ErrorDelay: time.Millisecond * 10, + MaxDelay: 0, + WaitForMaxDelay: false, + CompressionLevel: 2, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 10_000, + Post4844Blobs: true, + IgnoreBlobPrice: false, + DataPoster: dataposter.TestDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, + UseAccessLists: true, + GasEstimateBaseFeeMultipleBips: arbmath.OneInBips * 3 / 2, } type BatchPosterOpts struct { @@ -426,6 +442,35 @@ func AccessList(opts *AccessListOpts) types.AccessList { return l } +type txInfo struct { + Hash common.Hash `json:"hash"` + Nonce hexutil.Uint64 `json:"nonce"` + From common.Address `json:"from"` + To *common.Address `json:"to"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` + GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` + Input hexutil.Bytes `json:"input"` + Value *hexutil.Big `json:"value"` + Accesses *types.AccessList `json:"accessList,omitempty"` +} + +// getTxsInfoByBlock fetches all the transactions in the block with the given number via JSON-RPC +// and returns an array of txInfo containing the fields needed to check for batch reverts +func (b
*BatchPoster) getTxsInfoByBlock(ctx context.Context, number int64) ([]txInfo, error) { + blockNrStr := rpc.BlockNumber(number).String() + rawRpcClient := b.l1Reader.Client().Client() + var blk struct { + Transactions []txInfo `json:"transactions"` + } + err := rawRpcClient.CallContext(ctx, &blk, "eth_getBlockByNumber", blockNrStr, true) + if err != nil { + return nil, fmt.Errorf("error fetching block %d: %w", number, err) + } + return blk.Transactions, nil +} + // checkReverts checks whether blocks with numbers in the range [from, to] // contain a reverted batch_poster transaction. // It returns true if batch posting needs to halt, which is the case when a batch transaction has reverted @@ -435,20 +480,15 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) return false, fmt.Errorf("wrong range, from: %d > to: %d", b.nextRevertCheckBlock, to) } for ; b.nextRevertCheckBlock <= to; b.nextRevertCheckBlock++ { - number := big.NewInt(b.nextRevertCheckBlock) - block, err := b.l1Reader.Client().BlockByNumber(ctx, number) + txs, err := b.getTxsInfoByBlock(ctx, b.nextRevertCheckBlock) if err != nil { - return false, fmt.Errorf("getting block: %v by number: %w", number, err) + return false, fmt.Errorf("error getting transactions data of block %d: %w", b.nextRevertCheckBlock, err) } - for idx, tx := range block.Transactions() { - from, err := b.l1Reader.Client().TransactionSender(ctx, tx, block.Hash(), uint(idx)) - if err != nil { - return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err) - } - if from == b.dataPoster.Sender() { - r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash()) + for _, tx := range txs { + if tx.From == b.dataPoster.Sender() { + r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash) if err != nil { - return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err) + return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash, err) } if r.Status == types.ReceiptStatusFailed { shouldHalt := !b.config().DataPoster.UseNoOpStorage @@ -456,8 +496,22 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) if shouldHalt { logLevel = log.Error } - txErr := arbutil.DetailTxError(ctx, b.l1Reader.Client(), tx, r) - logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) + al := types.AccessList{} + if tx.Accesses != nil { + al = *tx.Accesses + } + txErr := arbutil.DetailTxErrorUsingCallMsg(ctx, b.l1Reader.Client(), tx.Hash, r, ethereum.CallMsg{ + From: tx.From, + To: tx.To, + Gas: uint64(tx.Gas), + GasPrice: tx.GasPrice.ToInt(), + GasFeeCap: tx.GasFeeCap.ToInt(), + GasTipCap: tx.GasTipCap.ToInt(), + Value: tx.Value.ToInt(), + Data: tx.Input, + AccessList: al, + }) + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce, "txHash", tx.Hash, "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) return shouldHalt, nil } } @@ -466,6 +520,49 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) return false, nil } +func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { + headerCh, unsubscribe := b.l1Reader.Subscribe(false) + defer unsubscribe() + + blobGasLimitGauge.Update(params.MaxBlobGasPerBlock) + for { + select { + case h, ok := <-headerCh: + if !ok { + log.Info("L1 headers channel for L1 price data has been closed") + return + } + baseFeeGauge.Update(h.BaseFee.Int64()) +
l1GasPrice := h.BaseFee.Uint64() + if h.BlobGasUsed != nil { + if h.ExcessBlobGas != nil { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*h.ExcessBlobGas, *h.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + blobFeeGauge.Update(blobFeePerByte.Int64()) + if l1GasPrice > blobFeePerByte.Uint64()/16 { + l1GasPrice = blobFeePerByte.Uint64() / 16 + } + } + blobGasUsedGauge.Update(int64(*h.BlobGasUsed)) + } + blockGasUsedGauge.Update(int64(h.GasUsed)) + blockGasLimitGauge.Update(int64(h.GasLimit)) + suggestedTipCap, err := b.l1Reader.Client().SuggestGasTipCap(ctx) + if err != nil { + log.Error("unable to fetch suggestedTipCap from l1 client to update arb/batchposter/suggestedtipcap metric", "err", err) + } else { + suggestedTipCapGauge.Update(suggestedTipCap.Int64()) + } + l1GasPriceEstimate := b.streamer.CurrentEstimateOfL1GasPrice() + l1GasPriceGauge.Update(int64(l1GasPrice)) + l1GasPriceEstimateGauge.Update(int64(l1GasPriceEstimate)) + case <-ctx.Done(): + return + } + } +} + // pollForReverts runs a goroutine that listens to L1 block headers and checks // whether any transaction made by the batch poster was reverted. func (b *BatchPoster) pollForReverts(ctx context.Context) { @@ -845,11 +942,12 @@ func (b *BatchPoster) encodeAddBatch( var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") type estimateGasParams struct { - From common.Address `json:"from"` - To *common.Address `json:"to"` - Data hexutil.Bytes `json:"data"` - AccessList types.AccessList `json:"accessList"` - BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + From common.Address `json:"from"` + To *common.Address `json:"to"` + Data hexutil.Bytes `json:"data"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + AccessList types.AccessList `json:"accessList"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { @@ -860,16 +958,22 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() + rpcClient := b.l1Reader.Client() + rawRpcClient := rpcClient.Client() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 if !useNormalEstimation { // Check if we can use normal estimation anyways because we're at the latest nonce - latestNonce, err := b.l1Reader.Client().NonceAt(ctx, b.dataPoster.Sender(), nil) + latestNonce, err := rpcClient.NonceAt(ctx, b.dataPoster.Sender(), nil) if err != nil { return 0, err } useNormalEstimation = latestNonce == realNonce } - rawRpcClient := b.l1Reader.Client().Client() + latestHeader, err := rpcClient.HeaderByNumber(ctx, nil) + if err != nil { + return 0, err + } + maxFeePerGas := arbmath.BigMulByBips(latestHeader.BaseFee, config.GasEstimateBaseFeeMultipleBips) if useNormalEstimation { _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) if err != nil { @@ -877,11 +981,12 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, } // If we're at the latest nonce, we can skip the special future tx estimate stuff gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: realData, -
BlobHashes: realBlobHashes, - AccessList: realAccessList, + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: realData, + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + BlobHashes: realBlobHashes, + AccessList: realAccessList, }) if err != nil { return 0, fmt.Errorf("%w: %w", ErrNormalGasEstimationFailed, err) } @@ -902,10 +1007,11 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, return 0, fmt.Errorf("failed to compute blob commitments: %w", err) } gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: data, - BlobHashes: blobHashes, + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: data, + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + BlobHashes: blobHashes, // This isn't perfect because we're probably estimating the batch at a different sequence number, // but it should overestimate rather than underestimate which is fine. AccessList: realAccessList, }) @@ -958,7 +1064,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } var use4844 bool config := b.config() - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.Post4844Blobs && b.daWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) if err != nil { return false, err } @@ -967,12 +1073,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if config.IgnoreBlobPrice { use4844 = true } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + backlog := atomic.LoadUint64(&b.backlog) + // Logic to prevent switching from non-4844 batches to 4844 batches too often, + // so that blocks can be filled efficiently. The geth txpool rejects txs for + // accounts that already have the other type of txs in the pool with + // "address already reserved". This logic makes sure that, if there is a backlog, + // enough non-4844 batches have been posted to fill a block before switching.
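(Aside, before the hunk continues below: the per-byte price comparison this logic guards can be sketched standalone. A minimal illustration with go-ethereum's constants inlined — a blob carries 131072 blob gas and packs len(kzg4844.Blob{}) * 31 / 32 = 126976 usable bytes, while calldata costs 16 gas per byte; the helper name is hypothetical.)

```go
package main

import (
	"fmt"
	"math/big"
)

// shouldUse4844 mirrors the comparison in the hunk below: convert the current
// blob fee into an effective price per usable byte and compare it against the
// calldata price of 16 gas per byte at the current basefee.
func shouldUse4844(baseFee, blobFee *big.Int) bool {
	blobTxBlobGasPerBlob := big.NewInt(131072)        // params.BlobTxBlobGasPerBlob
	usableBytesInBlob := big.NewInt(131072 * 31 / 32) // 4096 field elements, 31 usable bytes each
	blobFeePerByte := new(big.Int).Mul(blobFee, blobTxBlobGasPerBlob)
	blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob)
	calldataFeePerByte := new(big.Int).Mul(baseFee, big.NewInt(16))
	return blobFeePerByte.Cmp(calldataFeePerByte) < 0
}

func main() {
	// At a 10 gwei basefee, blobs win unless the blob fee exceeds ~155 gwei.
	fmt.Println(shouldUse4844(big.NewInt(10_000_000_000), big.NewInt(1)))
}
```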
+ if backlog == 0 || + b.non4844BatchCount == 0 || + b.non4844BatchCount > 16 { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } } } } @@ -1208,9 +1324,23 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "totalSegments", len(b.building.segments.rawSegments), "numBlobs", len(kzgBlobs), ) + + surplus := arbmath.SaturatingMul( + arbmath.SaturatingSub( + l1GasPriceGauge.Snapshot().Value(), + l1GasPriceEstimateGauge.Snapshot().Value()), + int64(len(sequencerMsg)*16), + ) + latestBatchSurplusGauge.Update(surplus) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) + if b.building.use4844 { + b.non4844BatchCount = 0 + } else { + b.non4844BatchCount++ + } unpostedMessages := msgCount - b.building.msgCount messagesPerBatch := b.messagesPerBatch.Average() if messagesPerBatch == 0 { @@ -1272,6 +1402,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { b.redisLock.Start(ctxIn) b.StopWaiter.Start(ctxIn, b) b.LaunchThread(b.pollForReverts) + b.LaunchThread(b.pollForL1PriceData) commonEphemeralErrorHandler := util.NewEphemeralErrorHandler(time.Minute, "", 0) exceedMaxMempoolSizeEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, dataposter.ErrExceedsMaxMempoolSize.Error(), time.Minute) storageRaceEphemeralErrorHandler := util.NewEphemeralErrorHandler(5*time.Minute, storage.ErrStorageRace.Error(), time.Minute) @@ -1350,3 +1481,56 @@ func (b *BatchPoster) StopAndWait() { b.dataPoster.StopAndWait() b.redisLock.StopAndWait() } + +type BoolRing struct { + buffer []bool + bufferPosition int +} + +func NewBoolRing(size int) *BoolRing { + return &BoolRing{ + buffer: make([]bool, 0, size), + } +} + +func (b *BoolRing) Update(value bool) { + period := cap(b.buffer) + if period == 0 { + return + } + if len(b.buffer) < period { + b.buffer = append(b.buffer, value) + } else { + b.buffer[b.bufferPosition] = value + } + b.bufferPosition = (b.bufferPosition + 1) % period +} + +func (b *BoolRing) Empty() bool { + return len(b.buffer) == 0 +} + +// Peek returns the most recently inserted value. +// Assumes not empty, check Empty() first +func (b *BoolRing) Peek() bool { + lastPosition := b.bufferPosition - 1 + if lastPosition < 0 { + // This is the case where we have wrapped around, since Peek() shouldn't + // be called without checking Empty(), so we can just use capacity. + lastPosition = cap(b.buffer) - 1 + } + return b.buffer[lastPosition] +} + +// All returns true if the BoolRing is full and all values equal value.
+func (b *BoolRing) All(value bool) bool { + if len(b.buffer) < cap(b.buffer) { + return false + } + for _, v := range b.buffer { + if v != value { + return false + } + } + return true +} diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 1415f7814..416ebf725 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -31,10 +31,10 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -42,6 +42,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/spf13/pflag" @@ -58,17 +59,18 @@ import ( // DataPoster must be RLP serializable and deserializable type DataPoster struct { stopwaiter.StopWaiter - headerReader *headerreader.HeaderReader - client arbutil.L1Interface - auth *bind.TransactOpts - signer signerFn - config ConfigFetcher - usingNoOpStorage bool - replacementTimes []time.Duration - metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) - extraBacklog func() uint64 - parentChainID *big.Int - parentChainID256 *uint256.Int + headerReader *headerreader.HeaderReader + client arbutil.L1Interface + auth *bind.TransactOpts + signer signerFn + config ConfigFetcher + usingNoOpStorage bool + replacementTimes []time.Duration + blobTxReplacementTimes []time.Duration + metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) + extraBacklog func() uint64 + parentChainID *big.Int + parentChainID256 *uint256.Int // These fields are protected by the mutex. 
// TODO: factor out these fields into separate structure, since now one @@ -129,6 +131,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro if err != nil { return nil, err } + blobTxReplacementTimes, err := parseReplacementTimes(cfg.BlobTxReplacementTimes) + if err != nil { + return nil, err + } useNoOpStorage := cfg.UseNoOpStorage if opts.HeaderReader.IsParentChainArbitrum() && !cfg.UseNoOpStorage { useNoOpStorage = true @@ -172,15 +178,16 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro signer: func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return opts.Auth.Signer(addr, tx) }, - config: opts.Config, - usingNoOpStorage: useNoOpStorage, - replacementTimes: replacementTimes, - metadataRetriever: opts.MetadataRetriever, - queue: queue, - errorCount: make(map[uint64]int), - maxFeeCapExpression: expression, - extraBacklog: opts.ExtraBacklog, - parentChainID: opts.ParentChainID, + config: opts.Config, + usingNoOpStorage: useNoOpStorage, + replacementTimes: replacementTimes, + blobTxReplacementTimes: blobTxReplacementTimes, + metadataRetriever: opts.MetadataRetriever, + queue: queue, + errorCount: make(map[uint64]int), + maxFeeCapExpression: expression, + extraBacklog: opts.ExtraBacklog, + parentChainID: opts.ParentChainID, } var overflow bool dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID) @@ -244,35 +251,6 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error ) } -// txToSendTxArgs converts transaction to SendTxArgs. This is needed for -// external signer to specify From field. -func txToSendTxArgs(addr common.Address, tx *types.Transaction) (*apitypes.SendTxArgs, error) { - var to *common.MixedcaseAddress - if tx.To() != nil { - to = new(common.MixedcaseAddress) - *to = common.NewMixedcaseAddress(*tx.To()) - } - data := (hexutil.Bytes)(tx.Data()) - val := (*hexutil.Big)(tx.Value()) - if val == nil { - val = (*hexutil.Big)(big.NewInt(0)) - } - al := tx.AccessList() - return &apitypes.SendTxArgs{ - From: common.NewMixedcaseAddress(addr), - To: to, - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: (*hexutil.Big)(tx.GasPrice()), - MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), - MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), - Value: *val, - Nonce: hexutil.Uint64(tx.Nonce()), - Data: &data, - AccessList: &al, - ChainID: (*hexutil.Big)(tx.ChainId()), - }, nil -} - // externalSigner returns signer function and ethereum address of the signer. // Returns an error if address isn't specified or if it can't connect to the // signer RPC server. @@ -291,7 +269,7 @@ func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, com // RLP encoded transaction object. 
// https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction var data hexutil.Bytes - args, err := txToSendTxArgs(addr, tx) + args, err := externalsigner.TxToSignTxArgs(addr, tx) if err != nil { return nil, fmt.Errorf("error converting transaction to sendTxArgs: %w", err) } @@ -322,14 +300,15 @@ func (p *DataPoster) MaxMempoolTransactions() uint64 { if p.usingNoOpStorage { return 1 } - return p.config().MaxMempoolTransactions + config := p.config() + return arbmath.MinInt(config.MaxMempoolTransactions, config.MaxMempoolWeight) } var ErrExceedsMaxMempoolSize = errors.New("posting this transaction will exceed max mempool size") // Does basic check whether posting transaction with specified nonce would // result in exceeding maximum queue length or maximum transactions in mempool. -func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) error { +func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thisWeight uint64) error { cfg := p.config() // If the queue has reached configured max size, don't post a transaction. if cfg.MaxQueuedTransactions > 0 { @@ -352,6 +331,43 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) } } + // Check that posting a new transaction won't exceed maximum pending + // weight in mempool. + if cfg.MaxMempoolWeight > 0 { + unconfirmedNonce, err := p.client.NonceAt(ctx, p.Sender(), nil) + if err != nil { + return fmt.Errorf("getting nonce of a dataposter sender: %w", err) + } + if unconfirmedNonce > nextNonce { + return fmt.Errorf("latest on-chain nonce %v is greater than the next nonce %v", unconfirmedNonce, nextNonce) + } + + var confirmedWeight uint64 + if unconfirmedNonce > 0 { + confirmedMeta, err := p.queue.Get(ctx, unconfirmedNonce-1) + if err != nil { + return err + } + if confirmedMeta != nil { + confirmedWeight = confirmedMeta.CumulativeWeight() + } + } + previousTxMeta, err := p.queue.FetchLast(ctx) + if err != nil { + return err + } + var previousTxCumulativeWeight uint64 + if previousTxMeta != nil { + previousTxCumulativeWeight = previousTxMeta.CumulativeWeight() + } + previousTxCumulativeWeight = arbmath.MaxInt(previousTxCumulativeWeight, confirmedWeight) + newCumulativeWeight := previousTxCumulativeWeight + thisWeight + + weightDiff := arbmath.MinInt(newCumulativeWeight-confirmedWeight, (nextNonce-unconfirmedNonce)*params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + if weightDiff > cfg.MaxMempoolWeight { + return fmt.Errorf("%w: transaction nonce: %d, transaction cumulative weight: %d, unconfirmed nonce: %d, confirmed weight: %d, new mempool weight: %d, max mempool weight: %d", ErrExceedsMaxMempoolSize, nextNonce, newCumulativeWeight, unconfirmedNonce, confirmedWeight, weightDiff, cfg.MaxMempoolWeight) + } + } return nil } @@ -360,41 +376,41 @@ func (p *DataPoster) waitForL1Finality() bool { } // Requires the caller hold the mutex. -// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, the cumulative weight, and an error, if any. // Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue.
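(Aside, before the diff resumes with getNextNonceAndMaybeMeta: the weight arithmetic in canPostWithNonce above can be restated compactly. A minimal sketch under stated assumptions — the function name is hypothetical, and the constants are go-ethereum's params values, where MaxBlobGasPerBlock / BlobTxBlobGasPerBlob = 6.)

```go
package main

import "fmt"

// exceedsMempoolWeight restates the check above: pending weight is the new
// cumulative weight minus the weight already confirmed on chain, additionally
// capped at 6 per outstanding nonce, since one nonce can hold at most a full
// block's worth of blobs.
func exceedsMempoolWeight(confirmedWeight, newCumulativeWeight, unconfirmedNonce, nextNonce, maxMempoolWeight uint64) bool {
	const maxBlobsPerBlock = 786432 / 131072 // params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob = 6
	weightDiff := newCumulativeWeight - confirmedWeight
	if nonceCap := (nextNonce - unconfirmedNonce) * maxBlobsPerBlock; nonceCap < weightDiff {
		weightDiff = nonceCap
	}
	return weightDiff > maxMempoolWeight
}

func main() {
	// Ten pending 6-blob transactions against a weight cap of 40: rejected.
	fmt.Println(exceedsMempoolWeight(0, 60, 0, 10, 40))
}
```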
-func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { +func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context, thisWeight uint64) (uint64, []byte, bool, uint64, error) { // Ensure latest finalized block state is available. blockNum, err := p.client.BlockNumber(ctx) if err != nil { - return 0, nil, false, err + return 0, nil, false, 0, err } lastQueueItem, err := p.queue.FetchLast(ctx) if err != nil { - return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) + return 0, nil, false, 0, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { nextNonce := lastQueueItem.FullTx.Nonce() + 1 - if err := p.canPostWithNonce(ctx, nextNonce); err != nil { - return 0, nil, false, err + if err := p.canPostWithNonce(ctx, nextNonce, thisWeight); err != nil { + return 0, nil, false, 0, err } - return nextNonce, lastQueueItem.Meta, true, nil + return nextNonce, lastQueueItem.Meta, true, lastQueueItem.CumulativeWeight(), nil } if err := p.updateNonce(ctx); err != nil { if !p.queue.IsPersistent() && p.waitForL1Finality() { - return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) + return 0, nil, false, 0, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) } // Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue. nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1)) log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err) nonce, err := p.client.NonceAt(ctx, p.Sender(), nonceQueryBlock) if err != nil { - return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) + return 0, nil, false, 0, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) } p.lastBlock = nonceQueryBlock p.nonce = nonce } - return p.nonce, nil, false, nil + return p.nonce, nil, false, p.nonce, nil } // GetNextNonceAndMeta retrieves or generates the next nonce, validates that a @@ -403,7 +419,7 @@ func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { p.mutex.Lock() defer p.mutex.Unlock() - nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + nonce, meta, hasMeta, _, err := p.getNextNonceAndMaybeMeta(ctx, 1) if err != nil { return 0, nil, err } @@ -413,7 +429,8 @@ func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, e return nonce, meta, err } -const minRbfIncrease = arbmath.OneInBips * 11 / 10 +const minNonBlobRbfIncrease = arbmath.OneInBips * 11 / 10 +const minBlobRbfIncrease = arbmath.OneInBips * 2 // evalMaxFeeCapExpr uses MaxFeeCapFormula from config to calculate the expression's result by plugging in appropriate parameter values // backlogOfBatches should already include extraBacklog @@ -452,20 +469,16 @@ var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in within this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce
uint64, gasLimit uint64, numBlobs uint64, lastTx *types.Transaction, dataCreatedAt time.Time, dataPosterBacklog uint64, latestHeader *types.Header) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() - latestHeader, err := p.headerReader.LastHeader(ctx) - if err != nil { - return nil, nil, nil, err - } + if latestHeader.BaseFee == nil { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } - newBlobFeeCap := big.NewInt(0) + currentBlobFee := big.NewInt(0) if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) } else if numBlobs > 0 { return nil, nil, nil, fmt.Errorf( "latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+ @@ -478,106 +491,177 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if err != nil { return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } - newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2) - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) - newTipCap, err := p.client.SuggestGasTipCap(ctx) + suggestedTip, err := p.client.SuggestGasTipCap(ctx) if err != nil { return nil, nil, nil, err } - newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) - newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei)) - - hugeTipIncrease := false - if lastTipCap != nil { - newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTipCap, minRbfIncrease)) - // hugeTipIncrease is true if the new tip cap is at least 10x the last tip cap - hugeTipIncrease = lastTipCap.Sign() == 0 || arbmath.BigDiv(newTipCap, lastTipCap).Cmp(big.NewInt(10)) >= 0 + minTipCapGwei, maxTipCapGwei, minRbfIncrease := config.MinTipCapGwei, config.MaxTipCapGwei, minNonBlobRbfIncrease + if numBlobs > 0 { + minTipCapGwei, maxTipCapGwei, minRbfIncrease = config.MinBlobTxTipCapGwei, config.MaxBlobTxTipCapGwei, minBlobRbfIncrease } + newTipCap := suggestedTip + newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(minTipCapGwei*params.GWei)) + newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(maxTipCapGwei*params.GWei)) - newFeeCap.Add(newFeeCap, newTipCap) - if lastFeeCap != nil && hugeTipIncrease { - log.Warn("data poster recommending huge tip increase", "lastTipCap", lastTipCap, "newTipCap", newTipCap) - // If we're trying to drastically increase the tip, make sure we increase the fee cap by minRbfIncrease. - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease)) - } - - // TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap my minRbfIncrease - // TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there) - + // Compute the max fee with normalized gas so that blob txs aren't priced differently. + // Later, split the total cost bid into blob and non-blob fee caps. 
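(Aside: the two comment lines above compress a fair amount of arithmetic, which the + lines below then carry out. A minimal sketch under stated assumptions — the helper name is hypothetical, and if blobs.BlobEncodableData is 254 * 4096 / 8 = 130048, as the earlier Max4844BatchSize change suggests, each blob adds about 2,080,768 normalized gas at 16 gas per byte.)

```go
package main

import (
	"fmt"
	"math/big"
)

// splitMaxCost divides a total wei budget between blob gas and regular gas
// in proportion to what each currently costs, so the resulting fee caps can
// both be met at current prices without over-bidding either side.
func splitMaxCost(targetMaxCost, currentBlobFee, currentNonBlobFee *big.Int, gasLimit, blobGasUsed uint64) (blobFeeCap, baseFeeCap *big.Int) {
	gas := new(big.Int).SetUint64(gasLimit)
	blobGas := new(big.Int).SetUint64(blobGasUsed)
	currentBlobCost := new(big.Int).Mul(currentBlobFee, blobGas)
	currentNonBlobCost := new(big.Int).Mul(currentNonBlobFee, gas)
	// blobFeeCap = targetMaxCost * currentBlobFee / (currentBlobCost + currentNonBlobCost)
	blobFeeCap = new(big.Int).Mul(targetMaxCost, currentBlobFee)
	blobFeeCap.Div(blobFeeCap, new(big.Int).Add(currentBlobCost, currentNonBlobCost))
	// Whatever is left of the budget prices the regular gas.
	remaining := new(big.Int).Sub(targetMaxCost, new(big.Int).Mul(blobFeeCap, blobGas))
	baseFeeCap = new(big.Int).Div(remaining, gas)
	return blobFeeCap, baseFeeCap
}

func main() {
	// A 6-blob batch with 1M gas, 20 gwei basefee+tip, 10 gwei blob fee,
	// and a 0.05 ETH total budget.
	blobCap, baseCap := splitMaxCost(big.NewInt(5e16), big.NewInt(10e9), big.NewInt(20e9), 1_000_000, 6*131072)
	fmt.Println(blobCap, baseCap) // roughly 1.8e10 and 3.6e10 wei
}
```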
elapsed := time.Since(dataCreatedAt) - maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) + maxNormalizedFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) if err != nil { return nil, nil, nil, err } - if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) { - log.Warn( - "reducing proposed fee cap to current maximum", - "proposedFeeCap", newFeeCap, - "maxFeeCap", maxFeeCap, - "elapsed", elapsed, - ) - newFeeCap = maxFeeCap - } + normalizedGas := gasLimit + numBlobs*blobs.BlobEncodableData*params.TxDataNonZeroGasEIP2028 + targetMaxCost := arbmath.BigMulByUint(maxNormalizedFeeCap, normalizedGas) - // TODO: also have an expression limiting the max blob fee cap + maxMempoolWeight := arbmath.MinInt(config.MaxMempoolWeight, config.MaxMempoolTransactions) latestBalance := p.balance balanceForTx := new(big.Int).Set(latestBalance) + weight := arbmath.MaxInt(1, numBlobs) + weightRemaining := weight + if config.AllocateMempoolBalance && !p.usingNoOpStorage { - // We split the transactions into three groups: - // - The first transaction gets 1/2 of the balance. - // - The first half of transactions get 1/3 of the balance split among them. - // - The remaining transactions get the remaining 1/6 of the balance split among them. + // We split the transaction weight into three groups: + // - The first weight point gets 1/2 of the balance. + // - The first half of the weight gets 1/3 of the balance split among them. + // - The remaining weight gets the remaining 1/6 of the balance split among them. // This helps ensure batch posting is reliable under a variety of fee conditions. // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. - balanceForTx.Div(balanceForTx, common.Big2) - if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + balancePerWeight := new(big.Int).Div(balanceForTx, common.Big2) + balanceForTx = big.NewInt(0) + if nonce == softConfNonce || maxMempoolWeight == 1 { + balanceForTx.Add(balanceForTx, balancePerWeight) + weightRemaining -= 1 + } + if weightRemaining > 0 { // Compared to dividing the balance equally among the remaining transactions, // the first half of transactions should get a 4/3 weight, // and the remaining half should get a 2/3 weight. // This makes sure the average weight is 1, and the first half of transactions // have twice the weight of the second half of transactions. // The +1 and -1 here are to account for the first transaction being handled separately. - if nonce > softConfNonce && nonce < softConfNonce+1+(config.MaxMempoolTransactions-1)/2 { - balanceForTx.Mul(balanceForTx, big4) + if nonce > softConfNonce && nonce < softConfNonce+1+(maxMempoolWeight-1)/2 { + balancePerWeight.Mul(balancePerWeight, big4) } else { - balanceForTx.Mul(balanceForTx, common.Big2) + balancePerWeight.Mul(balancePerWeight, common.Big2) } - balanceForTx.Div(balanceForTx, common.Big3) + balancePerWeight.Div(balancePerWeight, common.Big3) // After weighting, split the balance between each of the transactions // other than the first tx which already got half.
// balanceForTx /= config.MaxMempoolTransactions-1 - balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + balancePerWeight.Div(balancePerWeight, arbmath.UintToBig(maxMempoolWeight-1)) + balanceForTx.Add(balanceForTx, arbmath.BigMulByUint(balancePerWeight, weight)) } } - // TODO: take into account blob costs - balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) - if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { + + if arbmath.BigGreaterThan(targetMaxCost, balanceForTx) { log.Warn( "lack of L1 balance prevents posting transaction with desired fee cap", "balance", latestBalance, - "maxTransactions", config.MaxMempoolTransactions, + "weight", weight, + "maxMempoolWeight", maxMempoolWeight, "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, - "desiredFeeCap", newFeeCap, - "balanceFeeCap", balanceFeeCap, + "targetMaxCost", targetMaxCost, "nonce", nonce, "softConfNonce", softConfNonce, ) - newFeeCap = balanceFeeCap + targetMaxCost = balanceForTx + } + + if lastTx != nil { + // Replace by fee rules require that the tip cap is increased + newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTx.GasTipCap(), minRbfIncrease)) + } + + // Divide the targetMaxCost into blob and non-blob costs. + currentNonBlobFee := arbmath.BigAdd(latestHeader.BaseFee, newTipCap) + blobGasUsed := params.BlobTxBlobGasPerBlob * numBlobs + currentBlobCost := arbmath.BigMulByUint(currentBlobFee, blobGasUsed) + currentNonBlobCost := arbmath.BigMulByUint(currentNonBlobFee, gasLimit) + newBlobFeeCap := arbmath.BigMul(targetMaxCost, currentBlobFee) + newBlobFeeCap.Div(newBlobFeeCap, arbmath.BigAdd(currentBlobCost, currentNonBlobCost)) + if lastTx != nil && lastTx.BlobGasFeeCap() != nil { + newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) + } + targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) + targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) + newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) + if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + // Increase the non-blob fee cap to the minimum rbf increase + newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) + newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) + // Increasing the non-blob fee cap requires lowering the blob fee cap to compensate + baseFeeCostIncrease := arbmath.BigSub(newNonBlobCost, targetNonBlobCost) + newBlobCost := arbmath.BigSub(targetBlobCost, baseFeeCostIncrease) + newBlobFeeCap = arbmath.BigDivByUint(newBlobCost, blobGasUsed) + } + + if config.MaxFeeBidMultipleBips > 0 { + // Limit the fee caps to be no greater than max(MaxFeeBidMultipleBips, minRbf) + maxNonBlobFee := arbmath.BigMulByBips(currentNonBlobFee, config.MaxFeeBidMultipleBips) + if lastTx != nil { + maxNonBlobFee = arbmath.BigMax(maxNonBlobFee, arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease)) + } + maxBlobFee := arbmath.BigMulByBips(currentBlobFee, config.MaxFeeBidMultipleBips) + if lastTx != nil && lastTx.BlobGasFeeCap() != nil { + maxBlobFee = arbmath.BigMax(maxBlobFee, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) + } + newBaseFeeCap = arbmath.BigMin(newBaseFeeCap, maxNonBlobFee) + newBlobFeeCap = arbmath.BigMin(newBlobFeeCap, maxBlobFee) } - if arbmath.BigGreaterThan(newTipCap, newFeeCap) { - log.Warn( - "reducing new tip cap to new fee cap", + if arbmath.BigGreaterThan(newTipCap, newBaseFeeCap) { 
+ log.Info( + "reducing new tip cap to new basefee cap", "proposedTipCap", newTipCap, - "newFeeCap", newFeeCap, + "newBasefeeCap", newBaseFeeCap, ) - newTipCap = new(big.Int).Set(newFeeCap) + newTipCap = new(big.Int).Set(newBaseFeeCap) + } + + logFields := []any{ + "targetMaxCost", targetMaxCost, + "elapsed", elapsed, + "dataPosterBacklog", dataPosterBacklog, + "nonce", nonce, + "isReplacing", lastTx != nil, + "balanceForTx", balanceForTx, + "currentBaseFee", latestHeader.BaseFee, + "newBasefeeCap", newBaseFeeCap, + "suggestedTip", suggestedTip, + "newTipCap", newTipCap, + "currentBlobFee", currentBlobFee, + "newBlobFeeCap", newBlobFeeCap, + } + + log.Debug("calculated data poster fee and tip caps", logFields...) + + if newBaseFeeCap.Sign() < 0 || newTipCap.Sign() < 0 || newBlobFeeCap.Sign() < 0 { + msg := "can't meet data poster fee cap obligations with current target max cost" + log.Info(msg, logFields...) + if lastTx != nil { + // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil + } else { + return nil, nil, nil, errors.New(msg) + } + } + + if lastTx != nil && (arbmath.BigLessThan(newBaseFeeCap, currentNonBlobFee) || (numBlobs > 0 && arbmath.BigLessThan(newBlobFeeCap, currentBlobFee))) { + // Make sure our replace by fee can meet the current parent chain fee demands. + // Without this check, we'd blindly increase each fee component by the min rbf amount each time, + // without looking at which component(s) actually need to be increased. + // E.g. instead of 2x basefee and 2x blobfee, we might actually want to 4x basefee and 2x blobfee. + // This check lets us hold off on the rbf until we can actually meet the current fee requirements, + // which lets us move in a particular direction (biasing towards either basefee or blobfee). + log.Info("can't meet current parent chain fees with current target max cost", logFields...)
+ // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } - return newFeeCap, newTipCap, newBlobFeeCap, nil + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { @@ -588,7 +672,11 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim p.mutex.Lock() defer p.mutex.Unlock() - expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) + var weight uint64 = 1 + if len(kzgBlobs) > 0 { + weight = uint64(len(kzgBlobs)) + } + expectedNonce, _, _, lastCumulativeWeight, err := p.getNextNonceAndMaybeMeta(ctx, weight) if err != nil { return nil, err } @@ -601,14 +689,21 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0) + latestHeader, err := p.headerReader.LastHeader(ctx) + if err != nil { + return nil, err + } + + feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, uint64(len(kzgBlobs)), nil, dataCreatedAt, 0, latestHeader) if err != nil { return nil, err } var deprecatedData types.DynamicFeeTx var inner types.TxData + replacementTimes := p.replacementTimes if len(kzgBlobs) > 0 { + replacementTimes = p.blobTxReplacementTimes value256, overflow := uint256.FromBig(value) if overflow { return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value) @@ -662,13 +757,15 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } + cumulativeWeight := lastCumulativeWeight + weight queuedTx := storage.QueuedTransaction{ - DeprecatedData: deprecatedData, - FullTx: fullTx, - Meta: meta, - Sent: false, - Created: dataCreatedAt, - NextReplacement: time.Now().Add(p.replacementTimes[0]), + DeprecatedData: deprecatedData, + FullTx: fullTx, + Meta: meta, + Sent: false, + Created: dataCreatedAt, + NextReplacement: time.Now().Add(replacementTimes[0]), + StoredCumulativeWeight: &cumulativeWeight, } return fullTx, p.sendTx(ctx, nil, &queuedTx) } @@ -701,17 +798,44 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { + latestHeader, err := p.client.HeaderByNumber(ctx, nil) + if err != nil { + return err + } + var currentBlobFee *big.Int + if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + } + + if arbmath.BigLessThan(newTx.FullTx.GasFeeCap(), latestHeader.BaseFee) { + log.Info( + "submitting transaction with GasFeeCap less than latest basefee", + "txBasefeeCap", newTx.FullTx.GasFeeCap(), + "latestBasefee", latestHeader.BaseFee, + "elapsed", time.Since(newTx.Created), + ) + } + + if newTx.FullTx.BlobGasFeeCap() != nil && currentBlobFee != nil && arbmath.BigLessThan(newTx.FullTx.BlobGasFeeCap(), currentBlobFee) { + log.Info( + "submitting transaction with BlobGasFeeCap less than latest blobfee", + "txBlobGasFeeCap", newTx.FullTx.BlobGasFeeCap(), + "latestBlobFee", 
currentBlobFee, + "elapsed", time.Since(newTx.Created), + ) + } + if err := p.saveTx(ctx, prevTx, newTx); err != nil { return err } if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { - if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { - log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + if !rpcclient.IsAlreadyKnownError(err) && !strings.Contains(err.Error(), "nonce too low") { + log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) return err } log.Info("DataPoster transaction already known", "err", err, "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash()) } else { - log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) } newerTx := *newTx newerTx.Sent = true @@ -754,16 +878,25 @@ func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *b } // The mutex must be held by the caller. -func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) +func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogWeight uint64) error { + latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), uint64(len(prevTx.FullTx.BlobHashes())), prevTx.FullTx, prevTx.Created, backlogWeight, latestHeader) + if err != nil { + return err + } + + minRbfIncrease := minNonBlobRbfIncrease + if len(prevTx.FullTx.BlobHashes()) > 0 { + minRbfIncrease = minBlobRbfIncrease + } + newTx := *prevTx - // TODO: also look at the blob fee cap - if newFeeCap.Cmp(minNewFeeCap) < 0 { + if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || + (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), @@ -771,13 +904,20 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa "recommendedFeeCap", newFeeCap, "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, + "lastBlobFeeCap", prevTx.FullTx.BlobGasFeeCap(), + "recommendedBlobFeeCap", newBlobFeeCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) return p.sendTx(ctx, prevTx, &newTx) } + replacementTimes := p.replacementTimes + if len(prevTx.FullTx.BlobHashes()) > 0 { + replacementTimes = p.blobTxReplacementTimes + } + elapsed 
:= time.Since(prevTx.Created) - for _, replacement := range p.replacementTimes { + for _, replacement := range replacementTimes { if elapsed >= replacement { continue } @@ -877,7 +1017,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "blobFeeCap", tx.FullTx.BlobGasFeeCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 @@ -899,7 +1039,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Warn("failed to update tx poster nonce", "err", err) } now := time.Now() - nextCheck := now.Add(p.replacementTimes[0]) + nextCheck := now.Add(arbmath.MinInt(p.replacementTimes[0], p.blobTxReplacementTimes[0])) maxTxsToRbf := p.config().MaxMempoolTransactions if maxTxsToRbf == 0 { maxTxsToRbf = 512 @@ -917,12 +1057,23 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Error("Failed to fetch tx queue contents", "err", err) return minWait } - for index, tx := range queueContents { - backlogOfBatches := len(queueContents) - index - 1 + latestQueued, err := p.queue.FetchLast(ctx) + if err != nil { + log.Error("Failed to fetch latest queued tx", "err", err) + return minWait + } + var latestCumulativeWeight, latestNonce uint64 + if latestQueued != nil { + latestCumulativeWeight = latestQueued.CumulativeWeight() + latestNonce = latestQueued.FullTx.Nonce() + } + for _, tx := range queueContents { replacing := false if now.After(tx.NextReplacement) { replacing = true - err := p.replaceTx(ctx, tx, uint64(backlogOfBatches)) + nonceBacklog := arbmath.SaturatingUSub(latestNonce, tx.FullTx.Nonce()) + weightBacklog := arbmath.SaturatingUSub(latestCumulativeWeight, tx.CumulativeWeight()) + err := p.replaceTx(ctx, tx, arbmath.MaxInt(nonceBacklog, weightBacklog)) p.maybeLogError(err, tx, "failed to replace-by-fee transaction") } if nextCheck.After(tx.NextReplacement) { @@ -957,7 +1108,9 @@ func (p *DataPoster) Start(ctxIn context.Context) { type QueueStorage interface { // Returns at most maxResults items starting from specified index. FetchContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) - // Returns item with the biggest index. + // Returns the item at index, or nil if not found. + Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) + // Returns item with the biggest index, or nil if the queue is empty. FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) // Prunes items up to (excluding) specified index. Prune(ctx context.Context, until uint64) error @@ -970,18 +1123,22 @@ type QueueStorage interface { } type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` + RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` + ReplacementTimes string `koanf:"replacement-times"` + BlobTxReplacementTimes string `koanf:"blob-tx-replacement-times"` // This is forcibly disabled if the parent chain is an Arbitrum chain, // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly.
WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxMempoolWeight uint64 `koanf:"max-mempool-weight" reload:"hot"` MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"` + MaxFeeBidMultipleBips arbmath.Bips `koanf:"max-fee-bid-multiple-bips" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseDBStorage bool `koanf:"use-db-storage"` @@ -1025,14 +1182,18 @@ type ConfigFetcher func() *DataPosterConfig func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.String(prefix+".blob-tx-replacement-times", defaultDataPosterConfig.BlobTxReplacementTimes, "comma-separated list of durations since first posting a blob transaction to attempt a replace-by-fee") f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Uint64(prefix+".max-mempool-weight", defaultDataPosterConfig.MaxMempoolWeight, "the maximum total weight (weight = max(1, tx.blobs)) to have queued in the mempool at once (0 = unlimited)") f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".min-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MinBlobTxTipCapGwei, "the minimum tip cap to post EIP-4844 blob-carrying transactions at") f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Float64(prefix+".max-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MaxBlobTxTipCapGwei, "the maximum tip cap to post EIP-4844 blob-carrying transactions at") + f.Uint64(prefix+".max-fee-bid-multiple-bips", uint64(defaultDataPosterConfig.MaxFeeBidMultipleBips), "the maximum multiple of the current price to bid for a transaction's fees (may be exceeded due to min rbf increase, 0 = unlimited)") f.Uint64(prefix+".nonce-rbf-soft-confs",
defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will likely no longer need to be replaced by fee") f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") @@ -1064,12 +1225,17 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { var DefaultDataPosterConfig = DataPosterConfig{ ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", + BlobTxReplacementTimes: "5m,10m,30m,1h,4h,8h,16h,22h", WaitForL1Finality: true, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf requires a minimum 2x increase + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: true, @@ -1084,19 +1250,26 @@ var DefaultDataPosterConfig = DataPosterConfig{ var DefaultDataPosterConfigForValidator = func() DataPosterConfig { config := DefaultDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() var TestDataPosterConfig = DataPosterConfig{ ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", + BlobTxReplacementTimes: "1s,10s,30s,5m", RedisSigner: signature.TestSimpleHmacConfig, WaitForL1Finality: false, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: false, @@ -1110,6 +1283,8 @@ var TestDataPosterConfig = DataPosterConfig{ var TestDataPosterConfigForValidator = func() DataPosterConfig { config := TestDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 3d7fa60dc..a8e2e110a 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -9,11 +9,18 @@ import ( "time" "github.com/Knetic/govaluate" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/google/go-cmp/cmp" + "github.com/holiman/uint256" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestParseReplacementTimes(t *testing.T) { @@ -66,17 +73,42 @@ func signerTestCfg(addr common.Address) (*ExternalSignerCfg, error) { }, nil } +var ( + blobTx =
types.NewTx( + &types.BlobTx{ + ChainID: uint256.NewInt(1337), + Nonce: 13, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1), + Gas: 3, + To: common.Address{}, + Value: uint256.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + BlobHashes: []common.Hash{ + common.BigToHash(big.NewInt(1)), + common.BigToHash(big.NewInt(2)), + common.BigToHash(big.NewInt(3)), + }, + Sidecar: &types.BlobTxSidecar{}, + }, + ) + dynamicFeeTx = types.NewTx( + &types.DynamicFeeTx{ + Nonce: 13, + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(1), + Gas: 3, + To: nil, + Value: big.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + }, + ) +) + func TestExternalSigner(t *testing.T) { - ctx := context.Background() - httpSrv, srv := externalsignertest.NewServer(ctx, t) - t.Cleanup(func() { - if err := httpSrv.Shutdown(ctx); err != nil { - t.Fatalf("Error shutting down http server: %v", err) - } - }) + httpSrv, srv := externalsignertest.NewServer(t) cert, key := "./testdata/localhost.crt", "./testdata/localhost.key" go func() { - fmt.Println("Server is listening on port 1234...") if err := httpSrv.ListenAndServeTLS(cert, key); err != nil && err != http.ErrServerClosed { t.Errorf("ListenAndServeTLS() unexpected error: %v", err) return @@ -86,35 +118,48 @@ func TestExternalSigner(t *testing.T) { if err != nil { t.Fatalf("Error getting signer test config: %v", err) } + ctx := context.Background() signer, addr, err := externalSigner(ctx, signerCfg) if err != nil { t.Fatalf("Error getting external signer: %v", err) } - tx := types.NewTx( - &types.DynamicFeeTx{ - Nonce: 13, - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(1), - Gas: 3, - To: nil, - Value: big.NewInt(1), - Data: []byte{0x01, 0x02, 0x03}, + + for _, tc := range []struct { + desc string + tx *types.Transaction + }{ + { + desc: "blob transaction", + tx: blobTx, }, - ) - got, err := signer(ctx, addr, tx) - if err != nil { - t.Fatalf("Error signing transaction with external signer: %v", err) - } - args, err := txToSendTxArgs(addr, tx) - if err != nil { - t.Fatalf("Error converting transaction to sendTxArgs: %v", err) - } - want, err := srv.SignerFn(addr, args.ToTransaction()) - if err != nil { - t.Fatalf("Error signing transaction: %v", err) - } - if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { - t.Errorf("Signing transaction: unexpected diff: %v\n", diff) + { + desc: "dynamic fee transaction", + tx: dynamicFeeTx, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + { + got, err := signer(ctx, addr, tc.tx) + if err != nil { + t.Fatalf("Error signing transaction with external signer: %v", err) + } + args, err := externalsigner.TxToSignTxArgs(addr, tc.tx) + if err != nil { + t.Fatalf("Error converting transaction to SignTxArgs: %v", err) + } + want, err := srv.SignerFn(addr, args.ToTransaction()) + if err != nil { + t.Fatalf("Error signing transaction: %v", err) + } + if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { + t.Errorf("Signing transaction: unexpected diff: %v\n", diff) + } + hasher := types.LatestSignerForChainID(tc.tx.ChainId()) + if h, g := hasher.Hash(tc.tx), hasher.Hash(got); h != g { + t.Errorf("Signed transaction hash: %v differs from initial transaction hash: %v", g, h) + } + } + }) } } @@ -147,3 +192,293 @@ func TestMaxFeeCapFormulaCalculation(t *testing.T) { t.Fatalf("Unexpected result. 
Got: %d, want: >0", result) } } + +type stubL1Client struct { + senderNonce uint64 + suggestedGasTipCap *big.Int + + // The embedded SimulatedBackend provides most of the required methods that aren't used by feeAndTipCaps + backends.SimulatedBackend +} + +func (c *stubL1Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + return c.senderNonce, nil +} + +func (c *stubL1Client) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + return c.suggestedGasTipCap, nil +} + +// Not used, but defined to satisfy the client interface +func (c *stubL1Client) BlockNumber(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (c *stubL1Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) { + return []byte{}, nil +} + +func (c *stubL1Client) ChainID(ctx context.Context) (*big.Int, error) { + return nil, nil +} + +func (c *stubL1Client) Client() rpc.ClientInterface { + return nil +} + +func (c *stubL1Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) { + return common.Address{}, nil +} + +func TestFeeAndTipCaps_EnoughBalance_NoBacklog_NoUnconfirmed_BlobTx(t *testing.T) { + conf := func() *DataPosterConfig { + // Set only the fields that are used by feeAndTipCaps + // Start with defaults, maybe change for test. + return &DataPosterConfig{ + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, + MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, + MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 10, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + AllocateMempoolBalance: true, + + UrgencyGwei: 2., + ElapsedTimeBase: 10 * time.Minute, + ElapsedTimeImportance: 10, + TargetPriceGwei: 60., + } + } + expression, err := govaluate.NewEvaluableExpression(DefaultDataPosterConfig.MaxFeeCapFormula) + if err != nil { + t.Fatalf("error creating govaluate evaluable expression: %v", err) + } + + p := DataPoster{ + config: conf, + extraBacklog: func() uint64 { return 0 }, + balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), + usingNoOpStorage: false, + client: &stubL1Client{ + senderNonce: 1, + suggestedGasTipCap: big.NewInt(2 * params.GWei), + }, + auth: &bind.TransactOpts{ + From: common.Address{}, + }, + maxFeeCapExpression: expression, + } + + ctx := context.Background() + var nonce uint64 = 1 + var gasLimit uint64 = 300_000 // reasonable upper bound for mainnet blob batches + var numBlobs uint64 = 6 + var lastTx *types.Transaction // PostTransaction leaves this nil, used when replacing + dataCreatedAt := time.Now() + var dataPosterBacklog uint64 = 0 // Zero backlog for PostTransaction + var blobGasUsed uint64 = 0xc0000 // 6 blobs of gas + var excessBlobGas uint64 = 0 // typical current mainnet conditions + latestHeader := types.Header{ + Number: big.NewInt(1), + BaseFee: big.NewInt(1_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, dataCreatedAt, dataPosterBacklog, &latestHeader) + if err != nil { + t.Fatalf("%s", err) + } + + // There is no backlog and almost no time elapses between when the batch data was + // created and when it was posted, so the maxNormalizedFeeCap is ~60.01 gwei. + // This is multiplied by the normalizedGas to get targetMaxCost. 
+ // This is greatly in excess of currentTotalCost * MaxFeeBidMultipleBips, + // so targetMaxCost is reduced to the current base fee + suggested tip cap + + // current blob fee multiplied by MaxFeeBidMultipleBips (factor of 10). + // The blob and non-blob factors are then proportionally split out and so + // the newGasFeeCap is set to (current base fee + suggested tip cap) * 10 + // and newBlobFeeCap is set to current blob gas base fee (1 wei + // since there is no excess blob gas) * 10. + expectedGasFeeCap := big.NewInt(30 * params.GWei) + expectedBlobFeeCap := big.NewInt(10) + if !arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected: %d", expectedGasFeeCap, newGasFeeCap) + } + if !arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected: %d", expectedBlobFeeCap, newBlobFeeCap) + } + + // 2 gwei is the amount suggested by the L1 client, so that is the value + // returned because it doesn't exceed the configured bounds, there is no + // lastTx to scale against with rbf, and it is not bigger than the computed + // gasFeeCap. + expectedTipCap := big.NewInt(2 * params.GWei) + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. Was: %d, expected: %d", expectedTipCap, newTipCap) + } + + lastBlobTx := &types.BlobTx{} + err = updateTxDataGasCaps(lastBlobTx, newGasFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + t.Fatal(err) + } + lastTx = types.NewTx(lastBlobTx) + // Make creation time go backwards so elapsed time increases + retconnedCreationTime := dataCreatedAt.Add(-time.Minute) + // Base fee needs to have increased to simulate conditions where the prev tx was not included + latestHeader = types.Header{ + Number: big.NewInt(2), + BaseFee: big.NewInt(32_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err = p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, retconnedCreationTime, dataPosterBacklog, &latestHeader) + _, _, _, _ = newGasFeeCap, newTipCap, newBlobFeeCap, err + /* + // I think we expect an increase by *2 due to rbf rules for blob txs, + // currently appears to be broken since the increase exceeds the + // current cost (based on current basefees and tip) * config.MaxFeeBidMultipleBips + // since the previous attempt to send the tx was already using the current cost scaled by + // the multiple (a 10x multiple). + expectedGasFeeCap = expectedGasFeeCap.Mul(expectedGasFeeCap, big.NewInt(2)) + expectedBlobFeeCap = expectedBlobFeeCap.Mul(expectedBlobFeeCap, big.NewInt(2)) + expectedTipCap = expectedTipCap.Mul(expectedTipCap, big.NewInt(2)) + + t.Log("newGasFeeCap", newGasFeeCap, "newTipCap", newTipCap, "newBlobFeeCap", newBlobFeeCap, "err", err) + if !arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected: %d", expectedGasFeeCap, newGasFeeCap) + } + if !arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected: %d", expectedBlobFeeCap, newBlobFeeCap) + } + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. 
Was: %d, expected: %d", expectedTipCap, newTipCap) + } + */ + +} + +func TestFeeAndTipCaps_RBF_RisingBlobFee_FallingBaseFee(t *testing.T) { + conf := func() *DataPosterConfig { + // Set only the fields that are used by feeAndTipCaps + // Start with defaults, maybe change for test. + return &DataPosterConfig{ + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, + MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, + MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 10, + MaxFeeBidMultipleBips: arbmath.OneInBips * 10, + AllocateMempoolBalance: true, + + UrgencyGwei: 2., + ElapsedTimeBase: 10 * time.Minute, + ElapsedTimeImportance: 10, + TargetPriceGwei: 60., + } + } + expression, err := govaluate.NewEvaluableExpression(DefaultDataPosterConfig.MaxFeeCapFormula) + if err != nil { + t.Fatalf("error creating govaluate evaluable expression: %v", err) + } + + p := DataPoster{ + config: conf, + extraBacklog: func() uint64 { return 0 }, + balance: big.NewInt(0).Mul(big.NewInt(params.Ether), big.NewInt(10)), + usingNoOpStorage: false, + client: &stubL1Client{ + senderNonce: 1, + suggestedGasTipCap: big.NewInt(2 * params.GWei), + }, + auth: &bind.TransactOpts{ + From: common.Address{}, + }, + maxFeeCapExpression: expression, + } + + ctx := context.Background() + var nonce uint64 = 1 + var gasLimit uint64 = 300_000 // reasonable upper bound for mainnet blob batches + var numBlobs uint64 = 6 + var lastTx *types.Transaction // PostTransaction leaves this nil, used when replacing + dataCreatedAt := time.Now() + var dataPosterBacklog uint64 = 0 // Zero backlog for PostTransaction + var blobGasUsed uint64 = 0xc0000 // 6 blobs of gas + var excessBlobGas uint64 = 0 // typical current mainnet conditions + latestHeader := types.Header{ + Number: big.NewInt(1), + BaseFee: big.NewInt(1_000_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, dataCreatedAt, dataPosterBacklog, &latestHeader) + if err != nil { + t.Fatalf("%s", err) + } + + // There is no backlog and almost no time elapses between when the batch data was + // created and when it was posted, so the maxNormalizedFeeCap is ~60.01 gwei. + // This is multiplied by the normalizedGas to get targetMaxCost. + // This is greatly in excess of currentTotalCost * MaxFeeBidMultipleBips, + // so targetMaxCost is reduced to the current base fee + suggested tip cap + + // current blob fee multiplied by MaxFeeBidMultipleBips (factor of 10). + // The blob and non-blob factors are then proportionally split out and so + // the newGasFeeCap is set to (current base fee + suggested tip cap) * 10 + // and newBlobFeeCap is set to current blob gas base fee (1 wei + // since there is no excess blob gas) * 10. 
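Working the expected values in both tests out by hand (assuming, per EIP-4844's fee formula, that the blob base fee floors at 1 wei when there is no excess blob gas):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	baseFee := big.NewInt(1_000_000_000)      // latestHeader.BaseFee, 1 gwei
	suggestedTip := big.NewInt(2_000_000_000) // stub client's 2 gwei suggestion
	blobBaseFee := big.NewInt(1)              // 1 wei floor, zero excess blob gas
	multiple := big.NewInt(10)                // MaxFeeBidMultipleBips = OneInBips * 10, i.e. 10x

	gasFeeCap := new(big.Int).Mul(new(big.Int).Add(baseFee, suggestedTip), multiple)
	blobFeeCap := new(big.Int).Mul(blobBaseFee, multiple)
	fmt.Println(gasFeeCap)  // 30000000000, i.e. the 30 gwei expectedGasFeeCap
	fmt.Println(blobFeeCap) // 10, i.e. the 10 wei expectedBlobFeeCap
}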
+ expectedTipCap := big.NewInt(2 * params.GWei) + if !arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. Was: %d, expected: %d", expectedTipCap, newTipCap) + } + + lastBlobTx := &types.BlobTx{} + err = updateTxDataGasCaps(lastBlobTx, newGasFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + t.Fatal(err) + } + lastTx = types.NewTx(lastBlobTx) + // Make creation time go backwards so elapsed time increases + retconnedCreationTime := dataCreatedAt.Add(-time.Minute) + // Base fee has decreased but blob fee has increased + blobGasUsed = 0xc0000 // 6 blobs of gas + excessBlobGas = 8295804 // this should set blob fee to 12 wei + latestHeader = types.Header{ + Number: big.NewInt(2), + BaseFee: big.NewInt(100_000_000), + BlobGasUsed: &blobGasUsed, + ExcessBlobGas: &excessBlobGas, + } + + newGasFeeCap, newTipCap, newBlobFeeCap, err = p.feeAndTipCaps(ctx, nonce, gasLimit, numBlobs, lastTx, retconnedCreationTime, dataPosterBacklog, &latestHeader) + + t.Log("newGasFeeCap", newGasFeeCap, "newTipCap", newTipCap, "newBlobFeeCap", newBlobFeeCap, "err", err) + if arbmath.BigEquals(expectedGasFeeCap, newGasFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected gas fee cap. Was: %d, expected NOT: %d", expectedGasFeeCap, newGasFeeCap) + } + if arbmath.BigEquals(expectedBlobFeeCap, newBlobFeeCap) { + t.Fatalf("feeAndTipCaps didn't return expected blob gas fee cap. Was: %d, expected NOT: %d", expectedBlobFeeCap, newBlobFeeCap) + } + if arbmath.BigEquals(expectedTipCap, newTipCap) { + t.Fatalf("feeAndTipCaps didn't return expected tip cap. Was: %d, expected NOT: %d", expectedTipCap, newTipCap) + } + +} diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 473bfa2c3..2cfda5d77 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -58,6 +58,18 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, it.Error() } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + key := idxToKey(index) + value, err := s.db.Get(key) + if err != nil { + if errors.Is(err, leveldb.ErrNotFound) { + return nil, nil + } + return nil, err + } + return s.encDec().Decode(value) +} + func (s *Storage) lastItemIdx(context.Context) ([]byte, error) { return s.db.Get(lastItemIdxKey) } diff --git a/arbnode/dataposter/externalsigner/externalsigner.go b/arbnode/dataposter/externalsigner/externalsigner.go new file mode 100644 index 000000000..10d9754cd --- /dev/null +++ b/arbnode/dataposter/externalsigner/externalsigner.go @@ -0,0 +1,115 @@ +package externalsigner + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/holiman/uint256" +) + +type SignTxArgs struct { + *apitypes.SendTxArgs + + // Fields for BlobTx type transactions. + BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + + // Blob sidecar fields for BlobTx type transactions. + // These are optional if BlobHashes are already present, since these + // are not included in the hash/signature. 
+ Blobs []kzg4844.Blob `json:"blobs"` + Commitments []kzg4844.Commitment `json:"commitments"` + Proofs []kzg4844.Proof `json:"proofs"` +} + +func (a *SignTxArgs) ToTransaction() *types.Transaction { + if !a.isEIP4844() { + return a.SendTxArgs.ToTransaction() + } + to := common.Address{} + if a.To != nil { + to = a.To.Address() + } + var input []byte + if a.Input != nil { + input = *a.Input + } else if a.Data != nil { + input = *a.Data + } + al := types.AccessList{} + if a.AccessList != nil { + al = *a.AccessList + } + return types.NewTx(&types.BlobTx{ + To: to, + Nonce: uint64(a.SendTxArgs.Nonce), + Gas: uint64(a.Gas), + GasFeeCap: uint256.NewInt(a.MaxFeePerGas.ToInt().Uint64()), + GasTipCap: uint256.NewInt(a.MaxPriorityFeePerGas.ToInt().Uint64()), + Value: uint256.NewInt(a.Value.ToInt().Uint64()), + Data: input, + AccessList: al, + BlobFeeCap: uint256.NewInt(a.BlobFeeCap.ToInt().Uint64()), + BlobHashes: a.BlobHashes, + Sidecar: &types.BlobTxSidecar{ + Blobs: a.Blobs, + Commitments: a.Commitments, + Proofs: a.Proofs, + }, + ChainID: uint256.NewInt(a.ChainID.ToInt().Uint64()), + }) +} + +func (a *SignTxArgs) isEIP4844() bool { + return a.BlobHashes != nil || a.BlobFeeCap != nil +} + +// TxToSignTxArgs converts a transaction to SignTxArgs. This is needed for +// the external signer to specify the From field. +func TxToSignTxArgs(addr common.Address, tx *types.Transaction) (*SignTxArgs, error) { + var to *common.MixedcaseAddress + if tx.To() != nil { + to = new(common.MixedcaseAddress) + *to = common.NewMixedcaseAddress(*tx.To()) + } + data := (hexutil.Bytes)(tx.Data()) + val := (*hexutil.Big)(tx.Value()) + if val == nil { + val = (*hexutil.Big)(big.NewInt(0)) + } + al := tx.AccessList() + var ( + blobs []kzg4844.Blob + commitments []kzg4844.Commitment + proofs []kzg4844.Proof + ) + if tx.BlobTxSidecar() != nil { + blobs = tx.BlobTxSidecar().Blobs + commitments = tx.BlobTxSidecar().Commitments + proofs = tx.BlobTxSidecar().Proofs + } + return &SignTxArgs{ + SendTxArgs: &apitypes.SendTxArgs{ + From: common.NewMixedcaseAddress(addr), + To: to, + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), + MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), + Value: *val, + Nonce: hexutil.Uint64(tx.Nonce()), + Data: &data, + AccessList: &al, + ChainID: (*hexutil.Big)(tx.ChainId()), + }, + BlobFeeCap: (*hexutil.Big)(tx.BlobGasFeeCap()), + BlobHashes: tx.BlobHashes(), + Blobs: blobs, + Commitments: commitments, + Proofs: proofs, + }, nil +} diff --git a/arbnode/dataposter/externalsigner/externalsigner_test.go b/arbnode/dataposter/externalsigner/externalsigner_test.go new file mode 100644 index 000000000..abd5acedc --- /dev/null +++ b/arbnode/dataposter/externalsigner/externalsigner_test.go @@ -0,0 +1,74 @@ +package externalsigner + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +var ( + blobTx = types.NewTx( + &types.BlobTx{ + ChainID: uint256.NewInt(1337), + Nonce: 13, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1), + Gas: 3, + To: common.Address{}, + Value: uint256.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + BlobHashes: []common.Hash{ + common.BigToHash(big.NewInt(1)), + common.BigToHash(big.NewInt(2)), + common.BigToHash(big.NewInt(3)), + }, + Sidecar: &types.BlobTxSidecar{}, + }, + ) + dynamicFeeTx = types.NewTx( + &types.DynamicFeeTx{ + ChainID: big.NewInt(1337), + Nonce: 13, + GasTipCap: big.NewInt(1), + 
GasFeeCap: big.NewInt(1), + Gas: 3, + To: nil, + Value: big.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + }, + ) +) + +// TestToTransaction tests that a transaction converted to SignTxArgs and then +// back to Transaction results in the same hash. +func TestToTransaction(t *testing.T) { + for _, tc := range []struct { + desc string + tx *types.Transaction + }{ + { + desc: "blob transaction", + tx: blobTx, + }, + { + desc: "dynamic fee transaction", + tx: dynamicFeeTx, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + signTxArgs, err := TxToSignTxArgs(common.Address{}, tc.tx) + if err != nil { + t.Fatalf("TxToSignTxArgs() unexpected error: %v", err) + } + got := signTxArgs.ToTransaction() + hasher := types.LatestSignerForChainID(nil) + if h, g := hasher.Hash(tc.tx), hasher.Hash(got); h != g { + t.Errorf("ToTransaction() got hash: %v want: %v", g, h) + } + }) + } + +} diff --git a/arbnode/dataposter/externalsignertest/externalsignertest.go b/arbnode/dataposter/externalsignertest/externalsignertest.go index 7d15515fe..73a5760fb 100644 --- a/arbnode/dataposter/externalsignertest/externalsignertest.go +++ b/arbnode/dataposter/externalsignertest/externalsignertest.go @@ -19,7 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" ) var ( @@ -71,13 +71,14 @@ func CertPaths() (*CertAbsPaths, error) { }, nil } -func NewServer(ctx context.Context, t *testing.T) (*http.Server, *SignerAPI) { +func NewServer(t *testing.T) (*http.Server, *SignerAPI) { rpcServer := rpc.NewServer() signer, address, err := setupAccount("/tmp/keystore") if err != nil { t.Fatalf("Error setting up account: %v", err) } t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) + s := &SignerAPI{SignerFn: signer, Address: address} if err := rpcServer.RegisterName("test", s); err != nil { t.Fatalf("Failed to register EthSigningAPI, error: %v", err) @@ -107,6 +108,12 @@ func NewServer(ctx context.Context, t *testing.T) (*http.Server, *SignerAPI) { }, } + t.Cleanup(func() { + if err := httpServer.Close(); err != nil { + t.Fatalf("Error shutting down http server: %v", err) + } + }) + return httpServer, s } @@ -137,7 +144,7 @@ type SignerAPI struct { Address common.Address } -func (a *SignerAPI) SignTransaction(ctx context.Context, req *apitypes.SendTxArgs) (hexutil.Bytes, error) { +func (a *SignerAPI) SignTransaction(ctx context.Context, req *externalsigner.SignTxArgs) (hexutil.Bytes, error) { if req == nil { return nil, fmt.Errorf("nil request") } diff --git a/arbnode/dataposter/noop/storage.go b/arbnode/dataposter/noop/storage.go index b3947bcaa..c90e36b06 100644 --- a/arbnode/dataposter/noop/storage.go +++ b/arbnode/dataposter/noop/storage.go @@ -16,6 +16,10 @@ func (s *Storage) FetchContents(_ context.Context, _, _ uint64) ([]*storage.Queu return nil, nil } +func (s *Storage) Get(_ context.Context, _ uint64) (*storage.QueuedTransaction, error) { + return nil, nil +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { return nil, nil } diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index f2393611b..8b6dcf65a 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -78,6 +78,20 @@ func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxRe return items, nil } +func (s 
*Storage) Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) { + contents, err := s.FetchContents(ctx, index, 1) + if err != nil { + return nil, err + } + if len(contents) == 0 { + return nil, nil + } else if len(contents) == 1 { + return contents[0], nil + } else { + return nil, fmt.Errorf("expected only one return value for Get but got %v", len(contents)) + } +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { query := redis.ZRangeArgs{ Key: s.key, diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index dbd7a3ea5..69de7564a 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -45,6 +45,13 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, nil } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + if index >= s.firstNonce+uint64(len(s.queue)) || index < s.firstNonce { + return nil, nil + } + return s.encDec().Decode(s.queue[index-s.firstNonce]) +} + func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) { if len(s.queue) == 0 { return nil, nil diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index 9586b9c9a..8e5a7e179 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -26,31 +26,42 @@ var ( ) type QueuedTransaction struct { - FullTx *types.Transaction - DeprecatedData types.DynamicFeeTx // FullTx should be used instead - Meta []byte - Sent bool - Created time.Time // may be earlier than the tx was given to the tx poster - NextReplacement time.Time + FullTx *types.Transaction + DeprecatedData types.DynamicFeeTx // FullTx should be used instead + Meta []byte + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time + StoredCumulativeWeight *uint64 +} + +// CumulativeWeight returns a rough estimate of the total number of batches submitted at this point, not guaranteed to be exact +func (t *QueuedTransaction) CumulativeWeight() uint64 { + if t.StoredCumulativeWeight != nil { + return *t.StoredCumulativeWeight + } + return t.FullTx.Nonce() } type queuedTransactionForEncoding struct { - FullTx *types.Transaction - Data types.DynamicFeeTx - Meta []byte - Sent bool - Created RlpTime - NextReplacement RlpTime + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta []byte + Sent bool + Created RlpTime + NextReplacement RlpTime + StoredCumulativeWeight *uint64 `rlp:"optional"` } func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ - FullTx: qt.FullTx, - Data: qt.DeprecatedData, - Meta: qt.Meta, - Sent: qt.Sent, - Created: (RlpTime)(qt.Created), - NextReplacement: (RlpTime)(qt.NextReplacement), + FullTx: qt.FullTx, + Data: qt.DeprecatedData, + Meta: qt.Meta, + Sent: qt.Sent, + Created: (RlpTime)(qt.Created), + NextReplacement: (RlpTime)(qt.NextReplacement), + StoredCumulativeWeight: qt.StoredCumulativeWeight, }) } @@ -65,6 +76,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { qt.Sent = qtEnc.Sent qt.Created = time.Time(qtEnc.Created) qt.NextReplacement = time.Time(qtEnc.NextReplacement) + qt.StoredCumulativeWeight = qtEnc.StoredCumulativeWeight return nil } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 72881b52f..a1f1a1a93 100644 --- a/arbnode/inbox_reader.go +++ 
b/arbnode/inbox_reader.go @@ -10,7 +10,6 @@ import ( "math" "math/big" "strings" - "sync" "sync/atomic" "time" @@ -99,10 +98,6 @@ type InboxReader struct { // Atomic lastSeenBatchCount uint64 - - // Behind the mutex - lastReadMutex sync.RWMutex - lastReadBlock uint64 lastReadBatchCount uint64 } @@ -396,10 +391,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { // There's nothing to do from = arbmath.BigAddByUint(currentHeight, 1) blocksToFetch = config.DefaultBlocksToRead - r.lastReadMutex.Lock() - r.lastReadBlock = currentHeight.Uint64() - r.lastReadBatchCount = checkingBatchCount - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) storeSeenBatchCount() if !r.caughtUp && readMode == "latest" { r.caughtUp = true @@ -531,10 +523,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if len(sequencerBatches) > 0 { readAnyBatches = true - r.lastReadMutex.Lock() - r.lastReadBlock = to.Uint64() - r.lastReadBatchCount = sequencerBatches[len(sequencerBatches)-1].SequenceNumber + 1 - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, sequencerBatches[len(sequencerBatches)-1].SequenceNumber+1) storeSeenBatchCount() } } @@ -561,10 +550,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if !readAnyBatches { - r.lastReadMutex.Lock() - r.lastReadBlock = currentHeight.Uint64() - r.lastReadBatchCount = checkingBatchCount - r.lastReadMutex.Unlock() + atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) storeSeenBatchCount() } } @@ -635,10 +621,8 @@ func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint6 return nil, common.Hash{}, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) } -func (r *InboxReader) GetLastReadBlockAndBatchCount() (uint64, uint64) { - r.lastReadMutex.RLock() - defer r.lastReadMutex.RUnlock() - return r.lastReadBlock, r.lastReadBatchCount +func (r *InboxReader) GetLastReadBatchCount() uint64 { + return atomic.LoadUint64(&r.lastReadBatchCount) } // GetLastSeenBatchCount returns how many sequencer batches the inbox reader has read in from L1. 
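Putting the pieces together, the weight bookkeeping introduced above (StoredCumulativeWeight in storage, the nonce/weight backlog in Start) can be sketched as follows. This is a hedged illustration only: saturatingUSub and max64 mirror arbmath.SaturatingUSub and arbmath.MaxInt, and the queue contents are invented.

package main

import "fmt"

// saturatingUSub mirrors arbmath.SaturatingUSub (illustrative only).
func saturatingUSub(a, b uint64) uint64 {
	if b >= a {
		return 0
	}
	return a - b
}

func max64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// A queued blob tx weighs its blob count; everything else weighs 1,
	// i.e. weight = max(1, blobs), and StoredCumulativeWeight sums it.
	// Queue: nonce 10 (6 blobs, cum. weight 6), nonce 11 (plain, cum. 7),
	// nonce 12 (6 blobs, cum. 13).
	latestNonce := uint64(12)
	latestCumulativeWeight := uint64(13)

	// Backlog for the oldest tx (nonce 10, cumulative weight 6): the larger
	// of the nonce gap and the weight gap drives replace-by-fee urgency.
	nonceBacklog := saturatingUSub(latestNonce, 10)            // 2
	weightBacklog := saturatingUSub(latestCumulativeWeight, 6) // 7
	fmt.Println("backlog:", max64(nonceBacklog, weightBacklog)) // 7
}

Taking the max of the two gaps means a short queue of heavy blob transactions is treated as urgently as a long queue of plain ones.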
diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 763ddcc42..b758e95e6 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -204,6 +204,11 @@ func (t *InboxTracker) GetBatchMessageCount(seqNum uint64) (arbutil.MessageIndex metadata, err := t.GetBatchMetadata(seqNum) return metadata.MessageCount, err } +func (t *InboxTracker) GetBatchParentChainBlock(seqNum uint64) (uint64, error) { + metadata, err := t.GetBatchMetadata(seqNum) + return metadata.ParentChainBlock, err +} + // GetBatchAcc is a convenience function wrapping GetBatchMetadata func (t *InboxTracker) GetBatchAcc(seqNum uint64) (common.Hash, error) { metadata, err := t.GetBatchMetadata(seqNum) @@ -223,6 +228,54 @@ func (t *InboxTracker) GetBatchCount() (uint64, error) { return count, nil } +// err will be non-nil for unexpected/internal errors +// bool will be false if the batch is not found (meaning the message has not yet been posted in a batch) +func (t *InboxTracker) FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, bool, error) { + batchCount, err := t.GetBatchCount() + if err != nil { + return 0, false, err + } + low := uint64(0) + high := batchCount - 1 + lastBatchMessageCount, err := t.GetBatchMessageCount(high) + if err != nil { + return 0, false, err + } + if lastBatchMessageCount <= pos { + return 0, false, nil + } + // Iteration preconditions: + // - high >= low + // - msgCount(low - 1) <= pos implies low <= target + // - msgCount(high) > pos implies high >= target + // Therefore, if low == high, then low == high == target + for { + // Due to integer rounding, mid >= low && mid < high + mid := (low + high) / 2 + count, err := t.GetBatchMessageCount(mid) + if err != nil { + return 0, false, err + } + if count < pos { + // Must narrow as mid >= low, therefore mid + 1 > low, therefore newLow > oldLow + // Keeps low precondition as msgCount(mid) < pos + low = mid + 1 + } else if count == pos { + return mid + 1, true, nil + } else if count == pos+1 || mid == low { // implied: count > pos + return mid, true, nil + } else { + // implied: count > pos + 1 + // Must narrow as mid < high, therefore newHigh < oldHigh + // Keeps high precondition as msgCount(mid) > pos + high = mid + } + if high == low { + return high, true, nil + } + } +} + func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcaster) error { batchCount, err := t.GetBatchCount() if err != nil { @@ -606,8 +659,14 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, arbstate.KeysetValidate) + var daProviders []arbstate.DataAvailabilityProvider + if t.das != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) + } + if t.blobReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) + } + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index df7236f0f..7a7a99ba8 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" 
"github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" @@ -67,10 +68,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 60 * 60 * 24 / 15, - FutureBlocks: 12, - DelaySeconds: 60 * 60 * 24, - FutureSeconds: 60 * 60, + DelayBlocks: big.NewInt(60 * 60 * 24 / 15), + FutureBlocks: big.NewInt(12), + DelaySeconds: big.NewInt(60 * 60 * 24), + FutureSeconds: big.NewInt(60 * 60), }, } } @@ -87,7 +88,6 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -152,7 +152,6 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) - BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -191,12 +190,14 @@ func ConfigDefaultL1Test() *Config { func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false config.SeqCoordinator.Enable = false config.BlockValidator = staker.TestBlockValidatorConfig + config.SyncMonitor = TestSyncMonitorConfig config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} @@ -206,6 +207,7 @@ func ConfigDefaultL1NonSequencerTest() *Config { func ConfigDefaultL2Test() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig config.Feed.Input.Verify.Dangerous.AcceptMissing = true @@ -213,6 +215,7 @@ func ConfigDefaultL2Test() *Config { config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true config.Staker = staker.TestL1ValidatorConfig + config.SyncMonitor = TestSyncMonitorConfig config.Staker.Enable = false config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} config.TransactionStreamer = DefaultTransactionStreamerConfig @@ -223,16 +226,25 @@ func ConfigDefaultL2Test() *Config { type DangerousConfig struct { NoL1Listener bool `koanf:"no-l1-listener"` NoSequencerCoordinator bool `koanf:"no-sequencer-coordinator"` + DisableBlobReader bool `koanf:"disable-blob-reader"` } var DefaultDangerousConfig = DangerousConfig{ NoL1Listener: false, NoSequencerCoordinator: false, + DisableBlobReader: false, +} + +var TestDangerousConfig = DangerousConfig{ + NoL1Listener: false, + NoSequencerCoordinator: false, + DisableBlobReader: true, } func 
DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".no-l1-listener", DefaultDangerousConfig.NoL1Listener, "DANGEROUS! disables listening to L1. To be used in test nodes only") f.Bool(prefix+".no-sequencer-coordinator", DefaultDangerousConfig.NoSequencerCoordinator, "DANGEROUS! allows sequencing without sequencer-coordinator") + f.Bool(prefix+".disable-blob-reader", DefaultDangerousConfig.DisableBlobReader, "DANGEROUS! disables the EIP-4844 blob reader, which is necessary to read batches") } type Node struct { @@ -242,6 +254,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses + BlobReader arbstate.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -255,7 +268,6 @@ type Node struct { SeqCoordinator *SeqCoordinator MaintenanceRunner *MaintenanceRunner DASLifecycleManager *das.LifecycleManager - ClassicOutboxRetriever *ClassicOutboxRetriever SyncMonitor *SyncMonitor configFetcher ConfigFetcher ctx context.Context @@ -360,6 +372,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -370,17 +383,10 @@ func createNodeImpl( l2ChainId := l2Config.ChainID.Uint64() - syncMonitor := NewSyncMonitor(&config.SyncMonitor) - var classicOutbox *ClassicOutboxRetriever - classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) - if err != nil { - if l2Config.ArbitrumChainParams.GenesisBlockNum > 0 { - log.Warn("Classic Msg Database not found", "err", err) - } - classicOutbox = nil - } else { - classicOutbox = NewClassicOutboxRetriever(classicMsgDb) + syncConfigFetcher := func() *SyncMonitorConfig { + return &configFetcher.Get().SyncMonitor } + syncMonitor := NewSyncMonitor(syncConfigFetcher) var l1Reader *headerreader.HeaderReader if config.ParentChainReader.Enable { @@ -463,6 +469,7 @@ func createNodeImpl( L1Reader: nil, TxStreamer: txStreamer, DeployInfo: nil, + BlobReader: blobReader, InboxReader: nil, InboxTracker: nil, DelayedSequencer: nil, @@ -476,7 +483,6 @@ func createNodeImpl( SeqCoordinator: coordinator, MaintenanceRunner: maintenanceRunner, DASLifecycleManager: nil, - ClassicOutboxRetriever: classicOutbox, SyncMonitor: syncMonitor, configFetcher: configFetcher, ctx: ctx, @@ -523,14 +529,6 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - var blobReader arbstate.BlobReader - if config.BlobClient.BeaconChainUrl != "" { - blobReader, err = NewBlobClient(config.BlobClient, l1client) - if err != nil { - return nil, err - } - } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err @@ -688,6 +686,7 @@ func createNodeImpl( L1Reader: l1Reader, TxStreamer: txStreamer, DeployInfo: deployInfo, + BlobReader: blobReader, InboxReader: inboxReader, InboxTracker: inboxTracker, DelayedSequencer: delayedSequencer, @@ -701,7 +700,6 @@ func createNodeImpl( SeqCoordinator: coordinator, MaintenanceRunner: maintenanceRunner, DASLifecycleManager: dasLifecycleManager, - ClassicOutboxRetriever: classicOutbox, SyncMonitor: syncMonitor, configFetcher: configFetcher, ctx: ctx, @@ -727,8 +725,9 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { - currentNode, err := 
createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID) + currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader) if err != nil { return nil, err } @@ -757,26 +756,46 @@ func CreateNode( return currentNode, nil } +func (n *Node) CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) { + n.TxStreamer.CacheL1PriceDataOfMsg(pos, callDataUnits, l1GasCharged) +} + +func (n *Node) BacklogL1GasCharged() uint64 { + return n.TxStreamer.BacklogL1GasCharged() +} +func (n *Node) BacklogCallDataUnits() uint64 { + return n.TxStreamer.BacklogCallDataUnits() +} + func (n *Node) Start(ctx context.Context) error { execClient, ok := n.Execution.(*gethexec.ExecutionNode) if !ok { execClient = nil } if execClient != nil { - err := execClient.Initialize(ctx, n, n.SyncMonitor) + err := execClient.Initialize(ctx) if err != nil { return fmt.Errorf("error initializing exec client: %w", err) } } - n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator, n.Execution) + n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator) err := n.Stack.Start() if err != nil { return fmt.Errorf("error starting geth stack: %w", err) } + if execClient != nil { + execClient.SetConsensusClient(n) + } err = n.Execution.Start(ctx) if err != nil { return fmt.Errorf("error starting exec client: %w", err) } + if n.BlobReader != nil { + err = n.BlobReader.Initialize(ctx) + if err != nil { + return fmt.Errorf("error initializing blob reader: %w", err) + } + } if n.InboxTracker != nil { err = n.InboxTracker.Initialize() if err != nil { @@ -817,12 +836,6 @@ func (n *Node) Start(ctx context.Context) error { if n.SeqCoordinator != nil { n.SeqCoordinator.Start(ctx) } else { - if n.DelayedSequencer != nil { - err := n.DelayedSequencer.ForceSequenceDelayed(ctx) - if err != nil { - return fmt.Errorf("error initially sequencing delayed instructions: %w", err) - } - } n.Execution.Activate() } if n.MaintenanceRunner != nil { @@ -885,6 +898,7 @@ func (n *Node) Start(ctx context.Context) error { if n.configFetcher != nil { n.configFetcher.Start(ctx) } + n.SyncMonitor.Start(ctx) return nil } @@ -938,6 +952,7 @@ func (n *Node) StopAndWait() { // Just stops the redis client (most other stuff was stopped earlier) n.SeqCoordinator.StopAndWait() } + n.SyncMonitor.StopAndWait() if n.DASLifecycleManager != nil { n.DASLifecycleManager.StopAndWaitUntil(2 * time.Second) } @@ -948,3 +963,51 @@ func (n *Node) StopAndWait() { log.Error("error on stack close", "err", err) } } + +func (n *Node) FetchBatch(ctx context.Context, batchNum uint64) ([]byte, common.Hash, error) { + return n.InboxReader.GetSequencerMessageBytes(ctx, batchNum) +} + +func (n *Node) FindInboxBatchContainingMessage(message arbutil.MessageIndex) (uint64, bool, error) { + return n.InboxTracker.FindInboxBatchContainingMessage(message) +} + +func (n *Node) GetBatchParentChainBlock(seqNum uint64) (uint64, error) { + return n.InboxTracker.GetBatchParentChainBlock(seqNum) +} + +func (n *Node) FullSyncProgressMap() map[string]interface{} { + return n.SyncMonitor.FullSyncProgressMap() +} + +func (n *Node) Synced() bool { + return n.SyncMonitor.Synced() +} + +func (n *Node) SyncTargetMessageCount() arbutil.MessageIndex { + return n.SyncMonitor.SyncTargetMessageCount() +} + +// TODO: switch from 
pulling to pushing safe/finalized +func (n *Node) GetSafeMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + return n.InboxReader.GetSafeMsgCount(ctx) +} + +func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) { + return n.InboxReader.GetFinalizedMsgCount(ctx) +} + +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) +} + +func (n *Node) ExpectChosenSequencer() error { + return n.TxStreamer.ExpectChosenSequencer() +} + +func (n *Node) ValidatedMessageCount() (arbutil.MessageIndex, error) { + if n.BlockValidator == nil { + return 0, errors.New("validator not set up") + } + return n.BlockValidator.GetValidated(), nil +} diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index b743bf0ef..edda4e551 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -45,7 +45,7 @@ func init() { } batchDeliveredID = sequencerBridgeABI.Events["SequencerBatchDelivered"].ID sequencerBatchDataABI = sequencerBridgeABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin0"] } type SequencerInbox struct { diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 99a66abde..d3b9a7e1c 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -2,120 +2,146 @@ package arbnode import ( "context" - "errors" - "sync/atomic" + "sync" + "time" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) type SyncMonitor struct { - config *SyncMonitorConfig + stopwaiter.StopWaiter + config func() *SyncMonitorConfig inboxReader *InboxReader txStreamer *TransactionStreamer coordinator *SeqCoordinator - exec execution.FullExecutionClient initialized bool + + syncTargetLock sync.Mutex + nextSyncTarget arbutil.MessageIndex + syncTarget arbutil.MessageIndex } -func NewSyncMonitor(config *SyncMonitorConfig) *SyncMonitor { +func NewSyncMonitor(config func() *SyncMonitorConfig) *SyncMonitor { return &SyncMonitor{ config: config, } } type SyncMonitorConfig struct { - BlockBuildLag uint64 `koanf:"block-build-lag"` - BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` - CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` - SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` - FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` + MsgLag time.Duration `koanf:"msg-lag"` } var DefaultSyncMonitorConfig = SyncMonitorConfig{ - BlockBuildLag: 20, - BlockBuildSequencerInboxLag: 0, - CoordinatorMsgLag: 15, - SafeBlockWaitForBlockValidator: false, - FinalizedBlockWaitForBlockValidator: false, + MsgLag: time.Second, +} + +var TestSyncMonitorConfig = SyncMonitorConfig{ + MsgLag: time.Millisecond * 10, } func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".block-build-lag", DefaultSyncMonitorConfig.BlockBuildLag, "allowed lag between messages read and blocks built") - f.Uint64(prefix+".block-build-sequencer-inbox-lag", DefaultSyncMonitorConfig.BlockBuildSequencerInboxLag, "allowed lag between messages read from sequencer inbox and blocks built") - 
f.Uint64(prefix+".coordinator-msg-lag", DefaultSyncMonitorConfig.CoordinatorMsgLag, "allowed lag between local and remote messages") - f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") - f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") + f.Duration(prefix+".msg-lag", DefaultSyncMonitorConfig.MsgLag, "allowed msg lag while still considered in sync") } -func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator, exec execution.FullExecutionClient) { +func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator) { s.inboxReader = inboxReader s.txStreamer = txStreamer s.coordinator = coordinator - s.exec = exec s.initialized = true } -func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { - syncing := false - res := make(map[string]interface{}) +func (s *SyncMonitor) updateSyncTarget(ctx context.Context) time.Duration { + nextSyncTarget, err := s.maxMessageCount() + if err != nil { + log.Warn("failed readin max msg count", "err", err) + return s.config().MsgLag + } + s.syncTargetLock.Lock() + defer s.syncTargetLock.Unlock() + s.syncTarget = s.nextSyncTarget + s.nextSyncTarget = nextSyncTarget + return s.config().MsgLag +} - if !s.initialized { - res["err"] = "uninitialized" - return res +func (s *SyncMonitor) SyncTargetMessageCount() arbutil.MessageIndex { + s.syncTargetLock.Lock() + defer s.syncTargetLock.Unlock() + return s.syncTarget +} + +func (s *SyncMonitor) maxMessageCount() (arbutil.MessageIndex, error) { + msgCount, err := s.txStreamer.GetMessageCount() + if err != nil { + return 0, err } - broadcasterQueuedMessagesPos := atomic.LoadUint64(&(s.txStreamer.broadcasterQueuedMessagesPos)) + pending := s.txStreamer.FeedPendingMessageCount() + if pending > msgCount { + msgCount = pending + } - if broadcasterQueuedMessagesPos != 0 { // unprocessed feed - syncing = true + if s.inboxReader != nil { + batchProcessed := s.inboxReader.GetLastReadBatchCount() + + if batchProcessed > 0 { + batchMsgCount, err := s.inboxReader.Tracker().GetBatchMessageCount(batchProcessed - 1) + if err != nil { + return msgCount, err + } + if batchMsgCount > msgCount { + msgCount = batchMsgCount + } + } } - res["broadcasterQueuedMessagesPos"] = broadcasterQueuedMessagesPos - builtMessageCount, err := s.exec.HeadMessageNumber() - if err != nil { - res["builtMessageCountError"] = err.Error() - syncing = true - builtMessageCount = 0 - } else { - blockNum := s.exec.MessageIndexToBlockNumber(builtMessageCount) - res["blockNum"] = blockNum - builtMessageCount++ - res["messageOfLastBlock"] = builtMessageCount + if s.coordinator != nil { + coordinatorMessageCount, err := s.coordinator.GetRemoteMsgCount() //NOTE: this creates a remote call + if err != nil { + return msgCount, err + } + if coordinatorMessageCount > msgCount { + msgCount = coordinatorMessageCount + } } + return msgCount, nil +} + +func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} { + res := make(map[string]interface{}) + + if !s.initialized { + res["err"] = "uninitialized" + return res + } + + syncTarget := s.SyncTargetMessageCount() + res["syncTargetMsgCount"] = syncTarget + msgCount, err := s.txStreamer.GetMessageCount() if err != 
nil { res["msgCountError"] = err.Error() - syncing = true - } else { - res["msgCount"] = msgCount - if builtMessageCount+arbutil.MessageIndex(s.config.BlockBuildLag) < msgCount { - syncing = true - } + return res } + res["msgCount"] = msgCount + + res["feedPendingMessageCount"] = s.txStreamer.FeedPendingMessageCount() if s.inboxReader != nil { batchSeen := s.inboxReader.GetLastSeenBatchCount() - _, batchProcessed := s.inboxReader.GetLastReadBlockAndBatchCount() - - if (batchSeen == 0) || // error or not yet read inbox - (batchProcessed < batchSeen) { // unprocessed inbox messages - syncing = true - } res["batchSeen"] = batchSeen + + batchProcessed := s.inboxReader.GetLastReadBatchCount() res["batchProcessed"] = batchProcessed - processedMetadata, err := s.inboxReader.Tracker().GetBatchMetadata(batchProcessed - 1) + processedBatchMsgs, err := s.inboxReader.Tracker().GetBatchMessageCount(batchProcessed - 1) if err != nil { res["batchMetadataError"] = err.Error() - syncing = true } else { - res["messageOfProcessedBatch"] = processedMetadata.MessageCount - if builtMessageCount+arbutil.MessageIndex(s.config.BlockBuildSequencerInboxLag) < processedMetadata.MessageCount { - syncing = true - } + res["messageOfProcessedBatch"] = processedBatchMsgs } l1reader := s.inboxReader.l1Reader @@ -135,73 +161,55 @@ func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { coordinatorMessageCount, err := s.coordinator.GetRemoteMsgCount() //NOTE: this creates a remote call if err != nil { res["coordinatorMsgCountError"] = err.Error() - syncing = true } else { res["coordinatorMessageCount"] = coordinatorMessageCount - if msgCount+arbutil.MessageIndex(s.config.CoordinatorMsgLag) < coordinatorMessageCount { - syncing = true - } } } - if !syncing { - return make(map[string]interface{}) - } - return res } -func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { - if s.inboxReader == nil || !s.initialized { - return 0, errors.New("not set up for safeblock") - } - msg, err := s.inboxReader.GetSafeMsgCount(ctx) - if err != nil { - return 0, err - } - // If SafeBlockWaitForBlockValidator is true, we want to wait for the block validator to finish - if s.config.SafeBlockWaitForBlockValidator { - latestValidatedCount, err := s.getLatestValidatedCount() - if err != nil { - return 0, err - } - if msg > latestValidatedCount { - msg = latestValidatedCount - } +func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { + if s.Synced() { + return make(map[string]interface{}) } - block := s.exec.MessageIndexToBlockNumber(msg - 1) - return block, nil + + return s.FullSyncProgressMap() } -func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { - if s.txStreamer.validator == nil { - return 0, errors.New("validator not set up") - } - return s.txStreamer.validator.GetValidated(), nil +func (s *SyncMonitor) Start(ctx_in context.Context) { + s.StopWaiter.Start(ctx_in, s) + s.CallIteratively(s.updateSyncTarget) } -func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { - if s.inboxReader == nil || !s.initialized { - return 0, errors.New("not set up for safeblock") +func (s *SyncMonitor) Synced() bool { + if !s.initialized { + return false } - msg, err := s.inboxReader.GetFinalizedMsgCount(ctx) + if !s.Started() { + return false + } + syncTarget := s.SyncTargetMessageCount() + + msgCount, err := s.txStreamer.GetMessageCount() if err != nil { - return 0, err + return false } - // If FinalizedBlockWaitForBlockValidator is true, we want to wait for the block 
validator to finish - if s.config.FinalizedBlockWaitForBlockValidator { - latestValidatedCount, err := s.getLatestValidatedCount() - if err != nil { - return 0, err + + if syncTarget > msgCount { + return false + } + + if s.inboxReader != nil { + batchSeen := s.inboxReader.GetLastSeenBatchCount() + if batchSeen == 0 { + return false } - if msg > latestValidatedCount { - msg = latestValidatedCount + batchProcessed := s.inboxReader.GetLastReadBatchCount() + + if batchProcessed < batchSeen { + return false } } - block := s.exec.MessageIndexToBlockNumber(msg - 1) - return block, nil -} - -func (s *SyncMonitor) Synced() bool { - return len(s.SyncProgressMap()) == 0 + return true } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index ce482f367..238726e40 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -68,6 +68,9 @@ type TransactionStreamer struct { broadcastServer *broadcaster.Broadcaster inboxReader *InboxReader delayedBridge *DelayedBridge + + cachedL1PriceDataMutex sync.RWMutex + cachedL1PriceData *L1PriceData } type TransactionStreamerConfig struct { @@ -112,8 +115,10 @@ func NewTransactionStreamer( broadcastServer: broadcastServer, fatalErrChan: fatalErrChan, config: config, + cachedL1PriceData: &L1PriceData{ + msgToL1PriceData: []L1PriceDataOfMsg{}, + }, } - streamer.exec.SetTransactionStreamer(streamer) err := streamer.cleanupInconsistentState() if err != nil { return nil, err @@ -121,6 +126,120 @@ func NewTransactionStreamer( return streamer, nil } +type L1PriceDataOfMsg struct { + callDataUnits uint64 + cumulativeCallDataUnits uint64 + l1GasCharged uint64 + cumulativeL1GasCharged uint64 +} + +type L1PriceData struct { + startOfL1PriceDataCache arbutil.MessageIndex + endOfL1PriceDataCache arbutil.MessageIndex + msgToL1PriceData []L1PriceDataOfMsg + currentEstimateOfL1GasPrice uint64 +} + +func (s *TransactionStreamer) CurrentEstimateOfL1GasPrice() uint64 { + s.cachedL1PriceDataMutex.Lock() + defer s.cachedL1PriceDataMutex.Unlock() + + currentEstimate, err := s.exec.GetL1GasPriceEstimate() + if err != nil { + log.Error("error fetching current L2 estimate of L1 gas price, reusing cached estimate", "err", err) + } else { + s.cachedL1PriceData.currentEstimateOfL1GasPrice = currentEstimate + } + return s.cachedL1PriceData.currentEstimateOfL1GasPrice +} + +func (s *TransactionStreamer) BacklogCallDataUnits() uint64 { + s.cachedL1PriceDataMutex.RLock() + defer s.cachedL1PriceDataMutex.RUnlock() + + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 { + return 0 + } + return (s.cachedL1PriceData.msgToL1PriceData[size-1].cumulativeCallDataUnits - + s.cachedL1PriceData.msgToL1PriceData[0].cumulativeCallDataUnits + + s.cachedL1PriceData.msgToL1PriceData[0].callDataUnits) +} + +func (s *TransactionStreamer) BacklogL1GasCharged() uint64 { + s.cachedL1PriceDataMutex.RLock() + defer s.cachedL1PriceDataMutex.RUnlock() + + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 { + return 0 + } + return (s.cachedL1PriceData.msgToL1PriceData[size-1].cumulativeL1GasCharged - + s.cachedL1PriceData.msgToL1PriceData[0].cumulativeL1GasCharged + + s.cachedL1PriceData.msgToL1PriceData[0].l1GasCharged) +} + +func (s *TransactionStreamer) TrimCache(to arbutil.MessageIndex) { + s.cachedL1PriceDataMutex.Lock() + defer s.cachedL1PriceDataMutex.Unlock() + + if to < s.cachedL1PriceData.startOfL1PriceDataCache { + log.Info("trying to trim l1 price data cache below its start, ignoring") + } else if to >= 
s.cachedL1PriceData.endOfL1PriceDataCache { + s.cachedL1PriceData.startOfL1PriceDataCache = 0 + s.cachedL1PriceData.endOfL1PriceDataCache = 0 + s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{} + } else { + newStart := to - s.cachedL1PriceData.startOfL1PriceDataCache + 1 + s.cachedL1PriceData.msgToL1PriceData = s.cachedL1PriceData.msgToL1PriceData[newStart:] + s.cachedL1PriceData.startOfL1PriceDataCache = to + 1 + } +} + +func (s *TransactionStreamer) CacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) { + s.cachedL1PriceDataMutex.Lock() + defer s.cachedL1PriceDataMutex.Unlock() + + resetCache := func() { + s.cachedL1PriceData.startOfL1PriceDataCache = seqNum + s.cachedL1PriceData.endOfL1PriceDataCache = seqNum + s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{{ + callDataUnits: callDataUnits, + cumulativeCallDataUnits: callDataUnits, + l1GasCharged: l1GasCharged, + cumulativeL1GasCharged: l1GasCharged, + }} + } + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 || + s.cachedL1PriceData.startOfL1PriceDataCache == 0 || + s.cachedL1PriceData.endOfL1PriceDataCache == 0 || + arbutil.MessageIndex(size) != s.cachedL1PriceData.endOfL1PriceDataCache-s.cachedL1PriceData.startOfL1PriceDataCache+1 { + resetCache() + return + } + if seqNum != s.cachedL1PriceData.endOfL1PriceDataCache+1 { + if seqNum > s.cachedL1PriceData.endOfL1PriceDataCache+1 { + log.Info("message position higher than current end of l1 price data cache, resetting cache to this message") + resetCache() + } else if seqNum < s.cachedL1PriceData.startOfL1PriceDataCache { + log.Info("message position lower than start of l1 price data cache, ignoring") + } else { + log.Info("message position already seen in l1 price data cache, ignoring") + } + } else { + cumulativeCallDataUnits := s.cachedL1PriceData.msgToL1PriceData[size-1].cumulativeCallDataUnits + cumulativeL1GasCharged := s.cachedL1PriceData.msgToL1PriceData[size-1].cumulativeL1GasCharged + s.cachedL1PriceData.msgToL1PriceData = append(s.cachedL1PriceData.msgToL1PriceData, L1PriceDataOfMsg{ + callDataUnits: callDataUnits, + cumulativeCallDataUnits: cumulativeCallDataUnits + callDataUnits, + l1GasCharged: l1GasCharged, + cumulativeL1GasCharged: cumulativeL1GasCharged + l1GasCharged, + }) + s.cachedL1PriceData.endOfL1PriceDataCache = seqNum + } +} + // Encodes a uint64 as bytes in a lexically sortable manner for database iteration. // Generally this is only used for database keys, which need to be sorted. // A shorter RLP encoding is usually used for database values. 
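For intuition, the backlog accessors above reduce to a single subtraction thanks to the cumulative counters; a standalone sketch of the arithmetic with a simplified struct and made-up numbers (illustration only, not part of the patch):

```go
package main

import "fmt"

// entry mirrors the shape of L1PriceDataOfMsg for illustration.
type entry struct {
	callDataUnits           uint64
	cumulativeCallDataUnits uint64
}

// backlog reproduces the BacklogCallDataUnits arithmetic: take the newest
// cumulative total and subtract everything accumulated before the oldest
// cached entry (its cumulative total minus its own units).
func backlog(cache []entry) uint64 {
	if len(cache) == 0 {
		return 0
	}
	first, last := cache[0], cache[len(cache)-1]
	return last.cumulativeCallDataUnits - first.cumulativeCallDataUnits + first.callDataUnits
}

func main() {
	cache := []entry{
		{callDataUnits: 10, cumulativeCallDataUnits: 110}, // 100 units predate the cache
		{callDataUnits: 5, cumulativeCallDataUnits: 115},
		{callDataUnits: 7, cumulativeCallDataUnits: 122},
	}
	fmt.Println(backlog(cache)) // 22 = 10 + 5 + 7: exactly the units still cached
}
```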
@@ -431,6 +550,21 @@ func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreC return s.AddMessagesAndEndBatch(pos, messagesAreConfirmed, messages, nil) } +func (s *TransactionStreamer) FeedPendingMessageCount() arbutil.MessageIndex { + pos := atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + if pos == 0 { + return 0 + } + + s.insertionMutex.Lock() + defer s.insertionMutex.Unlock() + pos = atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + if pos == 0 { + return 0 + } + return arbutil.MessageIndex(pos + uint64(len(s.broadcasterQueuedMessages))) +} + func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { if len(feedMessages) == 0 { return nil @@ -567,6 +701,8 @@ func endBatch(batch ethdb.Batch) error { func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { if messagesAreConfirmed { + // Trim confirmed messages from the L1 price data cache + s.TrimCache(pos + arbutil.MessageIndex(len(messages))) s.reorgMutex.RLock() dups, _, _, err := s.countDuplicateMessages(pos, messages, nil) s.reorgMutex.RUnlock() @@ -824,10 +960,6 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return nil } -func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, common.Hash, error) { - return s.inboxReader.GetSequencerMessageBytes(context.TODO(), batchNum) -} - // The caller must hold the insertionMutex func (s *TransactionStreamer) ExpectChosenSequencer() error { if s.coordinator != nil { @@ -869,10 +1001,6 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex return nil } -func (s *TransactionStreamer) GenesisBlockNumber() uint64 { - return s.chainConfig.ArbitrumChainParams.GenesisBlockNum -} - // PauseReorgs until a matching call to ResumeReorgs (may be called concurrently) func (s *TransactionStreamer) PauseReorgs() { s.reorgMutex.RLock() diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 9ac6eda24..b79f25afc 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -319,20 +319,12 @@ func (state *ArbosState) UpgradeArbosVersion( } // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. case 20: - if !chainConfig.DebugMode() { - // This upgrade isn't finalized so we only want to support it for testing - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - nextArbosVersion, - ErrFatalNodeOutOfDate, - ) - } // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) // TODO: move to the first version that introduces stylus programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) default: - if nextArbosVersion >= 12 && state.arbosVersion < 20 { + if nextArbosVersion >= 12 && nextArbosVersion <= 19 { // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. 
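The rewritten default-branch guard just above makes the Orbit window an explicit range on the target version instead of depending on the pre-upgrade state; a quick sketch of what the new predicate admits (hypothetical standalone helper, not part of the patch):

```go
package main

import "fmt"

// isOrbitCustomUpgrade mirrors the patched condition: only target ArbOS
// versions 12 through 19 are reserved for Orbit chains' custom upgrades.
func isOrbitCustomUpgrade(nextArbosVersion uint64) bool {
	return nextArbosVersion >= 12 && nextArbosVersion <= 19
}

func main() {
	for _, v := range []uint64{12, 19, 20, 21} {
		fmt.Printf("next=%d orbitWindow=%v\n", v, isOrbitCustomUpgrade(v))
	}
	// 12..19 fall inside the window; 21 falls through to the error path,
	// and 20 is handled by its own case in the switch above.
}
```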
} else { return fmt.Errorf( diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 4896f8268..4953431d3 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -25,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -40,7 +39,6 @@ var L2ToL1TransactionEventID common.Hash var L2ToL1TxEventID common.Hash var EmitReedeemScheduledEvent func(*vm.EVM, uint64, uint64, [32]byte, [32]byte, common.Address, *big.Int, *big.Int) error var EmitTicketCreatedEvent func(*vm.EVM, [32]byte) error -var gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) // A helper struct that implements String() by marshalling to JSON. // This is useful for logging because it's lazy, so if the log level is too high to print the transaction, @@ -148,6 +146,7 @@ func ProduceBlock( chainContext core.ChainContext, chainConfig *params.ChainConfig, batchFetcher arbostypes.FallibleBatchFetcher, + isMsgForPrefetch bool, ) (*types.Block, types.Receipts, error) { var batchFetchErr error txes, err := ParseL2Transactions(message, chainConfig.ChainID, func(batchNum uint64, batchHash common.Hash) []byte { @@ -173,7 +172,7 @@ func ProduceBlock( hooks := NoopSequencingHooks() return ProduceBlockAdvanced( - message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, + message.Header, txes, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, hooks, isMsgForPrefetch, ) } @@ -187,6 +186,7 @@ func ProduceBlockAdvanced( chainContext core.ChainContext, chainConfig *params.ChainConfig, sequencingHooks *SequencingHooks, + isMsgForPrefetch bool, ) (*types.Block, types.Receipts, error) { state, err := arbosState.OpenSystemArbosState(statedb, nil, true) @@ -376,7 +376,9 @@ func ProduceBlockAdvanced( if chainConfig.DebugMode() { logLevel = log.Warn } - logLevel("error applying transaction", "tx", printTxAsJson{tx}, "err", err) + if !isMsgForPrefetch { + logLevel("error applying transaction", "tx", printTxAsJson{tx}, "err", err) + } if !hooks.DiscardInvalidTxsEarly { // we'll still deduct a TxGas's worth from the block-local rate limiter even if the tx was invalid blockGasLeft = arbmath.SaturatingUSub(blockGasLeft, params.TxGas) @@ -463,10 +465,6 @@ func ProduceBlockAdvanced( blockGasLeft = arbmath.SaturatingUSub(blockGasLeft, computeUsed) - // Add gas used since startup to prometheus metric. 
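The new isMsgForPrefetch parameter threaded through ProduceBlock/ProduceBlockAdvanced above exists only to quiet duplicate logging: a prefetched message is executed again for real, so its failures need logging only once. A minimal sketch of the pattern (hypothetical names, illustration only):

```go
package main

import (
	"errors"
	"log"
)

// applyTx stands in for per-transaction execution; here it always fails
// so the logging behavior is visible.
func applyTx(tx string) error {
	return errors.New("intrinsic gas too low")
}

// produce shows the rule from the patch: failures are logged only on the
// real pass, never on the warm-up (prefetch) pass, but the transaction is
// skipped either way.
func produce(txes []string, isMsgForPrefetch bool) {
	for _, tx := range txes {
		if err := applyTx(tx); err != nil {
			if !isMsgForPrefetch {
				log.Printf("error applying transaction tx=%s err=%v", tx, err)
			}
		}
	}
}

func main() {
	produce([]string{"tx1"}, true)  // prefetch pass: silent
	produce([]string{"tx1"}, false) // real pass: logs the failure once
}
```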
- gasUsed := arbmath.SaturatingUSub(receipt.GasUsed, receipt.GasUsedForL1) - gasUsedSinceStartupCounter.Inc(arbmath.SaturatingCast[int64](gasUsed)) - complete = append(complete, tx) receipts = append(receipts, receipt) diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index f2312c46d..9e00eeb58 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -195,6 +195,23 @@ func (ps *L1PricingState) SetUnitsSinceUpdate(units uint64) error { return ps.unitsSinceUpdate.Set(units) } +func (ps *L1PricingState) GetL1PricingSurplus() (*big.Int, error) { + fundsDueForRefunds, err := ps.BatchPosterTable().TotalFundsDue() + if err != nil { + return nil, err + } + fundsDueForRewards, err := ps.FundsDueForRewards() + if err != nil { + return nil, err + } + haveFunds, err := ps.L1FeesAvailable() + if err != nil { + return nil, err + } + needFunds := arbmath.BigAdd(fundsDueForRefunds, fundsDueForRewards) + return arbmath.BigSub(haveFunds, needFunds), nil +} + func (ps *L1PricingState) LastSurplus() (*big.Int, error) { return ps.lastSurplus.Get() } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index dafb0700b..3105ee92b 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -46,6 +46,7 @@ type BlobReader interface { batchBlockHash common.Hash, versionedHashes []common.Hash, ) ([]kzg4844.Blob, error) + Initialize(ctx context.Context) error } type sequencerMessage struct { @@ -62,7 +63,12 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +var ( + ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") + ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") +) + +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -87,41 +93,30 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Stage 1: Extract the payload from any data availability header. // It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. 
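The GetL1PricingSurplus helper added earlier in this hunk is a plain balance identity, surplus = feesAvailable - (fundsDueForRefunds + fundsDueForRewards); a worked sketch with made-up values:

```go
package main

import (
	"fmt"
	"math/big"
)

// surplus mirrors the arithmetic of GetL1PricingSurplus: funds on hand
// minus everything owed (batch poster refunds plus rewards due).
func surplus(feesAvailable, dueForRefunds, dueForRewards *big.Int) *big.Int {
	need := new(big.Int).Add(dueForRefunds, dueForRewards)
	return new(big.Int).Sub(feesAvailable, need)
}

func main() {
	have := big.NewInt(1_000_000)  // stands in for L1FeesAvailable()
	refunds := big.NewInt(600_000) // stands in for BatchPosterTable().TotalFundsDue()
	rewards := big.NewInt(250_000) // stands in for FundsDueForRewards()
	fmt.Println(surplus(have, refunds, rewards)) // 150000; a negative result is a deficit
}
```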
- if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) { - if dasReader == nil { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") - } else { - var err error - payload, err = RecoverPayloadFromDasBatch(ctx, batchNum, data, dasReader, nil, keysetValidationMode) - if err != nil { - return nil, err - } - if payload == nil { - return parsedMsg, nil + // We try to extract the payload from the first occurring valid DA provider in the daProviders list + if len(payload) > 0 { + foundDA := false + var err error + for _, provider := range daProviders { + if provider != nil && provider.IsValidHeaderByte(payload[0]) { + payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode) + if err != nil { + return nil, err + } + if payload == nil { + return parsedMsg, nil + } + foundDA = true + break } } - } else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { - blobHashes := payload[1:] - if len(blobHashes)%len(common.Hash{}) != 0 { - return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) - for i := 0; i*32 < len(blobHashes); i += 1 { - copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) - } - - if blobReader == nil { - return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") - } - kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) - if err != nil { - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - payload, err = blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return parsedMsg, nil + if !foundDA { + if IsDASMessageHeaderByte(payload[0]) { + log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else if IsBlobHashesHeaderByte(payload[0]) { + return nil, ErrNoBlobReader + } } } @@ -283,6 +278,92 @@ func RecoverPayloadFromDasBatch( return payload, nil } +type DataAvailabilityProvider interface { + // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider + IsValidHeaderByte(headerByte byte) bool + + // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information + RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, + ) ([]byte, error) +} + +// NewDAProviderDAS is generally meant to be only used by nitro. 
+// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { + return &dAProviderForDAS{ + das: das, + } +} + +type dAProviderForDAS struct { + das DataAvailabilityReader +} + +func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool { + return IsDASMessageHeaderByte(headerByte) +} + +func (d *dAProviderForDAS) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode) +} + +// NewDAProviderBlobReader is generally meant to be only used by nitro. +// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader { + return &dAProviderForBlobReader{ + blobReader: blobReader, + } +} + +type dAProviderForBlobReader struct { + blobReader BlobReader +} + +func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool { + return IsBlobHashesHeaderByte(headerByte) +} + +func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + blobHashes := sequencerMsg[41:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, ErrInvalidBlobDataFormat + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err := blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil + } + return payload, nil +} + type KeysetValidationMode uint8 const KeysetValidate KeysetValidationMode = 0 @@ -292,8 +373,7 @@ const KeysetDontValidate KeysetValidationMode = 2 type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - dasReader DataAvailabilityReader - blobReader BlobReader + daProviders []DataAvailabilityProvider cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -303,12 +383,11 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, - dasReader: dasReader, - blobReader: blobReader, + daProviders: daProviders, keysetValidationMode: keysetValidationMode, } } @@ -330,7 +409,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = 
r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index dcf43fd0d..b34c02534 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -67,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index 47c56c030..2df3fa562 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -96,3 +96,25 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio } return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } + +func DetailTxErrorUsingCallMsg(ctx context.Context, client L1Interface, txHash common.Hash, txRes *types.Receipt, callMsg ethereum.CallMsg) error { + // Re-execute the transaction as a call to get a better error + if ctx.Err() != nil { + return ctx.Err() + } + if txRes == nil { + return errors.New("expected receipt") + } + if txRes.Status == types.ReceiptStatusSuccessful { + return nil + } + var err error + if _, err = client.CallContract(ctx, callMsg, txRes.BlockNumber); err == nil { + return fmt.Errorf("tx failed but call succeeded for tx hash %v", txHash) + } + callMsg.Gas = 0 + if _, err = client.CallContract(ctx, callMsg, txRes.BlockNumber); err == nil { + return fmt.Errorf("%w for tx hash %v", vm.ErrOutOfGas, txHash) + } + return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, txHash) +} diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index 42bd1428d..bb6de00ca 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -68,7 +68,7 @@ type BlocksReExecutor struct { stopwaiter.StopWaiter config *Config blockchain *core.BlockChain - stateFor func(header *types.Header) (*state.StateDB, error) + stateFor arbitrum.StateForHeaderFunction done chan struct{} fatalErrChan chan error startBlock uint64 @@ -110,7 +110,10 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block startBlock: start, done: make(chan struct{}, c.Room), fatalErrChan: fatalErrChan, - stateFor: func(header *types.Header) (*state.StateDB, error) { return blockchain.StateAt(header.Root) }, + stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { + state, err := blockchain.StateAt(header.Root) + return state, arbitrum.NoopStateRelease, err + }, } } @@ -120,7 +123,9 @@ func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentB if start < s.startBlock { start = s.startBlock } - startState, startHeader, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + // we don't use state release pattern here + // TODO do we want to use release pattern here? 
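The stateFor rework above adopts the state-plus-release-callback shape (arbitrum.StateForHeaderFunction / arbitrum.StateReleaseFunc); a sketch of the calling convention with stand-in types (hypothetical, illustration only):

```go
package main

import "fmt"

// releaseFunc plays the role of arbitrum.StateReleaseFunc: the caller
// invokes it once it is done with the state so pinned resources can be
// freed; a no-op stands in for arbitrum.NoopStateRelease.
type releaseFunc func()

func noopRelease() {}

// stateForHeader plays the role of arbitrum.StateForHeaderFunction.
func stateForHeader(root string) (string, releaseFunc, error) {
	// Like the wrapper in blocks_reexecutor, nothing is pinned here, so
	// the no-op release is returned alongside the state handle.
	return "state@" + root, noopRelease, nil
}

func main() {
	st, release, err := stateForHeader("0xabc")
	if err != nil {
		panic(err)
	}
	defer release() // release even on early return paths
	fmt.Println(st)
}
```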
+ startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ed3088ca2..242b8f9ee 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -7,6 +7,7 @@ import ( "context" "errors" "net" + "runtime/debug" "github.com/gobwas/ws" @@ -60,7 +61,7 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { - log.Error("recovered error in BroadcastSingle", "recover", r) + log.Error("recovered error in BroadcastSingle", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastSingle") } }() @@ -84,7 +85,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { - log.Error("recovered error in BroadcastMessages", "recover", r) + log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastMessages") } }() diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index e9ec2af0c..531945b4d 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -7,14 +7,16 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" flag "github.com/spf13/pflag" ) -type L1Config struct { - ID uint64 `koanf:"id"` - Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` - Wallet genericconf.WalletConfig `koanf:"wallet"` +type ParentChainConfig struct { + ID uint64 `koanf:"id"` + Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` + Wallet genericconf.WalletConfig `koanf:"wallet"` + BlobClient headerreader.BlobClientConfig `koanf:"blob-client"` } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ @@ -25,10 +27,11 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ ArgLogLimit: 2048, } -var L1ConfigDefault = L1Config{ +var L1ConfigDefault = ParentChainConfig{ ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, + BlobClient: headerreader.DefaultBlobClientConfig, } var DefaultL1WalletConfig = genericconf.WalletConfig{ @@ -43,13 +46,14 @@ func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) + headerreader.BlobClientAddOptions(prefix+".blob-client", f) } -func (c *L1Config) ResolveDirectoryNames(chain string) { +func (c *ParentChainConfig) ResolveDirectoryNames(chain string) { c.Wallet.ResolveDirectoryNames(chain) } -func (c *L1Config) Validate() error { +func (c *ParentChainConfig) Validate() error { return c.Connection.Validate() } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index afbcddec6..1c8b85810 100644 --- a/cmd/deploy/deploy.go 
+++ b/cmd/deploy/deploy.go @@ -10,6 +10,7 @@ import ( "fmt" "math/big" "os" + "strings" "time" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -41,6 +42,8 @@ func main() { deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address") + batchPostersString := flag.String("batchPosters", "", "the comma-separated list of batch poster addresses. Defaults to sequencer address") + batchPosterManagerAddressString := flag.String("batchPosterManager", "", "the batch poster manager's address. Defaults to owner address") nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency") maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum data size of a batch or a cross-chain message (default = 90% of Geth's 128KB tx size limit)") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") @@ -56,6 +59,7 @@ func main() { authorizevalidators := flag.Uint64("authorizevalidators", 0, "Number of validators to preemptively authorize") txTimeout := flag.Duration("txtimeout", 10*time.Minute, "Timeout when waiting for a transaction to be included in a block") prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing") + isUsingFeeToken := flag.Bool("isUsingFeeToken", false, "true if the chain uses a custom fee token") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint) @@ -92,15 +96,47 @@ func main() { if !common.IsHexAddress(*sequencerAddressString) && len(*sequencerAddressString) > 0 { panic("specified sequencer address is invalid") } + sequencerAddress := common.HexToAddress(*sequencerAddressString) + if !common.IsHexAddress(*ownerAddressString) { panic("please specify a valid rollup owner address") } + ownerAddress := common.HexToAddress(*ownerAddressString) + if *prod && !common.IsHexAddress(*loserEscrowAddressString) { panic("please specify a valid loser escrow address") } - sequencerAddress := common.HexToAddress(*sequencerAddressString) - ownerAddress := common.HexToAddress(*ownerAddressString) + var batchPosters []common.Address + if len(*batchPostersString) > 0 { + batchPostersArr := strings.Split(*batchPostersString, ",") + for _, address := range batchPostersArr { + if !common.IsHexAddress(address) { + log.Error("invalid address in batch posters array", "address", address) + continue + } + batchPosters = append(batchPosters, common.HexToAddress(address)) + } + if len(batchPosters) != len(batchPostersArr) { + panic("found at least one invalid address in batch posters array") + } + } + if len(batchPosters) == 0 { + log.Info("batch posters array was empty, defaulting to sequencer address") + batchPosters = append(batchPosters, sequencerAddress) + } + + var batchPosterManagerAddress common.Address + if common.IsHexAddress(*batchPosterManagerAddressString) { + batchPosterManagerAddress = common.HexToAddress(*batchPosterManagerAddressString) + } else { + if len(*batchPosterManagerAddressString) > 0 { + panic("please specify a valid batch poster manager address") + } + log.Info("batch poster manager address was empty, defaulting to owner address") + batchPosterManagerAddress = ownerAddress + 
} + loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString) if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From { panic("cannot specify sequencer address if owner is not deployer") @@ -146,11 +182,13 @@ func main() { ctx, l1Reader, l1TransactionOpts, - sequencerAddress, + batchPosters, + batchPosterManagerAddress, *authorizevalidators, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), nativeToken, maxDataSize, + *isUsingFeeToken, ) if err != nil { flag.Usage() diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index fea95cbb1..3671c7ea8 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -20,7 +20,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/validator/valnode" ) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index ebc57b13b..72c767d00 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -35,7 +35,6 @@ import ( "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/cmd/pruning" "github.com/offchainlabs/nitro/cmd/staterecovery" - "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/arbmath" @@ -284,14 +283,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := config.Chain.InfoFiles - if config.Chain.InfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) - if err != nil { - log.Error("error getting l2 chain info file from ipfs", "err", err) - } - combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) - } + combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, config.Chain.InfoFiles, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 6cb9a89b7..997adf936 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -42,6 +42,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -50,7 +51,7 @@ import ( "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/execution/gethexec" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" @@ -292,14 +293,7 @@ func mainImpl() int { } } - combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles - if nodeConfig.Chain.InfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, 
nodeConfig.Chain.InfoIpfsDownloadPath) - if err != nil { - log.Error("error getting chain info file from ipfs", "err", err) - } - combinedL2ChainInfoFile = append(combinedL2ChainInfoFile, l2ChainInfoIpfsFile) - } + combinedL2ChainInfoFile := aggregateL2ChainInfoFiles(ctx, nodeConfig.Chain.InfoFiles, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) if nodeConfig.Node.Staker.Enable { if !nodeConfig.Node.ParentChainReader.Enable { @@ -329,6 +323,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client + var l1Reader *headerreader.HeaderReader + var blobReader arbstate.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) @@ -351,6 +347,22 @@ func mainImpl() int { if err != nil { log.Crit("error getting rollup addresses", "err", err) } + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) + if err != nil { + log.Crit("failed to get L1 headerreader", "err", err) + } + if !l1Reader.IsParentChainArbitrum() && !nodeConfig.Node.Dangerous.DisableBlobReader { + if nodeConfig.ParentChain.BlobClient.BeaconUrl == "" { + flag.Usage() + log.Crit("a beacon chain RPC URL is required to read batches, but it was not configured (CLI argument: --parent-chain.blob-client.beacon-url [URL])") + } + blobClient, err := headerreader.NewBlobClient(nodeConfig.ParentChain.BlobClient, l1Client) + if err != nil { + log.Crit("failed to initialize blob client", "err", err) + } + blobReader = blobClient + } } if nodeConfig.Node.Staker.OnlyCreateWalletContract { @@ -358,12 +370,10 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) - if err != nil { - log.Crit("failed to get L1 headerreader", "error", err) + if l1Reader == nil { + flag.Usage() + log.Crit("--node.validator.only-create-wallet-contract conflicts with --node.dangerous.no-l1-listener") } - // Just create validator smart wallet if needed then exit deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { @@ -488,9 +498,19 @@ func mainImpl() int { return 0 } - if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee && !nodeConfig.Node.DataAvailability.Enable { + chainInfo, err := chaininfo.ProcessChainInfo(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) + if err != nil { + log.Error("error processing l2 chain info", "err", err) + return 1 + } + if err := validateBlockChain(l2BlockChain, chainInfo.ChainConfig); err != nil { + log.Error("user provided chain config is not compatible with onchain chain config", "err", err) + return 1 + } + + if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee != nodeConfig.Node.DataAvailability.Enable { flag.Usage() - log.Error("a data availability service must be configured for this chain (see the --node.data-availability family of options)") + 
log.Error(fmt.Sprintf("data availability service usage for this chain is set to %v but --node.data-availability.enable is set to %v", l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee, nodeConfig.Node.DataAvailability.Enable)) return 1 } @@ -536,6 +556,7 @@ func mainImpl() int { dataSigner, fatalErrChan, big.NewInt(int64(nodeConfig.ParentChain.ID)), + blobReader, ) if err != nil { log.Error("failed to create node", "err", err) @@ -667,7 +688,7 @@ type NodeConfig struct { Node arbnode.Config `koanf:"node" reload:"hot"` Execution gethexec.Config `koanf:"execution" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` + ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` Chain conf.L2Config `koanf:"chain"` LogLevel int `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` @@ -885,15 +906,19 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa return &nodeConfig, &l1Wallet, &l2DevWallet, nil } -func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error { - combinedL2ChainInfoFiles := l2ChainInfoFiles +func aggregateL2ChainInfoFiles(ctx context.Context, l2ChainInfoFiles []string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) []string { if l2ChainInfoIpfsUrl != "" { l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) if err != nil { log.Error("error getting l2 chain info file from ipfs", "err", err) } - combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) + l2ChainInfoFiles = append(l2ChainInfoFiles, l2ChainInfoIpfsFile) } + return l2ChainInfoFiles +} + +func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error { + combinedL2ChainInfoFiles := aggregateL2ChainInfoFiles(ctx, l2ChainInfoFiles, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, combinedL2ChainInfoFiles, l2ChainInfoJson) if err != nil { return err diff --git a/cmd/replay/main.go b/cmd/replay/main.go index dd8a0fd1f..536949532 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -143,6 +143,10 @@ func (r *BlobPreimageReader) GetBlobs( return blobs, nil } +func (r *BlobPreimageReader) Initialize(ctx context.Context) error { + return nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -206,7 +210,12 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) + var daProviders []arbstate.DataAvailabilityProvider + if dasReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) + } + daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := 
inboxMultiplexer.Pop(ctx) if err != nil { @@ -264,7 +273,7 @@ func main() { batchFetcher := func(batchNum uint64) ([]byte, error) { return wavmio.ReadInboxMessage(batchNum), nil } - newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher) + newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher, false) if err != nil { panic(err) } diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index 782ab3801..e963c0e96 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -16,6 +16,9 @@ type RedisCoordinator struct { // UpdatePriorities updates the priority list of sequencers func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + if len(priorities) == 0 { + return rc.Client.Del(ctx, redisutil.PRIORITIES_KEY).Err() + } prioritiesString := strings.Join(priorities, ",") err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() if err != nil { diff --git a/contracts b/contracts index ed01ad872..63b8b293b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ed01ad872e1949a6cb0ac05053b68689dda39f26 +Subproject commit 63b8b293b716a5c984e7bd0fa7612b8495be572d diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go deleted file mode 100644 index f8421bed1..000000000 --- a/das/bigcache_storage_service.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "fmt" - "time" - - "github.com/allegro/bigcache" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -type BigCacheConfig struct { - // TODO add other config information like HardMaxCacheSize - Enable bool `koanf:"enable"` - Expiration time.Duration `koanf:"expiration"` - MaxEntriesInWindow int -} - -var DefaultBigCacheConfig = BigCacheConfig{ - Expiration: time.Hour, -} - -var TestBigCacheConfig = BigCacheConfig{ - Enable: true, - Expiration: time.Hour, - MaxEntriesInWindow: 1000, -} - -func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") - f.Duration(prefix+".expiration", DefaultBigCacheConfig.Expiration, "Expiration time for in-memory cached sequencer batches") -} - -type BigCacheStorageService struct { - baseStorageService StorageService - bigCacheConfig BigCacheConfig - bigCache *bigcache.BigCache -} - -func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) (StorageService, error) { - conf := bigcache.DefaultConfig(bigCacheConfig.Expiration) - if bigCacheConfig.MaxEntriesInWindow > 0 { - conf.MaxEntriesInWindow = bigCacheConfig.MaxEntriesInWindow - } - bigCache, err := bigcache.NewBigCache(conf) - if err != nil { - return nil, err - } - return &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: bigCacheConfig, - bigCache: bigCache, - }, nil 
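The BigCache-backed service deleted here is superseded by das/cache_storage_service.go (added below), which keys a capacity-bound generic LRU from go-ethereum by hash instead of expiring entries by time; a minimal usage sketch restricted to the calls the new file itself uses:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
)

func main() {
	// Capacity of 2 to force an eviction; the real default is 20_000 entries.
	cache := lru.NewCache[common.Hash, []byte](2)

	a := common.HexToHash("0x01")
	b := common.HexToHash("0x02")
	c := common.HexToHash("0x03")

	cache.Add(a, []byte("A"))
	cache.Add(b, []byte("B"))
	cache.Add(c, []byte("C")) // evicts a, the least recently used entry

	if _, ok := cache.Get(a); !ok {
		fmt.Println("a evicted: size is bounded by capacity, not by an expiry timer")
	}
	if v, ok := cache.Get(c); ok {
		fmt.Println("c cached:", string(v))
	}
}
```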
-} - -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - - ret, err := bcs.bigCache.Get(string(key.Bytes())) - if err != nil { - ret, err = bcs.baseStorageService.GetByHash(ctx, key) - if err != nil { - return nil, err - } - - err = bcs.bigCache.Set(string(key.Bytes()), ret) - if err != nil { - return nil, err - } - return ret, err - } - - return ret, err -} - -func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - logPut("das.BigCacheStorageService.Put", value, timeout, bcs) - err := bcs.baseStorageService.Put(ctx, value, timeout) - if err != nil { - return err - } - return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) -} - -func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { - return bcs.baseStorageService.Sync(ctx) -} - -func (bcs *BigCacheStorageService) Close(ctx context.Context) error { - err := bcs.bigCache.Close() - if err != nil { - return err - } - return bcs.baseStorageService.Close(ctx) -} - -func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return bcs.baseStorageService.ExpirationPolicy(ctx) -} - -func (bcs *BigCacheStorageService) String() string { - return fmt.Sprintf("BigCacheStorageService(%+v)", bcs.bigCacheConfig) -} - -func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error { - return bcs.baseStorageService.HealthCheck(ctx) -} diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go new file mode 100644 index 000000000..13bdb189d --- /dev/null +++ b/das/cache_storage_service.go @@ -0,0 +1,95 @@ +// Copyright 2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "context" + "fmt" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/pretty" + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/log" +) + +type CacheConfig struct { + Enable bool `koanf:"enable"` + Capacity int `koanf:"capacity"` +} + +var DefaultCacheConfig = CacheConfig{ + Capacity: 20_000, +} + +var TestCacheConfig = CacheConfig{ + Capacity: 1_000, +} + +func CacheConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") + f.Int(prefix+".capacity", DefaultCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") +} + +type CacheStorageService struct { + baseStorageService StorageService + cache *lru.Cache[common.Hash, []byte] +} + +func NewCacheStorageService(cacheConfig CacheConfig, baseStorageService StorageService) *CacheStorageService { + return &CacheStorageService{ + baseStorageService: baseStorageService, + cache: lru.NewCache[common.Hash, []byte](cacheConfig.Capacity), + } +} + +func (c *CacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", c) + + if val, wasCached := c.cache.Get(key); wasCached { + return val, nil + } + + val, err := c.baseStorageService.GetByHash(ctx, key) + if err != nil { + return nil, err + } + + c.cache.Add(key, val) + + return val, nil +} + +func (c *CacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { + logPut("das.CacheStorageService.Put", value, timeout, c) + err := c.baseStorageService.Put(ctx, value, timeout) + if err != nil { + return err + } + c.cache.Add(common.Hash(dastree.Hash(value)), value) + return nil +} + +func (c *CacheStorageService) Sync(ctx context.Context) error { + return c.baseStorageService.Sync(ctx) +} + +func (c *CacheStorageService) Close(ctx context.Context) error { + return c.baseStorageService.Close(ctx) +} + +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { + return c.baseStorageService.ExpirationPolicy(ctx) +} + +func (c *CacheStorageService) String() string { + return fmt.Sprintf("CacheStorageService(size:%+v)", len(c.cache.Keys())) +} + +func (c *CacheStorageService) HealthCheck(ctx context.Context) error { + return c.baseStorageService.HealthCheck(ctx) +} diff --git a/das/bigcache_storage_service_test.go b/das/cache_storage_service_test.go similarity index 57% rename from das/bigcache_storage_service_test.go rename to das/cache_storage_service_test.go index 5fd0cf68d..8b4203dab 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/cache_storage_service_test.go @@ -8,42 +8,32 @@ import ( "context" "errors" "testing" - "time" - "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/das/dastree" ) -func TestBigCacheStorageService(t *testing.T) { +func TestCacheStorageService(t *testing.T) { ctx := context.Background() - timeout := uint64(time.Now().Add(time.Hour).Unix()) baseStorageService := NewMemoryBackedStorageService(ctx) - bigCache, err := bigcache.NewBigCache(bigcache.DefaultConfig(TestBigCacheConfig.Expiration)) - Require(t, err) - bigCacheService := &BigCacheStorageService{ - 
baseStorageService: baseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, - } - Require(t, err) + cacheService := NewCacheStorageService(TestCacheConfig, baseStorageService) val1 := []byte("The first value") val1CorrectKey := dastree.Hash(val1) val1IncorrectKey := dastree.Hash(append(val1, 0)) - _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) + _, err := cacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - err = bigCacheService.Put(ctx, val1, timeout) + err = cacheService.Put(ctx, val1, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val1IncorrectKey) + _, err = cacheService.GetByHash(ctx, val1IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err := bigCacheService.GetByHash(ctx, val1CorrectKey) + val, err := cacheService.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) @@ -54,14 +44,14 @@ func TestBigCacheStorageService(t *testing.T) { val2CorrectKey := dastree.Hash(val2) val2IncorrectKey := dastree.Hash(append(val2, 0)) - err = baseStorageService.Put(ctx, val2, timeout) + err = baseStorageService.Put(ctx, val2, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val2IncorrectKey) + _, err = cacheService.GetByHash(ctx, val2IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err = bigCacheService.GetByHash(ctx, val2CorrectKey) + val, err = cacheService.GetByHash(ctx, val2CorrectKey) Require(t, err) if !bytes.Equal(val, val2) { t.Fatal(val, val2) @@ -69,19 +59,18 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the cache storage but not present in the base. emptyBaseStorageService := NewMemoryBackedStorageService(ctx) - bigCacheServiceWithEmptyBaseStorage := &BigCacheStorageService{ + cacheServiceWithEmptyBaseStorage := &CacheStorageService{ baseStorageService: emptyBaseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, + cache: cacheService.cache, } - val, err = bigCacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) + val, err = cacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) } // Closes the base storage properly. 
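The test above exercises the new CacheStorageService, which replaces BigCache with go-ethereum's generic common/lru cache: reads fall through to baseStorageService on a miss and populate the cache, writes go to both. A minimal sketch of that read-through shape using hashicorp/golang-lru/v2 (already a go.mod dependency here); the load callback stands in for baseStorageService.GetByHash:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// readThrough consults the cache first and only falls back to the
// loader (the backing store) on a miss, populating the cache with
// whatever the loader returns, the same shape as CacheStorageService.GetByHash.
func readThrough(cache *lru.Cache[string, []byte], key string, load func(string) ([]byte, error)) ([]byte, error) {
	if val, ok := cache.Get(key); ok {
		return val, nil
	}
	val, err := load(key)
	if err != nil {
		return nil, err
	}
	cache.Add(key, val)
	return val, nil
}

func main() {
	cache, _ := lru.New[string, []byte](2) // tiny capacity, just for the demo
	loads := 0
	loader := func(k string) ([]byte, error) { loads++; return []byte("batch:" + k), nil }

	readThrough(cache, "a", loader)
	readThrough(cache, "a", loader) // second read served from cache
	fmt.Println(loads)              // 1
}
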
- err = bigCacheService.Close(ctx) + err = cacheService.Close(ctx) Require(t, err) _, err = baseStorageService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrClosed) { diff --git a/das/das.go b/das/das.go index 910e51108..dd8e43a34 100644 --- a/das/das.go +++ b/das/das.go @@ -40,8 +40,8 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCache BigCacheConfig `koanf:"local-cache"` - RedisCache RedisConfig `koanf:"redis-cache"` + LocalCache CacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` @@ -109,7 +109,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.Bool(prefix+".disable-signature-checking", DefaultDataAvailabilityConfig.DisableSignatureChecking, "disables signature checking on Data Availability Store requests (DANGEROUS, FOR TESTING ONLY)") // Cache options - BigCacheConfigAddOptions(prefix+".local-cache", f) + CacheConfigAddOptions(prefix+".local-cache", f) RedisConfigAddOptions(prefix+".redis-cache", f) // Storage options diff --git a/das/das_test.go b/das/das_test.go index 416744535..4377dc4dc 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -30,6 +30,10 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Fail(t, "unknown storage type") } + dbConfig := DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -39,10 +43,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } @@ -122,6 +123,10 @@ func testDASMissingMessage(t *testing.T, storageType string) { Fail(t, "unknown storage type") } + dbConfig := DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + config := DataAvailabilityConfig{ Enable: true, Key: KeyConfig{ @@ -131,10 +136,7 @@ func testDASMissingMessage(t *testing.T, storageType string) { Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, ParentChainNodeURL: "none", } diff --git a/das/db_storage_service.go b/das/db_storage_service.go index b9af530b9..33d21942b 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -9,7 +9,7 @@ import ( "errors" "time" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" @@ -25,9 +25,32 @@ type LocalDBStorageConfig struct { DiscardAfterTimeout bool `koanf:"discard-after-timeout"` SyncFromStorageService bool `koanf:"sync-from-storage-service"` SyncToStorageService bool `koanf:"sync-to-storage-service"` + + // BadgerDB options + NumMemtables int `koanf:"num-memtables"` + NumLevelZeroTables int `koanf:"num-level-zero-tables"` + NumLevelZeroTablesStall int `koanf:"num-level-zero-tables-stall"` + NumCompactors int `koanf:"num-compactors"` + BaseTableSize int64 `koanf:"base-table-size"` + ValueLogFileSize int64 `koanf:"value-log-file-size"` } -var DefaultLocalDBStorageConfig = LocalDBStorageConfig{} +var 
badgerDefaultOptions = badger.DefaultOptions("") + +var DefaultLocalDBStorageConfig = LocalDBStorageConfig{ + Enable: false, + DataDir: "", + DiscardAfterTimeout: false, + SyncFromStorageService: false, + SyncToStorageService: false, + + NumMemtables: badgerDefaultOptions.NumMemtables, + NumLevelZeroTables: badgerDefaultOptions.NumLevelZeroTables, + NumLevelZeroTablesStall: badgerDefaultOptions.NumLevelZeroTablesStall, + NumCompactors: badgerDefaultOptions.NumCompactors, + BaseTableSize: badgerDefaultOptions.BaseTableSize, + ValueLogFileSize: badgerDefaultOptions.ValueLogFileSize, +} func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") @@ -35,6 +58,14 @@ f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular sync storage") + + f.Int(prefix+".num-memtables", DefaultLocalDBStorageConfig.NumMemtables, "BadgerDB option: sets the maximum number of memtables to keep in memory before stalling") + f.Int(prefix+".num-level-zero-tables", DefaultLocalDBStorageConfig.NumLevelZeroTables, "BadgerDB option: sets the maximum number of Level 0 tables before compaction starts") + f.Int(prefix+".num-level-zero-tables-stall", DefaultLocalDBStorageConfig.NumLevelZeroTablesStall, "BadgerDB option: sets the number of Level 0 tables that, once reached, causes the DB to stall until compaction succeeds") + f.Int(prefix+".num-compactors", DefaultLocalDBStorageConfig.NumCompactors, "BadgerDB option: sets the number of compaction workers to run concurrently") + f.Int64(prefix+".base-table-size", DefaultLocalDBStorageConfig.BaseTableSize, "BadgerDB option: sets the maximum size in bytes of an LSM table or file in the base level") + f.Int64(prefix+".value-log-file-size", DefaultLocalDBStorageConfig.ValueLogFileSize, "BadgerDB option: sets the maximum size of a single value log file") + } type DBStorageService struct { @@ -44,16 +75,23 @@ type DBStorageService struct { stopWaiter stopwaiter.StopWaiterSafe } -func NewDBStorageService(ctx context.Context, dirPath string, discardAfterTimeout bool) (StorageService, error) { - db, err := badger.Open(badger.DefaultOptions(dirPath)) +func NewDBStorageService(ctx context.Context, config *LocalDBStorageConfig) (StorageService, error) { + options := badger.DefaultOptions(config.DataDir). + WithNumMemtables(config.NumMemtables). + WithNumLevelZeroTables(config.NumLevelZeroTables). + WithNumLevelZeroTablesStall(config.NumLevelZeroTablesStall). + WithNumCompactors(config.NumCompactors). + WithBaseTableSize(config.BaseTableSize).
+ WithValueLogFileSize(config.ValueLogFileSize) + db, err := badger.Open(options) if err != nil { return nil, err } ret := &DBStorageService{ db: db, - discardAfterTimeout: discardAfterTimeout, - dirPath: dirPath, + discardAfterTimeout: config.DiscardAfterTimeout, + dirPath: config.DataDir, } if err := ret.stopWaiter.Start(ctx, ret); err != nil { return nil, err diff --git a/das/factory.go b/das/factory.go index 0e6b29200..a459d1a46 100644 --- a/das/factory.go +++ b/das/factory.go @@ -28,7 +28,7 @@ func CreatePersistentStorageService( storageServices := make([]StorageService, 0, 10) var lifecycleManager LifecycleManager if config.LocalDBStorage.Enable { - s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout) + s, err := NewDBStorageService(ctx, &config.LocalDBStorage) if err != nil { return nil, nil, err } @@ -112,7 +112,7 @@ func WrapStorageWithCache( return nil, nil } - // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. + // Enable caches, Redis and (local) Cache. Local is the outermost, so it will be tried first. var err error if config.RedisCache.Enable { storageService, err = NewRedisStorageService(config.RedisCache, storageService) @@ -130,11 +130,8 @@ func WrapStorageWithCache( } } if config.LocalCache.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) + storageService = NewCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) - if err != nil { - return nil, err - } } return storageService, nil } diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 91f2e522a..c79cd8040 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -53,7 +53,7 @@ func init() { } BatchDeliveredID = sequencerInboxABI.Events[sequencerBatchDeliveredEvent].ID sequencerBatchDataABI = sequencerInboxABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin0"] } type SyncToStorageConfig struct { diff --git a/deploy/deploy.go b/deploy/deploy.go index 59760e2c2..5e7755cae 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -31,7 +31,7 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } -func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (common.Address, error) { +func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, isUsingFeeToken bool) (common.Address, error) { client := l1Reader.Client() /// deploy eth based templates @@ -46,7 +46,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844, isUsingFeeToken) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) @@ -161,8 +161,8 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, 
challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize) +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, isUsingFeeToken bool) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { + bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize, isUsingFeeToken) if err != nil { return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } @@ -234,12 +234,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, nil } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPosters []common.Address, batchPosterManager common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, isUsingFeeToken bool) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize, isUsingFeeToken) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -251,12 +251,13 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ Config: config, - BatchPoster: batchPoster, Validators: validatorAddrs, MaxDataSize: maxDataSize, NativeToken: nativeToken, DeployFactoriesToL2: false, MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + BatchPosters: batchPosters, + BatchPosterManager: batchPosterManager, } tx, err := rollupCreator.CreateRollup( diff --git a/execution/gethexec/arb_interface.go b/execution/gethexec/arb_interface.go index 50d7dfb89..dbf9c2401 100644 --- a/execution/gethexec/arb_interface.go +++ b/execution/gethexec/arb_interface.go @@ -21,30 +21,31 @@ type TransactionPublisher interface { } type ArbInterface struct { - exec *ExecutionEngine + blockchain *core.BlockChain + node *ExecutionNode txPublisher TransactionPublisher - arbNode interface{} } -func NewArbInterface(exec *ExecutionEngine, txPublisher TransactionPublisher) (*ArbInterface, error) { +func NewArbInterface(blockchain *core.BlockChain, txPublisher TransactionPublisher) (*ArbInterface, error) { return &ArbInterface{ - exec: exec, + blockchain: blockchain, txPublisher: txPublisher, }, nil } -func (a *ArbInterface) Initialize(arbnode interface{}) { - a.arbNode = arbnode +func (a *ArbInterface) Initialize(node *ExecutionNode) { + a.node = node } func (a *ArbInterface) PublishTransaction(ctx context.Context, tx 
*types.Transaction, options *arbitrum_types.ConditionalOptions) error { return a.txPublisher.PublishTransaction(ctx, tx, options) } +// might be used before Initialize func (a *ArbInterface) BlockChain() *core.BlockChain { - return a.exec.bc + return a.blockchain } func (a *ArbInterface) ArbNode() interface{} { - return a.arbNode + return a.node } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index 61963d8f0..5b509b97f 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -123,7 +123,7 @@ func (r *BlockRecorder) RecordBlockCreation( var readBatchInfo []validator.BatchInfo if msg != nil { batchFetcher := func(batchNum uint64) ([]byte, error) { - data, blockHash, err := r.execEngine.streamer.FetchBatch(batchNum) + data, blockHash, err := r.execEngine.consensus.FetchBatch(ctx, batchNum) if err != nil { return nil, err } @@ -145,6 +145,7 @@ func (r *BlockRecorder) RecordBlockCreation( chaincontext, chainConfig, batchFetcher, + false, ) if err != nil { return nil, err diff --git a/arbnode/classicMessage.go b/execution/gethexec/classicMessage.go similarity index 99% rename from arbnode/classicMessage.go rename to execution/gethexec/classicMessage.go index f03ef5bd4..df749b98b 100644 --- a/arbnode/classicMessage.go +++ b/execution/gethexec/classicMessage.go @@ -1,7 +1,7 @@ // Copyright 2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package arbnode +package gethexec import ( "encoding/binary" diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index a662de362..16267720b 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -20,16 +21,25 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/sharedmetrics" "github.com/offchainlabs/nitro/util/stopwaiter" ) +var ( + baseFeeGauge = metrics.NewRegisteredGauge("arb/block/basefee", nil) + blockGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/gasused", nil, metrics.NewBoundedHistogramSample()) + txCountHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/count", nil, metrics.NewBoundedHistogramSample()) + txGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/gasused", nil, metrics.NewBoundedHistogramSample()) + gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) +) + type ExecutionEngine struct { stopwaiter.StopWaiter - bc *core.BlockChain - streamer execution.TransactionStreamer - recorder *BlockRecorder + bc *core.BlockChain + consensus execution.FullConsensusClient + recorder *BlockRecorder resequenceChan chan []*arbostypes.MessageWithMetadata createBlocksMutex sync.Mutex @@ -83,14 +93,18 @@ func (s *ExecutionEngine) EnablePrefetchBlock() { s.prefetchBlock = true } -func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) { +func (s *ExecutionEngine) SetConsensus(consensus execution.FullConsensusClient) { if s.Started() { - panic("trying 
to set transaction streamer after start") + panic("trying to set transaction consensus after start") } - if s.streamer != nil { - panic("trying to set transaction streamer when already set") + if s.consensus != nil { + panic("trying to set transaction consensus when already set") } - s.streamer = streamer + s.consensus = consensus +} + +func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { + return s.consensus } func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { @@ -266,7 +280,7 @@ func (s *ExecutionEngine) sequencerWrapper(sequencerFunc func() (*types.Block, e } // We got SequencerInsertLockTaken // option 1: there was a race, we are no longer main sequencer - chosenErr := s.streamer.ExpectChosenSequencer() + chosenErr := s.consensus.ExpectChosenSequencer() if chosenErr != nil { return nil, chosenErr } @@ -314,6 +328,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. s.bc, s.bc.Config(), hooks, + false, ) if err != nil { return nil, err @@ -353,7 +368,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. return nil, err } - err = s.streamer.WriteMessageFromSequencer(pos, msgWithMeta) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) if err != nil { return nil, err } @@ -365,6 +380,8 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. return nil, err } + s.cacheL1PriceDataOfMsg(pos, receipts, block) + return block, nil } @@ -397,13 +414,13 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp DelayedMessagesRead: delayedSeqNum + 1, } - err = s.streamer.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) if err != nil { return nil, err } startTime := time.Now() - block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta) + block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta, false) if err != nil { return nil, err } @@ -435,7 +452,7 @@ func (s *ExecutionEngine) MessageIndexToBlockNumber(messageNum arbutil.MessageIn } // must hold createBlockMutex -func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWithMetadata) (*types.Block, *state.StateDB, types.Receipts, error) { +func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWithMetadata, isMsgForPrefetch bool) (*types.Block, *state.StateDB, types.Receipts, error) { currentHeader := s.bc.CurrentBlock() if currentHeader == nil { return nil, nil, nil, errors.New("failed to get current block header") @@ -458,6 +475,11 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb.StartPrefetcher("TransactionStreamer") defer statedb.StopPrefetcher() + batchFetcher := func(num uint64) ([]byte, error) { + data, _, err := s.consensus.FetchBatch(s.GetContext(), num) + return data, err + } + block, receipts, err := arbos.ProduceBlock( msg.Message, msg.DelayedMessagesRead, @@ -465,10 +487,8 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb, s.bc, s.bc.Config(), - func(batchNum uint64) ([]byte, error) { - data, _, err := s.streamer.FetchBatch(batchNum) - return data, err - }, + batchFetcher, + isMsgForPrefetch, ) return block, statedb, receipts, err @@ -487,6 +507,16 @@ func (s *ExecutionEngine) appendBlock(block *types.Block, statedb *state.StateDB if 
status == core.SideStatTy { return errors.New("geth rejected block as non-canonical") } + baseFeeGauge.Update(block.BaseFee().Int64()) + txCountHistogram.Update(int64(len(block.Transactions()) - 1)) + var blockGasUsed uint64 + for i := 1; i < len(receipts); i++ { + val := arbmath.SaturatingUSub(receipts[i].GasUsed, receipts[i].GasUsedForL1) + txGasUsedHistogram.Update(int64(val)) + blockGasUsed += val + } + blockGasUsedHistogram.Update(int64(blockGasUsed)) + gasUsedSinceStartupCounter.Inc(int64(blockGasUsed)) return nil } @@ -505,6 +535,55 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } +func (s *ExecutionEngine) GetL1GasPriceEstimate() (uint64, error) { + bc := s.bc + latestHeader := bc.CurrentBlock() + latestState, err := bc.StateAt(latestHeader.Root) + if err != nil { + return 0, errors.New("error getting latest statedb while fetching l2 Estimate of L1 GasPrice") + } + arbState, err := arbosState.OpenSystemArbosState(latestState, nil, true) + if err != nil { + return 0, errors.New("error opening system arbos state while fetching l2 Estimate of L1 GasPrice") + } + l2EstimateL1GasPrice, err := arbState.L1PricingState().PricePerUnit() + if err != nil { + return 0, errors.New("error fetching l2 Estimate of L1 GasPrice") + } + return l2EstimateL1GasPrice.Uint64(), nil +} + +func (s *ExecutionEngine) getL1PricingSurplus() (int64, error) { + bc := s.bc + latestHeader := bc.CurrentBlock() + latestState, err := bc.StateAt(latestHeader.Root) + if err != nil { + return 0, errors.New("error getting latest statedb while fetching current L1 pricing surplus") + } + arbState, err := arbosState.OpenSystemArbosState(latestState, nil, true) + if err != nil { + return 0, errors.New("error opening system arbos state while fetching current L1 pricing surplus") + } + surplus, err := arbState.L1PricingState().GetL1PricingSurplus() + if err != nil { + return 0, errors.New("error fetching current L1 pricing surplus") + } + return surplus.Int64(), nil +} + +func (s *ExecutionEngine) cacheL1PriceDataOfMsg(num arbutil.MessageIndex, receipts types.Receipts, block *types.Block) { + var gasUsedForL1 uint64 + for i := 1; i < len(receipts); i++ { + gasUsedForL1 += receipts[i].GasUsedForL1 + } + gasChargedForL1 := gasUsedForL1 * block.BaseFee().Uint64() + var callDataUnits uint64 + for _, tx := range block.Transactions() { + callDataUnits += tx.CalldataUnits + } + s.consensus.CacheL1PriceDataOfMsg(num, callDataUnits, gasChargedForL1) +} + // DigestMessage is used to create a block by executing msg against the latest state and storing it.
// Also, while creating a block by executing msg against the latest state, // in parallel, creates a block by executing msgForPrefetch (msg+1) against the latest state @@ -532,23 +611,19 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, } startTime := time.Now() - var wg sync.WaitGroup if s.prefetchBlock && msgForPrefetch != nil { - wg.Add(1) go func() { - defer wg.Done() - _, _, _, err := s.createBlockFromNextMessage(msgForPrefetch) + _, _, _, err := s.createBlockFromNextMessage(msgForPrefetch, true) if err != nil { return } }() } - block, statedb, receipts, err := s.createBlockFromNextMessage(msg) + block, statedb, receipts, err := s.createBlockFromNextMessage(msg, false) if err != nil { return err } - wg.Wait() err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { return err diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 80c2939af..88c141003 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -51,6 +51,7 @@ type Config struct { TxLookupLimit uint64 `koanf:"tx-lookup-limit"` Dangerous DangerousConfig `koanf:"dangerous"` EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` forwardingTarget string } @@ -83,6 +84,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) CachingConfigAddOptions(prefix+".caching", f) + SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") DangerousConfigAddOptions(prefix+".dangerous", f) f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks") @@ -118,8 +120,8 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault config.Sequencer = TestSequencerConfig - config.ForwardingTarget = "null" config.ParentChainReader = headerreader.TestConfig + config.ForwardingTarget = "null" _ = config.Validate() @@ -138,7 +140,9 @@ type ExecutionNode struct { Sequencer *Sequencer // either nil or same as TxPublisher TxPublisher TransactionPublisher ConfigFetcher ConfigFetcher + SyncMonitor *SyncMonitor ParentChainReader *headerreader.HeaderReader + ClassicOutbox *ClassicOutboxRetriever started atomic.Bool } @@ -169,6 +173,8 @@ func CreateExecutionNode( if err != nil { return nil, err } + } else if config.Sequencer.Enable { + log.Warn("sequencer enabled without l1 client") } if config.Sequencer.Enable { @@ -192,7 +198,7 @@ func CreateExecutionNode( txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher().TxPreChecker } txPublisher = NewTxPreChecker(txPublisher, l2BlockChain, txprecheckConfigFetcher) - arbInterface, err := NewArbInterface(execEngine, txPublisher) + arbInterface, err := NewArbInterface(l2BlockChain, txPublisher) if err != nil { return nil, err } @@ -205,6 +211,20 @@ func CreateExecutionNode( return nil, err } + syncMon := NewSyncMonitor(&config.SyncMonitor, execEngine) + + var classicOutbox *ClassicOutboxRetriever + + if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { + classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "", true) + if err != nil { + log.Warn("Classic Msg Database not found", "err", err) + classicOutbox = nil + } else { + classicOutbox = 
NewClassicOutboxRetriever(classicMsgDb) + } + } + apis := []rpc.API{{ Namespace: "arb", Version: "1.0", @@ -248,13 +268,19 @@ func CreateExecutionNode( Sequencer: sequencer, TxPublisher: txPublisher, ConfigFetcher: configFetcher, + SyncMonitor: syncMon, ParentChainReader: parentChainReader, + ClassicOutbox: classicOutbox, }, nil } -func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, sync arbitrum.SyncProgressBackend) error { - n.ArbInterface.Initialize(arbnode) +func (n *ExecutionNode) GetL1GasPriceEstimate() (uint64, error) { + return n.ExecEngine.GetL1GasPriceEstimate() +} + +func (n *ExecutionNode) Initialize(ctx context.Context) error { + n.ArbInterface.Initialize(n) err := n.Backend.Start() if err != nil { return fmt.Errorf("error starting geth backend: %w", err) @@ -263,7 +289,7 @@ func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, syn if err != nil { return fmt.Errorf("error initializing transaction publisher: %w", err) } - err = n.Backend.APIBackend().SetSyncBackend(sync) + err = n.Backend.APIBackend().SetSyncBackend(n.SyncMonitor) if err != nil { return fmt.Errorf("error setting sync backend: %w", err) } @@ -361,11 +387,13 @@ func (n *ExecutionNode) Pause() { n.Sequencer.Pause() } } + func (n *ExecutionNode) Activate() { if n.Sequencer != nil { n.Sequencer.Activate() } } + func (n *ExecutionNode) ForwardTo(url string) error { if n.Sequencer != nil { return n.Sequencer.ForwardTo(url) @@ -373,9 +401,12 @@ func (n *ExecutionNode) ForwardTo(url string) error { return errors.New("forwardTo not supported - sequencer not active") } } -func (n *ExecutionNode) SetTransactionStreamer(streamer execution.TransactionStreamer) { - n.ExecEngine.SetTransactionStreamer(streamer) + +func (n *ExecutionNode) SetConsensusClient(consensus execution.FullConsensusClient) { + n.ExecEngine.SetConsensus(consensus) + n.SyncMonitor.SetConsensusInfo(consensus) } + func (n *ExecutionNode) MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 { return n.ExecEngine.MessageIndexToBlockNumber(messageNum) } diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 5db38cbb4..23340594c 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -10,6 +10,7 @@ import ( "math" "math/big" "runtime/debug" + "strconv" "strings" "sync" "sync/atomic" @@ -25,10 +26,12 @@ import ( "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/arbitrum_types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" @@ -51,21 +54,30 @@ var ( successfulBlocksCounter = metrics.NewRegisteredCounter("arb/sequencer/block/successful", nil) conditionalTxRejectedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/condtionaltx/rejected", nil) conditionalTxAcceptedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/condtionaltx/accepted", nil) + l1GasPriceGauge = metrics.NewRegisteredGauge("arb/sequencer/l1gasprice", nil) + callDataUnitsBacklogGauge = metrics.NewRegisteredGauge("arb/sequencer/calldataunitsbacklog", nil) + unusedL1GasChargeGauge = 
metrics.NewRegisteredGauge("arb/sequencer/unusedl1gascharge", nil) + currentSurplusGauge = metrics.NewRegisteredGauge("arb/sequencer/currentsurplus", nil) + expectedSurplusGauge = metrics.NewRegisteredGauge("arb/sequencer/expectedsurplus", nil) ) type SequencerConfig struct { - Enable bool `koanf:"enable"` - MaxBlockSpeed time.Duration `koanf:"max-block-speed" reload:"hot"` - MaxRevertGasReject uint64 `koanf:"max-revert-gas-reject" reload:"hot"` - MaxAcceptableTimestampDelta time.Duration `koanf:"max-acceptable-timestamp-delta" reload:"hot"` - SenderWhitelist string `koanf:"sender-whitelist"` - Forwarder ForwarderConfig `koanf:"forwarder"` - QueueSize int `koanf:"queue-size"` - QueueTimeout time.Duration `koanf:"queue-timeout" reload:"hot"` - NonceCacheSize int `koanf:"nonce-cache-size" reload:"hot"` - MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` - NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` - NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` + Enable bool `koanf:"enable"` + MaxBlockSpeed time.Duration `koanf:"max-block-speed" reload:"hot"` + MaxRevertGasReject uint64 `koanf:"max-revert-gas-reject" reload:"hot"` + MaxAcceptableTimestampDelta time.Duration `koanf:"max-acceptable-timestamp-delta" reload:"hot"` + SenderWhitelist string `koanf:"sender-whitelist"` + Forwarder ForwarderConfig `koanf:"forwarder"` + QueueSize int `koanf:"queue-size"` + QueueTimeout time.Duration `koanf:"queue-timeout" reload:"hot"` + NonceCacheSize int `koanf:"nonce-cache-size" reload:"hot"` + MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` + NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` + NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` + ExpectedSurplusSoftThreshold string `koanf:"expected-surplus-soft-threshold" reload:"hot"` + ExpectedSurplusHardThreshold string `koanf:"expected-surplus-hard-threshold" reload:"hot"` + expectedSurplusSoftThreshold int + expectedSurplusHardThreshold int } func (c *SequencerConfig) Validate() error { @@ -78,6 +90,20 @@ func (c *SequencerConfig) Validate() error { return fmt.Errorf("sequencer sender whitelist entry \"%v\" is not a valid address", address) } } + var err error + if c.ExpectedSurplusSoftThreshold != "default" { + if c.expectedSurplusSoftThreshold, err = strconv.Atoi(c.ExpectedSurplusSoftThreshold); err != nil { + return fmt.Errorf("invalid expected-surplus-soft-threshold value provided in batchposter config %w", err) + } + } + if c.ExpectedSurplusHardThreshold != "default" { + if c.expectedSurplusHardThreshold, err = strconv.Atoi(c.ExpectedSurplusHardThreshold); err != nil { + return fmt.Errorf("invalid expected-surplus-hard-threshold value provided in batchposter config %w", err) + } + } + if c.expectedSurplusSoftThreshold < c.expectedSurplusHardThreshold { + return errors.New("expected-surplus-soft-threshold cannot be lower than expected-surplus-hard-threshold") + } return nil } @@ -86,7 +112,7 @@ type SequencerConfigFetcher func() *SequencerConfig var DefaultSequencerConfig = SequencerConfig{ Enable: false, MaxBlockSpeed: time.Millisecond * 250, - MaxRevertGasReject: params.TxGas + 10000, + MaxRevertGasReject: 0, MaxAcceptableTimestampDelta: time.Hour, Forwarder: DefaultSequencerForwarderConfig, QueueSize: 1024, @@ -94,24 +120,28 @@ var DefaultSequencerConfig = SequencerConfig{ NonceCacheSize: 1024, // 95% of the default batch poster limit, leaving 5KB for headers and such // This default is overridden for L3 
@@ -86,7 +112,7 @@ type SequencerConfigFetcher func() *SequencerConfig var DefaultSequencerConfig = SequencerConfig{ Enable: false, MaxBlockSpeed: time.Millisecond * 250, - MaxRevertGasReject: params.TxGas + 10000, + MaxRevertGasReject: 0, MaxAcceptableTimestampDelta: time.Hour, Forwarder: DefaultSequencerForwarderConfig, QueueSize: 1024, @@ -94,24 +120,28 @@ NonceCacheSize: 1024, // 95% of the default batch poster limit, leaving 5KB for headers and such // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go - MaxTxDataSize: 95000, - NonceFailureCacheSize: 1024, - NonceFailureCacheExpiry: time.Second, + MaxTxDataSize: 95000, + NonceFailureCacheSize: 1024, + NonceFailureCacheExpiry: time.Second, + ExpectedSurplusSoftThreshold: "default", + ExpectedSurplusHardThreshold: "default", } var TestSequencerConfig = SequencerConfig{ - Enable: true, - MaxBlockSpeed: time.Millisecond * 10, - MaxRevertGasReject: params.TxGas + 10000, - MaxAcceptableTimestampDelta: time.Hour, - SenderWhitelist: "", - Forwarder: DefaultTestForwarderConfig, - QueueSize: 128, - QueueTimeout: time.Second * 5, - NonceCacheSize: 4, - MaxTxDataSize: 95000, - NonceFailureCacheSize: 1024, - NonceFailureCacheExpiry: time.Second, + Enable: true, + MaxBlockSpeed: time.Millisecond * 10, + MaxRevertGasReject: params.TxGas + 10000, + MaxAcceptableTimestampDelta: time.Hour, + SenderWhitelist: "", + Forwarder: DefaultTestForwarderConfig, + QueueSize: 128, + QueueTimeout: time.Second * 5, + NonceCacheSize: 4, + MaxTxDataSize: 95000, + NonceFailureCacheSize: 1024, + NonceFailureCacheExpiry: time.Second, + ExpectedSurplusSoftThreshold: "default", + ExpectedSurplusHardThreshold: "default", } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -127,6 +157,8 @@ f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") + f.String(prefix+".expected-surplus-soft-threshold", DefaultSequencerConfig.ExpectedSurplusSoftThreshold, "if expected surplus is lower than this value, warnings are posted") + f.String(prefix+".expected-surplus-hard-threshold", DefaultSequencerConfig.ExpectedSurplusHardThreshold, "if expected surplus is lower than this value, new incoming transactions will be denied") } type txQueueItem struct { @@ -291,6 +323,10 @@ type Sequencer struct { activeMutex sync.Mutex pauseChan chan struct{} forwarder *TxForwarder + + expectedSurplusMutex sync.RWMutex + expectedSurplus int64 + expectedSurplusUpdated bool } func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderReader, configFetcher SequencerConfigFetcher) (*Sequencer, error) { @@ -364,6 +400,16 @@ func ctxWithTimeout(ctx context.Context, timeout time.Duration) (context.Context } func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { + // Only acquire the read lock and check the hard threshold when the l1reader is non-nil + // and the hard threshold is enabled; this avoids taking read locks when they are not needed + if s.l1Reader != nil && s.config().ExpectedSurplusHardThreshold != "default" { + s.expectedSurplusMutex.RLock() + if s.expectedSurplusUpdated && s.expectedSurplus < int64(s.config().expectedSurplusHardThreshold) { + s.expectedSurplusMutex.RUnlock() + return errors.New("currently not accepting transactions due to expected surplus being below threshold") + } + s.expectedSurplusMutex.RUnlock() + } + sequencerBacklogGauge.Inc(1) defer sequencerBacklogGauge.Dec(1) @@ -481,7 +527,7 @@ func (s *Sequencer) CheckHealth(ctx context.Context) error { if
pauseChan != nil { return nil } - return s.execEngine.streamer.ExpectChosenSequencer() + return s.execEngine.consensus.ExpectChosenSequencer() } func (s *Sequencer) ForwardTarget() string { @@ -944,14 +990,83 @@ func (s *Sequencer) Initialize(ctx context.Context) error { return nil } +var ( + usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) + blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) +) + +func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) { + header, err := s.l1Reader.LastHeader(ctx) + if err != nil { + return 0, fmt.Errorf("error encountered getting latest header from l1reader while updating expectedSurplus: %w", err) + } + l1GasPrice := header.BaseFee.Uint64() + if header.BlobGasUsed != nil { + if header.ExcessBlobGas != nil { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*header.ExcessBlobGas, *header.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + if l1GasPrice > blobFeePerByte.Uint64()/16 { + l1GasPrice = blobFeePerByte.Uint64() / 16 + } + } + } + surplus, err := s.execEngine.getL1PricingSurplus() + if err != nil { + return 0, fmt.Errorf("error encountered getting l1 pricing surplus while updating expectedSurplus: %w", err) + } + backlogL1GasCharged := int64(s.execEngine.consensus.BacklogL1GasCharged()) + backlogCallDataUnits := int64(s.execEngine.consensus.BacklogCallDataUnits()) + expectedSurplus := int64(surplus) + backlogL1GasCharged - backlogCallDataUnits*int64(l1GasPrice) + // update metrics + l1GasPriceGauge.Update(int64(l1GasPrice)) + callDataUnitsBacklogGauge.Update(backlogCallDataUnits) + unusedL1GasChargeGauge.Update(backlogL1GasCharged) + currentSurplusGauge.Update(surplus) + expectedSurplusGauge.Update(expectedSurplus) + if s.config().ExpectedSurplusSoftThreshold != "default" && expectedSurplus < int64(s.config().expectedSurplusSoftThreshold) { + log.Warn("expected surplus is below soft threshold", "value", expectedSurplus, "threshold", s.config().expectedSurplusSoftThreshold) + } + return expectedSurplus, nil +} + func (s *Sequencer) Start(ctxIn context.Context) error { s.StopWaiter.Start(ctxIn, s) + + if (s.config().ExpectedSurplusHardThreshold != "default" || s.config().ExpectedSurplusSoftThreshold != "default") && s.l1Reader == nil { + return errors.New("expected surplus soft/hard thresholds are enabled but l1Reader is nil") + } + if s.l1Reader != nil { initialBlockNr := atomic.LoadUint64(&s.l1BlockNumber) if initialBlockNr == 0 { return errors.New("sequencer not initialized") } + expectedSurplus, err := s.updateExpectedSurplus(ctxIn) + if err != nil { + if s.config().ExpectedSurplusHardThreshold != "default" { + return fmt.Errorf("expected-surplus-hard-threshold is enabled but error fetching initial expected surplus value: %w", err) + } + log.Error("expected-surplus-soft-threshold is enabled but error fetching initial expected surplus value", "err", err) + } else { + s.expectedSurplus = expectedSurplus + s.expectedSurplusUpdated = true + } + s.CallIteratively(func(ctx context.Context) time.Duration { + expectedSurplus, err := s.updateExpectedSurplus(ctxIn) + s.expectedSurplusMutex.Lock() + defer s.expectedSurplusMutex.Unlock() + if err != nil { + s.expectedSurplusUpdated = false + log.Error("expected surplus soft/hard thresholds are enabled but unable to fetch latest expected surplus, retrying", "err", err) + return 0 + } + s.expectedSurplusUpdated = true + s.expectedSurplus = expectedSurplus + 
return 5 * time.Second + }) + headerChan, cancel := s.l1Reader.Subscribe(false) s.LaunchThread(func(ctx context.Context) { diff --git a/execution/gethexec/sync_monitor.go b/execution/gethexec/sync_monitor.go new file mode 100644 index 000000000..35256f72a --- /dev/null +++ b/execution/gethexec/sync_monitor.go @@ -0,0 +1,113 @@ +package gethexec + +import ( + "context" + + "github.com/offchainlabs/nitro/execution" + "github.com/pkg/errors" + flag "github.com/spf13/pflag" +) + +type SyncMonitorConfig struct { + SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` + FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` +} + +var DefaultSyncMonitorConfig = SyncMonitorConfig{ + SafeBlockWaitForBlockValidator: false, + FinalizedBlockWaitForBlockValidator: false, +} + +func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") + f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") +} + +type SyncMonitor struct { + config *SyncMonitorConfig + consensus execution.ConsensusInfo + exec *ExecutionEngine +} + +func NewSyncMonitor(config *SyncMonitorConfig, exec *ExecutionEngine) *SyncMonitor { + return &SyncMonitor{ + config: config, + exec: exec, + } +} + +func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} { + res := s.consensus.FullSyncProgressMap() + consensusSyncTarget := s.consensus.SyncTargetMessageCount() + + built, err := s.exec.HeadMessageNumber() + if err != nil { + res["headMsgNumberError"] = err + } + + res["builtBlock"] = built + res["consensusSyncTarget"] = consensusSyncTarget + + return res +} + +func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { + if s.consensus.Synced() { + built, err := s.exec.HeadMessageNumber() + consensusSyncTarget := s.consensus.SyncTargetMessageCount() + if err == nil && built+1 >= consensusSyncTarget { + return make(map[string]interface{}) + } + } + return s.FullSyncProgressMap() +} + +func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { + if s.consensus == nil { + return 0, errors.New("not set up for safeblock") + } + msg, err := s.consensus.GetSafeMsgCount(ctx) + if err != nil { + return 0, err + } + if s.config.SafeBlockWaitForBlockValidator { + latestValidatedCount, err := s.consensus.ValidatedMessageCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } + block := s.exec.MessageIndexToBlockNumber(msg - 1) + return block, nil +} + +func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { + if s.consensus == nil { + return 0, errors.New("not set up for finalizedblock") + } + msg, err := s.consensus.GetFinalizedMsgCount(ctx) + if err != nil { + return 0, err + } + if s.config.FinalizedBlockWaitForBlockValidator { + latestValidatedCount, err := s.consensus.ValidatedMessageCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } + block := s.exec.MessageIndexToBlockNumber(msg - 1) + return block, nil +} + +func (s *SyncMonitor) Synced() bool { + return len(s.SyncProgressMap()) == 0 +} + +func (s *SyncMonitor) SetConsensusInfo(consensus execution.ConsensusInfo) {
s.consensus = consensus +} diff --git a/execution/interface.go b/execution/interface.go index 7fc62a2dc..e2b41b9a0 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -56,7 +56,7 @@ type ExecutionSequencer interface { ForwardTo(url string) error SequenceDelayedMessage(message *arbostypes.L1IncomingMessage, delayedSeqNum uint64) error NextDelayedMessageNumber() (uint64, error) - SetTransactionStreamer(streamer TransactionStreamer) + GetL1GasPriceEstimate() (uint64, error) } type FullExecutionClient interface { @@ -69,19 +69,38 @@ type FullExecutionClient interface { Maintenance() error - // TODO: only used to get safe/finalized block numbers - MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 - ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) } // not implemented in execution, used as input +// BatchFetcher is required for any execution node type BatchFetcher interface { - FetchBatch(batchNum uint64) ([]byte, common.Hash, error) + FetchBatch(ctx context.Context, batchNum uint64) ([]byte, common.Hash, error) + FindInboxBatchContainingMessage(message arbutil.MessageIndex) (uint64, bool, error) + GetBatchParentChainBlock(seqNum uint64) (uint64, error) } -type TransactionStreamer interface { - BatchFetcher +type ConsensusInfo interface { + Synced() bool + FullSyncProgressMap() map[string]interface{} + SyncTargetMessageCount() arbutil.MessageIndex + + // TODO: switch from pulling to pushing safe/finalized + GetSafeMsgCount(ctx context.Context) (arbutil.MessageIndex, error) + GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, error) + ValidatedMessageCount() (arbutil.MessageIndex, error) +} + +type ConsensusSequencer interface { WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error ExpectChosenSequencer() error + CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) + BacklogL1GasCharged() uint64 + BacklogCallDataUnits() uint64 +} + +type FullConsensusClient interface { + BatchFetcher + ConsensusInfo + ConsensusSequencer } diff --git a/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go similarity index 88% rename from nodeInterface/NodeInterface.go rename to execution/nodeInterface/NodeInterface.go index bdcfb569f..7e524731d 100644 --- a/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -20,14 +20,12 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" - "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/merkletree" ) @@ -53,90 +51,129 @@ var merkleTopic common.Hash var l2ToL1TxTopic common.Hash var l2ToL1TransactionTopic common.Hash -var blockInGenesis = errors.New("") -var blockAfterLatestBatch = errors.New("") - func (n NodeInterface) NitroGenesisBlock(c ctx) (huge, error) { block := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum return arbmath.UintToBig(block), nil } +// bool will be false but no error if behind genesis +func (n NodeInterface) blockNumToMessageIndex(blockNum uint64) (arbutil.MessageIndex, bool, 
error) { + node, err := gethExecFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + blockchain, err := blockchainFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + if blockNum < blockchain.Config().ArbitrumChainParams.GenesisBlockNum { + return 0, false, nil + } + msgIndex, err := node.ExecEngine.BlockNumberToMessageIndex(blockNum) + if err != nil { + return 0, false, err + } + return msgIndex, true, nil +} + +func (n NodeInterface) msgNumToInboxBatch(msgIndex arbutil.MessageIndex) (uint64, bool, error) { + node, err := gethExecFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, false, err + } + fetcher := node.ExecEngine.GetBatchFetcher() + if fetcher == nil { + return 0, false, errors.New("batch fetcher not set") + } + return fetcher.FindInboxBatchContainingMessage(msgIndex) +} + func (n NodeInterface) FindBatchContainingBlock(c ctx, evm mech, blockNum uint64) (uint64, error) { - node, err := arbNodeFromNodeInterfaceBackend(n.backend) + msgIndex, found, err := n.blockNumToMessageIndex(blockNum) if err != nil { return 0, err } - return findBatchContainingBlock(node, node.TxStreamer.GenesisBlockNumber(), blockNum) + if !found { + return 0, fmt.Errorf("block %v is part of genesis", blockNum) + } + res, found, err := n.msgNumToInboxBatch(msgIndex) + if err == nil && !found { + return 0, errors.New("block not yet found on any batch") + } + return res, err }
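FindBatchContainingBlock is now a composition of two found-flag lookups: block number to message index, where found=false means the block predates genesis, and message index to inbox batch, where found=false means no posted batch contains the message yet. A toy sketch of that composition over stubbed lookups (the genesis number and batch size are invented, and uint64 stands in for arbutil.MessageIndex):

package main

import (
	"errors"
	"fmt"
)

const genesisBlockNum = 100

// blockToMessage returns found=false, with no error, for pre-genesis
// blocks, mirroring blockNumToMessageIndex.
func blockToMessage(blockNum uint64) (msgIndex uint64, found bool) {
	if blockNum < genesisBlockNum {
		return 0, false
	}
	return blockNum - genesisBlockNum, true
}

// messageToBatch reports found=false when the message is newer than any
// posted batch, mirroring FindInboxBatchContainingMessage.
func messageToBatch(msgIndex, postedMessages uint64) (batch uint64, found bool) {
	if msgIndex >= postedMessages {
		return 0, false
	}
	return msgIndex / 10, true // pretend each batch holds 10 messages
}

func findBatchContainingBlock(blockNum, postedMessages uint64) (uint64, error) {
	msgIndex, found := blockToMessage(blockNum)
	if !found {
		return 0, fmt.Errorf("block %v is part of genesis", blockNum)
	}
	batch, found := messageToBatch(msgIndex, postedMessages)
	if !found {
		return 0, errors.New("block not yet found on any batch")
	}
	return batch, nil
}

func main() {
	fmt.Println(findBatchContainingBlock(99, 50))  // genesis error
	fmt.Println(findBatchContainingBlock(125, 50)) // batch 2
	fmt.Println(findBatchContainingBlock(175, 50)) // not yet posted
}
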
 
 func (n NodeInterface) GetL1Confirmations(c ctx, evm mech, blockHash bytes32) (uint64, error) {
-	node, err := arbNodeFromNodeInterfaceBackend(n.backend)
+	node, err := gethExecFromNodeInterfaceBackend(n.backend)
 	if err != nil {
 		return 0, err
 	}
-	if node.InboxReader == nil {
-		return 0, nil
-	}
-	bc, err := blockchainFromNodeInterfaceBackend(n.backend)
+	blockchain, err := blockchainFromNodeInterfaceBackend(n.backend)
 	if err != nil {
 		return 0, err
 	}
-	header := bc.GetHeaderByHash(blockHash)
+	header := blockchain.GetHeaderByHash(blockHash)
 	if header == nil {
 		return 0, errors.New("unknown block hash")
 	}
 	blockNum := header.Number.Uint64()
-	genesis := node.TxStreamer.GenesisBlockNumber()
-	batch, err := findBatchContainingBlock(node, genesis, blockNum)
+
+	// blocks behind genesis are treated as belonging to batch 0
+	msgNum, _, err := n.blockNumToMessageIndex(blockNum)
 	if err != nil {
-		if errors.Is(err, blockInGenesis) {
-			batch = 0
-		} else if errors.Is(err, blockAfterLatestBatch) {
-			return 0, nil
-		} else {
-			return 0, err
-		}
+		return 0, err
+	}
+	// batches not yet posted have 0 confirmations but no error
+	batchNum, found, err := n.msgNumToInboxBatch(msgNum)
+	if err != nil {
+		return 0, err
+	}
+	if !found {
+		return 0, nil
 	}
-	meta, err := node.InboxTracker.GetBatchMetadata(batch)
+	parentChainBlockNum, err := node.ExecEngine.GetBatchFetcher().GetBatchParentChainBlock(batchNum)
 	if err != nil {
 		return 0, err
 	}
-	if node.L1Reader.IsParentChainArbitrum() {
-		parentChainClient := node.L1Reader.Client()
+
+	if node.ParentChainReader.IsParentChainArbitrum() {
+		parentChainClient := node.ParentChainReader.Client()
 		parentNodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, parentChainClient)
 		if err != nil {
 			return 0, err
 		}
-		parentChainBlock, err := parentChainClient.BlockByNumber(n.context, new(big.Int).SetUint64(meta.ParentChainBlock))
+		parentChainBlock, err := parentChainClient.BlockByNumber(n.context, new(big.Int).SetUint64(parentChainBlockNum))
 		if err != nil {
 			// Hide the parent chain RPC error from the client in case it contains sensitive information.
 			// Likely though, this error is just "not found" because the block got reorg'd.
-			return 0, fmt.Errorf("failed to get parent chain block %v containing batch", meta.ParentChainBlock)
+			return 0, fmt.Errorf("failed to get parent chain block %v containing batch", parentChainBlockNum)
 		}
 		confs, err := parentNodeInterface.GetL1Confirmations(&bind.CallOpts{Context: n.context}, parentChainBlock.Hash())
 		if err != nil {
 			log.Warn(
 				"Failed to get L1 confirmations from parent chain",
-				"blockNumber", meta.ParentChainBlock,
+				"blockNumber", parentChainBlockNum,
 				"blockHash", parentChainBlock.Hash(),
 				"err", err,
 			)
 			return 0, fmt.Errorf("failed to get L1 confirmations from parent chain for block %v", parentChainBlock.Hash())
 		}
 		return confs, nil
 	}
-	latestL1Block, latestBatchCount := node.InboxReader.GetLastReadBlockAndBatchCount()
-	if latestBatchCount <= batch {
-		return 0, nil // batch was reorg'd out?
-	}
-	if latestL1Block < meta.ParentChainBlock || arbutil.BlockNumberToMessageCount(blockNum, genesis) > meta.MessageCount {
+	if node.ParentChainReader == nil {
 		return 0, nil
 	}
-	canonicalHash := bc.GetCanonicalHash(header.Number.Uint64())
-	if canonicalHash != header.Hash() {
-		return 0, errors.New("block hash is non-canonical")
+	latestHeader, err := node.ParentChainReader.LastHeaderWithError()
+	if err != nil {
+		return 0, err
+	}
+	if latestHeader == nil {
+		return 0, errors.New("no headers read from l1")
 	}
-	confs := (latestL1Block - meta.ParentChainBlock) + 1 + node.InboxReader.GetDelayBlocks()
-	return confs, nil
+	latestBlockNum := latestHeader.Number.Uint64()
+	if latestBlockNum < parentChainBlockNum {
+		return 0, nil
+	}
+	return (latestBlockNum - parentChainBlockNum), nil
 }
 
 func (n NodeInterface) EstimateRetryableTicket(
@@ -561,42 +598,18 @@ func (n NodeInterface) GasEstimateComponents(
 	return total, gasForL1, baseFee, l1BaseFeeEstimate, nil
 }
 
-func findBatchContainingBlock(node *arbnode.Node, genesis uint64, block uint64) (uint64, error) {
-	if block <= genesis {
-		return 0, fmt.Errorf("%wblock %v is part of genesis", blockInGenesis, block)
-	}
-	pos := arbutil.BlockNumberToMessageCount(block, genesis) - 1
-	high, err := node.InboxTracker.GetBatchCount()
-	if err != nil {
-		return 0, err
-	}
-	high--
-	latestCount, err := node.InboxTracker.GetBatchMessageCount(high)
-	if err != nil {
-		return 0, err
-	}
-	latestBlock := arbutil.MessageCountToBlockNumber(latestCount, genesis)
-	if int64(block) > latestBlock {
-		return 0, fmt.Errorf(
-			"%wrequested block %v is after latest on-chain block %v published in batch %v",
-			blockAfterLatestBatch, block, latestBlock, high,
-		)
-	}
-	return staker.FindBatchContainingMessageIndex(node.InboxTracker, pos, high)
-}
-
 func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum huge, index uint64) (
 	proof []bytes32, path huge, l2Sender addr, l1Dest addr, l2Block huge, l1Block huge,
 	timestamp huge, amount huge, calldataForL1 []byte, err error) {
-	node, err := arbNodeFromNodeInterfaceBackend(n.backend)
+	node, err := gethExecFromNodeInterfaceBackend(n.backend)
 	if err != nil {
 		return
 	}
-	if node.ClassicOutboxRetriever == nil {
+	if node.ClassicOutbox == nil {
 		err = errors.New("this node doesnt support classicLookupMessageBatchProof")
 		return
 	}
-	msg, err := node.ClassicOutboxRetriever.GetMsg(batchNum, index)
+	msg, err := node.ClassicOutbox.GetMsg(batchNum, index)
 	if err != nil {
 		return
 	}
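// A toy restatement (not from this diff) of the simplified rule the reworked
// GetL1Confirmations applies when the parent chain is not itself an Arbitrum
// chain: confirmations = how far the parent chain head has advanced past the
// block that included the batch, floored at zero; unposted batches report
// zero. Note the removed code also added the inbox reader's delay blocks plus
// one; the new rule drops that.
package main

import "fmt"

func l1Confirmations(latestParentBlock, batchParentBlock uint64, batchPosted bool) uint64 {
	if !batchPosted || latestParentBlock < batchParentBlock {
		return 0
	}
	return latestParentBlock - batchParentBlock
}

func main() {
	fmt.Println(l1Confirmations(1_000_120, 1_000_100, true)) // 20
	fmt.Println(l1Confirmations(1_000_120, 0, false))        // 0: batch not posted yet
}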
diff --git a/nodeInterface/NodeInterfaceDebug.go b/execution/nodeInterface/NodeInterfaceDebug.go
similarity index 100%
rename from nodeInterface/NodeInterfaceDebug.go
rename to execution/nodeInterface/NodeInterfaceDebug.go
diff --git a/nodeInterface/virtual-contracts.go b/execution/nodeInterface/virtual-contracts.go
similarity index 96%
rename from nodeInterface/virtual-contracts.go
rename to execution/nodeInterface/virtual-contracts.go
index b35381a77..3a863e31b 100644
--- a/nodeInterface/virtual-contracts.go
+++ b/execution/nodeInterface/virtual-contracts.go
@@ -15,10 +15,10 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/offchainlabs/nitro/arbnode"
 	"github.com/offchainlabs/nitro/arbos"
 	"github.com/offchainlabs/nitro/arbos/arbosState"
 	"github.com/offchainlabs/nitro/arbos/l1pricing"
+	"github.com/offchainlabs/nitro/execution/gethexec"
 	"github.com/offchainlabs/nitro/gethhook"
 	"github.com/offchainlabs/nitro/precompiles"
 	"github.com/offchainlabs/nitro/solgen/go/node_interfacegen"
@@ -173,16 +173,16 @@ func init() {
 	merkleTopic = arbSys.Events["SendMerkleUpdate"].ID
 }
 
-func arbNodeFromNodeInterfaceBackend(backend BackendAPI) (*arbnode.Node, error) {
+func gethExecFromNodeInterfaceBackend(backend BackendAPI) (*gethexec.ExecutionNode, error) {
 	apiBackend, ok := backend.(*arbitrum.APIBackend)
 	if !ok {
 		return nil, errors.New("API backend isn't Arbitrum")
 	}
-	arbNode, ok := apiBackend.GetArbitrumNode().(*arbnode.Node)
+	exec, ok := apiBackend.GetArbitrumNode().(*gethexec.ExecutionNode)
 	if !ok {
 		return nil, errors.New("failed to get Arbitrum Node from backend")
 	}
-	return arbNode, nil
+	return exec, nil
 }
 
 func blockchainFromNodeInterfaceBackend(backend BackendAPI) (*core.BlockChain, error) {
diff --git a/go-ethereum b/go-ethereum
index 846839164..874a12338 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit 8468391640657b3eb9ed791d4b1c3ff3b6577a8e
+Subproject commit 874a123385727c11534ed6d82e18a012f7a94708
diff --git a/go.mod b/go.mod
index 530e9b241..11476d83e 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,6 @@ require (
 	github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible
 	github.com/Shopify/toxiproxy v2.1.4+incompatible
 	github.com/alicebob/miniredis/v2 v2.21.0
-	github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156
 	github.com/andybalholm/brotli v1.0.4
 	github.com/aws/aws-sdk-go-v2 v1.16.4
 	github.com/aws/aws-sdk-go-v2/config v1.15.5
@@ -20,7 +19,7 @@ require (
 	github.com/cavaliergopher/grab/v3 v3.0.1
 	github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593
 	github.com/codeclysm/extract/v3 v3.0.2
-	github.com/dgraph-io/badger/v3 v3.2103.2
+	github.com/dgraph-io/badger/v4 v4.2.0
 	github.com/enescakir/emoji v1.0.0
 	github.com/ethereum/go-ethereum v1.10.26
 	github.com/fatih/structtag v1.2.0
@@ -29,21 +28,22 @@ require (
 	github.com/gobwas/httphead v0.1.0
 	github.com/gobwas/ws v1.2.1
 	github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/holiman/uint256 v1.2.3
 	github.com/knadh/koanf v1.4.0
 	github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f
 	github.com/mitchellh/mapstructure v1.4.1
+	github.com/pkg/errors v0.9.1
 	github.com/r3labs/diff/v3 v3.0.1
 	github.com/rivo/tview v0.0.0-20240307173318-e804876934a1
 	github.com/spf13/pflag v1.0.5
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
 	github.com/wasmerio/wasmer-go v1.0.4
 	github.com/wealdtech/go-merkletree v1.0.0
-	golang.org/x/crypto v0.16.0
-	golang.org/x/sys v0.17.0
-
golang.org/x/term v0.17.0 + golang.org/x/crypto v0.21.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 golang.org/x/tools v0.16.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -70,12 +70,10 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.5.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect - github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.8.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect - github.com/cockroachdb/redact v1.0.8 // indirect - github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect + github.com/cockroachdb/redact v1.1.3 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.10.0 // indirect @@ -84,7 +82,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect @@ -95,6 +93,7 @@ require ( github.com/gammazero/deque v0.2.1 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/gdamore/encoding v1.0.0 // indirect + github.com/getsentry/sentry-go v0.12.0 // indirect github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect @@ -102,7 +101,7 @@ require ( github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect @@ -119,6 +118,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect + github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect github.com/klauspost/compress v1.17.2 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect @@ -133,14 +133,13 @@ require ( github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 
v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect @@ -158,7 +157,7 @@ require ( go.opencensus.io v0.22.5 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sync v0.5.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect diff --git a/go.sum b/go.sum index 685c98381..c8ffc6859 100644 --- a/go.sum +++ b/go.sum @@ -36,18 +36,15 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= -github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -136,8 +133,6 @@ github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYpt github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -150,20 +145,18 @@ 
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= -github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= -github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f h1:6jduT9Hfc0njg5jJ1DdKCFPdMBrp/mdZfCpa5h+WM74= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= -github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= -github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= -github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codeclysm/extract/v3 v3.0.2 h1:sB4LcE3Php7LkhZwN0n2p8GCwZe92PEQutdbGURf5xc= @@ -192,11 +185,10 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPc github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= -github.com/dgraph-io/badger/v3 v3.2103.2/go.mod 
h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -217,6 +209,7 @@ github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkK github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= @@ -228,7 +221,6 @@ github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -242,6 +234,8 @@ github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdk github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.7.1 h1:TiCcmpWHiAU7F0rA2I3S2Y4mmLmO9KHxJ7E1QhYzQbc= github.com/gdamore/tcell/v2 v2.7.1/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= +github.com/getsentry/sentry-go v0.12.0 h1:era7g0re5iY13bHSdN/xMkyV+5zZppjRVQhZrXCaEIk= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -284,16 
+278,18 @@ github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484/go.mod h1:5nDZF github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -324,7 +320,6 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -344,8 +339,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -362,12 +357,13 @@ github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVe github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= @@ -410,14 +406,15 @@ github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZm github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= -github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -429,6 +426,7 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv 
v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -443,7 +441,6 @@ github.com/juju/loggo v0.0.0-20170605014607-8232ab8918d9/go.mod h1:vgyd7OREkbtVE github.com/juju/loggo v0.0.0-20180524022052-584905176618 h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8= github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/retry v0.0.0-20160928201858-1998d01ba1c3/go.mod h1:OohPQGsr4pnxwD5YljhQ+TZnuVRYpa5irjugL1Yuif4= -github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0 h1:+WWUkhnTjV6RNOxkcwk79qrjeyHEHvBzlneueBsatX4= github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0/go.mod h1:hpGvhGHPVbNBraRLZEhoQwFLMrjK8PSlO4D3nDjKYXo= github.com/juju/utils v0.0.0-20180808125547-9dfc6dbfb02b/go.mod h1:6/KLg8Wz/y2KVGWEpkK9vMNGkOnu4k/cqs8Z1fKjTOk= @@ -451,16 +448,15 @@ github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CIm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= -github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= -github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= -github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= 
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -479,7 +475,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= @@ -490,12 +486,16 @@ github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f h1:4+gHs0jJFJ06bfN8P github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f/go.mod h1:tHCZHV8b2A90ObojrEAzY0Lb03gxUxjDHr5IJyAh4ew= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= @@ -506,8 +506,7 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= -github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ 
-536,8 +535,9 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= -github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= @@ -548,8 +548,8 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= @@ -566,6 +566,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -610,8 +611,9 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.7.0 
h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -619,8 +621,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -629,9 +631,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= @@ -647,6 +646,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= @@ -662,13 +662,16 @@ github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9f github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec 
v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= @@ -692,6 +695,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= @@ -710,10 +714,12 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -738,6 +744,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -746,6 +753,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -787,13 +795,15 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -846,6 +856,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -865,34 +876,45 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 
h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= @@ -902,10 +924,10 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -950,6 +972,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools 
v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= @@ -1011,6 +1034,7 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1026,6 +1050,7 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1053,6 +1078,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= @@ -1070,7 +1096,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/nitro-testnode b/nitro-testnode index aee6ceff9..3922df9ca 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 
aee6ceff9c9d3fb2749da55a7d7842f23d1bfc8e +Subproject commit 3922df9caf7a65dd4168b8158c1244c5fe88780e diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index cda5350a4..cb0045c49 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -202,20 +202,7 @@ func (con ArbGasInfo) GetL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { return con._preversion10_GetL1PricingSurplus(c, evm) } ps := c.State.L1PricingState() - fundsDueForRefunds, err := ps.BatchPosterTable().TotalFundsDue() - if err != nil { - return nil, err - } - fundsDueForRewards, err := ps.FundsDueForRewards() - if err != nil { - return nil, err - } - haveFunds, err := ps.L1FeesAvailable() - if err != nil { - return nil, err - } - needFunds := arbmath.BigAdd(fundsDueForRefunds, fundsDueForRewards) - return arbmath.BigSub(haveFunds, needFunds), nil + return ps.GetL1PricingSurplus() } func (con ArbGasInfo) _preversion10_GetL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 2a21ff5f8..6d982506f 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -4,10 +4,12 @@ package precompiles import ( + "bytes" "errors" "fmt" "math/big" "reflect" + "sort" "strconv" "strings" "unicode" @@ -814,6 +816,9 @@ func (p *Precompile) Get4ByteMethodSignatures() [][4]byte { for sig := range p.methods { ret = append(ret, sig) } + sort.Slice(ret, func(i, j int) bool { + return bytes.Compare(ret[i][:], ret[j][:]) < 0 + }) return ret } diff --git a/relay/relay_stress_test.go b/relay/relay_stress_test.go new file mode 100644 index 000000000..9a8875a42 --- /dev/null +++ b/relay/relay_stress_test.go @@ -0,0 +1,182 @@ +package relay + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/broadcaster" + "github.com/offchainlabs/nitro/broadcaster/message" + "github.com/offchainlabs/nitro/util/stopwaiter" + "github.com/offchainlabs/nitro/wsbroadcastserver" +) + +type DummyUpStream struct { + stopwaiter.StopWaiter + broadcaster *broadcaster.Broadcaster +} + +func NewDummyUpStream(config *Config, feedErrChan chan error) *DummyUpStream { + dataSignerErr := func([]byte) ([]byte, error) { + return nil, errors.New("relay attempted to sign feed message") + } + return &DummyUpStream{ + broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.Chain.ID, feedErrChan, dataSignerErr), + } +} + +func (r *DummyUpStream) Start(ctx context.Context) error { + r.StopWaiter.Start(ctx, r) + err := r.broadcaster.Initialize() + if err != nil { + return errors.New("broadcast unable to initialize") + } + err = r.broadcaster.Start(ctx) + if err != nil { + return errors.New("broadcast unable to start") + } + return nil +} + +func (r *DummyUpStream) PopulateFeedBacklogByNumber(ctx context.Context, backlogSize, l2MsgSize int) { + was := r.broadcaster.GetCachedMessageCount() + var seqNums []arbutil.MessageIndex + for i := was; i < was+backlogSize; i++ { + seqNums = append(seqNums, arbutil.MessageIndex(i)) + } + + messages := make([]*message.BroadcastFeedMessage, 0, len(seqNums)) + for _, seqNum := range seqNums { + broadcastMessage := &message.BroadcastFeedMessage{ + SequenceNumber: seqNum, + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + L2msg: 
make([]byte, l2MsgSize), + }, + }, + } + messages = append(messages, broadcastMessage) + } + r.broadcaster.BroadcastFeedMessages(messages) + waitForBacklog(r.broadcaster, was, was+backlogSize) +} + +func waitForBacklog(b *broadcaster.Broadcaster, was, target int) { + time.Sleep(time.Second) + prevCount := was + for count := b.GetCachedMessageCount(); count != target; count = b.GetCachedMessageCount() { + if prevCount == count { + log.Warn("unable to populate feed backlog. Cached message count did not increment") + break + } else { + prevCount = count + } + log.Info("populating feed backlog to stress test relay", "current", count, "target", target) + time.Sleep(5 * time.Second) + } +} + +type dummyTxStreamer struct { + id int + logConnection bool +} + +func (ts *dummyTxStreamer) AddBroadcastMessages(feedMessages []*message.BroadcastFeedMessage) error { + // to mimic latency of txstreamer + time.Sleep(50 * time.Millisecond) + if !ts.logConnection { + ts.logConnection = true + log.Info("test client is successfully receiving messages", "client_Id", ts.id, "msg_size", feedMessages[0].Size()) + } + return nil +} + +func largeBacklogRelayTestImpl(t *testing.T, numClients, backlogSize, l2MsgSize int, connectDeadline time.Duration, upStreamPort, relayPort string) { + // total size of the backlog = backlogSize * (l2MsgSize + 160) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + upStreamConfig := &ConfigDefault + upStreamConfig.Node.Feed.Output.Addr = "127.0.0.1" + upStreamConfig.Node.Feed.Output.Port = upStreamPort + upStreamConfig.Node.Feed.Output.ClientTimeout = 5 * time.Minute + upStream := NewDummyUpStream(upStreamConfig, nil) + err := upStream.Start(ctx) + if err != nil { + t.Fatalf("error starting relay's broadcast client %v", err) + } + defer upStream.StopOnly() + upStream.PopulateFeedBacklogByNumber(ctx, backlogSize, l2MsgSize) + + relayConfig := &ConfigDefault + relayConfig.Node.Feed.Input.URL = []string{"ws://127.0.0.1:" + upStreamPort} + relayConfig.Node.Feed.Output.Addr = "127.0.0.1" + relayConfig.Node.Feed.Output.Port = relayPort + relayConfig.Node.Feed.Output.ClientTimeout = 5 * time.Minute + relay, err := NewRelay(relayConfig, nil) + if err != nil { + t.Fatalf("error initializing relay %v", err) + } + err = relay.Start(ctx) + if err != nil { + t.Fatalf("error starting relay %v", err) + } + defer relay.StopOnly() + waitForBacklog(relay.broadcaster, 0, backlogSize) + + relayURL := "ws://" + relay.GetListenerAddr().String() + clientConfig := broadcastclient.DefaultTestConfig + clientConfig.Timeout = 5 * time.Minute + fatalErrChan := make(chan error, 10) + var streamers []*dummyTxStreamer + for i := 0; i < numClients; i++ { + ts := &dummyTxStreamer{id: i} + streamers = append(streamers, ts) + client, err := broadcastclient.NewBroadcastClient(func() *broadcastclient.Config { return &clientConfig }, relayURL, relayConfig.Chain.ID, 0, ts, nil, fatalErrChan, nil, func(_ int32) {}) + if err != nil { + t.FailNow() + } + client.Start(ctx) + defer client.StopOnly() + } + + // wait for all clients to at least connect once + connectDeadlineTimer := time.NewTicker(connectDeadline) + defer connectDeadlineTimer.Stop() + select { + case err := <-fatalErrChan: + t.Fatalf("a client received a fatal error %v", err) + case <-connectDeadlineTimer.C: + } + + connected := 0 + for _, ts := range streamers { + if ts.logConnection { + connected++ + } + } + if int32(connected) != int32(numClients) { + t.Fail() + } + log.Info("number of clients connected", "expected", numClients, "got",
connected) +} + +func TestRelayLargeBacklog16MB(t *testing.T) { + t.Skip("This test is for manual inspection and would be unreliable in CI even if automated") + largeBacklogRelayTestImpl(t, 150, 100000, 0, 40*time.Second, "9642", "9643") +} + +func TestRelayLargeBacklog50MB(t *testing.T) { + t.Skip("This test is for manual inspection and would be unreliable in CI even if automated") + largeBacklogRelayTestImpl(t, 150, 100000, 340, 40*time.Second, "9644", "9645") +} + +func TestRelayLargeBacklog100MB(t *testing.T) { + t.Skip("This test is for manual inspection and would be unreliable in CI even if automated") + largeBacklogRelayTestImpl(t, 150, 100000, 840, 40*time.Second, "9646", "9647") +} diff --git a/staker/block_validator.go b/staker/block_validator.go index 0c1f72803..f9d6ec7c5 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -580,15 +580,21 @@ func (v *BlockValidator) iterativeValidationEntryCreator(ctx context.Context, ig return v.config().ValidationPoll } +func (v *BlockValidator) isMemoryLimitExceeded() bool { + if v.MemoryFreeLimitChecker == nil { + return false + } + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + return exceeded +} + func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, error) { - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } v.reorgMutex.RLock() pos := v.recordSent() @@ -619,14 +625,9 @@ func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, erro return true, nil } for pos <= recordUntil { - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } validationStatus, found := v.validations.Load(pos) if !found { @@ -781,14 +782,9 @@ validationsLoop: log.Trace("advanceValidations: no more room", "pos", pos) return nil, nil } - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return nil, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("advanceValidations: aborting due to running low on memory") + return nil, nil } if currentStatus == Prepared { input, err := validationStatus.Entry.ToInput() diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 4e7aa22cb..56389ae80 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -339,10 +339,14 @@ func (v *L1Validator) generateNodeAction( batchNum = localBatchCount - 1 validatedCount = messageCount } else { - batchNum, err = FindBatchContainingMessageIndex(v.inboxTracker, validatedCount-1, localBatchCount) + var found bool + batchNum, found, err = v.inboxTracker.FindInboxBatchContainingMessage(validatedCount - 1) if err != nil { return 
nil, false, err } + if !found { + return nil, false, errors.New("batch not found on L1") + } } execResult, err := v.txStreamer.ResultAtCount(validatedCount) if err != nil { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 3a72ecc2f..5230691ab 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -58,6 +58,7 @@ type InboxTrackerInterface interface { GetBatchMessageCount(seqNum uint64) (arbutil.MessageIndex, error) GetBatchAcc(seqNum uint64) (common.Hash, error) GetBatchCount() (uint64, error) + FindInboxBatchContainingMessage(pos arbutil.MessageIndex) (uint64, bool, error) } type TransactionStreamerInterface interface { @@ -111,39 +112,6 @@ func GlobalStatePositionsAtCount( return startPos, GlobalStatePosition{batch, posInBatch + 1}, nil } -func FindBatchContainingMessageIndex( - tracker InboxTrackerInterface, pos arbutil.MessageIndex, high uint64, -) (uint64, error) { - var low uint64 - // Iteration preconditions: - // - high >= low - // - msgCount(low - 1) <= pos implies low <= target - // - msgCount(high) > pos implies high >= target - // Therefore, if low == high, then low == high == target - for high > low { - // Due to integer rounding, mid >= low && mid < high - mid := (low + high) / 2 - count, err := tracker.GetBatchMessageCount(mid) - if err != nil { - return 0, err - } - if count < pos { - // Must narrow as mid >= low, therefore mid + 1 > low, therefore newLow > oldLow - // Keeps low precondition as msgCount(mid) < pos - low = mid + 1 - } else if count == pos { - return mid + 1, nil - } else if count == pos+1 || mid == low { // implied: count > pos - return mid, nil - } else { // implied: count > pos + 1 - // Must narrow as mid < high, therefore newHigh < lowHigh - // Keeps high precondition as msgCount(mid) > pos - high = mid - } - } - return low, nil -} - type ValidationEntryStage uint32 const ( @@ -320,7 +288,10 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) } for i, blob := range blobs { - e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable + // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview + b := blob + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:] } } if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { @@ -359,13 +330,12 @@ func (v *StatelessBlockValidator) GlobalStatePositionsAtCount(count arbutil.Mess if count == 1 { return GlobalStatePosition{}, GlobalStatePosition{1, 0}, nil } - batchCount, err := v.inboxTracker.GetBatchCount() + batch, found, err := v.inboxTracker.FindInboxBatchContainingMessage(count - 1) if err != nil { return GlobalStatePosition{}, GlobalStatePosition{}, err } - batch, err := FindBatchContainingMessageIndex(v.inboxTracker, count-1, batchCount) - if err != nil { - return GlobalStatePosition{}, GlobalStatePosition{}, err + if !found { + return GlobalStatePosition{}, GlobalStatePosition{}, errors.New("batch not found on L1 yet") } return GlobalStatePositionsAtCount(v.inboxTracker, count, batch) } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 68dea4167..0fc127d0e 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -80,7 +80,7 @@ func externalSignerTestCfg(addr common.Address) 
(*dataposter.ExternalSignerCfg, func testBatchPosterParallel(t *testing.T, useRedis bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - httpSrv, srv := externalsignertest.NewServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(t) cp, err := externalsignertest.CertPaths() if err != nil { t.Fatalf("Error getting cert paths: %v", err) diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go index 76034570b..c2941ddcc 100644 --- a/system_tests/blocks_reexecutor_test.go +++ b/system_tests/blocks_reexecutor_test.go @@ -26,7 +26,7 @@ func TestBlocksReExecutorModes(t *testing.T) { parentChainID := big.NewInt(1234) feedErrChan := make(chan error, 10) - node, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID) + node, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 71e749410..040574126 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" @@ -60,7 +61,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbutil" - _ "github.com/offchainlabs/nitro/nodeInterface" + _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -187,6 +188,13 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { } func (b *NodeBuilder) Build(t *testing.T) func() { + if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if b.execConfig.Caching.Archive { + b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = @@ -233,6 +241,13 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes if params.execConfig == nil { params.execConfig = b.execConfig } + if params.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if params.execConfig.Caching.Archive { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = @@ -670,11 +685,13 @@ func DeployOnTestL1( ctx, l1Reader, &l1TransactionOpts, - l1info.GetAddress("Sequencer"), + []common.Address{l1info.GetAddress("Sequencer")}, + l1info.GetAddress("RollupOwner"), 0, 
arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), nativeToken, maxDataSize, + false, ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) @@ -793,7 +810,7 @@ func createTestNodeWithL1( Require(t, err) currentNode, err = arbnode.CreateNode( ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, ) Require(t, err) @@ -829,7 +846,7 @@ func createTestNode( execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337), nil) Require(t, err) // Give the node an init message @@ -934,7 +951,7 @@ func Create2ndNodeWithConfig( currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) err = currentNode.Start(ctx) @@ -1018,6 +1035,10 @@ func setupConfigWithDAS( dasSignerKey, _, err := das.GenerateAndStoreKeys(dbPath) Require(t, err) + dbConfig := das.DefaultLocalDBStorageConfig + dbConfig.Enable = enableDbStorage + dbConfig.DataDir = dbPath + dasConfig := &das.DataAvailabilityConfig{ Enable: enableDas, Key: das.KeyConfig{ @@ -1027,10 +1048,7 @@ func setupConfigWithDAS( Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorage: das.LocalDBStorageConfig{ - Enable: enableDbStorage, - DataDir: dbPath, - }, + LocalDBStorage: dbConfig, RequestTimeout: 5 * time.Second, ParentChainNodeURL: "none", SequencerInboxAddress: "none", diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 96de52e19..602c6da5e 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -141,7 +141,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -188,7 +188,7 @@ func TestDASRekey(t *testing.T) { 
Require(t, err) l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -253,19 +253,20 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { pubkey, _, err := das.GenerateAndStoreKeys(keyDir) Require(t, err) + dbConfig := das.DefaultLocalDBStorageConfig + dbConfig.Enable = true + dbConfig.DataDir = dbDataDir + serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCache: das.TestBigCacheConfig, + LocalCache: das.TestCacheConfig, LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: fileDataDir, }, - LocalDBStorage: das.LocalDBStorageConfig{ - Enable: true, - DataDir: dbDataDir, - }, + LocalDBStorage: dbConfig, Key: das.KeyConfig{ KeyDir: keyDir, @@ -321,7 +322,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337)) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) diff --git a/system_tests/debug_trace_test.go b/system_tests/debug_trace_test.go new file mode 100644 index 000000000..1a83e5ad2 --- /dev/null +++ b/system_tests/debug_trace_test.go @@ -0,0 +1,168 @@ +package arbtest + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "math/big" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +func TestDebugTraceCallForRecentBlock(t *testing.T) { + threads := 32 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true + cleanup := builder.Build(t) + defer cleanup() + builder.L2Info.GenerateAccount("User2") + builder.L2Info.GenerateAccount("User3") + + errors := make(chan error, threads+1) + senderDone := make(chan struct{}) + go func() { + defer close(senderDone) + for ctx.Err() == nil { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, new(big.Int).Lsh(big.NewInt(1), 128), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + _, err = builder.L2.EnsureTxSucceeded(tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + time.Sleep(10 * time.Millisecond) + } + }() + type TransactionArgs struct { + From *common.Address `json:"from"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 
`json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` + Value *hexutil.Big `json:"value"` + Nonce *hexutil.Uint64 `json:"nonce"` + SkipL1Charging *bool `json:"skipL1Charging"` + Data *hexutil.Bytes `json:"data"` + Input *hexutil.Bytes `json:"input"` + AccessList *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + } + rpcClient := builder.L2.ConsensusNode.Stack.Attach() + sometx := builder.L2Info.PrepareTx("User2", "User3", builder.L2Info.TransferGas, common.Big1, nil) + from := builder.L2Info.GetAddress("User2") + to := sometx.To() + gas := sometx.Gas() + maxFeePerGas := sometx.GasFeeCap() + value := sometx.Value() + nonce := sometx.Nonce() + data := sometx.Data() + txargs := TransactionArgs{ + From: &from, + To: to, + Gas: (*hexutil.Uint64)(&gas), + MaxFeePerGas: (*hexutil.Big)(maxFeePerGas), + Value: (*hexutil.Big)(value), + Nonce: (*hexutil.Uint64)(&nonce), + Data: (*hexutil.Bytes)(&data), + } + db := builder.L2.ExecNode.Backend.ChainDb() + + i := 1 + var mtx sync.RWMutex + var wgTrace sync.WaitGroup + for j := 0; j < threads && ctx.Err() == nil; j++ { + wgTrace.Add(1) + go func() { + defer wgTrace.Done() + mtx.RLock() + blockNumber := i + mtx.RUnlock() + for blockNumber < 300 && ctx.Err() == nil { + var err error + prefix := make([]byte, 8) + binary.BigEndian.PutUint64(prefix, uint64(blockNumber)) + prefix = append([]byte("b"), prefix...) + it := db.NewIterator(prefix, nil) + defer it.Release() + if it.Next() { + key := it.Key() + if len(key) != len(prefix)+common.HashLength { + Fatal(t, "Wrong key length, have:", len(key), "want:", len(prefix)+common.HashLength) + } + blockHash := common.BytesToHash(key[len(prefix):]) + start := time.Now() + for ctx.Err() == nil { + var res json.RawMessage + err = rpcClient.CallContext(ctx, &res, "debug_traceCall", txargs, blockHash, nil) + if err == nil { + mtx.Lock() + if blockNumber == i { + i++ + } + mtx.Unlock() + break + } + if ctx.Err() != nil { + return + } + if !strings.Contains(err.Error(), "not currently canonical") && !strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "missing trie node") { + errors <- err + return + } + if time.Since(start) > 5*time.Second { + errors <- fmt.Errorf("timeout - failed to trace call for more than 5 seconds, block: %d, err: %w", blockNumber, err) + return + } + } + } + it.Release() + mtx.RLock() + blockNumber = i + mtx.RUnlock() + } + }() + } + traceDone := make(chan struct{}) + go func() { + wgTrace.Wait() + close(traceDone) + }() + + select { + case <-traceDone: + cancel() + case <-senderDone: + cancel() + case err := <-errors: + t.Error(err) + cancel() + } + <-traceDone + <-senderDone + close(errors) + for err := range errors { + if err != nil { + t.Error(err) + } + } +} diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index e18517740..4d8fbf43f 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -119,8 +119,6 @@ func TestSequencerFeePaid(t *testing.T) { } func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { - t.Parallel() - _ = os.Mkdir("test-data", 0766) path := filepath.Join("test-data", fmt.Sprintf("testSequencerPriceAdjustsFrom%v.csv", initialEstimate))
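The block-hash lookup in TestDebugTraceCallForRecentBlock above leans on go-ethereum's rawdb key schema: block bodies are stored under the byte 'b' followed by the 8-byte big-endian block number and the 32-byte block hash, which is why iterating with that 9-byte prefix recovers a stored hash for a given height. A minimal, self-contained sketch of just the prefix construction (the helper name and demo value are illustrative, not part of this change):

package main

import (
	"encoding/binary"
	"fmt"
)

// bodyKeyPrefix builds the rawdb iteration prefix used in the test above:
// 'b' ++ big-endian uint64 block number. Appending a 32-byte block hash
// to this prefix would yield a full block-body key.
func bodyKeyPrefix(blockNumber uint64) []byte {
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, blockNumber)
	return append([]byte("b"), num...)
}

func main() {
	fmt.Printf("%x\n", bodyKeyPrefix(300)) // 62000000000000012c ('b' = 0x62, 300 = 0x12c)
}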
diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index b8f891e3e..03b6d690f 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -1,10 +1,6 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -// race detection makes things slow and miss timeouts -//go:build !race -// +build !race - package arbtest import ( @@ -29,14 +25,17 @@ import ( "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" @@ -165,7 +164,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b seqNum := new(big.Int).Lsh(common.Big1, 256) seqNum.Sub(seqNum, common.Big1) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) Require(t, err) receipt, err := EnsureTxSucceeded(ctx, backend, tx) Require(t, err) @@ -184,6 +183,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b } func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend arbutil.L1Interface) { + t.Helper() // With SimulatedBeacon running in on-demand block production mode, the // finalized block is considered to be the nearest multiple of 32 less // than or equal to the block number.
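The comment above captures the whole finality model of the simulated beacon in on-demand mode: the finalized head is simply the chain head rounded down to a multiple of 32. A small self-contained sketch of that rounding rule (the helper name and demo values are illustrative only):

package main

import "fmt"

// simulatedFinalized applies the rule described above: the finalized
// block is the nearest multiple of 32 at or below the current head.
func simulatedFinalized(head uint64) uint64 {
	return head - head%32
}

func main() {
	for _, head := range []uint64{0, 31, 32, 63, 64, 100} {
		fmt.Printf("head %3d -> finalized %3d\n", head, simulatedFinalized(head))
	}
}

One consequence, which confirmLatestBlock has to account for, is that a block may need up to 31 successors before it is reported as finalized.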
@@ -205,10 +205,10 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 10000, - FutureBlocks: 10000, - DelaySeconds: 10000, - FutureSeconds: 10000, + DelayBlocks: big.NewInt(10000), + FutureBlocks: big.NewInt(10000), + DelaySeconds: big.NewInt(10000), + FutureSeconds: big.NewInt(10000), } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, @@ -218,6 +218,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha timeBounds, big.NewInt(117964), reader4844, + false, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -237,6 +238,16 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha return bridgeAddr, seqInbox, seqInboxAddr } +func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chainConfig *params.ChainConfig, l1Client arbutil.L1Interface, l2info *BlockchainTestInfo, rollupAddresses *chaininfo.RollupAddresses, initMsg *arbostypes.ParsedInitMessage, txOpts *bind.TransactOpts, signer signature.DataSignerFunc, fatalErrChan chan error) (*arbnode.Node, *gethexec.ExecutionNode) { + _, stack, l2ChainDb, l2ArbDb, l2Blockchain := createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMsg, nil, nil) + execNode, err := gethexec.CreateExecutionNode(ctx, stack, l2ChainDb, l2Blockchain, l1Client, gethexec.ConfigDefaultTest) + Require(t, err) + consensusNode, err := arbnode.CreateNode(ctx, stack, execNode, l2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Client, rollupAddresses, txOpts, txOpts, signer, fatalErrChan, big.NewInt(1337), nil) + Require(t, err) + + return consensusNode, execNode +} + func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64) { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.LvlInfo) @@ -279,25 +290,18 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) - asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr - asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) - Require(t, err) - parentChainID := big.NewInt(1337) - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) - Require(t, err) - err = asserterL2.Start(ctx) + asserterL2Info := NewArbTestInfo(t, chainConfig.ChainID) + asserterL2, asserterExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, asserterL2Info, asserterRollupAddresses, initMessage, nil, nil, fatalErrChan) + err := asserterL2.Start(ctx) Require(t, err) - challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := 
createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) challengerRollupAddresses := *asserterRollupAddresses challengerRollupAddresses.Bridge = challengerBridgeAddr challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr - challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) - Require(t, err) - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) - Require(t, err) + challengerL2Info := NewArbTestInfo(t, chainConfig.ChainID) + challengerL2, challengerExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, challengerL2Info, &challengerRollupAddresses, initMessage, nil, nil, fatalErrChan) err = challengerL2.Start(ctx) Require(t, err) @@ -385,7 +389,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -402,7 +406,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -490,17 +494,3 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall Fatal(t, "challenge timed out without winner") } - -func TestMockChallengeManagerAsserterIncorrect(t *testing.T) { - t.Parallel() - for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, false, true, i) - } -} - -func TestMockChallengeManagerAsserterCorrect(t *testing.T) { - t.Parallel() - for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, true, true, i) - } -} diff --git a/system_tests/full_challenge_mock_test.go b/system_tests/full_challenge_mock_test.go new file mode 100644 index 000000000..d32c2b40a --- /dev/null +++ b/system_tests/full_challenge_mock_test.go @@ -0,0 +1,21 @@ +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import "testing" + +func TestMockChallengeManagerAsserterIncorrect(t *testing.T) { + t.Parallel() + for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { + RunChallengeTest(t, false, true, i) + } +} + +func TestMockChallengeManagerAsserterCorrect(t *testing.T) { + t.Parallel() + for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { + RunChallengeTest(t, true, true, i) + } +} diff --git 
a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index e1715dc63..27ed8572c 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -27,7 +27,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) @@ -69,7 +69,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { // Produce a new l1Block so that the batch ends up in a different l1Block than before builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 3424a58e9..b692af6e3 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -1,6 +1,10 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + package arbtest import ( @@ -11,10 +15,82 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" ) +func TestFindBatch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l1Info := NewL1TestInfo(t) + initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) + l1Info.GenerateGenesisAccount("deployer", initialBalance) + l1Info.GenerateGenesisAccount("asserter", initialBalance) + l1Info.GenerateGenesisAccount("challenger", initialBalance) + l1Info.GenerateGenesisAccount("sequencer", initialBalance) + + l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) + conf := arbnode.ConfigDefaultL1Test() + conf.BlockValidator.Enable = false + conf.BatchPoster.Enable = false + + chainConfig := params.ArbitrumDevTestChainConfig() + fatalErrChan := make(chan error, 10) + rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + + bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + + callOpts := bind.CallOpts{Context: ctx} + + rollupAddresses.Bridge = bridgeAddr + rollupAddresses.SequencerInbox = seqInboxAddr + l2Info := NewArbTestInfo(t, chainConfig.ChainID) + consensus, _ := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, rollupAddresses, initMsg, nil, nil, fatalErrChan) + err := consensus.Start(ctx) + Require(t, err) + + l2Client := ClientForStack(t, consensus.Stack) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2Client) + Require(t, err) + sequencerTxOpts := 
l1Info.GetDefaultTransactOpts("sequencer", ctx) + + l2Info.GenerateAccount("Destination") + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + + for blockNum := uint64(0); blockNum < uint64(makeBatch_MsgsPerBatch)*3; blockNum++ { + gotBatchNum, err := nodeInterface.FindBatchContainingBlock(&callOpts, blockNum) + Require(t, err) + expBatchNum := uint64(0) + if blockNum > 0 { + expBatchNum = 1 + (blockNum-1)/uint64(makeBatch_MsgsPerBatch) + } + if expBatchNum != gotBatchNum { + Fatal(t, "wrong result from findBatchContainingBlock. blocknum ", blockNum, " expected ", expBatchNum, " got ", gotBatchNum) + } + batchL1Block, err := consensus.InboxTracker.GetBatchParentChainBlock(gotBatchNum) + Require(t, err) + blockHeader, err := l2Client.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) + Require(t, err) + blockHash := blockHeader.Hash() + + minCurrentL1Block, err := l1Backend.BlockNumber(ctx) + Require(t, err) + gotConfirmations, err := nodeInterface.GetL1Confirmations(&callOpts, blockHash) + Require(t, err) + maxCurrentL1Block, err := l1Backend.BlockNumber(ctx) + Require(t, err) + + if gotConfirmations > (maxCurrentL1Block-batchL1Block) || gotConfirmations < (minCurrentL1Block-batchL1Block) { + Fatal(t, "wrong number of confirmations. got ", gotConfirmations) + } + } +} + func TestL2BlockRangeForL1(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) diff --git a/system_tests/pendingblock_test.go b/system_tests/pendingblock_test.go new file mode 100644 index 000000000..dc21bca52 --- /dev/null +++ b/system_tests/pendingblock_test.go @@ -0,0 +1,53 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "testing" + "time" + + "github.com/offchainlabs/nitro/solgen/go/mocksgen" +) + +func TestPendingBlockTimeAndNumberAdvance(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, testTimeAndNr, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + time.Sleep(1 * time.Second) + + _, err = testTimeAndNr.IsAdvancing(&auth) + Require(t, err) +} + +func TestPendingBlockArbBlockHashReturnsLatest(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, pendingBlk, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + header, err := builder.L2.Client.HeaderByNumber(ctx, nil) + Require(t, err) + + _, err = pendingBlk.CheckArbBlockHashReturnsLatest(&auth, header.Hash()) + Require(t, err) +} diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index f5bdca097..777ed1796 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -2,31 +2,30 @@ package arbtest import ( "context" + "encoding/binary" "errors" + "fmt" "math/big" "strings" + "sync" "testing" + "time" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { - t.Helper() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.execConfig = execConfig - cleanup := builder.Build(t) - builder.L2Info.GenerateAccount("User2") +func makeSomeTransfers(t *testing.T, ctx context.Context, builder *NodeBuilder, txCount uint64) { var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) @@ -38,8 +37,16 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethe _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } +} - return builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client, cleanup +func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (*NodeBuilder, func()) { + t.Helper() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig = execConfig + cleanup := builder.Build(t) + builder.L2Info.GenerateAccount("User2") + makeSomeTransfers(t, ctx, builder, txCount) + return builder, cleanup } 
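After this refactor, prepareNodeWithHistory hands back the whole NodeBuilder plus its cleanup function instead of four separate handles, and makeSomeTransfers can be reused to extend history mid-test. A hypothetical caller, shown only to illustrate the new shape (the test name is ours; imports and helpers come from the surrounding arbtest package):

func TestHistoryHelperUsage(t *testing.T) { // hypothetical example, not part of this change
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	execConfig := gethexec.ConfigDefaultTest()
	execConfig.Caching.Archive = true
	builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16)
	defer cancelNode()
	// Everything the old signature returned piecemeal is reachable from the builder.
	execNode, l2client := builder.L2.ExecNode, builder.L2.Client
	_, _ = execNode, l2client
	// History can be extended later in the same test without a second helper variant.
	makeSomeTransfers(t, ctx, builder, 8)
}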
func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -89,17 +96,19 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true + execConfig.Caching.SnapshotCache = 0 // disable snapshots // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -123,17 +132,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = depthGasLimit + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -157,17 +167,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = int64(200) - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig 
:= gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = int64(200) + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -191,17 +202,18 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, headerCacheLimit+5) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -236,16 +248,17 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + 
execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -271,17 +284,18 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, blockCacheLimit+4) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -306,7 +320,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { hash := rawdb.ReadCanonicalHash(db, lastBlock) Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) } - if !strings.Contains(err.Error(), "block not found while recreating") { + if !strings.Contains(err.Error(), fmt.Sprintf("block #%d not found", blockBodyToRemove)) { Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) } } @@ -335,7 +349,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Require(t, err) parentChainID := big.NewInt(1337) - node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID) + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -358,9 +372,13 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) } } + bc := execNode.Backend.ArbInterface().BlockChain() genesis := uint64(0) - lastBlock, err := client.BlockNumber(ctx) - Require(t, err) + currentHeader := bc.CurrentBlock() + if currentHeader == nil { + Fatal(t, "missing current block") + } + lastBlock := currentHeader.Number.Uint64() if want := genesis + uint64(txCount); lastBlock < want { Fatal(t, "internal 
test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) } @@ -376,12 +394,12 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID) + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc := execNode.Backend.ArbInterface().BlockChain() + bc = execNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { @@ -391,8 +409,8 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig continue } gas += block.GasUsed() - blocks++ _, err := bc.StateAt(block.Root()) + blocks++ if (skipBlocks == 0 && skipGas == 0) || (skipBlocks != 0 && blocks > skipBlocks) || (skipGas != 0 && gas > skipGas) { if err != nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) @@ -401,13 +419,17 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig gas = 0 blocks = 0 } else { + if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) { + // skipping nonexistence check - the state might have been saved on node shutdown + continue + } if err == nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) Fatal(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) } expectedErr := &trie.MissingNodeError{} if !errors.As(err, &expectedErr) { - Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash:", block.Hash(), "err:", err) } } } @@ -429,7 +451,10 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig := gethexec.DefaultCachingConfig cacheConfig.Archive = true - //// test defaults + cacheConfig.SnapshotCache = 0 // disable snapshots + cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + + // test defaults testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 @@ -444,8 +469,10 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) - // one test block ~ 925000 gas - testBlockGas := uint64(925000) + // lower the number of blocks kept in triegc below 100, so the nonexistence check in testSkippingSavingStateAndRecreatingAfterRestart can run (it skips the last BlockCount blocks, as some of them may be persisted on node shutdown) + cacheConfig.BlockCount = 16 + + testBlockGas := uint64(925000) // one test block ~ 925000 gas skipBlockValues := []uint64{0, 1, 2, 3, 5, 21, 51, 100,
101} var skipGasValues []uint64 for _, i := range skipBlockValues { @@ -459,3 +486,206 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { } } } + +func TestGettingStateForRPCFullNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } +} + +func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.Archive = true + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 128 + execConfig.Caching.BlockCount = 128 + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } +} + +func TestStateAndHeaderForRecentBlock(t *testing.T) { + threads := 32 + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true + builder.execConfig.RPC.MaxRecreateStateDepth = 0 + cleanup := builder.Build(t) + defer cleanup() + builder.L2Info.GenerateAccount("User2") + + errors := make(chan error, threads+1) + senderDone := make(chan struct{}) + go func() { + defer close(senderDone) + for ctx.Err() == nil { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, new(big.Int).Lsh(big.NewInt(1), 128), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + _, err = builder.L2.EnsureTxSucceeded(tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + time.Sleep(10 * time.Millisecond) + } + }() + api := builder.L2.ExecNode.Backend.APIBackend() + db := builder.L2.ExecNode.Backend.ChainDb() + i := 1 + var mtx sync.RWMutex + var wgCallers sync.WaitGroup + for j := 0; j < threads && ctx.Err() == nil; j++ { + wgCallers.Add(1) + go func() { + defer wgCallers.Done() + mtx.RLock() + blockNumber := i + mtx.RUnlock() + for blockNumber < 300 && ctx.Err() == nil { + prefix := make([]byte, 8) + binary.BigEndian.PutUint64(prefix, uint64(blockNumber)) + prefix = append([]byte("b"), prefix...) + it := db.NewIterator(prefix, nil) + defer it.Release() + if it.Next() { + key := it.Key() + if len(key) != len(prefix)+common.HashLength { + Fatal(t, "Wrong key length, have:", len(key), "want:", len(prefix)+common.HashLength) + } + blockHash := common.BytesToHash(key[len(prefix):]) + start := time.Now() + for ctx.Err() == nil { + _, _, err := api.StateAndHeaderByNumberOrHash(ctx, rpc.BlockNumberOrHash{BlockHash: &blockHash}) + if err == nil { + mtx.Lock() + if blockNumber == i { + i++ + } + mtx.Unlock() + break + } + if ctx.Err() != nil { + return + } + if !strings.Contains(err.Error(), "ahead of current block") { + errors <- err + return + } + if time.Since(start) > 5*time.Second { + errors <- fmt.Errorf("timeout - failed to get state for more than 5 seconds, block: %d, err: %w", blockNumber, err) + return + } + } + } + it.Release() + mtx.RLock() + blockNumber = i + mtx.RUnlock() + } + }() + } + callersDone := make(chan struct{}) + go func() { + wgCallers.Wait() + close(callersDone) + }() + + select { + case <-callersDone: + cancel() + case <-senderDone: + cancel() + case err := <-errors: + t.Error(err) + cancel() + } + <-callersDone + <-senderDone + close(errors) + for err := range errors { + if err != nil { + t.Error(err) + } + } +} diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index be0ecc590..b0691db17 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -563,7 +563,7 @@ func TestDepositETH(t *testing.T) { txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) txOpts.Value = big.NewInt(13) - l1tx, err := delayedInbox.DepositEth0(&txOpts) + l1tx, err := delayedInbox.DepositEth439370b1(&txOpts) if err != nil { t.Fatalf("DepositEth0() unexpected error: %v", err) } diff --git a/system_tests/ipc_test.go b/system_tests/rpc_test.go similarity index 100% rename from system_tests/ipc_test.go rename to system_tests/rpc_test.go diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index c4dd17ef5..81dd2ad0d 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -355,7 +355,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator
bool) { if i%5 == 0 { tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else { - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, common.Big0, common.Big0) } Require(t, err) txRes, err := builder.L1.EnsureTxSucceeded(tx) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 62e89ff78..d5bbeaa07 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -61,7 +61,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) t.Parallel() ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - httpSrv, srv := externalsignertest.NewServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(t) cp, err := externalsignertest.CertPaths() if err != nil { t.Fatalf("Error getting cert paths: %v", err) diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 28bcbec9b..9c34a8255 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -56,7 +56,7 @@ func BuildBlock( return seqBatch, nil } block, _, err := arbos.ProduceBlock( - l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher, + l1Message, delayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, batchFetcher, false, ) return block, err } @@ -121,6 +121,9 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func FuzzStateTransition(f *testing.F) { f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { + if len(seqMsg) > 0 && arbstate.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { + return + } chainDb := rawdb.NewMemoryDatabase() chainConfig := params.ArbitrumRollupGoerliTestnetChainConfig() serializedChainConfig, err := json.Marshal(chainConfig) diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go new file mode 100644 index 000000000..4c3c8661c --- /dev/null +++ b/system_tests/unsupported_txtypes_test.go @@ -0,0 +1,133 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +func TestBlobAndInternalTxsReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User") + builder.L2Info.GenerateAccount("User2") + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "User") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + blobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + err = builder.L2.Client.SendTransaction(ctx, blobTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting blob transaction. Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + internalTx := types.NewTx(txDataInternal) + err = builder.L2.Client.SendTransaction(ctx, internalTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting arbitrum internal transaction. Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } +} +func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + + l1Txs := make([]*types.Transaction, 0, 4) + txAcceptStatus := make(map[common.Hash]bool, 4) + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "Owner") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + delayedBlobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + txAcceptStatus[delayedBlobTx.Hash()] = false + l1TxBlob := WrapL2ForDelayed(t, delayedBlobTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxBlob) + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + delayedInternalTx := types.NewTx(txDataInternal) + txAcceptStatus[delayedInternalTx.Hash()] = false + l1TxInternal := WrapL2ForDelayed(t, delayedInternalTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxInternal) + + delayedTx1 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx1.Hash()] = false + l1tx := WrapL2ForDelayed(t, delayedTx1, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + delayedTx2 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx2.Hash()] = false + l1tx = WrapL2ForDelayed(t, delayedTx2, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + 
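// Submit all four delayed messages to L1 in a single batch; the two plain transfers should later appear in L2 blocks, while the blob and internal transactions should not. +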
errs := builder.L1.L1Backend.TxPool().Add(l1Txs, true, false) + for _, err := range errs { + Require(t, err) + } + + confirmLatestBlock(ctx, t, builder.L1Info, builder.L1.Client) + for _, tx := range l1Txs { + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + } + + blocknum, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + for i := int64(0); i <= int64(blocknum); i++ { + block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i)) + Require(t, err) + for _, tx := range block.Transactions() { + if _, ok := txAcceptStatus[tx.Hash()]; ok { + txAcceptStatus[tx.Hash()] = true + } + } + } + if !txAcceptStatus[delayedTx1.Hash()] || !txAcceptStatus[delayedTx2.Hash()] { + t.Fatalf("transaction of valid transaction type wasn't accepted as a delayed message") + } + if txAcceptStatus[delayedBlobTx.Hash()] { + t.Fatalf("blob transaction was successfully accepted as a delayed message") + } + if txAcceptStatus[delayedInternalTx.Hash()] { + t.Fatalf("arbitrum internal transaction was successfully accepted as a delayed message") + } +} diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go index f298eee29..8b7c47d82 100644 --- a/util/arbmath/bips.go +++ b/util/arbmath/bips.go @@ -46,3 +46,10 @@ func (bips UBips) Uint64() uint64 { func (bips Bips) Uint64() uint64 { return uint64(bips) } + +// BigDivToBips returns dividend/divisor as bips, saturating if out of bounds +func BigDivToBips(dividend, divisor *big.Int) Bips { + value := BigMulByInt(dividend, int64(OneInBips)) + value.Div(value, divisor) + return Bips(BigToUintSaturating(value)) +} diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 2852f2b29..405c776ba 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -29,6 +29,9 @@ func fillBlobBytes(blob []byte, data []byte) []byte { // The number of bits in a BLS scalar that aren't part of a whole byte. const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8 +// The number of bytes encodable in a blob with the current encoding scheme. +const BlobEncodableData = 254 * params.BlobTxFieldElementsPerBlob / 8 + func fillBlobBits(blob []byte, data []byte) ([]byte, error) { var acc uint16 accBits := 0 diff --git a/arbnode/blob_reader.go b/util/headerreader/blob_client.go similarity index 51% rename from arbnode/blob_reader.go rename to util/headerreader/blob_client.go index c2d2121be..664dbb5e3 100644 --- a/arbnode/blob_reader.go +++ b/util/headerreader/blob_client.go @@ -1,11 +1,12 @@ -// Copyright 2023, Offchain Labs, Inc. +// Copyright 2023-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package arbnode +package headerreader import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -16,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/jsonapi" @@ -25,38 +27,52 @@ import ( ) type BlobClient struct { - ec arbutil.L1Interface - beaconUrl *url.URL - httpClient *http.Client + ec arbutil.L1Interface + beaconUrl *url.URL + secondaryBeaconUrl *url.URL + httpClient *http.Client + authorization string - // The genesis time time and seconds per slot won't change so only request them once. 
- cachedGenesisTime uint64 - cachedSecondsPerSlot uint64 + // Filled in by Initialize() + genesisTime uint64 + secondsPerSlot uint64 - // Directory to save the fetcehd blobs + // Directory to save the fetched blobs blobDirectory string } type BlobClientConfig struct { - BeaconChainUrl string `koanf:"beacon-chain-url"` - BlobDirectory string `koanf:"blob-directory"` + BeaconUrl string `koanf:"beacon-url"` + SecondaryBeaconUrl string `koanf:"secondary-beacon-url"` + BlobDirectory string `koanf:"blob-directory"` + Authorization string `koanf:"authorization"` } var DefaultBlobClientConfig = BlobClientConfig{ - BeaconChainUrl: "", - BlobDirectory: "", + BeaconUrl: "", + SecondaryBeaconUrl: "", + BlobDirectory: "", + Authorization: "", } func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") + f.String(prefix+".beacon-url", DefaultBlobClientConfig.BeaconUrl, "Beacon Chain RPC URL to use for fetching blobs (normally on port 3500)") + f.String(prefix+".secondary-beacon-url", DefaultBlobClientConfig.SecondaryBeaconUrl, "Backup Beacon Chain RPC URL to use for fetching blobs (normally on port 3500) when unable to fetch from primary") f.String(prefix+".blob-directory", DefaultBlobClientConfig.BlobDirectory, "Full path of the directory to save fetched blobs") + f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters") } func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { - beaconUrl, err := url.Parse(config.BeaconChainUrl) + beaconUrl, err := url.Parse(config.BeaconUrl) if err != nil { return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) } + var secondaryBeaconUrl *url.URL + if config.SecondaryBeaconUrl != "" { + if secondaryBeaconUrl, err = url.Parse(config.SecondaryBeaconUrl); err != nil { + return nil, fmt.Errorf("failed to parse secondary beacon chain URL: %w", err) + } + } if config.BlobDirectory != "" { if _, err = os.Stat(config.BlobDirectory); err != nil { if os.IsNotExist(err) { @@ -69,10 +85,12 @@ func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient } } return &BlobClient{ - ec: ec, - beaconUrl: beaconUrl, - httpClient: &http.Client{}, - blobDirectory: config.BlobDirectory, + ec: ec, + beaconUrl: beaconUrl, + secondaryBeaconUrl: secondaryBeaconUrl, + authorization: config.Authorization, + httpClient: &http.Client{}, + blobDirectory: config.BlobDirectory, }, nil } @@ -85,18 +103,43 @@ func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath var empty T - // not really a deep copy, but copies the Path part we care about - url := *b.beaconUrl - url.Path = path.Join(url.Path, beaconPath) - - req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) - if err != nil { - return empty, err + fetchData := func(url url.URL) (*http.Response, error) { + url.Path = path.Join(url.Path, beaconPath) + req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) + if err != nil { + return nil, err + } + if b.authorization != "" { + req.Header.Set("Authorization", b.authorization) + } + resp, err := b.httpClient.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + bodyStr := string(body) + log.Debug("beacon request returned response with non 
200 OK status", "status", resp.Status, "body", bodyStr) + if len(bodyStr) > 100 { + return nil, fmt.Errorf("response returned with status %s, want 200 OK. body: %s ", resp.Status, bodyStr[len(bodyStr)-trailingCharsOfResponse:]) + } else { + return nil, fmt.Errorf("response returned with status %s, want 200 OK. body: %s", resp.Status, bodyStr) + } + } + return resp, nil } - resp, err := b.httpClient.Do(req) - if err != nil { - return empty, err + var resp *http.Response + var err error + if resp, err = fetchData(*b.beaconUrl); err != nil { + if b.secondaryBeaconUrl != nil { + log.Info("error fetching blob data from primary beacon URL, switching to secondary beacon URL", "err", err) + if resp, err = fetchData(*b.secondaryBeaconUrl); err != nil { + return empty, fmt.Errorf("error fetching blob data from secondary beacon URL: %w", err) + } + } else { + return empty, err + } } defer resp.Body.Close() @@ -119,16 +162,15 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio if err != nil { return nil, err } - genesisTime, err := b.genesisTime(ctx) - if err != nil { - return nil, err + if b.secondsPerSlot == 0 { + return nil, errors.New("BlobClient hasn't been initialized") } - secondsPerSlot, err := b.secondsPerSlot(ctx) + slot := (header.Time - b.genesisTime) / b.secondsPerSlot + blobs, err := b.blobSidecars(ctx, slot, versionedHashes) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching blobs in %d l1 block: %w", header.Number, err) } - slot := (header.Time - genesisTime) / secondsPerSlot - return b.blobSidecars(ctx, slot, versionedHashes) + return blobs, nil } type blobResponseItem struct { @@ -142,11 +184,23 @@ type blobResponseItem struct { KzgProof hexutil.Bytes `json:"kzg_proof"` } +const trailingCharsOfResponse = 25 + func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { - response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) } + var response []blobResponseItem + if err := json.Unmarshal(rawData, &response); err != nil { + rawDataStr := string(rawData) + log.Debug("response from beacon URL cannot be unmarshalled into array of blobResponseItem in blobSidecars", "slot", slot, "responseLength", len(rawDataStr), "response", rawDataStr) + if len(rawDataStr) > 100 { + return nil, fmt.Errorf("error unmarshalling response from beacon URL into array of blobResponseItem in blobSidecars: %w. Trailing %d characters of the response: %s", err, trailingCharsOfResponse, rawDataStr[len(rawDataStr)-trailingCharsOfResponse:]) + } else { + return nil, fmt.Errorf("error unmarshalling response from beacon URL into array of blobResponseItem in blobSidecars: %w. 
Response: %s", err, rawDataStr) + } + } if len(response) < len(versionedHashes) { return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response)) @@ -198,7 +252,7 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas } if b.blobDirectory != "" { - if err := saveBlobDataToDisk(response, slot, b.blobDirectory); err != nil { + if err := saveBlobDataToDisk(rawData, slot, b.blobDirectory); err != nil { return nil, err } } @@ -206,13 +260,13 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas return output, nil } -func saveBlobDataToDisk(response []blobResponseItem, slot uint64, blobDirectory string) error { +func saveBlobDataToDisk(rawData json.RawMessage, slot uint64, blobDirectory string) error { filePath := path.Join(blobDirectory, fmt.Sprint(slot)) file, err := os.Create(filePath) if err != nil { return fmt.Errorf("could not create file to store fetched blobs") } - full := fullResult[[]blobResponseItem]{Data: response} + full := fullResult[json.RawMessage]{Data: rawData} fullbytes, err := json.Marshal(full) if err != nil { return fmt.Errorf("unable to marshal data into bytes while attempting to store fetched blobs") @@ -229,31 +283,25 @@ type genesisResponse struct { // don't currently care about other fields, add if needed } -func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { - if b.cachedGenesisTime > 0 { - return b.cachedGenesisTime, nil - } - gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") - if err != nil { - return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) - } - b.cachedGenesisTime = uint64(gr.GenesisTime) - return b.cachedGenesisTime, nil -} - type getSpecResponse struct { SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` } -func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { - if b.cachedSecondsPerSlot > 0 { - return b.cachedSecondsPerSlot, nil +func (b *BlobClient) Initialize(ctx context.Context) error { + genesis, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + if err != nil { + return fmt.Errorf("error calling beacon client to get genesisTime: %w", err) } - gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + b.genesisTime = uint64(genesis.GenesisTime) + + spec, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") if err != nil { - return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + return fmt.Errorf("error calling beacon client to get secondsPerSlot: %w", err) } - b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) - return b.cachedSecondsPerSlot, nil + if spec.SecondsPerSlot == 0 { + return errors.New("got SECONDS_PER_SLOT of zero from beacon client") + } + b.secondsPerSlot = uint64(spec.SecondsPerSlot) + return nil } diff --git a/arbnode/blob_reader_test.go b/util/headerreader/blob_client_test.go similarity index 67% rename from arbnode/blob_reader_test.go rename to util/headerreader/blob_client_test.go index 287f5fee0..9735899da 100644 --- a/arbnode/blob_reader_test.go +++ b/util/headerreader/blob_client_test.go @@ -1,4 +1,7 @@ -package arbnode +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package headerreader import ( "encoding/json" @@ -8,6 +11,7 @@ import ( "reflect" "testing" + "github.com/offchainlabs/nitro/util/testhelpers" "github.com/r3labs/diff/v3" ) @@ -32,7 +36,9 @@ func TestSaveBlobsToDisk(t *testing.T) { KzgProof: []byte{2}, }} testDir := t.TempDir() - err := saveBlobDataToDisk(response, 5, testDir) + rawData, err := json.Marshal(response) + Require(t, err) + err = saveBlobDataToDisk(rawData, 5, testDir) Require(t, err) filePath := path.Join(testDir, "5") @@ -51,3 +57,13 @@ func TestSaveBlobsToDisk(t *testing.T) { Fail(t, "blob data saved to disk does not match actual blob data", changelog) } } + +func Require(t *testing.T, err error, printables ...interface{}) { + t.Helper() + testhelpers.RequireImpl(t, err, printables...) +} + +func Fail(t *testing.T, printables ...interface{}) { + t.Helper() + testhelpers.FailImpl(t, printables...) +} diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 6af141c66..59e3b0e0f 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -79,10 +79,10 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, // GetPriorities returns the priority list of sequencers func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if errors.Is(err, redis.Nil) { + return []string{}, nil + } if err != nil { - if errors.Is(err, redis.Nil) { - err = errors.New("sequencer priorities unset") - } return []string{}, err } prioritiesList := strings.Split(prioritiesString, ",") diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 275acdb28..02b41cf15 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -127,6 +127,25 @@ func (m limitedArgumentsMarshal) String() string { return res } +var blobTxUnderpricedRegexp = regexp.MustCompile(`replacement transaction underpriced: new tx gas fee cap (\d*) <= (\d*) queued`) + +// IsAlreadyKnownError returns true if the error appears to be an "already known" error. +// This check is based on the error's string form and is not precise. +func IsAlreadyKnownError(err error) bool { + s := err.Error() + if strings.Contains(s, "already known") { + return true + } + // go-ethereum returns "replacement transaction underpriced" instead of "already known" for blob txs. + // This is fixed in https://github.com/ethereum/go-ethereum/pull/29210 + // TODO: Once a new geth release is out with this fix, we can remove this check. 
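+ // Example (hypothetical error string): "replacement transaction underpriced: new tx gas fee cap 100 <= 100 queued" + // captures two equal fee caps, which means the node already holds this exact blob tx, so it is reported as already known.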
+ matches := blobTxUnderpricedRegexp.FindSubmatch([]byte(s)) + if len(matches) == 3 { + return string(matches[1]) == string(matches[2]) + } + return false +} + func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, method string, args ...interface{}) error { if c.client == nil { return errors.New("not connected") @@ -159,7 +178,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth cancelCtx() logger := log.Trace limit := int(c.config().ArgLogLimit) - if err != nil && err.Error() != "already known" { + if err != nil && !IsAlreadyKnownError(err) { logger = log.Info } logEntry := []interface{}{ diff --git a/util/rpcclient/rpcclient_test.go b/util/rpcclient/rpcclient_test.go index b885770f6..8613671d3 100644 --- a/util/rpcclient/rpcclient_test.go +++ b/util/rpcclient/rpcclient_test.go @@ -182,6 +182,25 @@ func TestRpcClientRetry(t *testing.T) { } } +func TestIsAlreadyKnownError(t *testing.T) { + for _, testCase := range []struct { + input string + expected bool + }{ + {"already known", true}, + {"insufficient balance", false}, + {"foo already known\nbar", true}, + {"replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued", true}, + {"replacement transaction underpriced: new tx gas fee cap 1234 \u003c= 5678 queued", false}, + {"foo replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued bar", true}, + } { + got := IsAlreadyKnownError(errors.New(testCase.input)) + if got != testCase.expected { + t.Errorf("IsAlreadyKnownError(%q) = %v expected %v", testCase.input, got, testCase.expected) + } + } +} + func Require(t *testing.T, err error, printables ...interface{}) { t.Helper() testhelpers.RequireImpl(t, err, printables...)
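
A minimal usage sketch for the IsAlreadyKnownError helper added above; the resubmit wrapper, the package name, and the client value are hypothetical illustrations, not part of this change. A caller that rebroadcasts transactions can treat an "already known" response, including geth's equal-fee-cap "replacement transaction underpriced" variant for blob txs, as success:

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"

	"github.com/offchainlabs/nitro/util/rpcclient"
)

// resubmit sends tx and swallows "already known" errors, since they mean the
// node already holds this exact transaction; any other error is returned to
// the caller for a real retry decision.
func resubmit(ctx context.Context, client *ethclient.Client, tx *types.Transaction) error {
	err := client.SendTransaction(ctx, tx)
	if err != nil && rpcclient.IsAlreadyKnownError(err) {
		return nil
	}
	return err
}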