diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..03273c93e --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,21 @@ +### Description + +add a description of your changes here... + +### Rationale + +tell us why we need these changes... + +### Example + +add an example CLI or API response... + +### Changes + +Notable changes: +* add each change in a bullet point here +* ... + +### Potential Impacts +* add potential impacts for other components here +* ... diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 1eea7d1f8..2764d038f 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -6,7 +6,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [ main ] env: CARGO_TERM_COLOR: always @@ -19,7 +19,7 @@ concurrency: jobs: test: name: test / ${{ matrix.network }} - runs-on: ubuntu-latest + runs-on: [ bnb-chain-ap-qa-cicd-runners ] env: RUST_BACKTRACE: 1 strategy: @@ -56,7 +56,7 @@ jobs: name: sync / 100k blocks # Only run sync tests in merge groups if: github.event_name == 'merge_group' - runs-on: ubuntu-latest + runs-on: [ bnb-chain-ap-qa-cicd-runners ] env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -84,7 +84,7 @@ jobs: name: integration success runs-on: ubuntu-latest if: always() - needs: [test] + needs: [ test ] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 8cae0f703..1ab412e0d 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -6,7 +6,7 @@ on: pull_request: merge_group: push: - branches: [main] + branches: [ main ] env: CARGO_TERM_COLOR: always @@ -19,13 +19,13 @@ concurrency: jobs: test: name: test / ${{ matrix.network }} (${{ matrix.partition }}/2) - runs-on: ubuntu-latest + runs-on: [ bnb-chain-ap-qa-cicd-runners ] env: RUST_BACKTRACE: 1 strategy: matrix: - partition: [1, 2] - network: [ethereum, optimism] + partition: [ 1, 2 ] + network: [ ethereum, optimism ] timeout-minutes: 30 steps: - uses: actions/checkout@v4 @@ -53,7 +53,7 @@ jobs: state: name: Ethereum state tests - runs-on: ubuntu-latest + runs-on: [ bnb-chain-ap-qa-cicd-runners ] env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -77,7 +77,7 @@ jobs: doc: name: doc tests (${{ matrix.network }}) - runs-on: ubuntu-latest + runs-on: [ bnb-chain-ap-qa-cicd-runners ] env: RUST_BACKTRACE: 1 timeout-minutes: 30 @@ -101,7 +101,7 @@ jobs: name: unit success runs-on: ubuntu-latest if: always() - needs: [test, state, doc] + needs: [ test, state, doc ] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/CHANGELOG.md b/CHANGELOG.md index 5375ba6fc..1ea901166 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,3 +2,14 @@ ## v0.1.0-beta.1 ### FEATURE * [\#10](https://github.com/bnb-chain/reth/pull/10) feat: support opbnb network + +## v0.1.0-beta.2 +### FEATURE +* [\#27](https://github.com/bnb-chain/reth/pull/27) feat: introduce Haber fork into opBNB testnet + +### BUGFIX +* [\#17](https://github.com/bnb-chain/reth/pull/17) fix: p2p incompatible forks for opbnb testnet and mainnet +* [\#19](https://github.com/bnb-chain/reth/pull/19) chore: fix ci issues +* [\#24](https://github.com/bnb-chain/reth/pull/24) fix: opbnb synchronization failure issue +* [\#25](https://github.com/bnb-chain/reth/pull/25) chore: add pr template +* [\#26](https://github.com/bnb-chain/reth/pull/26) fix: opbnb p2p 
forkid mismatch issue diff --git a/Cargo.lock b/Cargo.lock index e42d0a91f..22dd5abf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5734,6 +5734,18 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.8", +] + [[package]] name = "page_size" version = "0.6.0" @@ -6170,6 +6182,15 @@ dependencies = [ "syn 2.0.65", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -8383,7 +8404,7 @@ dependencies = [ [[package]] name = "revm" version = "8.0.0" -source = "git+https://github.com/bnb-chain/revm.git?rev=f5cc71d#f5cc71d443a1957285ee4230562b2a59c2457552" +source = "git+https://github.com/bnb-chain/revm.git?rev=2f38e6c#2f38e6c6f82c8c664e4302cadde0df68f6b4cfbf" dependencies = [ "auto_impl", "cfg-if", @@ -8415,7 +8436,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "4.0.0" -source = "git+https://github.com/bnb-chain/revm.git?rev=f5cc71d#f5cc71d443a1957285ee4230562b2a59c2457552" +source = "git+https://github.com/bnb-chain/revm.git?rev=2f38e6c#2f38e6c6f82c8c664e4302cadde0df68f6b4cfbf" dependencies = [ "revm-primitives", "serde", @@ -8424,7 +8445,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "6.0.0" -source = "git+https://github.com/bnb-chain/revm.git?rev=f5cc71d#f5cc71d443a1957285ee4230562b2a59c2457552" +source = "git+https://github.com/bnb-chain/revm.git?rev=2f38e6c#2f38e6c6f82c8c664e4302cadde0df68f6b4cfbf" dependencies = [ "aurora-engine-modexp", "bls_on_arkworks", @@ -8435,6 +8456,7 @@ dependencies = [ "cometbft-proto", "k256", "once_cell", + "p256", "prost", "revm-primitives", "ripemd", @@ -8446,7 +8468,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "3.1.1" -source = "git+https://github.com/bnb-chain/revm.git?rev=f5cc71d#f5cc71d443a1957285ee4230562b2a59c2457552" +source = "git+https://github.com/bnb-chain/revm.git?rev=2f38e6c#2f38e6c6f82c8c664e4302cadde0df68f6b4cfbf" dependencies = [ "alloy-primitives", "auto_impl", diff --git a/Cargo.toml b/Cargo.toml index d3ecd23c7..60a4c30ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,7 +103,7 @@ rust.missing_debug_implementations = "warn" rust.missing_docs = "warn" rust.unreachable_pub = "warn" rust.unused_must_use = "deny" -rust.rust_2018_idioms = "deny" +rust.rust_2018_idioms = { level = "deny", priority = -1 } rustdoc.all = "warn" [workspace.lints.clippy] @@ -401,6 +401,6 @@ similar-asserts = "1.5.0" test-fuzz = "5" [patch.crates-io] -revm = { git = "https://github.com/bnb-chain/revm.git", rev = "f5cc71d" } -revm-primitives = { git = "https://github.com/bnb-chain/revm.git", rev = "f5cc71d" } -alloy-chains = { git = "https://github.com/alloy-rs/chains.git", rev = "906d6fb" } \ No newline at end of file +revm = { git = "https://github.com/bnb-chain/revm.git", rev = "2f38e6c" } +revm-primitives = { git = "https://github.com/bnb-chain/revm.git", rev = "2f38e6c" } +alloy-chains = { git = "https://github.com/alloy-rs/chains.git", rev = "906d6fb" } diff --git a/Makefile 
b/Makefile index 2b19bb581..cf5861585 100644 --- a/Makefile +++ b/Makefile @@ -414,9 +414,9 @@ fix-lint-other-targets: -- -D warnings fix-lint: - make lint-reth && \ - make lint-op-reth && \ - make lint-other-targets && \ + make fix-lint-reth && \ + make fix-lint-op-reth && \ + make fix-lint-other-targets && \ make fmt .PHONY: rustdocs diff --git a/README.md b/README.md index 486956e83..71510597f 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,17 @@ Coming soon...... ## Run Reth for opBNB +The op-reth can function as both a full node and an archive node. Due to its unique storage advantages, it is primarily utilized for running archive nodes. + +### Hardware Requirements + +* CPU with 16+ cores +* 128GB RAM +* High-performance NVMe SSD with at least 3TB of free space +* A broadband internet connection with upload/download speeds of 25 MB/s + +### Steps to Run op-reth + The op-reth is an [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients) for opBNB. You need to run op-node along with op-reth to synchronize with the opBNB network. @@ -105,10 +116,12 @@ The command below is for an archive node. To run a full node, simply add the `-- # for testnet export network=testnet export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org +export TRUST_NODES="enode://1a8f2d3160ad6efd6591981c026bd31807b79844422d99107f8ffa0bd966f35dd6b44d3169e05fcb15be492a58c3098c1d5ab04a3b2769f1aa87ab871b3ef49b@54.238.146.8:30303,enode://28a8309f958c58a0f6fd3cee83951033d20f2b7369e25c63f66caf0d2bac1df89df52b82d74d828f35c76152e4b2aa8dae816a2e3ea5a03c40d4ec08005d426c@35.74.91.224:30303" # for mainnet # export network=mainnet # export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org +# export TRUST_NODES="enode://db109c6cac5c8b6225edd3176fc3764c58e0720950fe94c122c80978e706a9c9e976629b718e48b6306ea0f9126e5394d3424c9716c5703549e2e7eba216353b@52.193.218.151:30303,enode://e74ecea4943c27d7d4d0c40f84fc3426a7e80f8a9035c0b383725b693ebf9a6376b8c9db12690b513a6ac83041d9b6418d51dc079dce1f13ef948b32f63a589d@54.150.37.120:30303" ./target/release/op-reth node \ --datadir=./datadir \ @@ -126,6 +139,7 @@ export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org --ws.port=8546 \ --builder.gaslimit=150000000 \ --nat=any \ + --trusted-peers=${TRUST_NODES} \ --log.file.directory ./datadir/logs ``` diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 1ce036aa3..b7bbeba7e 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -50,6 +50,7 @@ reth-node-api.workspace = true reth-node-ethereum.workspace = true reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", + "opbnb", ] } reth-node-core.workspace = true reth-node-builder.workspace = true diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index b47e7980b..7d9020fc7 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -376,7 +376,7 @@ impl Command { let max_widths = table.column_max_content_widths(); let mut separator = Row::new(); for width in max_widths { - separator.add_cell(Cell::new(&"-".repeat(width as usize))); + separator.add_cell(Cell::new("-".repeat(width as usize))); } table.add_row(separator); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 689994471..5eb35b499 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -297,7 +297,12 @@ where // get parent hashes let mut parent_block_hashes = 
self.all_chain_hashes(chain_id); let first_pending_block_number = - *parent_block_hashes.first_key_value().expect("There is at least one block hash").0; + if let Some(key_value) = parent_block_hashes.first_key_value() { + *key_value.0 + } else { + debug!(target: "blockchain_tree", ?chain_id, "No blockhashes stored"); + return None + }; let canonical_chain = canonical_chain .iter() .filter(|&(key, _)| key < first_pending_block_number) diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index a9bb4f05b..d2c2e2d33 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -78,10 +78,10 @@ impl PruneHook { /// This will try to spawn the pruner if it is idle: /// 1. Check if pruning is needed through [Pruner::is_pruning_needed]. - /// 2. - /// 1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a - /// separate task. Set pruner state to [PrunerState::Running]. - /// 2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. + /// + /// 2.1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a + /// separate task. Set pruner state to [PrunerState::Running]. + /// 2.2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. /// /// If pruner is already running, do nothing. fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 2cff68e1d..29ad2aba2 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -71,13 +71,13 @@ impl StaticFileHook { /// 1. Check if producing static files is needed through /// [StaticFileProducer::get_static_file_targets](reth_static_file::StaticFileProducerInner::get_static_file_targets) /// and then [StaticFileTargets::any](reth_static_file::StaticFileTargets::any). - /// 2. - /// 1. If producing static files is needed, pass static file request to the - /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and spawn - /// it in a separate task. Set static file producer state to - /// [StaticFileProducerState::Running]. - /// 2. If producing static files is not needed, set static file producer state back to - /// [StaticFileProducerState::Idle]. + /// + /// 2.1. If producing static files is needed, pass static file request to the + /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and + /// spawn it in a separate task. Set static file producer state to + /// [StaticFileProducerState::Running]. + /// 2.2. If producing static files is not needed, set static file producer state back to + /// [StaticFileProducerState::Idle]. /// /// If static_file_producer is already running, do nothing. 
fn try_spawn_static_file_producer( diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 1057457c7..47a311f4d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -710,13 +710,13 @@ where /// If validation fails, the response MUST contain the latest valid hash: /// /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: + /// conditions: /// - It is fully validated and deemed VALID /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a PoW block. + /// conditions are satisfied by a PoW block. /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. + /// the above conditions. fn latest_valid_hash_for_invalid_payload( &mut self, parent_hash: B256, @@ -1110,8 +1110,8 @@ where /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// This validates the following engine API rule: diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index ee4edb8bd..0f2ff839b 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -229,6 +229,10 @@ impl ForkFilter { forks.remove(&ForkFilterKey::Time(0)); forks.remove(&ForkFilterKey::Block(0)); + // Skip Fermat hardfork for opbnb + forks.remove(&ForkFilterKey::Time(1698991506)); + forks.remove(&ForkFilterKey::Time(1701151200)); + let forks = forks .into_iter() // filter out forks that are pre-genesis by timestamp diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs index 22ebb40f2..ec4664c0c 100644 --- a/crates/ethereum-forks/src/hardfork.rs +++ b/crates/ethereum-forks/src/hardfork.rs @@ -74,6 +74,9 @@ pub enum Hardfork { /// Fermat #[cfg(all(feature = "optimism", feature = "opbnb"))] Fermat, + /// Haber + #[cfg(all(feature = "optimism", feature = "opbnb"))] + Haber, // ArbOS20Atlas, // Upcoming @@ -564,6 +567,8 @@ impl FromStr for Hardfork { "precontractforkblock" => Hardfork::PreContractForkBlock, #[cfg(all(feature = "optimism", feature = "opbnb"))] "fermat" => Hardfork::Fermat, + #[cfg(all(feature = "optimism", feature = "opbnb"))] + "haber" => Hardfork::Haber, #[cfg(feature = "optimism")] "canyon" => Hardfork::Canyon, #[cfg(feature = "optimism")] diff --git a/crates/exex/src/manager.rs b/crates/exex/src/manager.rs index 1de8c102e..088328c86 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -160,7 +160,7 @@ pub struct ExExManagerMetrics { /// The manager is responsible for: /// /// - Receiving relevant events from the rest of the node, and sending these to the execution -/// extensions +/// extensions /// - Backpressure /// - Error handling /// - Monitoring diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 7d2b50e41..0c1a9553d 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -210,6 +210,7 @@ pub enum BlockStatus { /// This is 
required to: /// - differentiate whether trie state updates should be cached. /// - inform other +/// /// This is required because the state root check can only be performed if the targeted block can be /// traced back to the canonical __head__. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 52398de4f..65d74627e 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -45,8 +45,8 @@ fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> B256 { /// # Panics /// * If the `dest` is empty /// * If the `dest` len is greater than or equal to the hash output len * the max counter value. In -/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest -/// cannot have a len greater than 32 * 2^32 - 1. +/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest +/// cannot have a len greater than 32 * 2^32 - 1. fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) { concat_kdf::derive_key_into::(secret.as_slice(), s1, dest).unwrap(); } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index dc8011879..c4101e852 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -169,7 +169,7 @@ impl From for ProtocolBroadcastMessage { /// The ethereum wire protocol is a set of messages that are broadcast to the network in two /// styles: /// * A request message sent by a peer (such as [`GetPooledTransactions`]), and an associated -/// response message (such as [`PooledTransactions`]). +/// response message (such as [`PooledTransactions`]). /// * A message that is broadcast to the network, without a corresponding request. /// /// The newer `eth/66` is an efficiency upgrade on top of `eth/65`, introducing a request id to diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 6abc9a33d..c340a4039 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -152,7 +152,7 @@ pub enum SocketAddressParsingError { /// The following formats are checked: /// /// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the -/// hostname is set to `localhost`. +/// hostname is set to `localhost`. 
/// - If the value contains `:` it is assumed to be the format `:` /// - Otherwise it is assumed to be a hostname /// diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 75919f6f0..b33df18f2 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -257,6 +257,7 @@ impl From for MaybePlatformPath { /// * mainnet: `/mainnet` /// * goerli: `/goerli` /// * sepolia: `/sepolia` +/// /// Otherwise, the path will be dependent on the chain ID: /// * `/` #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index a1c3a168b..a1182459b 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -36,3 +36,7 @@ optimism = [ "reth-interfaces/optimism", "revm-primitives/optimism", ] +opbnb = [ + "reth-primitives/opbnb", + "revm-primitives/opbnb", +] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index abd8b1ce2..2e7255f3f 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -16,8 +16,8 @@ use reth_interfaces::{ provider::ProviderError, }; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, - Receipts, TxType, Withdrawals, U256, + Address, BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, + Receipt, Receipts, TxType, Withdrawals, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -27,9 +27,9 @@ use reth_revm::{ }; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, StorageSlot, }; -use std::sync::Arc; +use std::{collections::HashMap, str::FromStr, sync::Arc}; use tracing::{debug, trace}; /// Provides executors to execute regular ethereum blocks @@ -358,46 +358,44 @@ where ); #[cfg(all(feature = "optimism", feature = "opbnb"))] - if self.chain_spec.fork(Hardfork::PreContractForkBlock).transitions_at_block(block.number) { + if self.chain_spec().fork(Hardfork::PreContractForkBlock).transitions_at_block(block.number) + { // WBNBContract WBNB preDeploy contract address let w_bnb_contract_address = Address::from_str("0x4200000000000000000000000000000000000006").unwrap(); - let mut w_bnb_storage = PlainStorage::new(); - // insert storage for wBNB contract - // nameSlot { Name: "Wrapped BNB" } - w_bnb_storage.insert( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - U256::from_str( - "0x5772617070656420424e42000000000000000000000000000000000000000016", - ) - .unwrap(), - ); - // symbolSlot { Symbol: "wBNB" } - w_bnb_storage.insert( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - U256::from_str( - "0x57424e4200000000000000000000000000000000000000000000000000000008", - ) - .unwrap(), - ); - // insert wBNB contract with storage - self.db_mut().insert_account_with_storage( - w_bnb_contract_address, - AccountInfo::default(), - w_bnb_storage, - ); // GovernanceToken contract address let governance_token_contract_address = Address::from_str("0x4200000000000000000000000000000000000042").unwrap(); - // destruct the governance token contract - self.evm - .selfdestruct(governance_token_contract_address, governance_token_contract_address); + // touch in cache + let mut w_bnb_contract_account = + 
self.state.load_cache_account(w_bnb_contract_address).unwrap().clone(); + let mut governance_token_account = + self.state.load_cache_account(governance_token_contract_address).unwrap().clone(); + // change the token symbol and token name + let w_bnb_contract_change = w_bnb_contract_account.change( + w_bnb_contract_account.account_info().unwrap(), HashMap::from([ + // nameSlot { Name: "Wrapped BNB" } + ( + U256::from_str("0x0000000000000000000000000000000000000000000000000000000000000000").unwrap(), + StorageSlot { present_value: U256::from_str("0x5772617070656420424e42000000000000000000000000000000000000000016").unwrap(), ..Default::default() }, + ), + // symbolSlot { Symbol: "wBNB" } + ( + U256::from_str("0x0000000000000000000000000000000000000000000000000000000000000001").unwrap(), + StorageSlot { present_value: U256::from_str("0x57424e4200000000000000000000000000000000000000000000000000000008").unwrap(), ..Default::default() }, + ), + ]) + ); + // destroy governance token contract + let governance_token_change = governance_token_account.selfdestruct().unwrap(); + + if let Some(s) = self.state.transition_state.as_mut() { + let transitions = vec![ + (w_bnb_contract_address, w_bnb_contract_change), + (governance_token_contract_address, governance_token_change), + ]; + s.add_transitions(transitions); + } } // increment balances diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 9c985825d..5b689eebb 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -67,4 +67,9 @@ optimism = [ "reth-evm-optimism/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", +] +opbnb = [ + "reth-primitives/opbnb", + "reth-evm-optimism/opbnb", + "reth-optimism-payload-builder/opbnb", ] \ No newline at end of file diff --git a/crates/payload/optimism/Cargo.toml b/crates/payload/optimism/Cargo.toml index 567c02833..4639dc057 100644 --- a/crates/payload/optimism/Cargo.toml +++ b/crates/payload/optimism/Cargo.toml @@ -40,4 +40,8 @@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-evm-optimism/optimism", +] +opbnb = [ + "reth-primitives/opbnb", + "reth-evm-optimism/opbnb", ] \ No newline at end of file diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 6b95b0425..3faa24e77 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -84,8 +84,8 @@ impl ExecutionPayloadValidator { /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// The checks are done in the order that conforms with the engine-API specification. 
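Aside: the `crates/optimism/evm/src/execute.rs` hunk above replaces the old `insert_account_with_storage`/`selfdestruct` calls with cache-account transitions applied once, at the block where `PreContractForkBlock` activates. Below is a rough, self-contained sketch of that one-time override pattern; `AccountOverride` and `transitions_at_block` are hypothetical stand-ins for illustration only (the real code goes through revm's `State`, `CacheAccount`, and `StorageSlot` types), while the addresses, storage slots, and the testnet activation block are taken from this patch.

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for the state change recorded for one account.
#[derive(Debug, Default, Clone)]
struct AccountOverride {
    /// Storage slots to overwrite (slot -> new value), as hex strings for brevity.
    storage: HashMap<&'static str, &'static str>,
    /// Whether the account is treated as self-destructed after the fork block.
    selfdestructed: bool,
}

/// Minimal model of "does this fork activate exactly at `block`?".
fn transitions_at_block(fork_activation_block: u64, block: u64) -> bool {
    fork_activation_block == block
}

/// Sketch of the one-time opBNB PreContractForkBlock override: adjust the WBNB
/// predeploy's name/symbol slots and retire the GovernanceToken predeploy.
fn pre_contract_fork_overrides(block: u64) -> HashMap<&'static str, AccountOverride> {
    let mut overrides = HashMap::new();
    // opBNB testnet activation block, as declared in the chain spec in this patch.
    if !transitions_at_block(5_805_494, block) {
        return overrides;
    }
    overrides.insert(
        "0x4200000000000000000000000000000000000006", // WBNB predeploy
        AccountOverride {
            storage: HashMap::from([
                // nameSlot { Name: "Wrapped BNB" }
                (
                    "0x0000000000000000000000000000000000000000000000000000000000000000",
                    "0x5772617070656420424e42000000000000000000000000000000000000000016",
                ),
                // symbolSlot { Symbol: "wBNB" }
                (
                    "0x0000000000000000000000000000000000000000000000000000000000000001",
                    "0x57424e4200000000000000000000000000000000000000000000000000000008",
                ),
            ]),
            selfdestructed: false,
        },
    );
    overrides.insert(
        "0x4200000000000000000000000000000000000042", // GovernanceToken predeploy
        AccountOverride { selfdestructed: true, ..Default::default() },
    );
    overrides
}

fn main() {
    // Only the fork block itself produces overrides; every other block is untouched.
    assert!(pre_contract_fork_overrides(5_805_493).is_empty());
    let at_fork = pre_contract_fork_overrides(5_805_494);
    println!("accounts touched at fork block: {}", at_fork.len()); // 2
}
```

In the patch itself these overrides are recorded as transitions on the executor's state (so they flow into the bundle state and receipts/root calculation like any other change) rather than written to the database directly, which is the main behavioral difference from the code being removed.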
diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index b68a63b81..d7eb3ba11 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -526,6 +526,11 @@ pub static OPBNB_TESTNET: Lazy> = Lazy::new(|| { (Hardfork::Regolith, ForkCondition::Timestamp(0)), (Hardfork::PreContractForkBlock, ForkCondition::Block(5805494)), (Hardfork::Fermat, ForkCondition::Timestamp(1698991506)), + (Hardfork::Shanghai, ForkCondition::Timestamp(1715753400)), + (Hardfork::Canyon, ForkCondition::Timestamp(1715753400)), + (Hardfork::Cancun, ForkCondition::Timestamp(1715754600)), + (Hardfork::Ecotone, ForkCondition::Timestamp(1715754600)), + (Hardfork::Haber, ForkCondition::Timestamp(1717048800)), ]), base_fee_params: BaseFeeParamsKind::Variable( vec![(Hardfork::London, BaseFeeParams::ethereum())].into(), @@ -961,6 +966,11 @@ impl ChainSpec { for timestamp in self.forks_iter().filter_map(|(_, cond)| { cond.as_timestamp().filter(|time| time > &self.genesis.timestamp) }) { + // Skip Fermat hardfork for opbnb + if timestamp == 1698991506 || timestamp == 1701151200 { + continue; + } + let cond = ForkCondition::Timestamp(timestamp); if cond.active_at_head(head) { if timestamp != current_applied { diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 1300b9b0b..7f39c8d74 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -70,8 +70,8 @@ impl PruneModes { /// /// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`. /// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed -/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we -/// have one block in the database. +/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we +/// have one block in the database. 
fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>( deserializer: D, ) -> Result, D::Error> { diff --git a/crates/primitives/src/revm/config.rs b/crates/primitives/src/revm/config.rs index 2fe8111a6..ad44b79ad 100644 --- a/crates/primitives/src/revm/config.rs +++ b/crates/primitives/src/revm/config.rs @@ -10,7 +10,9 @@ pub fn revm_spec_by_timestamp_after_merge( ) -> revm_primitives::SpecId { #[cfg(feature = "optimism")] if chain_spec.is_optimism() { - return if chain_spec.fork(Hardfork::Ecotone).active_at_timestamp(timestamp) { + return if chain_spec.fork(Hardfork::Haber).active_at_timestamp(timestamp) { + revm_primitives::HABER + } else if chain_spec.fork(Hardfork::Ecotone).active_at_timestamp(timestamp) { revm_primitives::ECOTONE } else if chain_spec.fork(Hardfork::Canyon).active_at_timestamp(timestamp) { revm_primitives::CANYON @@ -36,7 +38,9 @@ pub fn revm_spec_by_timestamp_after_merge( pub fn revm_spec(chain_spec: &ChainSpec, block: Head) -> revm_primitives::SpecId { #[cfg(feature = "optimism")] if chain_spec.is_optimism() { - if chain_spec.fork(Hardfork::Ecotone).active_at_head(&block) { + if chain_spec.fork(Hardfork::Haber).active_at_head(&block) { + return revm_primitives::HABER + } else if chain_spec.fork(Hardfork::Ecotone).active_at_head(&block) { return revm_primitives::ECOTONE } else if chain_spec.fork(Hardfork::Canyon).active_at_head(&block) { return revm_primitives::CANYON diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index b13a7018f..e217ad354 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -138,8 +138,8 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn /// and therefore: /// * the call must execute to completion /// * the call does not count against the block’s gas limit -/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as -/// part of the call +/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as part +/// of the call /// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { env.tx = TxEnv { diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 9d8cf6ac6..0713763fc 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -58,8 +58,7 @@ use tracing::*; /// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded /// - [tables::AccountsHistory] to remove change set and apply old values to /// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old -/// values -/// to [tables::PlainStorageState] +/// values to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] pub struct ExecutionStage { diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 051b6a85f..5489811cd 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -72,11 +72,10 @@ impl Default for AccountHashingStage { /// /// In order to check the "full hashing" mode of the stage you want to generate more /// transitions than `AccountHashingStage.clean_threshold`. This requires: -/// 1. 
Creating enough blocks so there's enough transactions to generate -/// the required transition keys in the `BlockTransitionIndex` (which depends on the -/// `TxTransitionIndex` internally) -/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually -/// take the 2nd codepath +/// 1. Creating enough blocks so there's enough transactions to generate the required transition +/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally) +/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the +/// 2nd codepath #[derive(Clone, Debug)] pub struct SeedOpts { /// The range of blocks to be generated diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index e078fd954..977e14336 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -506,10 +506,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionSenders] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionSenders] is not empty. + /// [tables::TransactionSenders] is not empty. fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 342183905..a41339b1f 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -430,10 +430,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionHashNumbers] is not empty. + /// [tables::TransactionHashNumbers] is not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 43adc2492..3d1a88152 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -191,8 +191,7 @@ impl DbDupCursorRO for Cursor { /// - Some(key), Some(subkey): a `key` item whose data is >= than `subkey` /// - Some(key), None: first item of a specified `key` /// - None, Some(subkey): like first case, but in the first key - /// - None, None: first item in the table - /// of a DUPSORT table. + /// - None, None: first item in the table of a DUPSORT table. fn walk_dup( &mut self, key: Option, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 6e07b7c46..643bc23e6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -375,20 +375,20 @@ impl DatabaseProvider { /// /// If UNWIND is false we will just read the state/blocks and return them. /// - /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all - /// the transaction ids. - /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table - /// and the [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to - /// reconstruct the changesets. 
- /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. + /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all the + /// transaction ids. + /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table and the + /// [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: + /// we: /// 1. Take the old value from the changeset /// 2. Take the new value from the plain state /// 3. Save the old value to the local state /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: + /// have seen before we: /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a27b9d021..163f30ea6 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -34,16 +34,16 @@ //! //! In essence the transaction pool is made of three separate sub-pools: //! -//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy -//! (3. a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest -//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has -//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. +//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy (3. +//! a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest +//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has +//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. //! -//! - Queued Pool: Contains all transactions that are currently blocked by missing -//! transactions: (3. a)(2): _With_ nonce gaps or due to lack of funds. +//! - Queued Pool: Contains all transactions that are currently blocked by missing transactions: +//! (3. a)(2): _With_ nonce gaps or due to lack of funds. //! -//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render -//! an EIP-1559 and all subsequent transactions of the sender currently invalid. +//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render an +//! EIP-1559 and all subsequent transactions of the sender currently invalid. //! //! The classification of transactions is always dependent on the current state that is changed as //! soon as a new block is mined. Once a new block is mined, the account changeset must be applied diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 7e733a659..d78af7908 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -90,9 +90,9 @@ impl PendingPool { /// Returns an iterator over all transactions that are _currently_ ready. /// /// 1. 
The iterator _always_ returns transaction in order: It never returns a transaction with - /// an unsatisfied dependency and only returns them if dependency transaction were yielded - /// previously. In other words: The nonces of transactions with the same sender will _always_ - /// increase by exactly 1. + /// an unsatisfied dependency and only returns them if dependency transaction were yielded + /// previously. In other words: The nonces of transactions with the same sender will _always_ + /// increase by exactly 1. /// /// The order of transactions which satisfy (1.) is determent by their computed priority: A /// transaction with a higher priority is returned before a transaction with a lower priority. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index bcad71edb..4e35733d4 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1002,6 +1002,7 @@ impl AllTransactions { /// For all transactions: /// - decreased basefee: promotes from `basefee` to `pending` sub-pool. /// - increased basefee: demotes from `pending` to `basefee` sub-pool. + /// /// Individually: /// - decreased sender allowance: demote from (`basefee`|`pending`) to `queued`. /// - increased sender allowance: promote from `queued` to diff --git a/examples/node-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs index b9cd53298..e8a751840 100644 --- a/examples/node-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -8,12 +8,8 @@ //! ``` //! //! This launch the regular reth node and also print: -//! -//! > "All components initialized" -//! once all components have been initialized and -//! -//! > "Node started" -//! once the node has been started. +//! > "All components initialized" – once all components have been initialized +//! > "Node started" – once the node has been started. use reth::cli::Cli; use reth_node_ethereum::EthereumNode;
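Taken together, the `chain/spec.rs` and `revm/config.rs` hunks above wire the new Haber hardfork into spec selection: the newest fork is checked first and older forks are the fallbacks. The following is a simplified, self-contained illustration of that ordering, assuming hypothetical `SpecId` stand-ins (not revm's enum) and using the opBNB testnet activation timestamps added to `OPBNB_TESTNET` in this patch; pre-Canyon forks are collapsed into `Regolith` for brevity.

```rust
/// Hypothetical stand-ins for the revm spec identifiers referenced by the patch.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SpecId {
    Regolith,
    Canyon,
    Ecotone,
    Haber,
}

/// opBNB testnet activation timestamps from the OPBNB_TESTNET hunk in this patch.
const CANYON_TIME: u64 = 1_715_753_400;
const ECOTONE_TIME: u64 = 1_715_754_600;
const HABER_TIME: u64 = 1_717_048_800;

/// Mirrors the selection order added to `revm_spec_by_timestamp_after_merge`:
/// check Haber first, then fall through to Ecotone, Canyon, and older forks.
fn spec_by_timestamp(timestamp: u64) -> SpecId {
    if timestamp >= HABER_TIME {
        SpecId::Haber
    } else if timestamp >= ECOTONE_TIME {
        SpecId::Ecotone
    } else if timestamp >= CANYON_TIME {
        SpecId::Canyon
    } else {
        SpecId::Regolith
    }
}

fn main() {
    assert_eq!(spec_by_timestamp(1_715_753_399), SpecId::Regolith);
    // Canyon and Ecotone share a window on testnet; Ecotone wins once its time is reached.
    assert_eq!(spec_by_timestamp(ECOTONE_TIME), SpecId::Ecotone);
    assert_eq!(spec_by_timestamp(HABER_TIME), SpecId::Haber);
    println!("spec at Haber activation: {:?}", spec_by_timestamp(HABER_TIME));
}
```

In the real code the checks are expressed as `chain_spec.fork(Hardfork::Haber).active_at_timestamp(..)` (or `active_at_head` in `revm_spec`), so adding a fork only requires inserting one new branch ahead of the existing chain, which is exactly what this patch does.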