From afb76433d69407786567888b2bf8a57d54f61947 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 10:32:52 +0200 Subject: [PATCH 01/40] moved opt bin and cli runner to optimism-cli --- Cargo.lock | 5 ++ crates/optimism/cli/Cargo.toml | 6 ++ crates/optimism/cli/src/bin.rs | 43 ++++++++++++++ crates/optimism/cli/src/lib.rs | 105 +++++++++++++++++++++++++++++++++ 4 files changed, 159 insertions(+) create mode 100644 crates/optimism/cli/src/bin.rs diff --git a/Cargo.lock b/Cargo.lock index 8741d4a5c1f3..5d8ee7671c5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7834,6 +7834,8 @@ dependencies = [ "reth-chainspec", "reth-cli", "reth-cli-commands", + "reth-cli-runner", + "reth-cli-util", "reth-config", "reth-consensus", "reth-db", @@ -7843,8 +7845,10 @@ dependencies = [ "reth-evm-optimism", "reth-execution-types", "reth-network-p2p", + "reth-node-builder", "reth-node-core", "reth-node-events", + "reth-node-optimism", "reth-optimism-primitives", "reth-primitives", "reth-provider", @@ -7853,6 +7857,7 @@ dependencies = [ "reth-stages-types", "reth-static-file", "reth-static-file-types", + "reth-tracing", "serde_json", "shellexpand", "tokio", diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 3ca591480ebc..da34a8062a65 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -36,6 +36,12 @@ reth-errors.workspace = true reth-config.workspace = true reth-evm-optimism.workspace = true reth-cli.workspace = true +reth-cli-runner.workspace = true +reth-node-builder.workspace = true +reth-cli-util.workspace = true +reth-node-optimism.workspace = true +reth-tracing.workspace = true + # eth alloy-genesis.workspace = true diff --git a/crates/optimism/cli/src/bin.rs b/crates/optimism/cli/src/bin.rs new file mode 100644 index 000000000000..ff830d609132 --- /dev/null +++ b/crates/optimism/cli/src/bin.rs @@ -0,0 +1,43 @@ +#![allow(missing_docs, rustdoc::missing_crate_level_docs)] + +use clap::Parser; +use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; +use std::sync::Arc; + +use crate::Cli; + +// We use jemalloc for performance reasons +#[cfg(all(feature = "jemalloc", unix))] +#[global_allocator] +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +fn main() { + reth_cli_util::sigsegv_handler::install(); + + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
+ if std::env::var_os("RUST_BACKTRACE").is_none() { + std::env::set_var("RUST_BACKTRACE", "1"); + } + + if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { + let handle = builder + .node(OptimismNode::new(rollup_args.clone())) + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = rollup_args.sequencer_http { + ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( + sequencer_http, + ))); + } + + Ok(()) + }) + .launch() + .await?; + + handle.node_exit_future.await + }) { + eprintln!("Error: {err:?}"); + std::process::exit(1); + } +} diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 6260a8e9044e..d89642fe8720 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -34,13 +34,30 @@ use std::{ffi::OsString, fmt, sync::Arc}; use chainspec::OpChainSpecParser; use clap::{command, value_parser, Parser}; use commands::Commands; +use futures_util::Future; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::node::NoArgs; +use reth_cli_runner::CliRunner; +use reth_db::DatabaseEnv; +use reth_evm_optimism::OpExecutorProvider; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::{utils::chain_help, LogArgs}, version::{LONG_VERSION, SHORT_VERSION}, }; +use reth_tracing::FileWorkerGuard; +use std::{ffi::OsString, fmt, sync::Arc}; +use tracing::info; + +/// Optimism binary entrypoint. +pub mod bin; + +/// Optimism chain specification parser. +pub mod chainspec; +/// Optimism CLI commands. +pub mod commands; +pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; /// The main reth cli interface. /// @@ -100,3 +117,91 @@ impl Cli { Self::try_parse_from(itr) } } + +impl Cli { + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](node::NodeCommand). + /// + /// + /// # Example + /// + /// ```no_run + /// use reth::cli::Cli; + /// use reth_node_ethereum::EthereumNode; + /// + /// Cli::parse_args() + /// .run(|builder, _| async move { + /// let handle = builder.launch_node(EthereumNode::default()).await?; + /// + /// handle.wait_for_node_exit().await + /// }) + /// .unwrap(); + /// ``` + /// + /// # Example + /// + /// Parse additional CLI arguments for the node command and use it to configure the node. 
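+    /// The parsed arguments are passed to the launcher closure as its second parameter.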
+ /// + /// ```no_run + /// use clap::Parser; + /// use reth::cli::Cli; + /// + /// #[derive(Debug, Parser)] + /// pub struct MyArgs { + /// pub enable: bool, + /// } + /// + /// Cli::parse() + /// .run(|builder, my_args: MyArgs| async move { + /// // launch the node + /// + /// Ok(()) + /// }) + /// .unwrap(); + /// ```` + pub fn run(mut self, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext>>, Ext) -> Fut, + Fut: Future>, + { + // add network name to logs dir + self.logs.log_file_directory = + self.logs.log_file_directory.join(self.chain.chain.to_string()); + + let _guard = self.init_tracing()?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + + let runner = CliRunner::default(); + match self.command { + Commands::Node(command) => { + runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) + } + Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::ImportReceiptsOp(command) => { + runner.run_blocking_until_ctrl_c(command.execute()) + } + Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), + Commands::Stage(command) => runner.run_command_until_exit(|ctx| { + command.execute(ctx, |chain_spec| OpExecutorProvider::optimism(chain_spec)) + }), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), + Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute()), + } + } + + /// Initializes tracing with the configured options. + /// + /// If file logging is enabled, this function returns a guard that must be kept alive to ensure + /// that all logs are flushed to disk. 
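+    /// Returns [`None`] if file logging is disabled.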
+ pub fn init_tracing(&self) -> eyre::Result> { + let guard = self.logs.init_tracing()?; + Ok(guard) + } +} From e3052d15ed52816df98dbbadaf83ce1385b2c63c Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 10:45:39 +0200 Subject: [PATCH 02/40] added cargo bin sections --- bin/reth/Cargo.toml | 4 -- bin/reth/src/optimism.rs | 46 --------------------- crates/optimism/cli/Cargo.toml | 6 ++- crates/optimism/cli/src/lib.rs | 10 ++--- crates/optimism/cli/src/{bin.rs => main.rs} | 3 +- 5 files changed, 10 insertions(+), 59 deletions(-) delete mode 100644 bin/reth/src/optimism.rs rename crates/optimism/cli/src/{bin.rs => main.rs} (97%) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index d67436121e7c..a09fbbf0c4d1 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -152,7 +152,3 @@ ethereum = [] name = "reth" path = "src/main.rs" -[[bin]] -name = "op-reth" -path = "src/optimism.rs" -required-features = ["optimism"] diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs deleted file mode 100644 index 9ed6f552c737..000000000000 --- a/bin/reth/src/optimism.rs +++ /dev/null @@ -1,46 +0,0 @@ -#![allow(missing_docs, rustdoc::missing_crate_level_docs)] - -use clap::Parser; -use reth::cli::Cli; -use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; -use std::sync::Arc; - -// We use jemalloc for performance reasons -#[cfg(all(feature = "jemalloc", unix))] -#[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - -#[cfg(not(feature = "optimism"))] -compile_error!("Cannot build the `op-reth` binary with the `optimism` feature flag disabled. Did you mean to build `reth`?"); - -#[cfg(feature = "optimism")] -fn main() { - reth_cli_util::sigsegv_handler::install(); - - // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
- if std::env::var_os("RUST_BACKTRACE").is_none() { - std::env::set_var("RUST_BACKTRACE", "1"); - } - - if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let handle = builder - .node(OptimismNode::new(rollup_args.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = rollup_args.sequencer_http { - ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( - sequencer_http, - ))); - } - - Ok(()) - }) - .launch() - .await?; - - handle.node_exit_future.await - }) { - eprintln!("Error: {err:?}"); - std::process::exit(1); - } -} diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index da34a8062a65..fd80a16931af 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -70,4 +70,8 @@ eyre.workspace = true optimism = [ "reth-primitives/optimism", "reth-evm-optimism/optimism", - ] \ No newline at end of file + ] + +[[bin]] +name = "op-reth" +path = "src/main.rs" \ No newline at end of file diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index d89642fe8720..b125ba0c0ba6 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -39,6 +39,7 @@ use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::node::NoArgs; use reth_cli_runner::CliRunner; +use reth_cli_util as _; use reth_db::DatabaseEnv; use reth_evm_optimism::OpExecutorProvider; use reth_node_builder::{NodeBuilder, WithLaunchContext}; @@ -46,13 +47,11 @@ use reth_node_core::{ args::{utils::chain_help, LogArgs}, version::{LONG_VERSION, SHORT_VERSION}, }; +use reth_node_optimism as _; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, sync::Arc}; use tracing::info; -/// Optimism binary entrypoint. -pub mod bin; - /// Optimism chain specification parser. pub mod chainspec; /// Optimism CLI commands. @@ -186,9 +185,8 @@ impl Cli { } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute(ctx, |chain_spec| OpExecutorProvider::optimism(chain_spec)) - }), + Commands::Stage(command) => runner + .run_command_until_exit(|ctx| command.execute(ctx, OpExecutorProvider::optimism)), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), diff --git a/crates/optimism/cli/src/bin.rs b/crates/optimism/cli/src/main.rs similarity index 97% rename from crates/optimism/cli/src/bin.rs rename to crates/optimism/cli/src/main.rs index ff830d609132..29e297544413 100644 --- a/crates/optimism/cli/src/bin.rs +++ b/crates/optimism/cli/src/main.rs @@ -4,14 +4,13 @@ use clap::Parser; use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; use std::sync::Arc; -use crate::Cli; - // We use jemalloc for performance reasons #[cfg(all(feature = "jemalloc", unix))] #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; fn main() { + use reth_optimism_cli::Cli; reth_cli_util::sigsegv_handler::install(); // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
From d83bc31fa0fdfdc812bcb3a7795edf4fa825a437 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 11:03:43 +0200 Subject: [PATCH 03/40] added optimism bin crate --- crates/optimism/bin/Cargo.toml | 24 +++++++++++++++++++ crates/optimism/bin/src/main.rs | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 crates/optimism/bin/Cargo.toml create mode 100644 crates/optimism/bin/src/main.rs diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml new file mode 100644 index 000000000000..233342519344 --- /dev/null +++ b/crates/optimism/bin/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "op-reth" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +reth-node-builder.workspace = true +reth-cli-util.workspace = true +reth-node-optimism.workspace = true +reth-optimism-cli.workspace = true +reth-tracing.workspace = true +clap = { workspace = true, features = ["derive", "env"] } + +[lints] +workspace = true + +[[bin]] +name = "op-reth" +path = "src/main.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs new file mode 100644 index 000000000000..29e297544413 --- /dev/null +++ b/crates/optimism/bin/src/main.rs @@ -0,0 +1,42 @@ +#![allow(missing_docs, rustdoc::missing_crate_level_docs)] + +use clap::Parser; +use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; +use std::sync::Arc; + +// We use jemalloc for performance reasons +#[cfg(all(feature = "jemalloc", unix))] +#[global_allocator] +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +fn main() { + use reth_optimism_cli::Cli; + reth_cli_util::sigsegv_handler::install(); + + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
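+    // Setting the variable here, before any worker threads exist, avoids the
+    // platform-specific races associated with calling `std::env::set_var` in a
+    // multi-threaded process.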
+ if std::env::var_os("RUST_BACKTRACE").is_none() { + std::env::set_var("RUST_BACKTRACE", "1"); + } + + if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { + let handle = builder + .node(OptimismNode::new(rollup_args.clone())) + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = rollup_args.sequencer_http { + ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( + sequencer_http, + ))); + } + + Ok(()) + }) + .launch() + .await?; + + handle.node_exit_future.await + }) { + eprintln!("Error: {err:?}"); + std::process::exit(1); + } +} From b5c707ac3eb988d995a51807959acd6ddc081d5b Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 11:26:01 +0200 Subject: [PATCH 04/40] bin features --- crates/optimism/bin/Cargo.toml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 233342519344..1d54dfcbe94b 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -16,9 +16,23 @@ reth-optimism-cli.workspace = true reth-tracing.workspace = true clap = { workspace = true, features = ["derive", "env"] } +[target.'cfg(unix)'.dependencies] +tikv-jemallocator = { version = "0.5.0", optional = true } + [lints] workspace = true [[bin]] name = "op-reth" path = "src/main.rs" + + +[features] +default = ["jemalloc"] + +jemalloc = ["dep:tikv-jemallocator"] +jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] + +optimism = [ + "reth-optimism-cli/optimism", +] From 08bd2b28293e045cd1f7243f18e5350ca35b2653 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 11:34:05 +0200 Subject: [PATCH 05/40] removed crate --- Cargo.lock | 15 ++++++++++-- Cargo.toml | 2 +- crates/optimism/cli/Cargo.toml | 5 ---- crates/optimism/cli/src/lib.rs | 2 -- crates/optimism/cli/src/main.rs | 42 --------------------------------- 5 files changed, 14 insertions(+), 52 deletions(-) delete mode 100644 crates/optimism/cli/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 5d8ee7671c5c..41a2d909df20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5231,6 +5231,19 @@ dependencies = [ "serde_json", ] +[[package]] +name = "op-reth" +version = "1.0.1" +dependencies = [ + "clap", + "reth-cli-util", + "reth-node-builder", + "reth-node-optimism", + "reth-optimism-cli", + "reth-tracing", + "tikv-jemallocator", +] + [[package]] name = "opaque-debug" version = "0.3.1" @@ -7835,7 +7848,6 @@ dependencies = [ "reth-cli", "reth-cli-commands", "reth-cli-runner", - "reth-cli-util", "reth-config", "reth-consensus", "reth-db", @@ -7848,7 +7860,6 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-node-events", - "reth-node-optimism", "reth-optimism-primitives", "reth-primitives", "reth-provider", diff --git a/Cargo.toml b/Cargo.toml index b81e5f5962c7..9fe3b2aee6fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -137,7 +137,7 @@ members = [ "examples/txpool-tracing/", "examples/custom-rlpx-subprotocol", "testing/ef-tests/", - "testing/testing-utils", + "testing/testing-utils", "crates/optimism/bin", ] default-members = ["bin/reth"] diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index fd80a16931af..698856f4915d 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -38,8 +38,6 @@ reth-evm-optimism.workspace = true reth-cli.workspace = true reth-cli-runner.workspace = true reth-node-builder.workspace = true -reth-cli-util.workspace = true -reth-node-optimism.workspace = true 
reth-tracing.workspace = true @@ -72,6 +70,3 @@ eyre.workspace = true "reth-evm-optimism/optimism", ] -[[bin]] -name = "op-reth" -path = "src/main.rs" \ No newline at end of file diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b125ba0c0ba6..5fd940e3d4ae 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -39,7 +39,6 @@ use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::node::NoArgs; use reth_cli_runner::CliRunner; -use reth_cli_util as _; use reth_db::DatabaseEnv; use reth_evm_optimism::OpExecutorProvider; use reth_node_builder::{NodeBuilder, WithLaunchContext}; @@ -47,7 +46,6 @@ use reth_node_core::{ args::{utils::chain_help, LogArgs}, version::{LONG_VERSION, SHORT_VERSION}, }; -use reth_node_optimism as _; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, sync::Arc}; use tracing::info; diff --git a/crates/optimism/cli/src/main.rs b/crates/optimism/cli/src/main.rs deleted file mode 100644 index 29e297544413..000000000000 --- a/crates/optimism/cli/src/main.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(missing_docs, rustdoc::missing_crate_level_docs)] - -use clap::Parser; -use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; -use std::sync::Arc; - -// We use jemalloc for performance reasons -#[cfg(all(feature = "jemalloc", unix))] -#[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - -fn main() { - use reth_optimism_cli::Cli; - reth_cli_util::sigsegv_handler::install(); - - // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. - if std::env::var_os("RUST_BACKTRACE").is_none() { - std::env::set_var("RUST_BACKTRACE", "1"); - } - - if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let handle = builder - .node(OptimismNode::new(rollup_args.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = rollup_args.sequencer_http { - ctx.registry.set_eth_raw_transaction_forwarder(Arc::new(SequencerClient::new( - sequencer_http, - ))); - } - - Ok(()) - }) - .launch() - .await?; - - handle.node_exit_future.await - }) { - eprintln!("Error: {err:?}"); - std::process::exit(1); - } -} From c9c5c4e0c9f76e5a295a916a42cf278fb43313a0 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 11:44:43 +0200 Subject: [PATCH 06/40] chore: fix cli runner docs --- crates/optimism/cli/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 5fd940e3d4ae..f1dfea08a169 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -119,7 +119,7 @@ impl Cli { /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the - /// [`NodeCommand`](node::NodeCommand). + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). 
/// /// /// # Example From 5083d63fb1c632c1277b4b9729246d516081707a Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 11:51:40 +0200 Subject: [PATCH 07/40] review --- Cargo.toml | 2 ++ bin/reth-bench/Cargo.toml | 2 +- bin/reth/Cargo.toml | 2 +- crates/optimism/bin/Cargo.toml | 10 +++++----- crates/optimism/cli/src/lib.rs | 6 ++---- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9fe3b2aee6fb..acc7062463bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -538,3 +538,5 @@ serial_test = "3" similar-asserts = "1.5.0" test-fuzz = "5" iai-callgrind = "0.11" + +tikv-jemallocator = { version = "0.5.0" } diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index a0bf299f19c2..00a5124fa2da 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -72,7 +72,7 @@ clap = { workspace = true, features = ["derive", "env"] } csv = "1.3.0" [target.'cfg(unix)'.dependencies] -tikv-jemallocator = { version = "0.5.0", optional = true } +tikv-jemallocator = { workspace = true, optional = true } libc = "0.2" [dev-dependencies] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index a09fbbf0c4d1..c60364838889 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -109,7 +109,7 @@ itertools.workspace = true discv5.workspace = true [target.'cfg(unix)'.dependencies] -tikv-jemallocator = { version = "0.5.0", optional = true } +tikv-jemallocator = { workspace = true, optional = true } libc = "0.2" [dev-dependencies] diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 1d54dfcbe94b..1cb7c1ecf460 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -11,13 +11,15 @@ exclude.workspace = true [dependencies] reth-node-builder.workspace = true reth-cli-util.workspace = true -reth-node-optimism.workspace = true reth-optimism-cli.workspace = true reth-tracing.workspace = true +reth-node-optimism = { workspace = true, optional = true, features = [ + "optimism", +] } clap = { workspace = true, features = ["derive", "env"] } [target.'cfg(unix)'.dependencies] -tikv-jemallocator = { version = "0.5.0", optional = true } +tikv-jemallocator = { workspace = true, optional = true } [lints] workspace = true @@ -33,6 +35,4 @@ default = ["jemalloc"] jemalloc = ["dep:tikv-jemallocator"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] -optimism = [ - "reth-optimism-cli/optimism", -] + diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index f1dfea08a169..b22840bdc459 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -7,8 +7,6 @@ )] #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] /// Optimism chain specification parser. 
pub mod chainspec; @@ -126,11 +124,11 @@ impl Cli { /// /// ```no_run /// use reth::cli::Cli; - /// use reth_node_ethereum::EthereumNode; + /// use reth_node_optimism::OptimismNode; /// /// Cli::parse_args() /// .run(|builder, _| async move { - /// let handle = builder.launch_node(EthereumNode::default()).await?; + /// let handle = builder.launch_node(OptimismNode::default()).await?; /// /// handle.wait_for_node_exit().await /// }) From 6d9b4274176eb14720d29bb399aa5e1af9a923f4 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 11 Jul 2024 12:00:59 +0200 Subject: [PATCH 08/40] reth optimism cli feature --- crates/optimism/bin/Cargo.toml | 4 +++- crates/optimism/cli/src/lib.rs | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 1cb7c1ecf460..d3c066a35ac5 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -11,7 +11,9 @@ exclude.workspace = true [dependencies] reth-node-builder.workspace = true reth-cli-util.workspace = true -reth-optimism-cli.workspace = true +reth-optimism-cli = { workspace = true, optional = true, features = [ + "optimism", +] } reth-tracing.workspace = true reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b22840bdc459..6d402e551c84 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -7,6 +7,8 @@ )] #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] /// Optimism chain specification parser. pub mod chainspec; From d2ea7c561c6610cd3714506a735da192e8301dce Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 14:30:34 +0200 Subject: [PATCH 09/40] removing optimism flags from reth bin and op specific commands --- bin/reth/Cargo.toml | 16 ---------------- bin/reth/src/cli/mod.rs | 14 -------------- 2 files changed, 30 deletions(-) diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c60364838889..3a5cc7941e80 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -132,22 +132,6 @@ min-info-logs = ["tracing/release_max_level_info"] min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] -optimism = [ - "dep:reth-optimism-cli", - "reth-optimism-cli?/optimism", - "reth-primitives/optimism", - "reth-rpc/optimism", - "reth-provider/optimism", - "reth-beacon-consensus/optimism", - "reth-blockchain-tree/optimism", - "dep:reth-node-optimism", - "reth-node-core/optimism", - "reth-rpc-eth-types/optimism", -] - -# no-op feature flag for switching between the `optimism` and default functionality in CI matrices -ethereum = [] - [[bin]] name = "reth" path = "src/main.rs" diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index d6fc7d3c4df4..7326c780d1b0 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -154,12 +154,6 @@ impl Cli { Commands::Import(command) => runner.run_blocking_until_ctrl_c( command.execute(|chain_spec| block_executor!(chain_spec)), ), - #[cfg(feature = "optimism")] - Commands::ImportOp(command) => runner.run_blocking_until_ctrl_c(command.execute()), - #[cfg(feature = "optimism")] - Commands::ImportReceiptsOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute()) - } Commands::DumpGenesis(command) => 
runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| { @@ -200,14 +194,6 @@ pub enum Commands { /// This syncs RLP encoded blocks from a file. #[command(name = "import")] Import(import::ImportCommand), - /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. - #[cfg(feature = "optimism")] - #[command(name = "import-op")] - ImportOp(reth_optimism_cli::ImportOpCommand), - /// This imports RLP encoded receipts from a file. - #[cfg(feature = "optimism")] - #[command(name = "import-receipts-op")] - ImportReceiptsOp(reth_optimism_cli::ImportReceiptsOpCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities From 649c3b4e04e78201f94d5ef7f4f8a92b7fbc3390 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 14:36:03 +0200 Subject: [PATCH 10/40] removed cfg flag on block_excutor --- bin/reth/src/macros.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs index 7ff81a0f9058..a8d618cf9bea 100644 --- a/bin/reth/src/macros.rs +++ b/bin/reth/src/macros.rs @@ -3,18 +3,10 @@ /// Creates the block executor type based on the configured feature. /// /// Note(mattsse): This is incredibly horrible and will be replaced -#[cfg(not(feature = "optimism"))] macro_rules! block_executor { ($chain_spec:expr) => { reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) }; } -#[cfg(feature = "optimism")] -macro_rules! block_executor { - ($chain_spec:expr) => { - reth_node_optimism::OpExecutorProvider::optimism($chain_spec) - }; -} - pub(crate) use block_executor; From 39f8de18c2ca2892b36ed49520ba36e53619caa8 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 15:14:41 +0200 Subject: [PATCH 11/40] removing optimism crates and cfg from reth bin --- Cargo.lock | 3 --- bin/reth/Cargo.toml | 5 ----- bin/reth/src/commands/debug_cmd/build_block.rs | 18 ------------------ .../src/commands/debug_cmd/replay_engine.rs | 14 -------------- bin/reth/src/main.rs | 4 ---- 5 files changed, 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41a2d909df20..82dc4c981cdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6249,9 +6249,6 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-node-events", - "reth-node-optimism", - "reth-optimism-cli", - "reth-optimism-primitives", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 3a5cc7941e80..c8465165cd16 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -55,9 +55,6 @@ reth-static-file.workspace = true reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true -reth-node-optimism = { workspace = true, optional = true, features = [ - "optimism", -] } reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true reth-db-common.workspace = true @@ -65,11 +62,9 @@ reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true -reth-optimism-primitives.workspace = true reth-engine-util.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true -reth-optimism-cli = { workspace = true, optional = true } # crypto alloy-rlp.workspace = true diff --git 
a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index af42a7fa6c66..26fe47d59aa0 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -219,17 +219,6 @@ impl Command { let payload_config = PayloadConfig::new( Arc::clone(&best_block), Bytes::default(), - #[cfg(feature = "optimism")] - reth_node_optimism::OptimismPayloadBuilderAttributes::try_new( - best_block.hash(), - reth_rpc_types::engine::OptimismPayloadAttributes { - payload_attributes: payload_attrs, - transactions: None, - no_tx_pool: None, - gas_limit: None, - }, - )?, - #[cfg(not(feature = "optimism"))] reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, @@ -246,13 +235,6 @@ impl Command { None, ); - #[cfg(feature = "optimism")] - let payload_builder = reth_node_optimism::OptimismPayloadBuilder::new( - reth_node_optimism::OptimismEvmConfig::default(), - ) - .compute_pending_block(); - - #[cfg(not(feature = "optimism"))] let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default(); match payload_builder.try_build(args)? { diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 4339a0f76063..dd176cdb5a28 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -107,15 +107,8 @@ impl Command { .await?; // Set up payload builder - #[cfg(not(feature = "optimism"))] let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default(); - // Optimism's payload builder is implemented on the OptimismPayloadBuilder type. - #[cfg(feature = "optimism")] - let payload_builder = reth_node_optimism::OptimismPayloadBuilder::new( - reth_node_optimism::OptimismEvmConfig::default(), - ); - let payload_generator = BasicPayloadJobGenerator::with_builder( blockchain_db.clone(), NoopTransactionPool::default(), @@ -125,13 +118,6 @@ impl Command { payload_builder, ); - #[cfg(feature = "optimism")] - let (payload_service, payload_builder): ( - _, - PayloadBuilderHandle, - ) = PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); - - #[cfg(not(feature = "optimism"))] let (payload_service, payload_builder): ( _, PayloadBuilderHandle, diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index 4eacb9df3f2d..abe4dc34ce08 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -5,10 +5,6 @@ #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; -#[cfg(all(feature = "optimism", not(test)))] -compile_error!("Cannot build the `reth` binary with the `optimism` feature flag enabled. 
Did you mean to build `op-reth`?"); - -#[cfg(not(feature = "optimism"))] fn main() { use reth::cli::Cli; use reth_node_ethereum::EthereumNode; From 5f1e37c0b2d51aed3e2c3ac533aebadd82b389c7 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 15:28:56 +0200 Subject: [PATCH 12/40] removing top level cfg optimism --- crates/optimism/cli/src/lib.rs | 2 -- crates/optimism/node/src/lib.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 6d402e551c84..b22840bdc459 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -7,8 +7,6 @@ )] #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] /// Optimism chain specification parser. pub mod chainspec; diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 68aebd0835fb..b6f1ce7fc35e 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] /// CLI argument parsing for the optimism node. pub mod args; From 25d6e829189559cf07e9459676e50848d89cf02e Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 15:31:35 +0200 Subject: [PATCH 13/40] removing opt feature from cargo.toml --- crates/optimism/bin/Cargo.toml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index d3c066a35ac5..10af13c447f4 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -11,13 +11,10 @@ exclude.workspace = true [dependencies] reth-node-builder.workspace = true reth-cli-util.workspace = true -reth-optimism-cli = { workspace = true, optional = true, features = [ - "optimism", -] } +reth-optimism-cli.workspace = true reth-tracing.workspace = true -reth-node-optimism = { workspace = true, optional = true, features = [ - "optimism", -] } +reth-node-optimism.workspace = true + clap = { workspace = true, features = ["derive", "env"] } [target.'cfg(unix)'.dependencies] From d4004ec07d76c709970b5893e5199e735e3f800d Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 15:38:44 +0200 Subject: [PATCH 14/40] still removing cfg flags --- crates/optimism/consensus/src/lib.rs | 2 -- crates/optimism/evm/src/lib.rs | 2 -- crates/optimism/payload/src/lib.rs | 2 -- 3 files changed, 6 deletions(-) diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 61aa23bde15f..d6eb28dbb90e 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. 
-#![cfg(feature = "optimism")] use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 8a56014c5688..7c4371b699c0 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] use reth_chainspec::ChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 2bb60594287a..645b997f6fd9 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -8,8 +8,6 @@ #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] pub mod builder; pub use builder::OptimismPayloadBuilder; From 9ec5af10c4549ba856ac93001f4f350d5591ab15 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 15:44:23 +0200 Subject: [PATCH 15/40] removed flag to calculate_receipt_root_opt --- crates/primitives/src/proofs.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index ab57be8ffd1d..0ae707a90aa7 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -38,7 +38,6 @@ pub fn calculate_requests_root(requests: &[Request]) -> B256 { } /// Calculates the receipt root for a header. -#[cfg(feature = "optimism")] pub fn calculate_receipt_root_optimism( receipts: &[ReceiptWithBloom], chain_spec: &reth_chainspec::ChainSpec, From bad053b5c0c3c0fdfa27cf896f9592dbcc2c1219 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 16:03:20 +0200 Subject: [PATCH 16/40] enabling optimism feature for reth-primitives --- crates/optimism/consensus/Cargo.toml | 2 +- crates/primitives/src/proofs.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index bd538a167f10..6be3d67fb4d5 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-chainspec.workspace = true -reth-primitives.workspace = true +reth-primitives = { workspace = true, features = [ "optimism" ]} reth-consensus.workspace = true tracing.workspace = true diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 0ae707a90aa7..ab57be8ffd1d 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -38,6 +38,7 @@ pub fn calculate_requests_root(requests: &[Request]) -> B256 { } /// Calculates the receipt root for a header. 
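+/// Only available when the `optimism` feature is enabled.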
+#[cfg(feature = "optimism")] pub fn calculate_receipt_root_optimism( receipts: &[ReceiptWithBloom], chain_spec: &reth_chainspec::ChainSpec, From dbe771d314bd5e0c1c01dd62b55d2f5bc6e4e8ab Mon Sep 17 00:00:00 2001 From: Loocapro Date: Fri, 12 Jul 2024 16:12:14 +0200 Subject: [PATCH 17/40] removed block executor macro --- bin/reth/src/cli/mod.rs | 13 ++++++------- bin/reth/src/commands/debug_cmd/build_block.rs | 7 ++++--- bin/reth/src/commands/debug_cmd/execution.rs | 5 +++-- bin/reth/src/commands/debug_cmd/in_memory_merkle.rs | 4 ++-- bin/reth/src/commands/debug_cmd/merkle.rs | 5 +++-- bin/reth/src/commands/debug_cmd/replay_engine.rs | 5 +++-- bin/reth/src/lib.rs | 1 - bin/reth/src/macros.rs | 12 ------------ 8 files changed, 21 insertions(+), 31 deletions(-) delete mode 100644 bin/reth/src/macros.rs diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 7326c780d1b0..f8966f694d9d 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,7 +6,6 @@ use crate::{ LogArgs, }, commands::debug_cmd, - macros::block_executor, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; @@ -19,6 +18,7 @@ use reth_cli_commands::{ use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; +use reth_node_ethereum::EthExecutorProvider; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; use tracing::info; @@ -151,14 +151,13 @@ impl Cli { } Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::InitState(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Import(command) => runner.run_blocking_until_ctrl_c( - command.execute(|chain_spec| block_executor!(chain_spec)), - ), + Commands::Import(command) => { + runner.run_blocking_until_ctrl_c(command.execute(EthExecutorProvider::ethereum)) + } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute(ctx, |chain_spec| block_executor!(chain_spec)) - }), + Commands::Stage(command) => runner + .run_command_until_exit(|ctx| command.execute(ctx, EthExecutorProvider::ethereum)), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 26fe47d59aa0..e98c22b1dc90 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,5 +1,4 @@ //! Command for debugging block building. 
-use crate::macros::block_executor; use alloy_rlp::Decodable; use clap::Parser; use eyre::Context; @@ -19,6 +18,7 @@ use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; use reth_node_api::PayloadBuilderAttributes; +use reth_node_ethereum::EthExecutorProvider; use reth_payload_builder::database::CachedReads; use reth_primitives::{ constants::eip4844::LoadKzgSettingsError, revm_primitives::KzgSettings, Address, @@ -118,7 +118,7 @@ impl Command { let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); // configure blockchain tree let tree_externals = @@ -251,7 +251,8 @@ impl Command { SealedBlockWithSenders::new(block.clone(), senders).unwrap(); let db = StateProviderDatabase::new(blockchain_db.latest()?); - let executor = block_executor!(provider_factory.chain_spec()).executor(db); + let executor = + EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); let BlockExecutionOutput { state, receipts, requests, .. } = executor.execute((&block_with_senders.clone().unseal(), U256::MAX).into())?; diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 3298fe5d6367..175f1e0571d2 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,6 @@ //! Command for debugging execution. -use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; +use crate::{args::NetworkArgs, utils::get_single_header}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; @@ -19,6 +19,7 @@ use reth_exex::ExExManagerHandle; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, @@ -80,7 +81,7 @@ impl Command { let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index e362ab8d440d..d5ea36a59ecc 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -2,7 +2,6 @@ use crate::{ args::NetworkArgs, - macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; @@ -17,6 +16,7 @@ use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, @@ -129,7 +129,7 @@ impl Command { 
provider_factory.static_file_provider(), )); - let executor = block_executor!(provider_factory.chain_spec()).executor(db); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); let merkle_block_td = provider.header_td_by_number(merkle_block_number)?.unwrap_or_default(); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index b6cad5fc3dee..1b330ea6a0f4 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,5 +1,5 @@ //! Command for debugging merkle trie calculation. -use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; +use crate::{args::NetworkArgs, utils::get_single_header}; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -14,6 +14,7 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; +use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, LatestStateProviderRef, @@ -91,7 +92,7 @@ impl Command { ) .await?; - let executor_provider = block_executor!(provider_factory.chain_spec()); + let executor_provider = EthExecutorProvider::ethereum(provider_factory.chain_spec()); // Initialize the fetch client info!(target: "reth::cli", target_block_number=self.to, "Downloading tip of block range"); diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index dd176cdb5a28..2cb6e6d8f77f 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -1,4 +1,4 @@ -use crate::{args::NetworkArgs, macros::block_executor}; +use crate::args::NetworkArgs; use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; @@ -16,6 +16,7 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::NetworkHandle; use reth_network_api::NetworkInfo; +use reth_node_ethereum::EthExecutorProvider; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, @@ -78,7 +79,7 @@ impl Command { let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - let executor = block_executor!(provider_factory.chain_spec()); + let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); // Configure blockchain tree let tree_externals = diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index aa8cb9f0597b..2e1760d9888a 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -31,7 +31,6 @@ pub mod cli; pub mod commands; -mod macros; /// Re-exported utils. pub mod utils { diff --git a/bin/reth/src/macros.rs b/bin/reth/src/macros.rs deleted file mode 100644 index a8d618cf9bea..000000000000 --- a/bin/reth/src/macros.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Helper macros - -/// Creates the block executor type based on the configured feature. -/// -/// Note(mattsse): This is incredibly horrible and will be replaced -macro_rules! 
block_executor { - ($chain_spec:expr) => { - reth_node_ethereum::EthExecutorProvider::ethereum($chain_spec) - }; -} - -pub(crate) use block_executor; From 963f41d7f7647b81ecc798a928b2590bbb8be201 Mon Sep 17 00:00:00 2001 From: Loocapro Date: Thu, 18 Jul 2024 10:11:50 +0200 Subject: [PATCH 18/40] rebase --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 82dc4c981cdb..1b4fbd82479e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5233,7 +5233,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.0.1" +version = "1.0.2" dependencies = [ "clap", "reth-cli-util", From 5a5abeb40aa45fc8fdf6e90d402a130ba4c6a7c2 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:49:22 +0200 Subject: [PATCH 19/40] merge origin/main --- .github/assets/check_no_std.sh | 20 +- .github/workflows/lint.yml | 4 +- Cargo.lock | 479 ++-- Cargo.toml | 16 +- bin/reth-bench/Cargo.toml | 2 +- bin/reth-bench/src/bench/new_payload_fcu.rs | 8 +- bin/reth/Cargo.toml | 7 +- .../src/commands/debug_cmd/build_block.rs | 8 +- .../commands/debug_cmd/in_memory_merkle.rs | 16 +- bin/reth/src/commands/debug_cmd/merkle.rs | 9 +- bin/reth/src/engine2.rs | 39 + bin/reth/src/lib.rs | 4 +- book/developers/exex/hello-world.md | 2 +- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/block_indices.rs | 5 +- crates/blockchain-tree/src/blockchain_tree.rs | 8 +- crates/blockchain-tree/src/state.rs | 2 +- crates/chain-state/Cargo.toml | 47 + .../src}/chain_info.rs | 0 crates/chain-state/src/in_memory.rs | 964 +++++++++ crates/chain-state/src/lib.rs | 29 + .../src}/memory_overlay.rs | 41 +- .../src/notifications.rs} | 4 +- crates/chain-state/src/test_utils.rs | 102 + crates/cli/commands/Cargo.toml | 5 +- crates/cli/commands/src/common.rs | 2 - crates/cli/commands/src/node.rs | 3 +- .../cli/commands/src/recover/storage_tries.rs | 1 + crates/cli/commands/src/stage/run.rs | 33 +- crates/cli/util/Cargo.toml | 3 - crates/cli/util/src/parsers.rs | 2 +- crates/consensus/auto-seal/src/task.rs | 2 +- crates/consensus/beacon/src/engine/event.rs | 11 + crates/consensus/beacon/src/engine/handle.rs | 3 + .../beacon/src/engine/invalid_headers.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 4 +- crates/e2e-test-utils/src/engine_api.rs | 2 +- crates/e2e-test-utils/src/payload.rs | 4 +- crates/e2e-test-utils/src/rpc.rs | 11 +- crates/engine/primitives/src/lib.rs | 1 + crates/engine/tree/Cargo.toml | 8 +- crates/engine/tree/src/backfill.rs | 40 +- crates/engine/tree/src/chain.rs | 66 +- crates/engine/tree/src/database.rs | 261 --- crates/engine/tree/src/engine.rs | 64 +- crates/engine/tree/src/lib.rs | 6 +- crates/engine/tree/src/persistence.rs | 420 +++- crates/engine/tree/src/static_files.rs | 272 --- crates/engine/tree/src/test_utils.rs | 49 +- crates/engine/tree/src/tree.rs | 1923 +++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 1328 ------------ crates/engine/tree/test-data/holesky/1.rlp | 1 + crates/engine/tree/test-data/holesky/2.rlp | 1 + crates/ethereum-forks/Cargo.toml | 1 - crates/ethereum-forks/src/hardfork/dev.rs | 2 + crates/ethereum/engine/Cargo.toml | 14 +- crates/ethereum/engine/src/service.rs | 98 +- crates/ethereum/evm/src/execute.rs | 10 +- crates/ethereum/evm/src/lib.rs | 7 +- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/launch.rs | 102 +- crates/ethereum/node/tests/it/builder.rs | 10 +- crates/ethereum/payload/src/lib.rs | 37 +- crates/evm/execution-errors/Cargo.toml | 1 + 
crates/evm/execution-errors/src/lib.rs | 2 +- crates/evm/execution-errors/src/trie.rs | 30 +- crates/evm/execution-types/src/execute.rs | 41 + crates/evm/execution-types/src/lib.rs | 9 +- crates/evm/src/builder.rs | 150 ++ crates/evm/src/either.rs | 6 +- crates/evm/src/execute.rs | 47 +- crates/evm/src/lib.rs | 15 +- crates/evm/src/noop.rs | 6 +- crates/evm/src/provider.rs | 2 +- crates/evm/src/system_calls.rs | 13 +- crates/exex/exex/Cargo.toml | 23 +- crates/exex/exex/src/backfill/factory.rs | 79 + .../exex/src/{backfill.rs => backfill/job.rs} | 273 +-- crates/exex/exex/src/backfill/mod.rs | 9 + crates/exex/exex/src/backfill/stream.rs | 161 ++ crates/exex/exex/src/backfill/test_utils.rs | 162 ++ .../downloaders/src/receipt_file_client.rs | 2 +- crates/net/eth-wire-types/Cargo.toml | 1 - crates/net/eth-wire/Cargo.toml | 1 - crates/net/eth-wire/src/p2pstream.rs | 19 +- crates/net/network-api/src/lib.rs | 4 +- crates/net/network/src/lib.rs | 4 +- crates/node/api/src/node.rs | 2 +- crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/builder/mod.rs | 101 +- crates/node/builder/src/launch/common.rs | 132 +- crates/node/builder/src/launch/mod.rs | 25 +- crates/node/core/Cargo.toml | 25 +- crates/node/core/src/args/mod.rs | 2 +- crates/node/core/src/args/pruning.rs | 5 +- crates/node/core/src/lib.rs | 5 - crates/node/core/src/metrics/mod.rs | 4 - .../core/src/metrics/prometheus_exporter.rs | 317 --- crates/node/core/src/node_config.rs | 59 +- crates/node/core/src/version.rs | 9 +- crates/node/events/src/node.rs | 54 +- crates/node/metrics/Cargo.toml | 52 + crates/node/metrics/src/hooks.rs | 126 ++ crates/node/metrics/src/lib.rs | 18 + crates/node/metrics/src/recorder.rs | 58 + crates/node/metrics/src/server.rs | 270 +++ .../src/version.rs} | 15 - crates/node/metrics/src/version_metrics.rs | 75 + .../cli/src/commands/import_receipts.rs | 13 +- crates/optimism/evm/src/lib.rs | 6 +- crates/optimism/node/Cargo.toml | 1 - crates/optimism/rpc/Cargo.toml | 2 +- crates/optimism/rpc/src/error.rs | 70 +- crates/optimism/rpc/src/eth/block.rs | 19 +- crates/optimism/rpc/src/eth/call.rs | 20 +- crates/optimism/rpc/src/eth/mod.rs | 17 +- crates/optimism/rpc/src/eth/receipt.rs | 17 +- crates/optimism/rpc/src/eth/transaction.rs | 6 +- crates/payload/builder/src/events.rs | 3 +- crates/payload/builder/src/lib.rs | 6 +- crates/primitives-traits/Cargo.toml | 1 - crates/primitives-traits/src/account.rs | 18 +- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives/Cargo.toml | 2 - crates/primitives/src/transaction/eip4844.rs | 2 + crates/primitives/src/transaction/eip7702.rs | 72 +- crates/primitives/src/transaction/tx_type.rs | 3 +- crates/prune/types/Cargo.toml | 1 - crates/revm/src/batch.rs | 11 +- crates/revm/src/state_change.rs | 10 +- crates/revm/src/test_utils.rs | 5 +- crates/rpc/ipc/src/server/mod.rs | 7 +- crates/rpc/rpc-builder/src/auth.rs | 2 +- crates/rpc/rpc-builder/src/cors.rs | 13 +- crates/rpc/rpc-builder/src/lib.rs | 27 +- crates/rpc/rpc-builder/src/metrics.rs | 8 +- crates/rpc/rpc-builder/tests/it/main.rs | 1 + crates/rpc/rpc-builder/tests/it/middleware.rs | 80 + crates/rpc/rpc-engine-api/src/engine_api.rs | 4 +- crates/rpc/rpc-eth-api/Cargo.toml | 2 +- crates/rpc/rpc-eth-api/src/core.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 91 +- .../rpc-eth-api/src/helpers/blocking_task.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 143 +- crates/rpc/rpc-eth-api/src/helpers/error.rs | 88 + crates/rpc/rpc-eth-api/src/helpers/fee.rs | 45 +- 
crates/rpc/rpc-eth-api/src/helpers/mod.rs | 17 +- .../rpc-eth-api/src/helpers/pending_block.rs | 57 +- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 18 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 83 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 64 +- .../rpc-eth-api/src/helpers/transaction.rs | 126 +- crates/rpc/rpc-eth-api/src/helpers/types.rs | 17 + crates/rpc/rpc-eth-api/src/lib.rs | 4 + crates/rpc/rpc-eth-types/Cargo.toml | 11 - .../rpc-eth-types/src/cache/multi_consumer.rs | 13 +- crates/rpc/rpc-eth-types/src/error.rs | 56 +- crates/rpc/rpc-layer/src/auth_layer.rs | 2 + crates/rpc/rpc-layer/src/lib.rs | 2 + crates/rpc/rpc-server-types/src/constants.rs | 3 + crates/rpc/rpc-types/Cargo.toml | 3 +- crates/rpc/rpc-types/src/lib.rs | 7 +- crates/rpc/rpc-types/src/net.rs | 13 - crates/rpc/rpc-types/src/peer.rs | 4 - crates/rpc/rpc/Cargo.toml | 1 - crates/rpc/rpc/src/debug.rs | 160 +- crates/rpc/rpc/src/eth/bundle.rs | 38 +- crates/rpc/rpc/src/eth/core.rs | 14 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 4 +- crates/rpc/rpc/src/eth/helpers/state.rs | 6 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 6 +- crates/rpc/rpc/src/otterscan.rs | 12 +- crates/rpc/rpc/src/trace.rs | 189 +- crates/stages/api/src/pipeline/ctrl.rs | 2 +- crates/stages/api/src/pipeline/set.rs | 16 +- crates/stages/stages/Cargo.toml | 1 + crates/stages/stages/benches/setup/mod.rs | 10 +- crates/stages/stages/src/stages/bodies.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 6 +- .../stages/src/stages/hashing_storage.rs | 2 +- crates/stages/stages/src/stages/headers.rs | 2 +- crates/stages/stages/src/stages/merkle.rs | 23 +- crates/stages/stages/src/stages/utils.rs | 2 +- .../stages/stages/src/test_utils/test_db.rs | 8 +- crates/stages/types/Cargo.toml | 1 - .../static-file/src/segments/headers.rs | 2 +- .../static-file/src/segments/transactions.rs | 2 +- crates/storage/codecs/Cargo.toml | 1 - crates/storage/db-api/Cargo.toml | 3 +- crates/storage/db-common/Cargo.toml | 1 + crates/storage/db-common/src/init.rs | 23 +- crates/storage/db/Cargo.toml | 22 +- crates/storage/db/src/lib.rs | 3 + crates/storage/db/src/tables/mod.rs | 1 + crates/storage/errors/Cargo.toml | 1 + crates/storage/errors/src/provider.rs | 3 + crates/storage/provider/Cargo.toml | 9 +- .../src/bundle_state/execution_outcome.rs | 1036 --------- .../storage/provider/src/bundle_state/mod.rs | 7 +- .../src/bundle_state/state_changes.rs | 88 - .../src/bundle_state/state_reverts.rs | 100 +- crates/storage/provider/src/lib.rs | 9 +- .../src/providers/blockchain_provider.rs | 826 +++++++ .../provider/src/providers/database/mod.rs | 5 +- .../src/providers/database/provider.rs | 314 ++- crates/storage/provider/src/providers/mod.rs | 27 +- .../src/providers/state/historical.rs | 16 +- .../provider/src/providers/state/latest.rs | 14 +- .../src/providers/static_file/manager.rs | 12 + .../provider/src/providers/static_file/mod.rs | 2 +- .../src/providers/static_file/writer.rs | 17 +- .../storage/provider/src/test_utils/events.rs | 35 - crates/storage/provider/src/test_utils/mod.rs | 47 +- .../storage/provider/src/test_utils/noop.rs | 14 +- crates/storage/provider/src/traits/full.rs | 7 +- crates/storage/provider/src/traits/mod.rs | 12 +- crates/storage/provider/src/traits/state.rs | 38 +- .../provider/src/traits/tree_viewer.rs | 3 +- crates/storage/provider/src/traits/trie.rs | 36 + crates/storage/provider/src/writer/mod.rs | 1228 ++++++++++- .../provider/src/writer/static_file.rs | 6 +- crates/storage/storage-api/src/block.rs | 12 +- 
.../storage-api/src/stage_checkpoint.rs | 4 + crates/transaction-pool/Cargo.toml | 2 +- crates/trie/common/Cargo.toml | 1 - crates/trie/common/src/account.rs | 2 +- crates/trie/common/src/lib.rs | 2 +- crates/trie/common/src/proofs.rs | 140 +- crates/trie/db/Cargo.toml | 78 + crates/trie/db/src/lib.rs | 9 + crates/trie/db/src/proof.rs | 46 + crates/trie/db/src/state.rs | 208 ++ crates/trie/db/src/storage.rs | 39 + crates/trie/db/tests/fuzz_in_memory_nodes.rs | 59 + crates/trie/db/tests/proof.rs | 288 +++ crates/trie/db/tests/trie.rs | 773 +++++++ crates/trie/parallel/Cargo.toml | 1 + crates/trie/parallel/benches/root.rs | 16 +- crates/trie/parallel/src/async_root.rs | 15 +- crates/trie/parallel/src/parallel_root.rs | 18 +- crates/trie/trie/src/hashed_cursor/default.rs | 43 +- crates/trie/trie/src/hashed_cursor/mod.rs | 2 +- .../trie/trie/src/hashed_cursor/post_state.rs | 52 +- crates/trie/trie/src/proof.rs | 444 +--- crates/trie/trie/src/state.rs | 138 +- crates/trie/trie/src/trie.rs | 901 +------- .../trie/src/trie_cursor/database_cursors.rs | 66 +- crates/trie/trie/src/trie_cursor/in_memory.rs | 62 - crates/trie/trie/src/updates.rs | 147 +- docs/crates/network.md | 2 +- .../beacon-api-sidecar-fetcher/src/main.rs | 41 +- examples/custom-evm/src/main.rs | 6 +- examples/db-access/src/main.rs | 11 +- examples/stateful-precompile/src/main.rs | 6 +- testing/ef-tests/src/models.rs | 10 +- testing/testing-utils/src/generators.rs | 2 +- 256 files changed, 11451 insertions(+), 7060 deletions(-) create mode 100644 bin/reth/src/engine2.rs create mode 100644 crates/chain-state/Cargo.toml rename crates/{storage/provider/src/providers => chain-state/src}/chain_info.rs (100%) create mode 100644 crates/chain-state/src/in_memory.rs create mode 100644 crates/chain-state/src/lib.rs rename crates/{engine/tree/src/tree => chain-state/src}/memory_overlay.rs (84%) rename crates/{storage/provider/src/traits/chain.rs => chain-state/src/notifications.rs} (98%) create mode 100644 crates/chain-state/src/test_utils.rs delete mode 100644 crates/engine/tree/src/database.rs delete mode 100644 crates/engine/tree/src/static_files.rs create mode 100644 crates/engine/tree/src/tree.rs delete mode 100644 crates/engine/tree/src/tree/mod.rs create mode 100644 crates/engine/tree/test-data/holesky/1.rlp create mode 100644 crates/engine/tree/test-data/holesky/2.rlp create mode 100644 crates/evm/execution-types/src/execute.rs create mode 100644 crates/evm/src/builder.rs create mode 100644 crates/exex/exex/src/backfill/factory.rs rename crates/exex/exex/src/{backfill.rs => backfill/job.rs} (57%) create mode 100644 crates/exex/exex/src/backfill/mod.rs create mode 100644 crates/exex/exex/src/backfill/stream.rs create mode 100644 crates/exex/exex/src/backfill/test_utils.rs delete mode 100644 crates/node/core/src/metrics/mod.rs delete mode 100644 crates/node/core/src/metrics/prometheus_exporter.rs create mode 100644 crates/node/metrics/Cargo.toml create mode 100644 crates/node/metrics/src/hooks.rs create mode 100644 crates/node/metrics/src/lib.rs create mode 100644 crates/node/metrics/src/recorder.rs create mode 100644 crates/node/metrics/src/server.rs rename crates/node/{core/src/metrics/version_metrics.rs => metrics/src/version.rs} (71%) create mode 100644 crates/node/metrics/src/version_metrics.rs create mode 100644 crates/rpc/rpc-builder/tests/it/middleware.rs create mode 100644 crates/rpc/rpc-eth-api/src/helpers/error.rs create mode 100644 crates/rpc/rpc-eth-api/src/helpers/types.rs delete mode 100644 
crates/rpc/rpc-types/src/net.rs delete mode 100644 crates/rpc/rpc-types/src/peer.rs delete mode 100644 crates/storage/provider/src/bundle_state/execution_outcome.rs delete mode 100644 crates/storage/provider/src/bundle_state/state_changes.rs create mode 100644 crates/storage/provider/src/providers/blockchain_provider.rs delete mode 100644 crates/storage/provider/src/test_utils/events.rs create mode 100644 crates/storage/provider/src/traits/trie.rs create mode 100644 crates/trie/db/Cargo.toml create mode 100644 crates/trie/db/src/lib.rs create mode 100644 crates/trie/db/src/proof.rs create mode 100644 crates/trie/db/src/state.rs create mode 100644 crates/trie/db/src/storage.rs create mode 100644 crates/trie/db/tests/fuzz_in_memory_nodes.rs create mode 100644 crates/trie/db/tests/proof.rs create mode 100644 crates/trie/db/tests/trie.rs diff --git a/.github/assets/check_no_std.sh b/.github/assets/check_no_std.sh index f19e39ddac90..441ef5d0d48d 100755 --- a/.github/assets/check_no_std.sh +++ b/.github/assets/check_no_std.sh @@ -3,21 +3,23 @@ set -eo pipefail # TODO no_std_packages=( -# reth-codecs -# reth-consensus +# The following were confirmed not working in the past, but could be enabled if issues have been resolved # reth-db -# reth-errors -# reth-ethereum-forks -# reth-evm -# reth-evm-ethereum -# reth-network-peers # reth-primitives -# reth-primitives-traits # reth-revm +# reth-evm +# reth-evm-ethereum +# reth-consensus +# the following are confirmed working + reth-errors + reth-ethereum-forks + reth-network-peers + reth-primitives-traits + reth-codecs ) for package in "${no_std_packages[@]}"; do - cmd="cargo +stable build -p $package --target riscv32imac-unknown-none-elf --no-default-features" + cmd="cargo +stable build -p $package --target wasm32-wasip1 --no-default-features" if [ -n "$CI" ]; then echo "::group::$cmd" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3aefc21c8389..b108ddb96b48 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -52,7 +52,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: - target: riscv32imac-unknown-none-elf + target: wasm32-wasip1 - uses: taiki-e/install-action@cargo-hack - uses: Swatinem/rust-cache@v2 with: @@ -168,7 +168,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - name: Ensure no arbitrary or proptest dependency on default build run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 - + lint-success: name: lint success runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 1b4fbd82479e..42d5518233c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,7 +151,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.13", + "winnow 0.6.14", ] [[package]] @@ -294,7 +294,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap", + "dashmap 5.5.3", "futures", "futures-utils-wasm", "lru", @@ -598,7 +598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" dependencies = [ "serde", - "winnow 0.6.13", + "winnow 0.6.14", ] [[package]] @@ -929,9 +929,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" @@ 
-1324,7 +1324,7 @@ dependencies = [ "boa_string", "bytemuck", "cfg-if", - "dashmap", + "dashmap 5.5.3", "fast-float", "hashbrown 0.14.5", "icu_normalizer", @@ -1598,13 +1598,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.2" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47de7e88bbbd467951ae7f5a6f34f70d1b4d9cfce53d5fd70f74ebe118b3db56" +checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" dependencies = [ "jobserver", "libc", - "once_cell", ] [[package]] @@ -2268,6 +2267,20 @@ dependencies = [ "parking_lot_core 0.9.10", ] +[[package]] +name = "dashmap" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -2559,7 +2572,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "rayon", @@ -3992,9 +4005,9 @@ checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] name = "inferno" -version = "0.11.19" +version = "0.11.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" +checksum = "7c77a3ae7d4761b9c64d2c030f70746ceb8cfba32dce0325a56792e0a4816c31" dependencies = [ "ahash", "indexmap 2.2.6", @@ -4443,9 +4456,9 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -5233,7 +5246,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.0.2" +version = "1.0.3" dependencies = [ "clap", "reth-cli-util", @@ -5384,7 +5397,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.2", + "redox_syscall 0.5.3", "smallvec", "windows-targets 0.52.6", ] @@ -5602,9 +5615,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -6032,9 +6045,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.0.2" +version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e29830cbb1290e404f24c73af91c5d8d631ce7e128691e9477556b540cd01ecd" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" dependencies = [ "bitflags 2.6.0", ] @@ -6076,9 +6089,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags 2.6.0", ] @@ -6205,7 +6218,7 @@ dependencies = [ 
[[package]] name = "reth" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "aquamarine", @@ -6249,6 +6262,7 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-node-events", + "reth-node-metrics", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", @@ -6271,6 +6285,7 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "reth-trie", + "reth-trie-db", "serde", "serde_json", "similar-asserts", @@ -6283,7 +6298,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -6309,7 +6324,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "futures-core", @@ -6331,7 +6346,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "assert_matches", @@ -6382,7 +6397,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6423,7 +6438,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "aquamarine", @@ -6450,6 +6465,7 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", + "reth-trie-db", "reth-trie-parallel", "tokio", "tracing", @@ -6457,7 +6473,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -6466,9 +6482,30 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-chain-state" +version = "1.0.3" +dependencies = [ + "auto_impl", + "derive_more", + "parking_lot 0.12.3", + "pin-project", + "rand 0.8.5", + "reth-chainspec", + "reth-errors", + "reth-execution-types", + "reth-primitives", + "reth-storage-api", + "reth-trie", + "revm", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "reth-chainspec" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-chains", "alloy-eips", @@ -6492,7 +6529,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.0.2" +version = "1.0.3" dependencies = [ "clap", "eyre", @@ -6502,7 +6539,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.0.2" +version = "1.0.3" dependencies = [ "ahash", "arbitrary", @@ -6516,7 +6553,6 @@ dependencies = [ "futures", "human_bytes", "itertools 0.13.0", - "metrics-process", "proptest", "proptest-arbitrary-interop", "ratatui", @@ -6539,6 +6575,7 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-node-events", + "reth-node-metrics", "reth-primitives", "reth-provider", "reth-prune", @@ -6546,6 +6583,7 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-trie", + "reth-trie-db", "serde", "serde_json", "tokio", @@ -6555,7 +6593,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-tasks", "tokio", @@ -6564,13 +6602,12 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "alloy-primitives", "eyre", "libc", - "proptest", "rand 0.8.5", "reth-fs-util", "secp256k1", @@ -6579,7 +6616,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6591,7 +6628,6 @@ dependencies = [ "modular-bitfield", 
"proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "reth-codecs-derive", "serde", "serde_json", @@ -6600,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.0.2" +version = "1.0.3" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6611,7 +6647,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.0.2" +version = "1.0.3" dependencies = [ "confy", "humantime-serde", @@ -6625,7 +6661,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.0.2" +version = "1.0.3" dependencies = [ "auto_impl", "reth-primitives", @@ -6634,7 +6670,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.0.2" +version = "1.0.3" dependencies = [ "mockall", "rand 0.8.5", @@ -6646,7 +6682,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6668,7 +6704,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.0.2" +version = "1.0.3" dependencies = [ "arbitrary", "assert_matches", @@ -6707,7 +6743,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "arbitrary", "assert_matches", @@ -6722,7 +6758,6 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-primitives", @@ -6738,7 +6773,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "boyer-moore-magiclen", @@ -6755,6 +6790,7 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-trie", + "reth-trie-db", "serde", "serde_json", "thiserror", @@ -6763,7 +6799,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6790,7 +6826,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6816,7 +6852,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6844,7 +6880,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "assert_matches", @@ -6879,7 +6915,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-network", @@ -6911,7 +6947,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.0.2" +version = "1.0.3" dependencies = [ "aes 0.8.4", "alloy-primitives", @@ -6941,7 +6977,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-payload-primitives", @@ -6950,8 +6986,9 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.0.2" +version = "1.0.3" dependencies = [ + "alloy-rlp", "aquamarine", "assert_matches", "futures", @@ -6961,6 +6998,7 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", + "reth-chain-state", "reth-chainspec", "reth-consensus", "reth-db", @@ -6982,6 +7020,7 @@ dependencies = [ "reth-prune-types", "reth-revm", "reth-rpc-types", + "reth-rpc-types-compat", "reth-stages", "reth-stages-api", "reth-stages-types", @@ -6998,7 +7037,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.0.2" +version = 
"1.0.3" dependencies = [ "eyre", "futures", @@ -7016,7 +7055,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7028,7 +7067,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "arbitrary", @@ -7039,7 +7078,6 @@ dependencies = [ "pin-project", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs", @@ -7062,7 +7100,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-chains", "alloy-genesis", @@ -7072,7 +7110,6 @@ dependencies = [ "derive_more", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", @@ -7083,7 +7120,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "clap", @@ -7096,7 +7133,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-consensus", @@ -7107,16 +7144,27 @@ dependencies = [ [[package]] name = "reth-ethereum-engine" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures", "pin-project", "reth-beacon-consensus", + "reth-blockchain-tree", "reth-chainspec", + "reth-consensus", "reth-db-api", "reth-engine-tree", "reth-ethereum-engine-primitives", + "reth-evm", + "reth-evm-ethereum", + "reth-exex-types", "reth-network-p2p", + "reth-payload-builder", + "reth-payload-validator", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-prune-types", "reth-stages-api", "reth-tasks", "thiserror", @@ -7126,7 +7174,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "reth-chainspec", @@ -7144,7 +7192,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7163,7 +7211,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-basic-payload-builder", "reth-errors", @@ -7181,7 +7229,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "rayon", @@ -7191,7 +7239,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "auto_impl", @@ -7209,7 +7257,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "alloy-sol-types", @@ -7229,7 +7277,7 @@ dependencies = [ [[package]] name = "reth-evm-optimism" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-consensus-common", @@ -7249,10 +7297,11 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "alloy-primitives", + "alloy-rlp", "reth-consensus", "reth-prune-types", "reth-storage-errors", @@ -7262,7 +7311,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7276,9 +7325,10 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.0.2" +version = "1.0.3" dependencies = [ "eyre", + "futures", 
"metrics", "reth-blockchain-tree", "reth-chainspec", @@ -7309,7 +7359,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.0.2" +version = "1.0.3" dependencies = [ "eyre", "futures-util", @@ -7339,7 +7389,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "reth-provider", @@ -7348,7 +7398,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.0.2" +version = "1.0.3" dependencies = [ "serde", "serde_json", @@ -7357,7 +7407,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.0.2" +version = "1.0.3" dependencies = [ "async-trait", "bytes", @@ -7379,12 +7429,12 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.0.2" +version = "1.0.3" dependencies = [ "bitflags 2.6.0", "byteorder", "criterion", - "dashmap", + "dashmap 6.0.1", "derive_more", "indexmap 2.2.6", "parking_lot 0.12.3", @@ -7399,7 +7449,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.0.2" +version = "1.0.3" dependencies = [ "bindgen", "cc", @@ -7407,7 +7457,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures", "metrics", @@ -7418,7 +7468,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "1.0.2" +version = "1.0.3" dependencies = [ "metrics", "once_cell", @@ -7432,14 +7482,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures-util", "reqwest", @@ -7451,7 +7501,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -7509,7 +7559,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7523,7 +7573,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.0.2" +version = "1.0.3" dependencies = [ "auto_impl", "futures", @@ -7541,7 +7591,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7557,7 +7607,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "humantime-serde", "reth-net-banlist", @@ -7570,7 +7620,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.0.2" +version = "1.0.3" dependencies = [ "anyhow", "bincode", @@ -7591,7 +7641,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-db-api", "reth-engine-primitives", @@ -7606,7 +7656,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "aquamarine", "backon", @@ -7635,6 +7685,7 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-node-events", + "reth-node-metrics", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7659,7 +7710,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -7669,15 +7720,7 @@ dependencies = [ "dirs-next", "eyre", "futures", - "http 1.1.0", "humantime", - "jsonrpsee", - "metrics", - "metrics-exporter-prometheus", - "metrics-process", - "metrics-util", - "once_cell", 
- "procfs", "proptest", "rand 0.8.5", "reth-chainspec", @@ -7689,7 +7732,6 @@ dependencies = [ "reth-discv4", "reth-discv5", "reth-fs-util", - "reth-metrics", "reth-net-nat", "reth-network", "reth-network-p2p", @@ -7705,22 +7747,19 @@ dependencies = [ "reth-rpc-types-compat", "reth-stages-types", "reth-storage-errors", - "reth-tasks", "reth-tracing", "reth-transaction-pool", "secp256k1", "serde_json", "shellexpand", - "tikv-jemalloc-ctl", "tokio", - "tower", "tracing", "vergen", ] [[package]] name = "reth-node-ethereum" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7731,6 +7770,7 @@ dependencies = [ "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", + "reth-blockchain-tree", "reth-chainspec", "reth-consensus", "reth-db", @@ -7761,7 +7801,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rpc-types-engine", "futures", @@ -7781,9 +7821,37 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-node-metrics" +version = "1.0.3" +dependencies = [ + "eyre", + "http 1.1.0", + "jsonrpsee", + "metrics", + "metrics-exporter-prometheus", + "metrics-process", + "metrics-util", + "once_cell", + "procfs", + "reqwest", + "reth-chainspec", + "reth-db", + "reth-db-api", + "reth-metrics", + "reth-provider", + "reth-tasks", + "socket2 0.4.10", + "tikv-jemalloc-ctl", + "tokio", + "tower", + "tracing", + "vergen", +] + [[package]] name = "reth-node-optimism" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7833,7 +7901,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7875,7 +7943,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-consensus", @@ -7886,7 +7954,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -7910,14 +7978,15 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.0.2" +version = "1.0.3" [[package]] name = "reth-optimism-rpc" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "jsonrpsee", + "jsonrpsee-types", "parking_lot 0.12.3", "reth-chainspec", "reth-errors", @@ -7942,7 +8011,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures-util", "metrics", @@ -7964,7 +8033,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-errors", @@ -7978,7 +8047,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.0.2" +version = "1.0.3" dependencies = [ "reth-chainspec", "reth-primitives", @@ -7988,7 +8057,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "alloy-genesis", @@ -8008,7 +8077,6 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "rayon", "reth-chainspec", @@ -8032,7 +8100,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8047,7 +8115,6 @@ dependencies = [ "modular-bitfield", "proptest", 
"proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "revm-primitives", @@ -8059,21 +8126,20 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", "auto_impl", - "dashmap", - "derive_more", + "dashmap 6.0.1", "itertools 0.13.0", "metrics", "parking_lot 0.12.3", - "pin-project", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", + "reth-chain-state", "reth-chainspec", "reth-codecs", "reth-db", @@ -8092,17 +8158,17 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", + "reth-trie-db", "revm", "strum", "tempfile", "tokio", - "tokio-stream", "tracing", ] [[package]] name = "reth-prune" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -8131,7 +8197,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -8141,7 +8207,6 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "reth-codecs", "serde", "serde_json", @@ -8152,7 +8217,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-eips", "reth-chainspec", @@ -8170,7 +8235,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-dyn-abi", "alloy-genesis", @@ -8227,7 +8292,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "jsonrpsee", "reth-engine-primitives", @@ -8240,7 +8305,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.0.2" +version = "1.0.3" dependencies = [ "futures", "jsonrpsee", @@ -8255,7 +8320,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.0.2" +version = "1.0.3" dependencies = [ "clap", "http 1.1.0", @@ -8301,7 +8366,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "assert_matches", @@ -8334,7 +8399,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-dyn-abi", "async-trait", @@ -8342,6 +8407,7 @@ dependencies = [ "dyn-clone", "futures", "jsonrpsee", + "jsonrpsee-types", "parking_lot 0.12.3", "reth-chainspec", "reth-errors", @@ -8365,7 +8431,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-sol-types", "derive_more", @@ -8402,7 +8468,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rpc-types-engine", "assert_matches", @@ -8419,7 +8485,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "jsonrpsee-core", @@ -8434,7 +8500,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8450,7 +8516,6 @@ dependencies = [ "bytes", "jsonrpsee-types", "proptest", - "proptest-derive 0.5.0", "rand 0.8.5", "serde_json", "similar-asserts", @@ -8458,7 +8523,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "alloy-rpc-types", @@ -8470,7 +8535,7 @@ dependencies = [ [[package]] name = "reth-stages" 
-version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "assert_matches", @@ -8508,6 +8573,7 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", + "reth-trie-db", "serde_json", "tempfile", "thiserror", @@ -8517,7 +8583,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "aquamarine", @@ -8546,7 +8612,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -8554,7 +8620,6 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-trie-common", @@ -8564,7 +8629,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -8587,7 +8652,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-primitives", "clap", @@ -8598,7 +8663,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.0.2" +version = "1.0.3" dependencies = [ "auto_impl", "reth-chainspec", @@ -8614,8 +8679,9 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.0.2" +version = "1.0.3" dependencies = [ + "alloy-rlp", "reth-fs-util", "reth-primitives", "thiserror-no-std", @@ -8623,7 +8689,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.0.2" +version = "1.0.3" dependencies = [ "auto_impl", "dyn-clone", @@ -8640,7 +8706,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-genesis", "rand 0.8.5", @@ -8650,7 +8716,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.0.2" +version = "1.0.3" dependencies = [ "tokio", "tokio-stream", @@ -8659,7 +8725,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.0.2" +version = "1.0.3" dependencies = [ "clap", "eyre", @@ -8673,7 +8739,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "aquamarine", @@ -8713,7 +8779,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "auto_impl", @@ -8747,7 +8813,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8764,7 +8830,6 @@ dependencies = [ "plain_hasher", "proptest", "proptest-arbitrary-interop", - "proptest-derive 0.5.0", "reth-codecs", "reth-primitives-traits", "revm-primitives", @@ -8774,9 +8839,44 @@ dependencies = [ "toml", ] +[[package]] +name = "reth-trie-db" +version = "1.0.3" +dependencies = [ + "alloy-rlp", + "auto_impl", + "criterion", + "derive_more", + "itertools 0.13.0", + "metrics", + "once_cell", + "proptest", + "proptest-arbitrary-interop", + "rayon", + "reth-chainspec", + "reth-db", + "reth-db-api", + "reth-execution-errors", + "reth-metrics", + "reth-primitives", + "reth-provider", + "reth-stages-types", + "reth-storage-errors", + "reth-trie", + "reth-trie-common", + "revm", + "serde", + "serde_json", + "similar-asserts", + "tokio", + "tokio-stream", + "tracing", + "triehash", +] + [[package]] name = "reth-trie-parallel" -version = "1.0.2" +version = "1.0.3" dependencies = [ "alloy-rlp", "criterion", @@ -8795,6 +8895,7 @@ dependencies = [ "reth-provider", 
"reth-tasks", "reth-trie", + "reth-trie-db", "thiserror", "tokio", "tracing", @@ -8817,9 +8918,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d485a7ccfbbcaf2d0c08c3d866dae279c6f71d7357862cbea637f23f27b7b695" +checksum = "5296ccad8d7ccbeb6c5a037a57bfe1ff27e81d8c4efbd3ae7df0a554eb1a818a" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8898,9 +8999,9 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.44" +version = "0.8.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aee83dc281d5a3200d37b299acd13b81066ea126a7f16f0eae70fc9aed241d9" +checksum = "ade4539f42266ded9e755c605bdddf546242b2c961b03b06a7375260788a0523" dependencies = [ "bytemuck", ] @@ -9199,9 +9300,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af947d0ca10a2f3e00c7ec1b515b7c83e5cb3fa62d4c11a64301d9eec54440e9" +checksum = "a4465c22496331e20eb047ff46e7366455bc01c0c02015c4a376de0b2cd3a1af" dependencies = [ "sdd", ] @@ -9234,9 +9335,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sdd" -version = "0.2.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" +checksum = "85f05a494052771fc5bd0619742363b5e24e5ad72ab3111ec2e27925b8edc5f3" [[package]] name = "sec1" @@ -9274,9 +9375,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -9288,9 +9389,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -9410,9 +9511,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ "base64 0.22.1", "chrono", @@ -9428,9 +9529,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.3" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", "proc-macro2", @@ -9987,18 +10088,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.62" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ 
"thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.62" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", @@ -10155,9 +10256,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" dependencies = [ "backtrace", "bytes", @@ -10239,14 +10340,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.15", + "toml_edit 0.22.16", ] [[package]] @@ -10271,15 +10372,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.15" +version = "0.22.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" +checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.14", ] [[package]] @@ -11176,9 +11277,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "374ec40a2d767a3c1b4972d9475ecd557356637be906f2cb3f7fe17a6eb5e22f" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index acc7062463bb..2e519aa35b96 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.0.2" +version = "1.0.3" edition = "2021" rust-version = "1.79" license = "MIT OR Apache-2.0" @@ -14,6 +14,7 @@ members = [ "crates/blockchain-tree/", "crates/blockchain-tree-api/", "crates/chainspec/", + "crates/chain-state/", "crates/cli/cli/", "crates/cli/commands/", "crates/cli/runner/", @@ -65,6 +66,7 @@ members = [ "crates/node/api/", "crates/node/builder/", "crates/node/events/", + "crates/node/metrics", "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", @@ -114,6 +116,7 @@ members = [ "crates/tracing/", "crates/transaction-pool/", "crates/trie/common", + "crates/trie/db", "crates/trie/parallel/", "crates/trie/trie", "examples/beacon-api-sidecar-fetcher/", @@ -272,6 +275,7 @@ reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chainspec = { path = "crates/chainspec" } +reth-chain-state = { path = "crates/chain-state" } reth-cli = { path = "crates/cli/cli" } reth-cli-commands = { path = "crates/cli/commands" } reth-cli-runner = { path = "crates/cli/runner" } @@ -331,6 +335,7 @@ reth-node-builder = { path = "crates/node/builder" } reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } 
reth-node-events = { path = "crates/node/events" } +reth-node-metrics = { path = "crates/node/metrics" } reth-node-optimism = { path = "crates/optimism/node" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } @@ -340,7 +345,7 @@ reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } -reth-primitives = { path = "crates/primitives" } +reth-primitives = { path = "crates/primitives", default-features = false, features = ["std"] } reth-primitives-traits = { path = "crates/primitives-traits", default-features = false } reth-provider = { path = "crates/storage/provider" } reth-prune = { path = "crates/prune/prune" } @@ -371,6 +376,7 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } +reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm @@ -433,7 +439,7 @@ bytes = "1.5" bitflags = "2.4" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } -dashmap = "5.5" +dashmap = "6.0" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" @@ -449,9 +455,9 @@ serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" -rustc-hash = { version = "2.0", default-features = false } +rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" -strum = "0.26" +strum = { version = "0.26", default-features = false } rayon = "1.7" itertools = "0.13" parking_lot = "0.12" diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 00a5124fa2da..4023c1c17375 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -83,7 +83,7 @@ default = ["jemalloc"] asm-keccak = ["reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] +jemalloc = ["dep:tikv-jemallocator"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index c7ea5683175f..190217f6a8cf 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -57,11 +57,11 @@ impl Command { }; let head_block_hash = block.hash(); - let safe_block_hash = - block_provider.get_block_by_number((block.number - 32).into(), false); + let safe_block_hash = block_provider + .get_block_by_number(block.number.saturating_sub(32).into(), false); - let finalized_block_hash = - block_provider.get_block_by_number((block.number - 64).into(), false); + let finalized_block_hash = block_provider + .get_block_by_number(block.number.saturating_sub(64).into(), false); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c8465165cd16..33c8a24cc312 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -54,6 +54,7 @@ reth-basic-payload-builder.workspace = true reth-static-file.workspace = true reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } +reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true 
reth-ethereum-payload-builder.workspace = true @@ -61,6 +62,7 @@ reth-db-common.workspace = true reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true +reth-node-metrics.workspace = true reth-consensus.workspace = true reth-engine-util.workspace = true reth-prune.workspace = true @@ -118,7 +120,7 @@ dev = ["reth-cli-commands/dev"] asm-keccak = ["reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] +jemalloc = ["dep:tikv-jemallocator", "reth-node-metrics/jemalloc"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] min-error-logs = ["tracing/release_max_level_error"] @@ -131,3 +133,6 @@ min-trace-logs = ["tracing/release_max_level_trace"] name = "reth" path = "src/main.rs" +[[bin]] +name = "engine2" +path = "src/engine2.rs" diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index e98c22b1dc90..fd238637e508 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -37,6 +37,8 @@ use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie::StateRoot; +use reth_trie_db::DatabaseStateRoot; use std::{path::PathBuf, str::FromStr, sync::Arc}; use tracing::*; @@ -266,8 +268,10 @@ impl Command { debug!(target: "reth::cli", ?execution_outcome, "Executed block"); let hashed_post_state = execution_outcome.hash_state_slow(); - let (state_root, trie_updates) = hashed_post_state - .state_root_with_updates(provider_factory.provider()?.tx_ref())?; + let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( + provider_factory.provider()?.tx_ref(), + hashed_post_state.clone(), + )?; if state_root != block_with_senders.state_root { eyre::bail!( diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index d5ea36a59ecc..5378a30bde56 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -19,14 +19,15 @@ use reth_network_api::NetworkInfo; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, - StaticFileProviderFactory, StorageReader, + writer::StorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StaticFileProviderFactory, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; use reth_tasks::TaskExecutor; use reth_trie::StateRoot; +use reth_trie_db::DatabaseStateRoot; use std::{path::PathBuf, sync::Arc}; use tracing::*; @@ -148,8 +149,10 @@ impl Command { ExecutionOutcome::new(state, receipts.into(), block.number, vec![requests.into()]); // Unpacked `BundleState::state_root_slow` function - let (in_memory_state_root, in_memory_updates) = - execution_outcome.hash_state_slow().state_root_with_updates(provider.tx_ref())?; + let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( + provider.tx_ref(), + execution_outcome.hash_state_slow(), + )?; if in_memory_state_root == block.state_root { info!(target: 
"reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches"); @@ -165,7 +168,8 @@ impl Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - execution_outcome.write_to_storage(&provider_rw, None, OriginalValuesKnown::No)?; + let mut storage_writer = StorageWriter::new(Some(&provider_rw), None); + storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 1b330ea6a0f4..1c6e804148c4 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,8 +17,8 @@ use reth_network_p2p::full_block::FullBlockClient; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, + writer::StorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, + LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -151,7 +151,10 @@ impl Command { ), )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; - executor.finalize().write_to_storage(&provider_rw, None, OriginalValuesKnown::Yes)?; + let execution_outcome = executor.finalize(); + + let mut storage_writer = StorageWriter::new(Some(&provider_rw), None); + storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; let checkpoint = Some(StageCheckpoint::new( block_number.checked_sub(1).ok_or(eyre::eyre!("GenesisBlockHasNoParent"))?, diff --git a/bin/reth/src/engine2.rs b/bin/reth/src/engine2.rs new file mode 100644 index 000000000000..3f0470699ca3 --- /dev/null +++ b/bin/reth/src/engine2.rs @@ -0,0 +1,39 @@ +#![allow(missing_docs)] +#![allow(rustdoc::missing_crate_level_docs)] + +// We use jemalloc for performance reasons. +#[cfg(all(feature = "jemalloc", unix))] +#[global_allocator] +static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +fn main() { + use reth::cli::Cli; + use reth_node_ethereum::{launch::EthNodeLauncher, node::EthereumAddOns, EthereumNode}; + use reth_provider::providers::BlockchainProvider2; + + reth_cli_util::sigsegv_handler::install(); + + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. 
+    if std::env::var_os("RUST_BACKTRACE").is_none() {
+        std::env::set_var("RUST_BACKTRACE", "1");
+    }
+
+    if let Err(err) = Cli::parse_args().run(|builder, _| async {
+        let handle = builder
+            .with_types_and_provider::<EthereumNode, BlockchainProvider2<_>>()
+            .with_components(EthereumNode::components())
+            .with_add_ons::<EthereumAddOns>()
+            .launch_with_fn(|builder| {
+                let launcher = EthNodeLauncher::new(
+                    builder.task_executor().clone(),
+                    builder.config().datadir(),
+                );
+                builder.launch_with(launcher)
+            })
+            .await?;
+        handle.node_exit_future.await
+    }) {
+        eprintln!("Error: {err:?}");
+        std::process::exit(1);
+    }
+}
diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs
index 2e1760d9888a..f49c909b94bc 100644
--- a/bin/reth/src/lib.rs
+++ b/bin/reth/src/lib.rs
@@ -58,9 +58,9 @@ pub mod core {
     pub use reth_node_core::*;
 }
 
-/// Re-exported from `reth_node_core`.
+/// Re-exported from `reth_node_metrics`.
 pub mod prometheus_exporter {
-    pub use reth_node_core::prometheus_exporter::*;
+    pub use reth_node_metrics::recorder::*;
 }
 
 /// Re-export of the `reth_node_core` types specifically in the `args` module.
diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md
index 0f50cacbb9a6..6ee68807d40c 100644
--- a/book/developers/exex/hello-world.md
+++ b/book/developers/exex/hello-world.md
@@ -159,7 +159,7 @@ and it's safe to prune the associated data.
 
-What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth/blob/b8cd7be6c92a71aea5341cdeba685f124c6de540/examples/exex/minimal/src/main.rs) that we provide in the Reth repository.
+What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth-exex-examples/blob/4f3498f0cc00e038d6d8c32cd94fe82788862f49/minimal/src/main.rs) that we provide in the [reth-exex-examples](https://github.com/paradigmxyz/reth-exex-examples) repository.
 
 ## What's next?
diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml
index b3679677a13c..988bb54e8580 100644
--- a/crates/blockchain-tree/Cargo.toml
+++ b/crates/blockchain-tree/Cargo.toml
@@ -25,6 +25,7 @@ reth-execution-types.workspace = true
 reth-prune-types.workspace = true
 reth-stages-api.workspace = true
 reth-trie = { workspace = true, features = ["metrics"] }
+reth-trie-db = { workspace = true, features = ["metrics"] }
 reth-trie-parallel = { workspace = true, features = ["parallel"] }
 reth-network.workspace = true
 reth-consensus.workspace = true
diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs
index b080f26bda33..3b4c30eae96f 100644
--- a/crates/blockchain-tree/src/block_indices.rs
+++ b/crates/blockchain-tree/src/block_indices.rs
@@ -63,7 +63,8 @@ impl BlockIndices {
     }
 
     /// Return block to chain id
-    pub const fn blocks_to_chain(&self) -> &HashMap<BlockHash, BlockchainId> {
+    #[allow(dead_code)]
+    pub(crate) const fn blocks_to_chain(&self) -> &HashMap<BlockHash, BlockchainId> {
         &self.blocks_to_chain
     }
 
@@ -202,7 +203,7 @@ impl BlockIndices {
     /// Remove chain from indices and return dependent chains that need to be removed.
     /// Does the cleaning of the tree and removing blocks from the chain.
-    pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet<BlockchainId> {
+    pub(crate) fn remove_chain(&mut self, chain: &Chain) -> BTreeSet<BlockchainId> {
         chain
             .blocks()
             .iter()
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index b41e4dbbffba..2fb567463686 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -26,7 +26,11 @@ use reth_provider::{
 use reth_prune_types::PruneModes;
 use reth_stages_api::{MetricEvent, MetricEventsSender};
 use reth_storage_errors::provider::{ProviderResult, RootMismatch};
-use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot};
+use reth_trie::{
+    hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory},
+    StateRoot,
+};
+use reth_trie_db::DatabaseStateRoot;
 use std::{
     collections::{btree_map::Entry, BTreeMap, HashSet},
     sync::Arc,
@@ -1238,7 +1242,7 @@ where
             .disable_long_read_transaction_safety();
         let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref())
             .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
-                provider.tx_ref(),
+                DatabaseHashedCursorFactory::new(provider.tx_ref()),
                 &hashed_state_sorted,
             ))
             .with_prefix_sets(prefix_sets)
diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs
index e44e1aae552a..43e47743b837 100644
--- a/crates/blockchain-tree/src/state.rs
+++ b/crates/blockchain-tree/src/state.rs
@@ -113,7 +113,7 @@ impl TreeState {
 
 /// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree].
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
-pub struct BlockchainId(u64);
+pub(crate) struct BlockchainId(u64);
 
 impl From<BlockchainId> for u64 {
     fn from(value: BlockchainId) -> Self {
diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml
new file mode 100644
index 000000000000..1615c3f5eb5b
--- /dev/null
+++ b/crates/chain-state/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+name = "reth-chain-state"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "Reth state related types and functionality."
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-chainspec.workspace = true
+reth-errors.workspace = true
+reth-execution-types.workspace = true
+reth-primitives.workspace = true
+reth-storage-api.workspace = true
+reth-trie.workspace = true
+
+revm = { workspace = true, optional = true}
+
+# async
+tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] }
+tokio-stream = { workspace = true, features = ["sync"] }
+
+# tracing
+tracing.workspace = true
+
+# misc
+auto_impl.workspace = true
+derive_more.workspace = true
+parking_lot.workspace = true
+pin-project.workspace = true
+rand = { workspace = true, optional = true }
+
+[dev-dependencies]
+rand.workspace = true
+revm.workspace = true
+
+[features]
+test-utils = [
+    "rand",
+    "revm"
+]
\ No newline at end of file
diff --git a/crates/storage/provider/src/providers/chain_info.rs b/crates/chain-state/src/chain_info.rs
similarity index 100%
rename from crates/storage/provider/src/providers/chain_info.rs
rename to crates/chain-state/src/chain_info.rs
diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs
new file mode 100644
index 000000000000..1ac01b57b153
--- /dev/null
+++ b/crates/chain-state/src/in_memory.rs
@@ -0,0 +1,964 @@
+//! Types for tracking the canonical chain state in memory.
+ +use crate::{ + CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, + ChainInfoTracker, MemoryOverlayStateProvider, +}; +use parking_lot::RwLock; +use reth_chainspec::ChainInfo; +use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_primitives::{ + Address, BlockNumHash, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, + SealedHeader, B256, +}; +use reth_storage_api::StateProviderBox; +use reth_trie::{updates::TrieUpdates, HashedPostState}; +use std::{collections::HashMap, ops::Deref, sync::Arc, time::Instant}; +use tokio::sync::broadcast; + +/// Size of the broadcast channel used to notify canonical state events. +const CANON_STATE_NOTIFICATION_CHANNEL_SIZE: usize = 256; + +/// Container type for in memory state data of the canonical chain. +/// +/// This tracks blocks and their state that haven't been persisted to disk yet but are part of the +/// canonical chain that can be traced back to a canonical block on disk. +#[derive(Debug, Default)] +pub(crate) struct InMemoryState { + /// All canonical blocks that are not on disk yet. + blocks: RwLock>>, + /// Mapping of block numbers to block hashes. + numbers: RwLock>, + /// The pending block that has not yet been made canonical. + pending: RwLock>, +} + +impl InMemoryState { + pub(crate) const fn new( + blocks: HashMap>, + numbers: HashMap, + pending: Option, + ) -> Self { + Self { + blocks: RwLock::new(blocks), + numbers: RwLock::new(numbers), + pending: RwLock::new(pending), + } + } + + /// Returns the state for a given block hash. + pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { + self.blocks.read().get(&hash).cloned() + } + + /// Returns the state for a given block number. + pub(crate) fn state_by_number(&self, number: u64) -> Option> { + self.numbers.read().get(&number).and_then(|hash| self.blocks.read().get(hash).cloned()) + } + + /// Returns the current chain head state. + pub(crate) fn head_state(&self) -> Option> { + self.numbers + .read() + .iter() + .max_by_key(|(&number, _)| number) + .and_then(|(_, hash)| self.blocks.read().get(hash).cloned()) + } + + /// Returns the pending state corresponding to the current head plus one, + /// from the payload received in newPayload that does not have a FCU yet. + pub(crate) fn pending_state(&self) -> Option> { + self.pending.read().as_ref().map(|state| Arc::new(BlockState::new(state.block.clone()))) + } + + #[cfg(test)] + fn block_count(&self) -> usize { + self.blocks.read().len() + } +} + +/// Inner type to provide in memory state. It includes a chain tracker to be +/// advanced internally by the tree. +#[derive(Debug)] +pub(crate) struct CanonicalInMemoryStateInner { + pub(crate) chain_info_tracker: ChainInfoTracker, + pub(crate) in_memory_state: InMemoryState, + pub(crate) canon_state_notification_sender: CanonStateNotificationSender, +} + +/// This type is responsible for providing the blocks, receipts, and state for +/// all canonical blocks not on disk yet and keeps track of the block range that +/// is in memory. +#[derive(Debug, Clone)] +pub struct CanonicalInMemoryState { + pub(crate) inner: Arc, +} + +impl CanonicalInMemoryState { + /// Create a new in memory state with the given blocks, numbers, and pending state. 
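A minimal construction sketch for the constructor that follows, mirroring the unit tests at the end of this file; `get_executed_block_with_number` is the helper this patch adds in `test_utils.rs`, and the function name here is illustrative:

use std::{collections::HashMap, sync::Arc};

fn seeded_state() -> CanonicalInMemoryState {
    // an empty state: the chain info tracker falls back to a default header
    let _empty = CanonicalInMemoryState::new(HashMap::new(), HashMap::new(), None);

    // a state seeded with one in-memory block, keyed by hash and number
    let state = BlockState::new(get_executed_block_with_number(1, B256::random()));
    let (hash, number) = (state.hash(), state.number());
    let blocks = HashMap::from([(hash, Arc::new(state))]);
    let numbers = HashMap::from([(number, hash)]);
    CanonicalInMemoryState::new(blocks, numbers, None)
}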
+    pub fn new(
+        blocks: HashMap<B256, Arc<BlockState>>,
+        numbers: HashMap<u64, B256>,
+        pending: Option<ExecutedBlock>,
+    ) -> Self {
+        let in_memory_state = InMemoryState::new(blocks, numbers, pending);
+        let head_state = in_memory_state.head_state();
+        let header = match head_state {
+            Some(state) => state.block().block().header.clone(),
+            None => SealedHeader::default(),
+        };
+        let chain_info_tracker = ChainInfoTracker::new(header);
+        let (canon_state_notification_sender, _canon_state_notification_receiver) =
+            broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE);
+
+        let inner = CanonicalInMemoryStateInner {
+            chain_info_tracker,
+            in_memory_state,
+            canon_state_notification_sender,
+        };
+
+        Self { inner: Arc::new(inner) }
+    }
+
+    /// Create a new in memory state with the given local head.
+    pub fn with_head(head: SealedHeader) -> Self {
+        let chain_info_tracker = ChainInfoTracker::new(head);
+        let in_memory_state = InMemoryState::default();
+        let (canon_state_notification_sender, _canon_state_notification_receiver) =
+            broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE);
+        let inner = CanonicalInMemoryStateInner {
+            chain_info_tracker,
+            in_memory_state,
+            canon_state_notification_sender,
+        };
+
+        Self { inner: Arc::new(inner) }
+    }
+
+    /// Returns the header corresponding to the given hash.
+    pub fn header_by_hash(&self, hash: B256) -> Option<SealedHeader> {
+        self.state_by_hash(hash).map(|block| block.block().block.header.clone())
+    }
+
+    /// Append new blocks to the in memory state.
+    fn update_blocks<I>(&self, new_blocks: I, reorged: I)
+    where
+        I: IntoIterator<Item = ExecutedBlock>,
+    {
+        // acquire all locks
+        let mut blocks = self.inner.in_memory_state.blocks.write();
+        let mut numbers = self.inner.in_memory_state.numbers.write();
+        let mut pending = self.inner.in_memory_state.pending.write();
+
+        // we first remove the blocks from the reorged chain
+        for block in reorged {
+            let hash = block.block().hash();
+            let number = block.block().number;
+            blocks.remove(&hash);
+            numbers.remove(&number);
+        }
+
+        // insert the new blocks
+        for block in new_blocks {
+            let parent = blocks.get(&block.block().parent_hash).cloned();
+            let block_state = BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone()));
+            let hash = block_state.hash();
+            let number = block_state.number();
+
+            // append new blocks
+            blocks.insert(hash, Arc::new(block_state));
+            numbers.insert(number, hash);
+        }
+
+        // remove the pending state
+        pending.take();
+    }
+
+    /// Update the in memory state with the given chain update.
+    pub fn update_chain(&self, new_chain: NewCanonicalChain) {
+        match new_chain {
+            NewCanonicalChain::Commit { new } => {
+                self.update_blocks(new, vec![]);
+            }
+            NewCanonicalChain::Reorg { new, old } => {
+                self.update_blocks(new, old);
+            }
+        }
+    }
+
+    /// Removes blocks from the in memory state that are persisted to the given height.
+    ///
+    /// This will update the links between blocks and remove all blocks that are [..
+    /// `persisted_height`].
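Before the implementation below, a behavioral sketch of this persistence flow; the helper comes from this patch's `test_utils.rs` and the assertions are illustrative, not part of the patch:

fn persist_flow(state: &CanonicalInMemoryState) {
    // commit blocks 1..=3, then mark everything up to height 2 as persisted
    let new: Vec<ExecutedBlock> = get_executed_blocks(1..4).collect();
    state.update_chain(NewCanonicalChain::Commit { new });
    state.remove_persisted_blocks(2);

    // only block 3 remains in memory, re-linked to its (now on-disk) parent
    assert!(state.state_by_number(2).is_none());
    assert_eq!(state.head_state().unwrap().number(), 3);
}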
+    pub fn remove_persisted_blocks(&self, persisted_height: u64) {
+        let mut blocks = self.inner.in_memory_state.blocks.write();
+        let mut numbers = self.inner.in_memory_state.numbers.write();
+        let _pending = self.inner.in_memory_state.pending.write();
+
+        // clear all numbers
+        numbers.clear();
+
+        // drain all blocks and only keep the ones that are not persisted
+        let mut old_blocks = blocks
+            .drain()
+            .map(|(_, b)| b.block.clone())
+            .filter(|b| b.block().number > persisted_height)
+            .collect::<Vec<_>>();
+
+        // sort the blocks by number so we can insert them back in order
+        old_blocks.sort_unstable_by_key(|block| block.block().number);
+
+        for block in old_blocks {
+            let parent = blocks.get(&block.block().parent_hash).cloned();
+            let block_state = BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone()));
+            let hash = block_state.hash();
+            let number = block_state.number();
+
+            // append new blocks
+            blocks.insert(hash, Arc::new(block_state));
+            numbers.insert(number, hash);
+        }
+    }
+
+    /// Returns the in memory state corresponding to the given hash.
+    pub fn state_by_hash(&self, hash: B256) -> Option<Arc<BlockState>> {
+        self.inner.in_memory_state.state_by_hash(hash)
+    }
+
+    /// Returns the in memory state corresponding to the given block number.
+    pub fn state_by_number(&self, number: u64) -> Option<Arc<BlockState>> {
+        self.inner.in_memory_state.state_by_number(number)
+    }
+
+    /// Returns the in memory head state.
+    pub fn head_state(&self) -> Option<Arc<BlockState>> {
+        self.inner.in_memory_state.head_state()
+    }
+
+    /// Returns the in memory pending state.
+    pub fn pending_state(&self) -> Option<Arc<BlockState>> {
+        self.inner.in_memory_state.pending_state()
+    }
+
+    /// Returns the in memory pending `BlockNumHash`.
+    pub fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
+        self.inner
+            .in_memory_state
+            .pending_state()
+            .map(|state| BlockNumHash { number: state.number(), hash: state.hash() })
+    }
+
+    /// Returns the current `ChainInfo`.
+    pub fn chain_info(&self) -> ChainInfo {
+        self.inner.chain_info_tracker.chain_info()
+    }
+
+    /// Returns the latest canonical block number.
+    pub fn get_canonical_block_number(&self) -> u64 {
+        self.inner.chain_info_tracker.get_canonical_block_number()
+    }
+
+    /// Returns the `BlockNumHash` of the safe head.
+    pub fn get_safe_num_hash(&self) -> Option<BlockNumHash> {
+        self.inner.chain_info_tracker.get_safe_num_hash()
+    }
+
+    /// Returns the `BlockNumHash` of the finalized head.
+    pub fn get_finalized_num_hash(&self) -> Option<BlockNumHash> {
+        self.inner.chain_info_tracker.get_finalized_num_hash()
+    }
+
+    /// Hook for new fork choice update.
+    pub fn on_forkchoice_update_received(&self) {
+        self.inner.chain_info_tracker.on_forkchoice_update_received();
+    }
+
+    /// Returns the timestamp of the last received update.
+    pub fn last_received_update_timestamp(&self) -> Option<Instant> {
+        self.inner.chain_info_tracker.last_forkchoice_update_received_at()
+    }
+
+    /// Hook for transition configuration exchanged.
+    pub fn on_transition_configuration_exchanged(&self) {
+        self.inner.chain_info_tracker.on_transition_configuration_exchanged();
+    }
+
+    /// Returns the timestamp of the last transition configuration exchange.
+    pub fn last_exchanged_transition_configuration_timestamp(&self) -> Option<Instant> {
+        self.inner.chain_info_tracker.last_transition_configuration_exchanged_at()
+    }
+
+    /// Canonical head setter.
+    pub fn set_canonical_head(&self, header: SealedHeader) {
+        self.inner.chain_info_tracker.set_canonical_head(header);
+    }
+
+    /// Safe head setter.
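A sketch of how an engine-API caller might drive the tracker hooks above and the setters that follow on a forkchoice update; the header arguments are placeholders:

fn on_forkchoice_update(
    state: &CanonicalInMemoryState,
    head: SealedHeader,
    safe: SealedHeader,
    finalized: SealedHeader,
) {
    state.on_forkchoice_update_received();
    state.set_canonical_head(head);
    state.set_safe(safe);
    state.set_finalized(finalized);
    assert!(state.last_received_update_timestamp().is_some());
}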
+    pub fn set_safe(&self, header: SealedHeader) {
+        self.inner.chain_info_tracker.set_safe(header);
+    }
+
+    /// Finalized head setter.
+    pub fn set_finalized(&self, header: SealedHeader) {
+        self.inner.chain_info_tracker.set_finalized(header);
+    }
+
+    /// Canonical head getter.
+    pub fn get_canonical_head(&self) -> SealedHeader {
+        self.inner.chain_info_tracker.get_canonical_head()
+    }
+
+    /// Finalized header getter.
+    pub fn get_finalized_header(&self) -> Option<SealedHeader> {
+        self.inner.chain_info_tracker.get_finalized_header()
+    }
+
+    /// Safe header getter.
+    pub fn get_safe_header(&self) -> Option<SealedHeader> {
+        self.inner.chain_info_tracker.get_safe_header()
+    }
+
+    /// Returns the `SealedHeader` corresponding to the pending state.
+    pub fn pending_sealed_header(&self) -> Option<SealedHeader> {
+        self.pending_state().map(|h| h.block().block().header.clone())
+    }
+
+    /// Returns the `Header` corresponding to the pending state.
+    pub fn pending_header(&self) -> Option<Header> {
+        self.pending_sealed_header().map(|sealed_header| sealed_header.unseal())
+    }
+
+    /// Returns the `SealedBlock` corresponding to the pending state.
+    pub fn pending_block(&self) -> Option<SealedBlock> {
+        self.pending_state().map(|block_state| block_state.block().block().clone())
+    }
+
+    /// Returns the `SealedBlockWithSenders` corresponding to the pending state.
+    pub fn pending_block_with_senders(&self) -> Option<SealedBlockWithSenders> {
+        self.pending_state()
+            .and_then(|block_state| block_state.block().block().clone().seal_with_senders())
+    }
+
+    /// Returns a tuple with the `SealedBlock` corresponding to the pending
+    /// state and a vector of its `Receipt`s.
+    pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec<Receipt>)> {
+        self.pending_state().map(|block_state| {
+            (block_state.block().block().clone(), block_state.executed_block_receipts())
+        })
+    }
+
+    /// Subscribe to new blocks events.
+    pub fn subscribe_canon_state(&self) -> CanonStateNotifications {
+        self.inner.canon_state_notification_sender.subscribe()
+    }
+
+    /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles.
+    pub fn notify_canon_state(&self, event: CanonStateNotification) {
+        self.inner.canon_state_notification_sender.send(event).ok();
+    }
+
+    /// Returns a state provider with references to in-memory blocks that overlay the database
+    /// state.
+    ///
+    /// This merges the state of all blocks that are part of the chain that the requested block is
+    /// the head of. This includes all blocks that connect back to the canonical block on disk.
+    pub fn state_provider(
+        &self,
+        hash: B256,
+        historical: StateProviderBox,
+    ) -> MemoryOverlayStateProvider {
+        let in_memory = if let Some(state) = self.state_by_hash(hash) {
+            state.chain().into_iter().map(|block_state| block_state.block()).collect()
+        } else {
+            Vec::new()
+        };
+
+        MemoryOverlayStateProvider::new(in_memory, historical)
+    }
+}
+
+/// State after applying the given block, this block is part of the canonical chain that is
+/// partially stored in memory and can be traced back to a canonical block on disk.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct BlockState {
+    /// The executed block that determines the state after this block has been executed.
+    block: ExecutedBlock,
+    /// The block's parent block if it exists.
+    parent: Option<Box<BlockState>>,
+}
+
+#[allow(dead_code)]
+impl BlockState {
+    /// `BlockState` constructor.
+    pub const fn new(block: ExecutedBlock) -> Self {
+        Self { block, parent: None }
+    }
+
+    /// `BlockState` constructor with parent.
+    pub fn with_parent(block: ExecutedBlock, parent: Option<BlockState>) -> Self {
+        Self { block, parent: parent.map(Box::new) }
+    }
+
+    /// Returns the hash and number of the on disk block this state can be traced back to.
+    pub fn anchor(&self) -> BlockNumHash {
+        if let Some(parent) = &self.parent {
+            parent.anchor()
+        } else {
+            self.block.block().parent_num_hash()
+        }
+    }
+
+    /// Returns the executed block that determines the state.
+    pub fn block(&self) -> ExecutedBlock {
+        self.block.clone()
+    }
+
+    /// Returns the hash of executed block that determines the state.
+    pub fn hash(&self) -> B256 {
+        self.block.block().hash()
+    }
+
+    /// Returns the block number of executed block that determines the state.
+    pub fn number(&self) -> u64 {
+        self.block.block().number
+    }
+
+    /// Returns the state root after applying the executed block that determines
+    /// the state.
+    pub fn state_root(&self) -> B256 {
+        self.block.block().header.state_root
+    }
+
+    /// Returns the `Receipts` of executed block that determines the state.
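The receipt accessors that follow assume the `Receipts` container holds at most one block's worth of entries. A sketch of the expected shape, mirroring the `test_state_receipts` unit test later in this file:

fn single_block_receipts() -> Receipts {
    // one outer entry (the executed block), each receipt optional
    Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }
}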
+    pub fn receipts(&self) -> &Receipts {
+        &self.block.execution_outcome().receipts
+    }
+
+    /// Returns a vector of `Receipt`s of the executed block that determines the state.
+    /// We assume that the `Receipts` in the executed block `ExecutionOutcome`
+    /// has only one element corresponding to the executed block associated to
+    /// the state.
+    pub fn executed_block_receipts(&self) -> Vec<Receipt> {
+        let receipts = self.receipts();
+
+        debug_assert!(
+            receipts.receipt_vec.len() <= 1,
+            "Expected at most one block's worth of receipts, found {}",
+            receipts.receipt_vec.len()
+        );
+
+        receipts
+            .receipt_vec
+            .first()
+            .map(|block_receipts| {
+                block_receipts.iter().filter_map(|opt_receipt| opt_receipt.clone()).collect()
+            })
+            .unwrap_or_default()
+    }
+
+    /// Returns a vector of parent `BlockStates` starting from the oldest one.
+    pub fn parent_state_chain(&self) -> Vec<&Self> {
+        let mut parents = Vec::new();
+        let mut current = self.parent.as_deref();
+
+        while let Some(parent) = current {
+            parents.insert(0, parent);
+            current = parent.parent.as_deref();
+        }
+
+        parents
+    }
+
+    /// Returns a vector of `BlockStates` representing the entire in memory chain,
+    /// including self as the last element.
+    pub fn chain(&self) -> Vec<&Self> {
+        let mut chain = self.parent_state_chain();
+        chain.push(self);
+        chain
+    }
+}
+
+/// Represents an executed block stored in-memory.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ExecutedBlock {
+    /// Sealed block the rest of the fields refer to.
+    pub block: Arc<SealedBlock>,
+    /// Block's senders.
+    pub senders: Arc<Vec<Address>>,
+    /// Block's execution outcome.
+    pub execution_output: Arc<ExecutionOutcome>,
+    /// Block's hashed state.
+    pub hashed_state: Arc<HashedPostState>,
+    /// Trie updates that result from applying the block.
+    pub trie: Arc<TrieUpdates>,
+}
+
+impl ExecutedBlock {
+    /// `ExecutedBlock` constructor.
+    pub const fn new(
+        block: Arc<SealedBlock>,
+        senders: Arc<Vec<Address>>,
+        execution_output: Arc<ExecutionOutcome>,
+        hashed_state: Arc<HashedPostState>,
+        trie: Arc<TrieUpdates>,
+    ) -> Self {
+        Self { block, senders, execution_output, hashed_state, trie }
+    }
+
+    /// Returns a reference to the executed block.
+    pub fn block(&self) -> &SealedBlock {
+        &self.block
+    }
+
+    /// Returns a reference to the block's senders.
+    pub fn senders(&self) -> &Vec<Address> {
+        &self.senders
+    }
+
+    /// Returns a [`SealedBlockWithSenders`]
+    ///
+    /// Note: this clones the block and senders.
+    pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders {
+        SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() }
+    }
+
+    /// Returns a reference to the block's execution outcome.
+    pub fn execution_outcome(&self) -> &ExecutionOutcome {
+        &self.execution_output
+    }
+
+    /// Returns a reference to the hashed state result of the execution outcome.
+    pub fn hashed_state(&self) -> &HashedPostState {
+        &self.hashed_state
+    }
+
+    /// Returns a reference to the trie updates for the block.
+    pub fn trie_updates(&self) -> &TrieUpdates {
+        &self.trie
+    }
+}
+
+/// Non-empty chain of blocks.
+#[derive(Debug)]
+pub enum NewCanonicalChain {
+    /// A simple append to the current canonical head
+    Commit {
+        /// all blocks that lead back to the canonical head
+        new: Vec<ExecutedBlock>,
+    },
+    /// A reorged chain consists of two chains that trace back to a shared ancestor block at which
+    /// point they diverge.
+    Reorg {
+        /// All blocks of the _new_ chain
+        new: Vec<ExecutedBlock>,
+        /// All blocks of the _old_ chain
+        old: Vec<ExecutedBlock>,
+    },
+}
+
+impl NewCanonicalChain {
+    /// Returns the length of the new chain.
+    pub fn new_block_count(&self) -> usize {
+        match self {
+            Self::Commit { new } | Self::Reorg { new, .. } => new.len(),
+        }
+    }
+
+    /// Returns the length of the reorged chain.
+    pub fn reorged_block_count(&self) -> usize {
+        match self {
+            Self::Commit { .. } => 0,
+            Self::Reorg { old, .. } => old.len(),
+        }
+    }
+
+    /// Converts the new chain into a notification that will be emitted to listeners.
+    pub fn to_chain_notification(&self) -> CanonStateNotification {
+        // TODO: do we need to merge execution outcome for multiblock commit or reorg?
+        // implement this properly
+        match self {
+            Self::Commit { new } => CanonStateNotification::Commit {
+                new: Arc::new(Chain::new(
+                    new.iter().map(ExecutedBlock::sealed_block_with_senders),
+                    new.last().unwrap().execution_output.deref().clone(),
+                    None,
+                )),
+            },
+            Self::Reorg { new, old } => CanonStateNotification::Reorg {
+                new: Arc::new(Chain::new(
+                    new.iter().map(ExecutedBlock::sealed_block_with_senders),
+                    new.last().unwrap().execution_output.deref().clone(),
+                    None,
+                )),
+                old: Arc::new(Chain::new(
+                    old.iter().map(ExecutedBlock::sealed_block_with_senders),
+                    old.last().unwrap().execution_output.deref().clone(),
+                    None,
+                )),
+            },
+        }
+    }
+
+    /// Returns the new tip of the chain.
+    ///
+    /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least
+    /// 1 new block.
+    pub fn tip(&self) -> &SealedBlock {
+        match self {
+            Self::Commit { new } | Self::Reorg { new, ..
} => { + new.last().expect("non empty blocks").block() + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{get_executed_block_with_number, get_executed_block_with_receipts}; + use rand::Rng; + use reth_errors::ProviderResult; + use reth_primitives::{Account, BlockNumber, Bytecode, Receipt, StorageKey, StorageValue}; + use reth_storage_api::{ + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, + }; + use reth_trie::AccountProof; + + fn create_mock_state(block_number: u64, parent_hash: B256) -> BlockState { + BlockState::new(get_executed_block_with_number(block_number, parent_hash)) + } + + fn create_mock_state_chain(num_blocks: u64) -> Vec { + let mut chain = Vec::with_capacity(num_blocks as usize); + let mut parent_hash = B256::random(); + let mut parent_state: Option = None; + + for i in 1..=num_blocks { + let mut state = create_mock_state(i, parent_hash); + if let Some(parent) = parent_state { + state.parent = Some(Box::new(parent)); + } + parent_hash = state.hash(); + parent_state = Some(state.clone()); + chain.push(state); + } + + chain + } + + struct MockStateProvider; + + impl StateProvider for MockStateProvider { + fn storage( + &self, + _address: Address, + _storage_key: StorageKey, + ) -> ProviderResult> { + Ok(None) + } + + fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { + Ok(None) + } + } + + impl BlockHashReader for MockStateProvider { + fn block_hash(&self, _number: BlockNumber) -> ProviderResult> { + Ok(None) + } + + fn canonical_hashes_range( + &self, + _start: BlockNumber, + _end: BlockNumber, + ) -> ProviderResult> { + Ok(vec![]) + } + } + + impl AccountReader for MockStateProvider { + fn basic_account(&self, _address: Address) -> ProviderResult> { + Ok(None) + } + } + + impl StateRootProvider for MockStateProvider { + fn hashed_state_root(&self, _hashed_state: &HashedPostState) -> ProviderResult { + Ok(B256::random()) + } + + fn hashed_state_root_with_updates( + &self, + _hashed_state: &HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::random(), TrieUpdates::default())) + } + + fn state_root(&self, _bundle_state: &revm::db::BundleState) -> ProviderResult { + Ok(B256::random()) + } + } + + impl StateProofProvider for MockStateProvider { + fn hashed_proof( + &self, + _hashed_state: &HashedPostState, + _address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(Address::random())) + } + } + + #[test] + fn test_in_memory_state_impl_state_by_hash() { + let mut state_by_hash = HashMap::new(); + let number = rand::thread_rng().gen::(); + let state = Arc::new(create_mock_state(number, B256::random())); + state_by_hash.insert(state.hash(), state.clone()); + + let in_memory_state = InMemoryState::new(state_by_hash, HashMap::new(), None); + + assert_eq!(in_memory_state.state_by_hash(state.hash()), Some(state)); + assert_eq!(in_memory_state.state_by_hash(B256::random()), None); + } + + #[test] + fn test_in_memory_state_impl_state_by_number() { + let mut state_by_hash = HashMap::new(); + let mut hash_by_number = HashMap::new(); + + let number = rand::thread_rng().gen::(); + let state = Arc::new(create_mock_state(number, B256::random())); + let hash = state.hash(); + + state_by_hash.insert(hash, state.clone()); + hash_by_number.insert(number, hash); + + let in_memory_state = InMemoryState::new(state_by_hash, hash_by_number, None); + + assert_eq!(in_memory_state.state_by_number(number), Some(state)); + assert_eq!(in_memory_state.state_by_number(number 
+ 1), None); + } + + #[test] + fn test_in_memory_state_impl_head_state() { + let mut state_by_hash = HashMap::new(); + let mut hash_by_number = HashMap::new(); + let state1 = Arc::new(create_mock_state(1, B256::random())); + let hash1 = state1.hash(); + let state2 = Arc::new(create_mock_state(2, hash1)); + let hash2 = state2.hash(); + hash_by_number.insert(1, hash1); + hash_by_number.insert(2, hash2); + state_by_hash.insert(hash1, state1); + state_by_hash.insert(hash2, state2); + + let in_memory_state = InMemoryState::new(state_by_hash, hash_by_number, None); + let head_state = in_memory_state.head_state().unwrap(); + + assert_eq!(head_state.hash(), hash2); + assert_eq!(head_state.number(), 2); + } + + #[test] + fn test_in_memory_state_impl_pending_state() { + let pending_number = rand::thread_rng().gen::(); + let pending_state = create_mock_state(pending_number, B256::random()); + let pending_hash = pending_state.hash(); + + let in_memory_state = + InMemoryState::new(HashMap::new(), HashMap::new(), Some(pending_state)); + + let result = in_memory_state.pending_state(); + assert!(result.is_some()); + let actual_pending_state = result.unwrap(); + assert_eq!(actual_pending_state.block.block().hash(), pending_hash); + assert_eq!(actual_pending_state.block.block().number, pending_number); + } + + #[test] + fn test_in_memory_state_impl_no_pending_state() { + let in_memory_state = InMemoryState::new(HashMap::new(), HashMap::new(), None); + + assert_eq!(in_memory_state.pending_state(), None); + } + + #[test] + fn test_state_new() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number, B256::random()); + + let state = BlockState::new(block.clone()); + + assert_eq!(state.block(), block); + } + + #[test] + fn test_state_block() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number, B256::random()); + + let state = BlockState::new(block.clone()); + + assert_eq!(state.block(), block); + } + + #[test] + fn test_state_hash() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number, B256::random()); + + let state = BlockState::new(block.clone()); + + assert_eq!(state.hash(), block.block.hash()); + } + + #[test] + fn test_state_number() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number, B256::random()); + + let state = BlockState::new(block); + + assert_eq!(state.number(), number); + } + + #[test] + fn test_state_state_root() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number, B256::random()); + + let state = BlockState::new(block.clone()); + + assert_eq!(state.state_root(), block.block().state_root); + } + + #[test] + fn test_state_receipts() { + let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; + + let block = get_executed_block_with_receipts(receipts.clone(), B256::random()); + + let state = BlockState::new(block); + + assert_eq!(state.receipts(), &receipts); + } + + #[test] + fn test_in_memory_state_chain_update() { + let state = CanonicalInMemoryState::new(HashMap::new(), HashMap::new(), None); + let block1 = get_executed_block_with_number(0, B256::random()); + let block2 = get_executed_block_with_number(0, B256::random()); + let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; + state.update_chain(chain); + assert_eq!(state.head_state().unwrap().block().block().hash(), block1.block().hash()); + 
assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block1.block().hash()); + + let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; + state.update_chain(chain); + assert_eq!(state.head_state().unwrap().block().block().hash(), block2.block().hash()); + assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block2.block().hash()); + + assert_eq!(state.inner.in_memory_state.block_count(), 1); + } + + #[test] + fn test_canonical_in_memory_state_state_provider() { + let block1 = get_executed_block_with_number(1, B256::random()); + let block2 = get_executed_block_with_number(2, block1.block().hash()); + let block3 = get_executed_block_with_number(3, block2.block().hash()); + + let state1 = BlockState::new(block1.clone()); + let state2 = BlockState::with_parent(block2.clone(), Some(state1.clone())); + let state3 = BlockState::with_parent(block3.clone(), Some(state2.clone())); + + let mut blocks = HashMap::new(); + blocks.insert(block1.block().hash(), Arc::new(state1)); + blocks.insert(block2.block().hash(), Arc::new(state2)); + blocks.insert(block3.block().hash(), Arc::new(state3)); + + let mut numbers = HashMap::new(); + numbers.insert(1, block1.block().hash()); + numbers.insert(2, block2.block().hash()); + numbers.insert(3, block3.block().hash()); + + let canonical_state = CanonicalInMemoryState::new(blocks, numbers, None); + + let historical: StateProviderBox = Box::new(MockStateProvider); + + let overlay_provider = canonical_state.state_provider(block3.block().hash(), historical); + + assert_eq!(overlay_provider.in_memory.len(), 3); + assert_eq!(overlay_provider.in_memory[0].block().number, 1); + assert_eq!(overlay_provider.in_memory[1].block().number, 2); + assert_eq!(overlay_provider.in_memory[2].block().number, 3); + + assert_eq!( + overlay_provider.in_memory[1].block().parent_hash, + overlay_provider.in_memory[0].block().hash() + ); + assert_eq!( + overlay_provider.in_memory[2].block().parent_hash, + overlay_provider.in_memory[1].block().hash() + ); + + let unknown_hash = B256::random(); + let empty_overlay_provider = + canonical_state.state_provider(unknown_hash, Box::new(MockStateProvider)); + assert_eq!(empty_overlay_provider.in_memory.len(), 0); + } + + #[test] + fn test_block_state_parent_blocks() { + let chain = create_mock_state_chain(4); + + let parents = chain[3].parent_state_chain(); + assert_eq!(parents.len(), 3); + assert_eq!(parents[0].block().block.number, 1); + assert_eq!(parents[1].block().block.number, 2); + assert_eq!(parents[2].block().block.number, 3); + + let parents = chain[2].parent_state_chain(); + assert_eq!(parents.len(), 2); + assert_eq!(parents[0].block().block.number, 1); + assert_eq!(parents[1].block().block.number, 2); + + let parents = chain[0].parent_state_chain(); + assert_eq!(parents.len(), 0); + } + + #[test] + fn test_block_state_single_block_state_chain() { + let single_block_number = 1; + let single_block = create_mock_state(single_block_number, B256::random()); + let single_block_hash = single_block.block().block.hash(); + + let parents = single_block.parent_state_chain(); + assert_eq!(parents.len(), 0); + + let block_state_chain = single_block.chain(); + assert_eq!(block_state_chain.len(), 1); + assert_eq!(block_state_chain[0].block().block.number, single_block_number); + assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); + } + + #[test] + fn test_block_state_chain() { + let chain = create_mock_state_chain(3); + + let block_state_chain = chain[2].chain(); + 
assert_eq!(block_state_chain.len(), 3); + assert_eq!(block_state_chain[0].block().block.number, 1); + assert_eq!(block_state_chain[1].block().block.number, 2); + assert_eq!(block_state_chain[2].block().block.number, 3); + + let block_state_chain = chain[1].chain(); + assert_eq!(block_state_chain.len(), 2); + assert_eq!(block_state_chain[0].block().block.number, 1); + assert_eq!(block_state_chain[1].block().block.number, 2); + + let block_state_chain = chain[0].chain(); + assert_eq!(block_state_chain.len(), 1); + assert_eq!(block_state_chain[0].block().block.number, 1); + } +} diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs new file mode 100644 index 000000000000..50a103111071 --- /dev/null +++ b/crates/chain-state/src/lib.rs @@ -0,0 +1,29 @@ +//! Reth state related types and functionality. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod in_memory; +pub use in_memory::*; + +mod chain_info; +pub use chain_info::ChainInfoTracker; + +mod notifications; +pub use notifications::{ + CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, + CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceStream, + ForkChoiceSubscriptions, +}; + +mod memory_overlay; +pub use memory_overlay::MemoryOverlayStateProvider; + +#[cfg(any(test, feature = "test-utils"))] +/// Common test helpers +pub mod test_utils; diff --git a/crates/engine/tree/src/tree/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs similarity index 84% rename from crates/engine/tree/src/tree/memory_overlay.rs rename to crates/chain-state/src/memory_overlay.rs index 11c04f3998aa..cba585018a25 100644 --- a/crates/engine/tree/src/tree/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,26 +1,26 @@ use super::ExecutedBlock; use reth_errors::ProviderResult; use reth_primitives::{Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256}; -use reth_provider::{ +use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, }; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. -#[derive(Debug)] -pub struct MemoryOverlayStateProvider { +#[allow(missing_debug_implementations)] +pub struct MemoryOverlayStateProvider { /// The collection of executed parent blocks. - in_memory: Vec, + pub(crate) in_memory: Vec, /// The collection of hashed state from in-memory blocks. - hashed_post_state: HashedPostState, + pub(crate) hashed_post_state: HashedPostState, /// Historical state provider for state lookups that are not found in in-memory blocks. - historical: H, + pub(crate) historical: Box, } -impl MemoryOverlayStateProvider { +impl MemoryOverlayStateProvider { /// Create new memory overlay state provider. 
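With the generic parameter replaced by a boxed provider, construction reduces to the following sketch; `StateProviderBox` is the alias used by `CanonicalInMemoryState::state_provider`, and the wrapper function is illustrative:

fn overlay(
    in_memory: Vec<ExecutedBlock>,
    historical: StateProviderBox,
) -> MemoryOverlayStateProvider {
    // `new` aggregates the hashed state of all in-memory blocks up front
    MemoryOverlayStateProvider::new(in_memory, historical)
}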
- pub fn new(in_memory: Vec, historical: H) -> Self { + pub fn new(in_memory: Vec, historical: Box) -> Self { let mut hashed_post_state = HashedPostState::default(); for block in &in_memory { hashed_post_state.extend(block.hashed_state.as_ref().clone()); @@ -29,10 +29,7 @@ impl MemoryOverlayStateProvider { } } -impl BlockHashReader for MemoryOverlayStateProvider -where - H: BlockHashReader, -{ +impl BlockHashReader for MemoryOverlayStateProvider { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in self.in_memory.iter().rev() { if block.block.number == number { @@ -65,10 +62,7 @@ where } } -impl AccountReader for MemoryOverlayStateProvider -where - H: AccountReader + Send, -{ +impl AccountReader for MemoryOverlayStateProvider { fn basic_account(&self, address: Address) -> ProviderResult> { for block in self.in_memory.iter().rev() { if let Some(account) = block.execution_output.account(&address) { @@ -80,10 +74,7 @@ where } } -impl StateRootProvider for MemoryOverlayStateProvider -where - H: StateRootProvider + Send, -{ +impl StateRootProvider for MemoryOverlayStateProvider { // TODO: Currently this does not reuse available in-memory trie nodes. fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { let mut state = self.hashed_post_state.clone(); @@ -102,10 +93,7 @@ where } } -impl StateProofProvider for MemoryOverlayStateProvider -where - H: StateProofProvider + Send, -{ +impl StateProofProvider for MemoryOverlayStateProvider { // TODO: Currently this does not reuse available in-memory trie nodes. fn hashed_proof( &self, @@ -119,10 +107,7 @@ where } } -impl StateProvider for MemoryOverlayStateProvider -where - H: StateProvider + Send, -{ +impl StateProvider for MemoryOverlayStateProvider { fn storage( &self, address: Address, diff --git a/crates/storage/provider/src/traits/chain.rs b/crates/chain-state/src/notifications.rs similarity index 98% rename from crates/storage/provider/src/traits/chain.rs rename to crates/chain-state/src/notifications.rs index 878e67a9f237..d0279b5bc80b 100644 --- a/crates/storage/provider/src/traits/chain.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,8 +1,8 @@ //! Canonical chain state notification trait and types. -use crate::{BlockReceipts, Chain}; use auto_impl::auto_impl; use derive_more::{Deref, DerefMut}; +use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives::{SealedBlockWithSenders, SealedHeader}; use std::{ pin::Pin, @@ -61,7 +61,7 @@ impl Stream for CanonStateNotificationStream { } /// Chain action that is triggered when a new block is imported or old block is reverted. -/// and will return all [`crate::ExecutionOutcome`] and +/// and will return all `ExecutionOutcome` and /// [`reth_primitives::SealedBlockWithSenders`] of both reverted and committed blocks. 
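A consumption sketch for these notifications via the broadcast channel returned by `CanonicalInMemoryState::subscribe_canon_state`; this assumes `Chain::tip` from `reth_execution_types`, and the logging is illustrative:

async fn watch_canonical_state(state: CanonicalInMemoryState) {
    let mut notifications = state.subscribe_canon_state();
    while let Ok(notification) = notifications.recv().await {
        match notification {
            CanonStateNotification::Commit { new } => {
                tracing::info!(tip = ?new.tip().hash(), "chain committed");
            }
            CanonStateNotification::Reorg { old, new } => {
                tracing::info!(old = ?old.tip().hash(), new = ?new.tip().hash(), "chain reorged");
            }
        }
    }
}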
#[derive(Clone, Debug)] pub enum CanonStateNotification { diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs new file mode 100644 index 000000000000..4cb2d270ab07 --- /dev/null +++ b/crates/chain-state/src/test_utils.rs @@ -0,0 +1,102 @@ +use crate::{ + in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, + CanonStateSubscriptions, +}; +use rand::Rng; +use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_primitives::{ + Address, Block, BlockNumber, Receipts, Requests, SealedBlockWithSenders, TransactionSigned, + B256, +}; +use reth_trie::{updates::TrieUpdates, HashedPostState}; +use revm::db::BundleState; +use std::{ + ops::Range, + sync::{Arc, Mutex}, +}; +use tokio::sync::broadcast::{self, Sender}; + +fn get_executed_block( + block_number: BlockNumber, + receipts: Receipts, + parent_hash: B256, +) -> ExecutedBlock { + let mut block = Block::default(); + let mut header = block.header.clone(); + header.number = block_number; + header.parent_hash = parent_hash; + header.ommers_hash = B256::random(); + block.header = header; + let tx = TransactionSigned::default(); + block.body.push(tx); + let sealed = block.seal_slow(); + let sender = Address::random(); + let sealed_with_senders = SealedBlockWithSenders::new(sealed.clone(), vec![sender]).unwrap(); + ExecutedBlock::new( + Arc::new(sealed), + Arc::new(sealed_with_senders.senders), + Arc::new(ExecutionOutcome::new( + BundleState::default(), + receipts, + block_number, + vec![Requests::default()], + )), + Arc::new(HashedPostState::default()), + Arc::new(TrieUpdates::default()), + ) +} + +/// Generates an `ExecutedBlock` that includes the given `Receipts`. +pub fn get_executed_block_with_receipts(receipts: Receipts, parent_hash: B256) -> ExecutedBlock { + let number = rand::thread_rng().gen::(); + get_executed_block(number, receipts, parent_hash) +} + +/// Generates an `ExecutedBlock` with the given `BlockNumber`. +pub fn get_executed_block_with_number( + block_number: BlockNumber, + parent_hash: B256, +) -> ExecutedBlock { + get_executed_block(block_number, Receipts { receipt_vec: vec![vec![]] }, parent_hash) +} + +/// Generates a range of executed blocks with ascending block numbers. 
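The generator documented above yields hash-linked blocks; a quick property sketch of that invariant (the function name is illustrative):

fn linked_blocks_property() {
    let blocks: Vec<ExecutedBlock> = get_executed_blocks(0..3).collect();
    // each block's parent hash points at the previously generated block
    assert_eq!(blocks[1].block().parent_hash, blocks[0].block().hash());
    assert_eq!(blocks[2].block().parent_hash, blocks[1].block().hash());
}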
+pub fn get_executed_blocks(range: Range) -> impl Iterator { + let mut parent_hash = B256::default(); + range.map(move |number| { + let block = get_executed_block_with_number(number, parent_hash); + parent_hash = block.block.hash(); + block + }) +} + +/// A test `ChainEventSubscriptions` +#[derive(Clone, Debug, Default)] +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>, +} + +impl TestCanonStateSubscriptions { + /// Adds new block commit to the queue that can be consumed with + /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] + pub fn add_next_commit(&self, new: Arc) { + let event = CanonStateNotification::Commit { new }; + self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) + } + + /// Adds reorg to the queue that can be consumed with + /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] + pub fn add_next_reorg(&self, old: Arc, new: Arc) { + let event = CanonStateNotification::Reorg { old, new }; + self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) + } +} + +impl CanonStateSubscriptions for TestCanonStateSubscriptions { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + let (canon_notif_tx, canon_notif_rx) = broadcast::channel(100); + self.canon_notif_tx.lock().as_mut().unwrap().push(canon_notif_tx); + + canon_notif_rx + } +} diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 35ed25eda66d..1d9983ad8d45 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -28,6 +28,7 @@ reth-network-p2p.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true +reth-node-metrics.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -35,6 +36,7 @@ reth-stages.workspace = true reth-static-file-types.workspace = true reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } +reth-trie-db = { workspace = true, features = ["metrics"] } itertools.workspace = true futures.workspace = true @@ -62,9 +64,6 @@ ratatui = { version = "0.27", default-features = false, features = [ "crossterm", ] } -# metrics -metrics-process.workspace = true - # reth test-vectors proptest = { workspace = true, optional = true } arbitrary = { workspace = true, optional = true } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index b382f7312cac..a303b8934daa 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -114,8 +114,6 @@ impl EnvironmentArgs { let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider) .with_prune_modes(prune_modes.clone()); - info!(target: "reth::cli", "Verifying storage consistency."); - // Check for consistency between database and static files. 
if let Some(unwind_target) = factory .static_file_provider() diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 233a7d5b3a4a..7a8c47d1f696 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -15,6 +15,7 @@ use reth_node_core::{ node_config::NodeConfig, version, }; +use reth_node_metrics::recorder::install_prometheus_recorder; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; /// Start the node @@ -173,7 +174,7 @@ impl NodeCommand { // Register the prometheus recorder before creating the database, // because database init needs it to register metrics. - let _ = node_config.install_prometheus_recorder()?; + let _ = install_prometheus_recorder(); let data_dir = node_config.datadir(); let db_path = data_dir.db(); diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 2b4087144805..5c1ae7bfca57 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -8,6 +8,7 @@ use reth_db_api::{ }; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; +use reth_trie_db::DatabaseStateRoot; use tracing::*; /// `reth recover storage-tries` command diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 2a2dd6f8a25e..5a02ec417ae9 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -13,7 +13,15 @@ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::{ args::{NetworkArgs, StageEnum}, - prometheus_exporter, + version::{ + BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, + VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, + }, +}; +use reth_node_metrics::{ + hooks::Hooks, + server::{MetricServer, MetricServerConfig}, + version::VersionInfo, }; use reth_provider::{ ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, @@ -99,15 +107,24 @@ impl Command { if let Some(listen_addr) = self.metrics { info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); - prometheus_exporter::serve( + let config = MetricServerConfig::new( listen_addr, - prometheus_exporter::install_recorder()?, - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - metrics_process::Collector::default(), + VersionInfo { + version: CARGO_PKG_VERSION, + build_timestamp: VERGEN_BUILD_TIMESTAMP, + cargo_features: VERGEN_CARGO_FEATURES, + git_sha: VERGEN_GIT_SHA, + target_triple: VERGEN_CARGO_TARGET_TRIPLE, + build_profile: BUILD_PROFILE_NAME, + }, ctx.task_executor, - ) - .await?; + Hooks::new( + provider_factory.db_ref().clone(), + provider_factory.static_file_provider(), + ), + ); + + MetricServer::new(config).serve().await?; } let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index bb8b511e6bb0..ba090935599b 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -25,8 +25,5 @@ rand.workspace = true thiserror.workspace = true eyre.workspace = true -[dev-dependencies] -proptest.workspace = true - [target.'cfg(unix)'.dependencies] libc = "0.2" diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index c708a9a29e07..173af7d6d1f7 100644 --- a/crates/cli/util/src/parsers.rs +++ 
b/crates/cli/util/src/parsers.rs @@ -68,7 +68,7 @@ pub fn parse_socket_address(value: &str) -> eyre::Result::Transaction: IntoRecoveredTransaction, - Engine: EngineTypes + 'static, + Engine: EngineTypes, Executor: BlockExecutorProvider, { type Output = (); diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 4b092bd2feb2..4561f2e5ef74 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -18,6 +18,17 @@ pub enum BeaconConsensusEngineEvent { ForkBlockAdded(Arc), } +impl BeaconConsensusEngineEvent { + /// Returns the canonical header if the event is a + /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. + pub const fn canonical_header(&self) -> Option<&SealedHeader> { + match self { + Self::CanonicalChainCommitted(header, _) => Some(header), + _ => None, + } + } +} + /// Progress of the consensus engine during live sync. #[derive(Clone, Debug)] pub enum ConsensusEngineLiveSyncProgress { diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 0cffc67b3ff1..aee554f8241a 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -87,6 +87,9 @@ where /// Sends a transition configuration exchange message to the beacon consensus engine. /// /// See also + /// + /// This only notifies about the exchange. The actual exchange is done by the engine API impl + /// itself. pub fn transition_configuration_exchanged(&self) { let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); } diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index fbe6bf462bb3..b93138901d03 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -67,7 +67,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub(crate) fn insert(&mut self, invalid_ancestor: SealedHeader) { + pub fn insert(&mut self, invalid_ancestor: SealedHeader) { if self.get(&invalid_ancestor.hash()).is_none() { let hash = invalid_ancestor.hash(); let header = invalid_ancestor.unseal(); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 9673f6205db2..0e9f91ac6585 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -235,7 +235,7 @@ where + ChainSpecProvider + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, - EngineT: EngineTypes + Unpin + 'static, + EngineT: EngineTypes + Unpin, { /// Create a new instance of the [`BeaconConsensusEngine`]. 
#[allow(clippy::too_many_arguments)] @@ -1801,7 +1801,7 @@ where + ChainSpecProvider + Unpin + 'static, - EngineT: EngineTypes + Unpin + 'static, + EngineT: EngineTypes + Unpin, { type Output = Result<(), BeaconConsensusEngineError>; diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 66e8900323be..d66bd6135a62 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -23,7 +23,7 @@ pub struct EngineApiTestContext { pub _marker: PhantomData, } -impl EngineApiTestContext { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 828bc5f32c4f..c29eccef923d 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -4,13 +4,13 @@ use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -pub struct PayloadTestContext { +pub struct PayloadTestContext { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, pub timestamp: u64, } -impl PayloadTestContext { +impl PayloadTestContext { /// Creates a new payload helper pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 8e499bcca60c..77f4b27e21a9 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -2,12 +2,9 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::{ - api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - DebugApiServer, - }, - server_types::eth::EthResult, + rpc::api::{ + eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, + DebugApiServer, }, }; use reth_primitives::{Bytes, B256}; @@ -21,7 +18,7 @@ where EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server - pub async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { + pub async fn inject_tx(&mut self, raw_tx: Bytes) -> Result { let eth_api = self.inner.eth_api(); eth_api.send_raw_transaction(raw_tx).await } diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index b83abc39e6cc..4b0db7c0a14c 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -27,6 +27,7 @@ pub trait EngineTypes: + TryInto, > + DeserializeOwned + Serialize + + 'static { /// Execution Payload V1 type. 
type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 475aa1c45a1f..ad1c0fc18934 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,6 +16,7 @@ reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true reth-chainspec.workspace = true +reth-chain-state.workspace = true reth-consensus.workspace = true reth-db.workspace = true reth-db-api.workspace = true @@ -33,8 +34,8 @@ reth-prune.workspace = true reth-prune-types.workspace = true reth-revm.workspace = true reth-rpc-types.workspace = true -reth-stages-types.workspace = true reth-stages-api.workspace = true +reth-stages-types.workspace = true reth-static-file.workspace = true reth-tasks.workspace = true reth-tokio-util.workspace = true @@ -71,7 +72,11 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-prune.workspace = true reth-prune-types.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } +reth-chain-state = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true +reth-rpc-types-compat.workspace = true + +alloy-rlp.workspace = true assert_matches.workspace = true rand.workspace = true @@ -81,6 +86,7 @@ test-utils = [ "reth-db/test-utils", "reth-network-p2p/test-utils", "reth-stages/test-utils", + "reth-chain-state/test-utils", "reth-tracing", "rand" ] diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index f575bff81234..aa075b6ad099 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -15,6 +15,37 @@ use std::task::{ready, Context, Poll}; use tokio::sync::oneshot; use tracing::trace; +/// Represents the state of the backfill synchronization process. +#[derive(Debug, PartialEq, Eq, Default)] +pub enum BackfillSyncState { + /// The node is not performing any backfill synchronization. + /// This is the initial or default state. + #[default] + Idle, + /// A backfill synchronization has been requested or planned, but processing has not started + /// yet. + Pending, + /// The node is actively engaged in backfill synchronization. + Active, +} + +impl BackfillSyncState { + /// Returns true if the state is idle. + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } + + /// Returns true if the state is pending. + pub const fn is_pending(&self) -> bool { + matches!(self, Self::Pending) + } + + /// Returns true if the state is active. + pub const fn is_active(&self) -> bool { + matches!(self, Self::Active) + } +} + /// Backfill sync mode functionality. pub trait BackfillSync: Send + Sync { /// Performs a backfill action. @@ -34,8 +65,6 @@ pub enum BackfillAction { /// The events that can be emitted on backfill sync. #[derive(Debug)] pub enum BackfillEvent { - /// Backfill sync idle. - Idle, /// Backfill sync started. Started(PipelineTarget), /// Backfill sync finished. 
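A small sketch of how the new `BackfillSyncState` predicates might gate engine work; the caller context is assumed:

fn can_accept_new_payloads(backfill: &BackfillSyncState) -> bool {
    // payload processing should pause while a backfill run is pending or active
    backfill.is_idle()
}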
@@ -141,7 +170,10 @@ where } }; let ev = match res { - Ok((_, result)) => BackfillEvent::Finished(result), + Ok((pipeline, result)) => { + self.pipeline_state = PipelineState::Idle(Some(pipeline)); + BackfillEvent::Finished(result) + } Err(why) => { // failed to receive the pipeline BackfillEvent::TaskDropped(why.to_string()) @@ -168,7 +200,7 @@ where } // make sure we poll the pipeline if it's active, and return any ready pipeline events - if !self.is_pipeline_idle() { + if self.is_pipeline_active() { // advance the pipeline if let Poll::Ready(event) = self.poll_pipeline(cx) { return Poll::Ready(event) diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index 4ac015d23a5a..e77139d3e3ec 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -1,10 +1,11 @@ use crate::backfill::{BackfillAction, BackfillEvent, BackfillSync}; use futures::Stream; -use reth_stages_api::PipelineTarget; +use reth_stages_api::{ControlFlow, PipelineTarget}; use std::{ pin::Pin, task::{Context, Poll}, }; +use tracing::*; /// The type that drives the chain forward. /// @@ -81,7 +82,6 @@ where // try to poll the backfill sync to completion, if active match this.backfill_sync.poll(cx) { Poll::Ready(backfill_sync_event) => match backfill_sync_event { - BackfillEvent::Idle => {} BackfillEvent::Started(_) => { // notify handler that backfill sync started this.handler.on_event(FromOrchestrator::BackfillSyncStarted); @@ -89,10 +89,10 @@ where } BackfillEvent::Finished(res) => { return match res { - Ok(event) => { - tracing::debug!(?event, "backfill sync finished"); + Ok(ctrl) => { + tracing::debug!(?ctrl, "backfill sync finished"); // notify handler that backfill sync finished - this.handler.on_event(FromOrchestrator::BackfillSyncFinished); + this.handler.on_event(FromOrchestrator::BackfillSyncFinished(ctrl)); Poll::Ready(ChainEvent::BackfillSyncFinished) } Err(err) => { @@ -113,15 +113,19 @@ where match this.handler.poll(cx) { Poll::Ready(handler_event) => { match handler_event { - HandlerEvent::BackfillSync(target) => { - // trigger backfill sync and start polling it - this.backfill_sync.on_action(BackfillAction::Start(target)); + HandlerEvent::BackfillAction(action) => { + // forward action to backfill_sync + this.backfill_sync.on_action(action); continue 'outer } HandlerEvent::Event(ev) => { // bubble up the event return Poll::Ready(ChainEvent::Handler(ev)); } + HandlerEvent::FatalError => { + error!(target: "engine::tree", "Fatal error"); + return Poll::Ready(ChainEvent::FatalError) + } } } Poll::Pending => { @@ -147,14 +151,6 @@ where } } -/// Represents the sync mode the chain is operating in. -#[derive(Debug, Default)] -enum SyncMode { - #[default] - Handler, - Backfill, -} - /// Event emitted by the [`ChainOrchestrator`] /// /// These are meant to be used for observability and debugging purposes. @@ -173,6 +169,14 @@ pub enum ChainEvent { /// A trait that advances the chain by handling actions. /// /// This is intended to be implement the chain consensus logic, for example `engine` API. +/// +/// ## Control flow +/// +/// The [`ChainOrchestrator`] is responsible for advancing this handler through +/// [`ChainHandler::poll`] and handling the emitted events, for example +/// [`HandlerEvent::BackfillAction`] to start a backfill sync. Events from the [`ChainOrchestrator`] +/// are passed to the handler via [`ChainHandler::on_event`], e.g. +/// [`FromOrchestrator::BackfillSyncStarted`] once the backfill sync started or finished. 
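A simplified sketch of the dispatch described above, under the assumption that `ChainEvent` is generic over the handler's event type as the poll loop suggests:

fn dispatch<T>(
    event: HandlerEvent<T>,
    backfill_sync: &mut impl BackfillSync,
) -> Option<ChainEvent<T>> {
    match event {
        // forward backfill requests to the backfill sync
        HandlerEvent::BackfillAction(action) => {
            backfill_sync.on_action(action);
            None
        }
        // bubble handler events up to the orchestrator's caller
        HandlerEvent::Event(ev) => Some(ChainEvent::Handler(ev)),
        HandlerEvent::FatalError => Some(ChainEvent::FatalError),
    }
}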
pub trait ChainHandler: Send + Sync { /// Event generated by this handler that orchestrator can bubble up; type Event: Send; @@ -187,39 +191,19 @@ pub trait ChainHandler: Send + Sync { /// Events/Requests that the [`ChainHandler`] can emit to the [`ChainOrchestrator`]. #[derive(Clone, Debug)] pub enum HandlerEvent { - /// Request to start a backfill sync - BackfillSync(PipelineTarget), + /// Request an action to backfill sync + BackfillAction(BackfillAction), /// Other event emitted by the handler Event(T), + // Fatal error + FatalError, } /// Internal events issued by the [`ChainOrchestrator`]. #[derive(Clone, Debug)] pub enum FromOrchestrator { /// Invoked when backfill sync finished - BackfillSyncFinished, + BackfillSyncFinished(ControlFlow), /// Invoked when backfill sync started BackfillSyncStarted, } - -/// Represents the state of the chain. -#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] -pub enum OrchestratorState { - /// Orchestrator has exclusive write access to the database. - BackfillSyncActive, - /// Node is actively processing the chain. - #[default] - Idle, -} - -impl OrchestratorState { - /// Returns `true` if the state is [`OrchestratorState::BackfillSyncActive`]. - pub const fn is_backfill_sync_active(&self) -> bool { - matches!(self, Self::BackfillSyncActive) - } - - /// Returns `true` if the state is [`OrchestratorState::Idle`]. - pub const fn is_idle(&self) -> bool { - matches!(self, Self::Idle) - } -} diff --git a/crates/engine/tree/src/database.rs b/crates/engine/tree/src/database.rs deleted file mode 100644 index e9b62111ab8d..000000000000 --- a/crates/engine/tree/src/database.rs +++ /dev/null @@ -1,261 +0,0 @@ -#![allow(dead_code)] - -use crate::{ - static_files::{StaticFileAction, StaticFileServiceHandle}, - tree::ExecutedBlock, -}; -use reth_db::database::Database; -use reth_errors::ProviderResult; -use reth_primitives::B256; -use reth_provider::{ - writer::StorageWriter, BlockExecutionWriter, BlockNumReader, BlockWriter, HistoryWriter, - OriginalValuesKnown, ProviderFactory, StageCheckpointWriter, StateWriter, -}; -use reth_prune::{Pruner, PrunerOutput}; -use reth_stages_types::{StageCheckpoint, StageId}; -use std::sync::mpsc::{Receiver, SendError, Sender}; -use tokio::sync::oneshot; -use tracing::debug; - -/// Writes parts of reth's in memory tree state to the database. -/// -/// This is meant to be a spawned service that listens for various incoming database operations, -/// performing those actions on disk, and returning the result in a channel. -/// -/// There are two types of operations this service can perform: -/// - Writing executed blocks to disk, returning the hash of the latest block that was inserted. -/// - Removing blocks from disk, returning the hash of the lowest block removed. -/// -/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs -/// blocking database operations in an endless loop. -#[derive(Debug)] -pub struct DatabaseService { - /// The db / static file provider to use - provider: ProviderFactory, - /// Incoming requests to persist stuff - incoming: Receiver, - /// Handle for the static file service. 
- static_file_handle: StaticFileServiceHandle, - /// The pruner - pruner: Pruner>, -} - -impl DatabaseService { - /// Create a new database service - pub const fn new( - provider: ProviderFactory, - incoming: Receiver, - static_file_handle: StaticFileServiceHandle, - pruner: Pruner>, - ) -> Self { - Self { provider, incoming, static_file_handle, pruner } - } - - /// Writes the cloned tree state to the database - fn write(&self, blocks: Vec) -> ProviderResult<()> { - let provider_rw = self.provider.provider_rw()?; - - if blocks.is_empty() { - debug!(target: "tree::persistence::db", "Attempted to write empty block range"); - return Ok(()) - } - - let first_number = blocks.first().unwrap().block().number; - - let last = blocks.last().unwrap().block(); - let last_block_number = last.number; - - // TODO: remove all the clones and do performant / batched writes for each type of object - // instead of a loop over all blocks, - // meaning: - // * blocks - // * state - // * hashed state - // * trie updates (cannot naively extend, need helper) - // * indices (already done basically) - // Insert the blocks - for block in blocks { - let sealed_block = - block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); - provider_rw.insert_block(sealed_block)?; - - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - let execution_outcome = block.execution_outcome().clone(); - // TODO: use single storage writer in task when sf / db tasks are combined - execution_outcome.write_to_storage(&provider_rw, None, OriginalValuesKnown::No)?; - - // insert hashes and intermediate merkle nodes - { - let trie_updates = block.trie_updates().clone(); - let hashed_state = block.hashed_state(); - // TODO: use single storage writer in task when sf / db tasks are combined - let storage_writer = StorageWriter::new(Some(&provider_rw), None); - storage_writer.write_hashed_state(&hashed_state.clone().into_sorted())?; - trie_updates.write_to_database(provider_rw.tx_ref())?; - } - - // update history indices - provider_rw.update_history_indices(first_number..=last_block_number)?; - - // Update pipeline progress - provider_rw.update_pipeline_stages(last_block_number, false)?; - } - - debug!(target: "tree::persistence::db", range = ?first_number..=last_block_number, "Appended blocks"); - - Ok(()) - } - - /// Removes block data above the given block number from the database. - /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove - /// `block_number`. - /// - /// This will then send a command to the static file service, to remove the actual block data. - fn remove_blocks_above( - &self, - block_number: u64, - sender: oneshot::Sender<()>, - ) -> ProviderResult<()> { - let provider_rw = self.provider.provider_rw()?; - let highest_block = self.provider.last_block_number()?; - provider_rw.remove_block_and_execution_range(block_number..=highest_block)?; - - // send a command to the static file service to also remove blocks - let _ = self - .static_file_handle - .send_action(StaticFileAction::RemoveBlocksAbove((block_number, sender))); - Ok(()) - } - - /// Prunes block data before the given block hash according to the configured prune - /// configuration. - fn prune_before(&mut self, block_num: u64) -> PrunerOutput { - // TODO: doing this properly depends on pruner segment changes - self.pruner.run(block_num).expect("todo: handle errors") - } - - /// Updates checkpoints related to block headers and bodies. 
This should be called by the static - /// file service, after new transactions have been successfully written to disk. - fn update_transaction_meta(&self, block_num: u64) -> ProviderResult<()> { - let provider_rw = self.provider.provider_rw()?; - provider_rw.save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block_num))?; - provider_rw.save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block_num))?; - provider_rw.commit()?; - Ok(()) - } -} - -impl DatabaseService -where - DB: Database, -{ - /// This is the main loop, that will listen to database events and perform the requested - /// database actions - pub fn run(mut self) { - // If the receiver errors then senders have disconnected, so the loop should then end. - while let Ok(action) = self.incoming.recv() { - match action { - DatabaseAction::RemoveBlocksAbove((new_tip_num, sender)) => { - self.remove_blocks_above(new_tip_num, sender).expect("todo: handle errors"); - } - DatabaseAction::SaveBlocks((blocks, sender)) => { - if blocks.is_empty() { - todo!("return error or something"); - } - let last_block_hash = blocks.last().unwrap().block().hash(); - self.write(blocks).unwrap(); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(last_block_hash); - } - DatabaseAction::PruneBefore((block_num, sender)) => { - let res = self.prune_before(block_num); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(res); - } - DatabaseAction::UpdateTransactionMeta((block_num, sender)) => { - self.update_transaction_meta(block_num).expect("todo: handle errors"); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(()); - } - } - } - } -} - -/// A signal to the database service that part of the tree state can be persisted. -#[derive(Debug)] -pub enum DatabaseAction { - /// The section of tree state that should be persisted. These blocks are expected in order of - /// increasing block number. - /// - /// This should just store the execution history-related data. Header, transaction, and - /// receipt-related data should already be written to static files. - SaveBlocks((Vec, oneshot::Sender)), - - /// Updates checkpoints related to block headers and bodies. This should be called by the - /// static file service, after new transactions have been successfully written to disk. - UpdateTransactionMeta((u64, oneshot::Sender<()>)), - - /// Removes block data above the given block number from the database. - /// - /// This will then send a command to the static file service, to remove the actual block data. - RemoveBlocksAbove((u64, oneshot::Sender<()>)), - - /// Prune associated block data before the given block number, according to already-configured - /// prune modes. - PruneBefore((u64, oneshot::Sender)), -} - -/// A handle to the database service -#[derive(Debug, Clone)] -pub struct DatabaseServiceHandle { - /// The channel used to communicate with the database service - sender: Sender, -} - -impl DatabaseServiceHandle { - /// Create a new [`DatabaseServiceHandle`] from a [`Sender`]. - pub const fn new(sender: Sender) -> Self { - Self { sender } - } - - /// Sends a specific [`DatabaseAction`] in the contained channel. The caller is responsible - /// for creating any channels for the given action. - pub fn send_action(&self, action: DatabaseAction) -> Result<(), SendError> { - self.sender.send(action) - } - - /// Tells the database service to save a certain list of finalized blocks. 
The blocks are - /// assumed to be ordered by block number. - /// - /// This returns the latest hash that has been saved, allowing removal of that block and any - /// previous blocks from in-memory data structures. - pub async fn save_blocks(&self, blocks: Vec) -> B256 { - let (tx, rx) = oneshot::channel(); - self.sender.send(DatabaseAction::SaveBlocks((blocks, tx))).expect("should be able to send"); - rx.await.expect("todo: err handling") - } - - /// Tells the database service to remove blocks above a certain block number. - pub async fn remove_blocks_above(&self, block_num: u64) { - let (tx, rx) = oneshot::channel(); - self.sender - .send(DatabaseAction::RemoveBlocksAbove((block_num, tx))) - .expect("should be able to send"); - rx.await.expect("todo: err handling") - } - - /// Tells the database service to remove block data before the given hash, according to the - /// configured prune config. - pub async fn prune_before(&self, block_num: u64) -> PrunerOutput { - let (tx, rx) = oneshot::channel(); - self.sender - .send(DatabaseAction::PruneBefore((block_num, tx))) - .expect("should be able to send"); - rx.await.expect("todo: err handling") - } -} diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 9b965e892268..bd4c220565a2 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -1,18 +1,18 @@ //! An engine API handler for the chain. use crate::{ + backfill::BackfillAction, chain::{ChainHandler, FromOrchestrator, HandlerEvent}, download::{BlockDownloader, DownloadAction, DownloadOutcome}, - tree::TreeEvent, }; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_engine_primitives::EngineTypes; use reth_primitives::{SealedBlockWithSenders, B256}; use std::{ collections::HashSet, sync::mpsc::Sender, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; use tokio::sync::mpsc::UnboundedReceiver; @@ -27,6 +27,8 @@ use tokio::sync::mpsc::UnboundedReceiver; /// received from the CL to the handler. /// /// It is responsible for handling the following: +/// - Delegating incoming requests to the [`EngineRequestHandler`]. +/// - Advancing the [`EngineRequestHandler`] by polling it and emitting events. /// - Downloading blocks on demand from the network if requested by the [`EngineApiRequestHandler`]. /// /// The core logic is part of the [`EngineRequestHandler`], which is responsible for processing the @@ -71,18 +73,18 @@ where // drain the handler first while let Poll::Ready(ev) = self.handler.poll(cx) { match ev { - RequestHandlerEvent::Idle => break, RequestHandlerEvent::HandlerEvent(ev) => { return match ev { - HandlerEvent::BackfillSync(target) => { + HandlerEvent::BackfillAction(target) => { // bubble up backfill sync request request self.downloader.on_action(DownloadAction::Clear); - Poll::Ready(HandlerEvent::BackfillSync(target)) + Poll::Ready(HandlerEvent::BackfillAction(target)) } HandlerEvent::Event(ev) => { // bubble up the event Poll::Ready(HandlerEvent::Event(ev)) } + HandlerEvent::FatalError => Poll::Ready(HandlerEvent::FatalError), } } RequestHandlerEvent::Download(req) => { @@ -112,7 +114,14 @@ where } } -/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API) +/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API, +/// such as newPayload). 
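+///
+/// As a rough sketch of the request flow described under `Control flow` below (hypothetical
+/// driver code, not part of this patch; names are from this module):
+///
+/// ```ignore
+/// handler.on_event(FromEngine::Request(request));
+/// match handler.poll(cx) {
+///     Poll::Ready(RequestHandlerEvent::HandlerEvent(ev)) => { /* bubble up to orchestrator */ }
+///     Poll::Ready(RequestHandlerEvent::Download(req)) => { /* ask the downloader for blocks */ }
+///     Poll::Pending => { /* nothing ready yet */ }
+/// }
+/// ```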
+///
+/// ## Control flow
+///
+/// Requests and certain updates, such as a change in backfill sync status, are delegated to this
+/// type via [`EngineRequestHandler::on_event`]. This type is responsible for processing
+/// incoming requests, advancing the chain, and emitting events when it is polled.
 pub trait EngineRequestHandler: Send + Sync {
     /// Event type this handler can emit
     type Event: Send;
@@ -169,7 +178,7 @@ impl<T> EngineRequestHandler for EngineApiRequestHandler<T>
 where
     T: EngineTypes,
 {
-    type Event = EngineApiEvent;
+    type Event = BeaconConsensusEngineEvent;
     type Request = BeaconEngineMessage<T>;
 
     fn on_event(&mut self, event: FromEngine<Self::Request>) {
@@ -178,15 +187,39 @@ where
     }
 
     fn poll(&mut self, cx: &mut Context<'_>) -> Poll<RequestHandlerEvent<Self::Event>> {
-        todo!("poll tree")
+        let Some(ev) = ready!(self.from_tree.poll_recv(cx)) else {
+            return Poll::Ready(RequestHandlerEvent::HandlerEvent(HandlerEvent::FatalError))
+        };
+
+        let ev = match ev {
+            EngineApiEvent::BeaconConsensus(ev) => {
+                RequestHandlerEvent::HandlerEvent(HandlerEvent::Event(ev))
+            }
+            EngineApiEvent::BackfillAction(action) => {
+                RequestHandlerEvent::HandlerEvent(HandlerEvent::BackfillAction(action))
+            }
+            EngineApiEvent::Download(action) => RequestHandlerEvent::Download(action),
+        };
+        Poll::Ready(ev)
     }
 }
 
 /// Events emitted by the engine API handler.
 #[derive(Debug)]
 pub enum EngineApiEvent {
-    /// Bubbled from tree.
-    FromTree(TreeEvent),
+    /// Event from the consensus engine.
+    // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out.
+    BeaconConsensus(BeaconConsensusEngineEvent),
+    /// Backfill action is needed.
+    BackfillAction(BackfillAction),
+    /// Block download is needed.
+    Download(DownloadRequest),
+}
+
+impl From<BeaconConsensusEngineEvent> for EngineApiEvent {
+    fn from(event: BeaconConsensusEngineEvent) -> Self {
+        Self::BeaconConsensus(event)
+    }
 }
 
 #[derive(Debug)]
@@ -208,8 +241,6 @@ impl<Req> From<Req> for FromEngine<Req> {
 /// Requests produced by a [`EngineRequestHandler`].
 #[derive(Debug)]
 pub enum RequestHandlerEvent<T> {
-    /// The handler is idle.
-    Idle,
     /// An event emitted by the handler.
     HandlerEvent(HandlerEvent<T>),
     /// Request to download blocks.
@@ -224,3 +255,10 @@ pub enum DownloadRequest {
     /// Download the given range of blocks.
     BlockRange(B256, u64),
 }
+
+impl DownloadRequest {
+    /// Returns a [`DownloadRequest`] for a single block.
+    pub fn single_block(hash: B256) -> Self {
+        Self::BlockSet(HashSet::from([hash]))
+    }
+}
diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs
index b4ac74992c21..d238bf879ca6 100644
--- a/crates/engine/tree/src/lib.rs
+++ b/crates/engine/tree/src/lib.rs
@@ -20,18 +20,14 @@ pub use reth_blockchain_tree_api::*;
 pub mod backfill;
 /// The type that drives the chain forward.
 pub mod chain;
-/// The background writer service for batch db writes.
-pub mod database;
 /// Support for downloading blocks on demand for live sync.
 pub mod download;
 /// Engine Api chain handler support.
 pub mod engine;
 /// Metrics support.
 pub mod metrics;
-/// The background writer service, coordinating the static file and database services.
+/// The background writer service, coordinating write operations on static files and the database.
 pub mod persistence;
-/// The background writer service for static file writes.
-pub mod static_files;
 /// Support for interacting with the blockchain tree.
pub mod tree; diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index b3e73ffcfdcf..2a5baf76d3d4 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,35 +1,317 @@ #![allow(dead_code)] -use crate::{ - database::{DatabaseAction, DatabaseService, DatabaseServiceHandle}, - static_files::{StaticFileAction, StaticFileService, StaticFileServiceHandle}, - tree::ExecutedBlock, -}; +use reth_chain_state::ExecutedBlock; use reth_db::Database; -use reth_primitives::{SealedBlock, B256, U256}; -use reth_provider::ProviderFactory; +use reth_errors::ProviderResult; +use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash, B256}; +use reth_provider::{ + writer::StorageWriter, BlockExecutionWriter, BlockNumReader, BlockWriter, HistoryWriter, + OriginalValuesKnown, ProviderFactory, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StaticFileWriter, TransactionsProviderExt, +}; use reth_prune::{Pruner, PrunerOutput}; +use reth_stages_types::{StageCheckpoint, StageId}; use std::sync::{ - mpsc::{SendError, Sender}, + mpsc::{Receiver, SendError, Sender}, Arc, }; use tokio::sync::oneshot; +use tracing::debug; -/// A signal to the database and static file services that part of the tree state can be persisted. +/// Writes parts of reth's in memory tree state to the database and static files. +/// +/// This is meant to be a spawned service that listens for various incoming persistence operations, +/// performing those actions on disk, and returning the result in a channel. +/// +/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs +/// blocking I/O operations in an endless loop. #[derive(Debug)] -pub enum PersistenceAction { - /// The given block has been added to the canonical chain, its transactions and headers will be - /// persisted for durability. - LogTransactions((Arc, u64, U256, oneshot::Sender<()>)), +pub struct PersistenceService { + /// The provider factory to use + provider: ProviderFactory, + /// Incoming requests + incoming: Receiver, + /// The pruner + pruner: Pruner>, +} + +impl PersistenceService { + /// Create a new persistence service + pub const fn new( + provider: ProviderFactory, + incoming: Receiver, + pruner: Pruner>, + ) -> Self { + Self { provider, incoming, pruner } + } + + /// Writes the cloned tree state to database + fn write(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { + debug!(target: "tree::persistence", "Writing blocks to database"); + let provider_rw = self.provider.provider_rw()?; + + if blocks.is_empty() { + debug!(target: "tree::persistence", "Attempted to write empty block range"); + return Ok(()) + } + + let first_number = blocks.first().unwrap().block().number; + + let last = blocks.last().unwrap().block(); + let last_block_number = last.number; + + // TODO: remove all the clones and do performant / batched writes for each type of object + // instead of a loop over all blocks, + // meaning: + // * blocks + // * state + // * hashed state + // * trie updates (cannot naively extend, need helper) + // * indices (already done basically) + // Insert the blocks + for block in blocks { + let sealed_block = + block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); + provider_rw.insert_block(sealed_block)?; + + // Write state and changesets to the database. + // Must be written after blocks because of the receipt lookup. 
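+            // Note: receipts themselves were already appended to static files by
+            // `write_execution_data`, which the service runs before this method (see the
+            // `PersistenceAction::SaveBlocks` handling in `run`).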
+            let execution_outcome = block.execution_outcome().clone();
+            // TODO: do we provide a static file producer here?
+            let mut storage_writer = StorageWriter::new(Some(&provider_rw), None);
+            storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?;
+
+            // insert hashes and intermediate merkle nodes
+            {
+                let trie_updates = block.trie_updates().clone();
+                let hashed_state = block.hashed_state();
+                storage_writer.write_hashed_state(&hashed_state.clone().into_sorted())?;
+                storage_writer.write_trie_updates(&trie_updates)?;
+            }
+
+            // update history indices
+            provider_rw.update_history_indices(first_number..=last_block_number)?;
+
+            // Update pipeline progress
+            provider_rw.update_pipeline_stages(last_block_number, false)?;
+        }
+
+        debug!(target: "tree::persistence", range = ?first_number..=last_block_number, "Appended block data");
+
+        Ok(())
+    }
+
+    /// Removes block data above the given block number from the database.
+    /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove
+    /// `block_number`.
+    ///
+    /// The corresponding block data is removed from static files afterwards via
+    /// [`remove_static_file_blocks_above`](Self::remove_static_file_blocks_above).
+    fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> {
+        debug!(target: "tree::persistence", ?block_number, "Removing blocks from database above block_number");
+        let provider_rw = self.provider.provider_rw()?;
+        let highest_block = self.provider.last_block_number()?;
+        provider_rw.remove_block_and_execution_range(block_number..=highest_block)?;
+
+        Ok(())
+    }
+
+    /// Prunes block data before the given block number according to the configured prune
+    /// configuration.
+    fn prune_before(&mut self, block_num: u64) -> PrunerOutput {
+        debug!(target: "tree::persistence", ?block_num, "Running pruner");
+        // TODO: doing this properly depends on pruner segment changes
+        self.pruner.run(block_num).expect("todo: handle errors")
+    }
+
+    /// Updates checkpoints related to block headers and bodies. This should be called after new
+    /// transactions have been successfully written to disk.
+    fn update_transaction_meta(&self, block_num: u64) -> ProviderResult<()> {
+        debug!(target: "tree::persistence", ?block_num, "Updating transaction metadata after writing");
+        let provider_rw = self.provider.provider_rw()?;
+        provider_rw.save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block_num))?;
+        provider_rw.save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block_num))?;
+        provider_rw.commit()?;
+        Ok(())
+    }
+
+    /// Writes the transactions to static files.
+    ///
+    /// The [`update_transaction_meta`](Self::update_transaction_meta) method should be called
+    /// after this, to update the checkpoints for headers and block bodies.
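+    ///
+    /// A hypothetical call sequence (error handling elided):
+    ///
+    /// ```ignore
+    /// let block_num = service.write_transactions(block)?;
+    /// service.update_transaction_meta(block_num)?;
+    /// ```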
+    fn write_transactions(&self, block: Arc<SealedBlock>) -> ProviderResult<u64> {
+        debug!(target: "tree::persistence", "Writing transactions");
+        let provider = self.provider.static_file_provider();
+
+        let header_writer = provider.get_writer(block.number, StaticFileSegment::Headers)?;
+        let provider_ro = self.provider.provider()?;
+        let mut storage_writer = StorageWriter::new(Some(&provider_ro), Some(header_writer));
+        storage_writer.append_headers_from_blocks(
+            block.header().number,
+            std::iter::once(&(block.header(), block.hash())),
+        )?;
+
+        let transactions_writer =
+            provider.get_writer(block.number, StaticFileSegment::Transactions)?;
+        let mut storage_writer = StorageWriter::new(Some(&provider_ro), Some(transactions_writer));
+        let no_hash_transactions =
+            block.body.clone().into_iter().map(TransactionSignedNoHash::from).collect();
+        storage_writer.append_transactions_from_blocks(
+            block.header().number,
+            std::iter::once(&no_hash_transactions),
+        )?;
+
+        Ok(block.number)
+    }
+
+    /// Write execution-related block data to static files.
+    ///
+    /// The execution history-related data is written to the database afterwards via
+    /// [`write`](Self::write); see the [`PersistenceAction::SaveBlocks`] handling in `run`.
+    fn write_execution_data(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> {
+        if blocks.is_empty() {
+            return Ok(())
+        }
+        let provider_rw = self.provider.provider_rw()?;
+        let provider = self.provider.static_file_provider();
+
+        // NOTE: checked non-empty above
+        let first_block = blocks.first().unwrap().block();
+        let last_block = blocks.last().unwrap().block().clone();
+
+        // use the storage writer
+        let current_block = first_block.number;
+        debug!(target: "tree::persistence", len=blocks.len(), ?current_block, "Writing execution data to static files");
+
+        let receipts_writer =
+            provider.get_writer(first_block.number, StaticFileSegment::Receipts)?;
+        let mut storage_writer = StorageWriter::new(Some(&provider_rw), Some(receipts_writer));
+        let receipts_iter = blocks.iter().map(|block| {
+            let receipts = block.execution_outcome().receipts().receipt_vec.clone();
+            debug_assert!(receipts.len() == 1);
+            receipts.first().unwrap().clone()
+        });
+        storage_writer.append_receipts_from_blocks(current_block, receipts_iter)?;
+
+        Ok(())
+    }
+
+    /// Removes the blocks above the given block number from static files. Also removes related
+    /// receipt and header data.
+    ///
+    /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove
+    /// `block_number`.
+    ///
+    /// This is only called after the related data has been removed from the database, and the
+    /// checkpoints have been updated.
+ fn remove_static_file_blocks_above(&self, block_number: u64) -> ProviderResult<()> { + debug!(target: "tree::persistence", ?block_number, "Removing static file blocks above block_number"); + let sf_provider = self.provider.static_file_provider(); + let db_provider_ro = self.provider.provider()?; + + // get highest static file block for the total block range + let highest_static_file_block = sf_provider + .get_highest_static_file_block(StaticFileSegment::Headers) + .expect("todo: error handling, headers should exist"); + + // Get the total txs for the block range, so we have the correct number of columns for + // receipts and transactions + let tx_range = db_provider_ro + .transaction_range_by_block_range(block_number..=highest_static_file_block)?; + let total_txs = tx_range.end().saturating_sub(*tx_range.start()); + + // get the writers + let mut header_writer = sf_provider.get_writer(block_number, StaticFileSegment::Headers)?; + let mut transactions_writer = + sf_provider.get_writer(block_number, StaticFileSegment::Transactions)?; + let mut receipts_writer = + sf_provider.get_writer(block_number, StaticFileSegment::Receipts)?; + + // finally actually truncate, these internally commit + receipts_writer.prune_receipts(total_txs, block_number)?; + transactions_writer.prune_transactions(total_txs, block_number)?; + header_writer.prune_headers(highest_static_file_block.saturating_sub(block_number))?; + + sf_provider.commit()?; + + Ok(()) + } +} + +impl PersistenceService +where + DB: Database, +{ + /// This is the main loop, that will listen to database events and perform the requested + /// database actions + pub fn run(mut self) { + // If the receiver errors then senders have disconnected, so the loop should then end. + while let Ok(action) = self.incoming.recv() { + match action { + PersistenceAction::RemoveBlocksAbove((new_tip_num, sender)) => { + self.remove_blocks_above(new_tip_num).expect("todo: handle errors"); + self.remove_static_file_blocks_above(new_tip_num).expect("todo: handle errors"); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(()); + } + PersistenceAction::SaveBlocks((blocks, sender)) => { + if blocks.is_empty() { + todo!("return error or something"); + } + let last_block_hash = blocks.last().unwrap().block().hash(); + // first write to static files + self.write_execution_data(&blocks).expect("todo: handle errors"); + // then write to db + self.write(&blocks).expect("todo: handle errors"); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(last_block_hash); + } + PersistenceAction::PruneBefore((block_num, sender)) => { + let res = self.prune_before(block_num); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(res); + } + PersistenceAction::WriteTransactions((block, sender)) => { + let block_num = self.write_transactions(block).expect("todo: handle errors"); + self.update_transaction_meta(block_num).expect("todo: handle errors"); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(()); + } + } + } + } +} + +/// A signal to the persistence service that part of the tree state can be persisted. +#[derive(Debug)] +pub enum PersistenceAction { /// The section of tree state that should be persisted. These blocks are expected in order of /// increasing block number. /// - /// This should just store the execution history-related data. 
Header, transaction, and - /// receipt-related data should already be written to static files. + /// First, header, transaction, and receipt-related data should be written to static files. + /// Then the execution history-related data will be written to the database. SaveBlocks((Vec, oneshot::Sender)), + /// The given block has been added to the canonical chain, its transactions and headers will be + /// persisted for durability. + /// + /// This will first append the header and transactions to static files, then update the + /// checkpoints for headers and block bodies in the database. + WriteTransactions((Arc, oneshot::Sender<()>)), + /// Removes block data above the given block number from the database. + /// + /// This will first update checkpoints from the database, then remove actual block data from + /// static files. RemoveBlocksAbove((u64, oneshot::Sender<()>)), /// Prune associated block data before the given block number, according to already-configured @@ -37,87 +319,35 @@ pub enum PersistenceAction { PruneBefore((u64, oneshot::Sender)), } -/// An error type for when there is a [`SendError`] while sending an action to one of the services. -#[derive(Debug)] -pub enum PersistenceSendError { - /// When there is an error sending to the static file service - StaticFile(SendError), - /// When there is an error sending to the database service - Database(SendError), -} - -impl From> for PersistenceSendError { - fn from(value: SendError) -> Self { - Self::StaticFile(value) - } -} - -impl From> for PersistenceSendError { - fn from(value: SendError) -> Self { - Self::Database(value) - } -} - -/// A handle to the database and static file services. This will send commands to the correct -/// service, depending on the command. -/// -/// Some commands should be sent to the database service, and others should be sent to the static -/// file service, despite having the same name. This is because some actions require work to be done -/// by both the static file _and_ the database service, and require some coordination. -/// -/// This type is what actually coordinates the two services, and should be used by consumers of the -/// persistence related services. +/// A handle to the persistence service #[derive(Debug, Clone)] pub struct PersistenceHandle { - /// The channel used to communicate with the database service - db_sender: Sender, - /// The channel used to communicate with the static file service - static_file_sender: Sender, + /// The channel used to communicate with the persistence service + sender: Sender, } impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a [`Sender`]. - pub const fn new( - db_sender: Sender, - static_file_sender: Sender, - ) -> Self { - Self { db_sender, static_file_sender } + pub const fn new(sender: Sender) -> Self { + Self { sender } } - /// Create a new [`PersistenceHandle`], and spawn the database and static file services. - pub fn spawn_services( + /// Create a new [`PersistenceHandle`], and spawn the persistence service. 
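+    ///
+    /// A minimal sketch (assumes `provider_factory`, `pruner` and `blocks` are built elsewhere):
+    ///
+    /// ```ignore
+    /// let persistence = PersistenceHandle::spawn_service(provider_factory, pruner);
+    /// let (tx, rx) = tokio::sync::oneshot::channel();
+    /// persistence.save_blocks(blocks, tx);
+    /// let last_persisted_hash = rx.await?;
+    /// ```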
+ pub fn spawn_service( provider_factory: ProviderFactory, pruner: Pruner>, ) -> Self { // create the initial channels - let (static_file_service_tx, static_file_service_rx) = std::sync::mpsc::channel(); let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); // construct persistence handle - let persistence_handle = Self::new(db_service_tx.clone(), static_file_service_tx.clone()); - - // construct handles for the services to talk to each other - let static_file_handle = StaticFileServiceHandle::new(static_file_service_tx); - let database_handle = DatabaseServiceHandle::new(db_service_tx); - - // spawn the db service - let db_service = DatabaseService::new( - provider_factory.clone(), - db_service_rx, - static_file_handle, - pruner, - ); - std::thread::Builder::new() - .name("Database Service".to_string()) - .spawn(|| db_service.run()) - .unwrap(); + let persistence_handle = Self::new(db_service_tx); - // spawn the static file service - let static_file_service = - StaticFileService::new(provider_factory, static_file_service_rx, database_handle); + // spawn the persistence service + let db_service = PersistenceService::new(provider_factory, db_service_rx, pruner); std::thread::Builder::new() - .name("Static File Service".to_string()) - .spawn(|| static_file_service.run()) + .name("Persistence Service".to_string()) + .spawn(|| db_service.run()) .unwrap(); persistence_handle @@ -125,23 +355,11 @@ impl PersistenceHandle { /// Sends a specific [`PersistenceAction`] in the contained channel. The caller is responsible /// for creating any channels for the given action. - pub fn send_action(&self, action: PersistenceAction) -> Result<(), PersistenceSendError> { - match action { - PersistenceAction::LogTransactions(input) => self - .static_file_sender - .send(StaticFileAction::LogTransactions(input)) - .map_err(From::from), - PersistenceAction::SaveBlocks(input) => self - .static_file_sender - .send(StaticFileAction::WriteExecutionData(input)) - .map_err(From::from), - PersistenceAction::RemoveBlocksAbove(input) => { - self.db_sender.send(DatabaseAction::RemoveBlocksAbove(input)).map_err(From::from) - } - PersistenceAction::PruneBefore(input) => { - self.db_sender.send(DatabaseAction::PruneBefore(input)).map_err(From::from) - } - } + pub fn send_action( + &self, + action: PersistenceAction, + ) -> Result<(), SendError> { + self.sender.send(action) } /// Tells the persistence service to save a certain list of finalized blocks. 
The blocks are @@ -181,7 +399,7 @@ impl PersistenceHandle { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{get_executed_block_with_number, get_executed_blocks}; + use reth_chain_state::test_utils::{get_executed_block_with_number, get_executed_blocks}; use reth_exex_types::FinishedExExHeight; use reth_primitives::B256; use reth_provider::{test_utils::create_test_provider_factory, ProviderFactory}; @@ -202,11 +420,12 @@ mod tests { finished_exex_height_rx, ); - PersistenceHandle::spawn_services(provider, pruner) + PersistenceHandle::spawn_service(provider, pruner) } #[tokio::test] async fn test_save_blocks_empty() { + reth_tracing::init_test_tracing(); let persistence_handle = default_persistence_handle(); let blocks = vec![]; @@ -220,9 +439,10 @@ mod tests { #[tokio::test] async fn test_save_blocks_single_block() { + reth_tracing::init_test_tracing(); let persistence_handle = default_persistence_handle(); let block_number = 0; - let executed = get_executed_block_with_number(block_number); + let executed = get_executed_block_with_number(block_number, B256::random()); let block_hash = executed.block().hash(); let blocks = vec![executed]; @@ -236,6 +456,7 @@ mod tests { #[tokio::test] async fn test_save_blocks_multiple_blocks() { + reth_tracing::init_test_tracing(); let persistence_handle = default_persistence_handle(); let blocks = get_executed_blocks(0..5).collect::>(); @@ -250,6 +471,7 @@ mod tests { #[tokio::test] async fn test_save_blocks_multiple_calls() { + reth_tracing::init_test_tracing(); let persistence_handle = default_persistence_handle(); let ranges = [0..1, 1..2, 2..4, 4..5]; diff --git a/crates/engine/tree/src/static_files.rs b/crates/engine/tree/src/static_files.rs deleted file mode 100644 index fcdf0292bd3d..000000000000 --- a/crates/engine/tree/src/static_files.rs +++ /dev/null @@ -1,272 +0,0 @@ -#![allow(dead_code)] - -use reth_db::database::Database; -use reth_errors::ProviderResult; -use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash, B256, U256}; -use reth_provider::{ - ProviderFactory, StaticFileProviderFactory, StaticFileWriter, TransactionsProviderExt, -}; -use std::sync::{ - mpsc::{Receiver, SendError, Sender}, - Arc, -}; -use tokio::sync::oneshot; - -use crate::{ - database::{DatabaseAction, DatabaseServiceHandle}, - tree::ExecutedBlock, -}; - -/// Writes finalized blocks to reth's static files. -/// -/// This is meant to be a spawned service that listens for various incoming finalization operations, -/// and writing to or producing new static files. -/// -/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs -/// blocking file operations in an endless loop. -#[derive(Debug)] -pub struct StaticFileService { - /// The db / static file provider to use - provider: ProviderFactory, - /// Handle for the database service - database_handle: DatabaseServiceHandle, - /// Incoming requests to write static files - incoming: Receiver, -} - -impl StaticFileService -where - DB: Database + 'static, -{ - /// Create a new static file service. - pub const fn new( - provider: ProviderFactory, - incoming: Receiver, - database_handle: DatabaseServiceHandle, - ) -> Self { - Self { provider, database_handle, incoming } - } - - // TODO: some things about this are a bit weird, and just to make the underlying static file - // writes work - tx number, total difficulty inclusion. They require either additional in memory - // data or a db lookup. 
Maybe we can use a db read here - /// Writes the transactions to static files, to act as a log. - /// - /// This will then send a command to the db service, that it should update the checkpoints for - /// headers and block bodies. - fn log_transactions( - &self, - block: Arc, - start_tx_number: u64, - td: U256, - sender: oneshot::Sender<()>, - ) -> ProviderResult<()> { - let provider = self.provider.static_file_provider(); - let mut header_writer = provider.get_writer(block.number, StaticFileSegment::Headers)?; - let mut transactions_writer = - provider.get_writer(block.number, StaticFileSegment::Transactions)?; - - // TODO: does to_compact require ownership? - header_writer.append_header(block.header().clone(), td, block.hash())?; - let no_hash_transactions = - block.body.clone().into_iter().map(TransactionSignedNoHash::from); - - let mut tx_number = start_tx_number; - for tx in no_hash_transactions { - transactions_writer.append_transaction(tx_number, tx)?; - tx_number += 1; - } - - // increment block for transactions - transactions_writer.increment_block(StaticFileSegment::Transactions, block.number)?; - - // finally commit - transactions_writer.commit()?; - header_writer.commit()?; - - // TODO: do we care about the mpsc error here? - // send a command to the db service to update the checkpoints for headers / bodies - let _ = self - .database_handle - .send_action(DatabaseAction::UpdateTransactionMeta((block.number, sender))); - - Ok(()) - } - - /// Write execution-related block data to static files. - /// - /// This will then send a command to the db service, that it should write new data, and update - /// the checkpoints for execution and beyond. - fn write_execution_data( - &self, - blocks: Vec, - sender: oneshot::Sender, - ) -> ProviderResult<()> { - if blocks.is_empty() { - return Ok(()) - } - let provider = self.provider.static_file_provider(); - - // NOTE: checked non-empty above - let first_block = blocks.first().unwrap().block(); - let last_block = blocks.last().unwrap().block(); - - // get highest receipt, if it returns none, use zero (this is the first static file write) - let mut current_receipt = provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .map(|num| num + 1) - .unwrap_or_default(); - let mut current_block = first_block.number; - - let mut receipts_writer = - provider.get_writer(first_block.number, StaticFileSegment::Receipts)?; - for receipts in blocks.iter().map(|block| block.execution_outcome().receipts.clone()) { - debug_assert!(receipts.len() == 1); - // TODO: should we also assert that the receipt is not None here, that means the - // receipt is pruned - for maybe_receipt in receipts.first().unwrap() { - if let Some(receipt) = maybe_receipt { - receipts_writer.append_receipt(current_receipt, receipt.clone())?; - } - current_receipt += 1; - } - - // increment the block - receipts_writer.increment_block(StaticFileSegment::Receipts, current_block)?; - current_block += 1; - } - - // finally increment block and commit - receipts_writer.commit()?; - - // TODO: do we care about the mpsc error here? - // send a command to the db service to update the checkpoints for execution etc. - let _ = self.database_handle.send_action(DatabaseAction::SaveBlocks((blocks, sender))); - - Ok(()) - } - - /// Removes the blocks above the given block number from static files. Also removes related - /// receipt and header data. - /// - /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove - /// `block_number`. 
- /// - /// Returns the block hash for the lowest block removed from the database, which should be - /// the hash for `block_number + 1`. - /// - /// This is meant to be called by the db service, as this should only be done after related data - /// is removed from the database, and checkpoints are updated. - /// - /// Returns the hash of the lowest removed block. - fn remove_blocks_above( - &self, - block_num: u64, - sender: oneshot::Sender<()>, - ) -> ProviderResult<()> { - let sf_provider = self.provider.static_file_provider(); - let db_provider_rw = self.provider.provider_rw()?; - - // get highest static file block for the total block range - let highest_static_file_block = sf_provider - .get_highest_static_file_block(StaticFileSegment::Headers) - .expect("todo: error handling, headers should exist"); - - // Get the total txs for the block range, so we have the correct number of columns for - // receipts and transactions - let tx_range = db_provider_rw - .transaction_range_by_block_range(block_num..=highest_static_file_block)?; - let total_txs = tx_range.end().saturating_sub(*tx_range.start()); - - // get the writers - let mut header_writer = sf_provider.get_writer(block_num, StaticFileSegment::Headers)?; - let mut transactions_writer = - sf_provider.get_writer(block_num, StaticFileSegment::Transactions)?; - let mut receipts_writer = sf_provider.get_writer(block_num, StaticFileSegment::Receipts)?; - - // finally actually truncate, these internally commit - receipts_writer.prune_receipts(total_txs, block_num)?; - transactions_writer.prune_transactions(total_txs, block_num)?; - header_writer.prune_headers(highest_static_file_block.saturating_sub(block_num))?; - - sf_provider.commit()?; - - Ok(()) - } -} - -impl StaticFileService -where - DB: Database + 'static, -{ - /// This is the main loop, that will listen to static file actions, and write DB data to static - /// files. - pub fn run(self) { - // If the receiver errors then senders have disconnected, so the loop should then end. - while let Ok(action) = self.incoming.recv() { - match action { - StaticFileAction::LogTransactions(( - block, - start_tx_number, - td, - response_sender, - )) => { - self.log_transactions(block, start_tx_number, td, response_sender) - .expect("todo: handle errors"); - } - StaticFileAction::RemoveBlocksAbove((block_num, response_sender)) => { - self.remove_blocks_above(block_num, response_sender) - .expect("todo: handle errors"); - } - StaticFileAction::WriteExecutionData((blocks, response_sender)) => { - self.write_execution_data(blocks, response_sender) - .expect("todo: handle errors"); - } - } - } - } -} - -/// A signal to the static file service that some data should be copied from the DB to static files. -#[derive(Debug)] -pub enum StaticFileAction { - /// The given block has been added to the canonical chain, its transactions and headers will be - /// persisted for durability. - /// - /// This will then send a command to the db service, that it should update the checkpoints for - /// headers and block bodies. - LogTransactions((Arc, u64, U256, oneshot::Sender<()>)), - - /// Write execution-related block data to static files. - /// - /// This will then send a command to the db service, that it should write new data, and update - /// the checkpoints for execution and beyond. - WriteExecutionData((Vec, oneshot::Sender)), - - /// Removes the blocks above the given block number from static files. Also removes related - /// receipt and header data. 
- /// - /// This is meant to be called by the db service, as this should only be done after related - /// data is removed from the database, and checkpoints are updated. - RemoveBlocksAbove((u64, oneshot::Sender<()>)), -} - -/// A handle to the static file service -#[derive(Debug, Clone)] -pub struct StaticFileServiceHandle { - /// The channel used to communicate with the static file service - sender: Sender, -} - -impl StaticFileServiceHandle { - /// Create a new [`StaticFileServiceHandle`] from a [`Sender`]. - pub const fn new(sender: Sender) -> Self { - Self { sender } - } - - /// Sends a specific [`StaticFileAction`] in the contained channel. The caller is responsible - /// for creating any channels for the given action. - pub fn send_action(&self, action: StaticFileAction) -> Result<(), SendError> { - self.sender.send(action) - } -} diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index f946f2259a07..0a5fbd5ad560 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,19 +1,12 @@ -use crate::tree::ExecutedBlock; -use rand::Rng; use reth_chainspec::ChainSpec; use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; use reth_network_p2p::test_utils::TestFullBlockClient; -use reth_primitives::{ - Address, Block, BlockBody, BlockNumber, Receipts, Requests, SealedBlockWithSenders, - SealedHeader, TransactionSigned, B256, -}; +use reth_primitives::{BlockBody, SealedHeader, B256}; use reth_provider::{test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome}; use reth_prune_types::PruneModes; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_stages_api::Pipeline; use reth_static_file::StaticFileProducer; -use reth_trie::{updates::TrieUpdates, HashedPostState}; -use revm::db::BundleState; use std::{collections::VecDeque, ops::Range, sync::Arc}; use tokio::sync::watch; @@ -82,43 +75,3 @@ pub(crate) fn insert_headers_into_client( client.insert(sealed_header.clone(), body.clone()); } } - -fn get_executed_block(block_number: BlockNumber, receipts: Receipts) -> ExecutedBlock { - let mut block = Block::default(); - let mut header = block.header.clone(); - header.number = block_number; - block.header = header; - - let sender = Address::random(); - let tx = TransactionSigned::default(); - block.body.push(tx); - let sealed = block.seal_slow(); - let sealed_with_senders = SealedBlockWithSenders::new(sealed.clone(), vec![sender]).unwrap(); - - ExecutedBlock::new( - Arc::new(sealed), - Arc::new(sealed_with_senders.senders), - Arc::new(ExecutionOutcome::new( - BundleState::default(), - receipts, - block_number, - vec![Requests::default()], - )), - Arc::new(HashedPostState::default()), - Arc::new(TrieUpdates::default()), - ) -} - -pub(crate) fn get_executed_block_with_receipts(receipts: Receipts) -> ExecutedBlock { - let number = rand::thread_rng().gen::(); - - get_executed_block(number, receipts) -} - -pub(crate) fn get_executed_block_with_number(block_number: BlockNumber) -> ExecutedBlock { - get_executed_block(block_number, Receipts { receipt_vec: vec![vec![]] }) -} - -pub(crate) fn get_executed_blocks(range: Range) -> impl Iterator { - range.map(get_executed_block_with_number) -} diff --git a/crates/engine/tree/src/tree.rs b/crates/engine/tree/src/tree.rs new file mode 100644 index 000000000000..271984d20f88 --- /dev/null +++ b/crates/engine/tree/src/tree.rs @@ -0,0 +1,1923 @@ +use crate::{ + backfill::{BackfillAction, BackfillSyncState}, + chain::FromOrchestrator, + 
engine::{DownloadRequest, EngineApiEvent, FromEngine}, + persistence::PersistenceHandle, +}; +use reth_beacon_consensus::{ + BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, + OnForkChoiceUpdated, MIN_BLOCKS_FOR_PIPELINE_RUN, +}; +use reth_blockchain_tree::{ + error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus, +}; +use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk}; +use reth_chain_state::{ + CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, +}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_engine_primitives::EngineTypes; +use reth_errors::{ConsensusError, ProviderResult}; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes, PayloadTypes}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{ + Block, BlockNumHash, BlockNumber, GotExpected, Header, Receipts, Requests, SealedBlock, + SealedBlockWithSenders, SealedHeader, B256, U256, +}; +use reth_provider::{ + BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_types::{ + engine::{ + CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, + }, + ExecutionPayload, +}; +use reth_stages_api::ControlFlow; +use reth_trie::HashedPostState; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + sync::{mpsc::Receiver, Arc}, +}; +use tokio::sync::{ + mpsc::{UnboundedReceiver, UnboundedSender}, + oneshot, +}; +use tracing::*; + +/// Maximum number of blocks to be kept only in memory without triggering persistence. +const PERSISTENCE_THRESHOLD: u64 = 256; +/// Number of pending blocks that cannot be executed due to missing parent and +/// are kept in cache. +const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256; +/// Number of invalid headers to keep in cache. +const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256; + +/// Keeps track of the state of the tree. +/// +/// ## Invariants +/// +/// - This only stores blocks that are connected to the canonical chain. +/// - All executed blocks are valid and have been executed. +#[derive(Debug, Default)] +pub struct TreeState { + /// __All__ executed blocks by block hash. + /// + /// This includes blocks of all forks. + blocks_by_hash: HashMap, + /// Executed blocks grouped by their respective block number. + /// + /// This maps unique block number to all known blocks for that height. + blocks_by_number: BTreeMap>, + /// Currently tracked canonical head of the chain. + current_canonical_head: BlockNumHash, + /// Map of any parent block hash to its children. + parent_to_child: HashMap>, +} + +impl TreeState { + /// Returns a new, empty tree state that points to the given canonical head. + fn new(current_canonical_head: BlockNumHash) -> Self { + Self { + blocks_by_hash: HashMap::new(), + blocks_by_number: BTreeMap::new(), + current_canonical_head, + parent_to_child: HashMap::new(), + } + } + + /// Returns the block by hash. + fn block_by_hash(&self, hash: B256) -> Option> { + self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) + } + + fn block_by_number(&self, number: BlockNumber) -> Option> { + self.blocks_by_number + .get(&number) + .and_then(|blocks| blocks.last()) + .map(|executed_block| executed_block.block.clone()) + } + + /// Insert executed block into the state. 
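+    ///
+    /// Illustrative only (`current_head` and `executed_block` are assumptions):
+    ///
+    /// ```ignore
+    /// let mut tree_state = TreeState::new(current_head);
+    /// tree_state.insert_executed(executed_block); // indexed by hash, number and parent
+    /// ```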
+ fn insert_executed(&mut self, executed: ExecutedBlock) { + let hash = executed.block.hash(); + let parent_hash = executed.block.parent_hash; + let block_number = executed.block.number; + + if self.blocks_by_hash.contains_key(&hash) { + return; + } + + self.blocks_by_hash.insert(hash, executed.clone()); + + self.blocks_by_number.entry(block_number).or_default().push(executed); + + self.parent_to_child.entry(parent_hash).or_default().insert(hash); + + if let Some(existing_blocks) = self.blocks_by_number.get(&block_number) { + if existing_blocks.len() > 1 { + self.parent_to_child.entry(parent_hash).or_default().insert(hash); + } + } + + for children in self.parent_to_child.values_mut() { + children.retain(|child| self.blocks_by_hash.contains_key(child)); + } + } + + /// Remove blocks before specified block number. + pub(crate) fn remove_before(&mut self, block_number: BlockNumber) { + let mut numbers_to_remove = Vec::new(); + for (&number, _) in self.blocks_by_number.range(..block_number) { + numbers_to_remove.push(number); + } + + for number in numbers_to_remove { + if let Some(blocks) = self.blocks_by_number.remove(&number) { + for block in blocks { + let block_hash = block.block.hash(); + self.blocks_by_hash.remove(&block_hash); + + if let Some(parent_children) = + self.parent_to_child.get_mut(&block.block.parent_hash) + { + parent_children.remove(&block_hash); + if parent_children.is_empty() { + self.parent_to_child.remove(&block.block.parent_hash); + } + } + + self.parent_to_child.remove(&block_hash); + } + } + } + } + + /// Returns the maximum block number stored. + pub(crate) fn max_block_number(&self) -> BlockNumber { + *self.blocks_by_number.last_key_value().unwrap_or((&BlockNumber::default(), &vec![])).0 + } + + /// Returns the block number of the pending block: `head + 1` + const fn pending_block_number(&self) -> BlockNumber { + self.current_canonical_head.number + 1 + } + + /// Updates the canonical head to the given block. + fn set_canonical_head(&mut self, new_head: BlockNumHash) { + self.current_canonical_head = new_head; + } + + /// Returns the tracked canonical head. + const fn canonical_head(&self) -> &BlockNumHash { + &self.current_canonical_head + } + + /// Returns the block hash of the canonical head. + const fn canonical_block_hash(&self) -> B256 { + self.canonical_head().hash + } + + /// Returns the new chain for the given head. + /// + /// This also handles reorgs. 
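+    ///
+    /// Sketch of the possible outcomes (hypothetical caller, not part of this patch):
+    ///
+    /// ```ignore
+    /// match tree_state.on_new_head(new_head_hash) {
+    ///     Some(NewCanonicalChain::Commit { new }) => { /* canonical chain simply extended */ }
+    ///     Some(NewCanonicalChain::Reorg { new, old }) => { /* `old` must be unwound */ }
+    ///     None => { /* head is unknown to the in-memory tree */ }
+    /// }
+    /// ```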
+ fn on_new_head(&self, new_head: B256) -> Option { + let mut new_chain = Vec::new(); + let mut current_hash = new_head; + let mut fork_point = None; + + // walk back the chain until we reach the canonical block + while current_hash != self.canonical_block_hash() { + let current_block = self.blocks_by_hash.get(¤t_hash)?; + new_chain.push(current_block.clone()); + + // check if this block's parent has multiple children + if let Some(children) = self.parent_to_child.get(¤t_block.block.parent_hash) { + if children.len() > 1 || + self.canonical_block_hash() == current_block.block.parent_hash + { + // we've found a fork point + fork_point = Some(current_block.block.parent_hash); + break; + } + } + + current_hash = current_block.block.parent_hash; + } + + new_chain.reverse(); + + // if we found a fork point, collect the reorged blocks + let reorged = if let Some(fork_hash) = fork_point { + let mut reorged = Vec::new(); + let mut current_hash = self.current_canonical_head.hash; + // walk back the chain up to the fork hash + while current_hash != fork_hash { + if let Some(block) = self.blocks_by_hash.get(¤t_hash) { + reorged.push(block.clone()); + current_hash = block.block.parent_hash; + } else { + // current hash not found in memory + warn!(target: "consensus::engine", invalid_hash=?current_hash, "Block not found in TreeState while walking back fork"); + return None; + } + } + reorged.reverse(); + reorged + } else { + Vec::new() + }; + + if reorged.is_empty() { + Some(NewCanonicalChain::Commit { new: new_chain }) + } else { + Some(NewCanonicalChain::Reorg { new: new_chain, old: reorged }) + } + } +} + +/// Tracks the state of the engine api internals. +/// +/// This type is not shareable. +#[derive(Debug)] +pub struct EngineApiTreeState { + /// Tracks the state of the blockchain tree. + tree_state: TreeState, + /// Tracks the forkchoice state updates received by the CL. + forkchoice_state_tracker: ForkchoiceStateTracker, + /// Buffer of detached blocks. + buffer: BlockBuffer, + /// Tracks the header of invalid payloads that were rejected by the engine because they're + /// invalid. + invalid_headers: InvalidHeaderCache, +} + +impl EngineApiTreeState { + fn new( + block_buffer_limit: u32, + max_invalid_header_cache_length: u32, + canonical_block: BlockNumHash, + ) -> Self { + Self { + invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length), + buffer: BlockBuffer::new(block_buffer_limit), + tree_state: TreeState::new(canonical_block), + forkchoice_state_tracker: ForkchoiceStateTracker::default(), + } + } +} + +/// The type responsible for processing engine API requests. +pub trait EngineApiTreeHandler { + /// The engine type that this handler is for. + type Engine: EngineTypes; + + /// Invoked when previously requested blocks were downloaded. + fn on_downloaded(&mut self, blocks: Vec) -> Option; + + /// When the Consensus layer receives a new block via the consensus gossip protocol, + /// the transactions in the block are sent to the execution layer in the form of a + /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the + /// state in the block header, then passes validation data back to Consensus layer, that + /// adds the block to the head of its own blockchain and attests to it. The block is then + /// broadcast over the consensus p2p network in the form of a "Beacon block". 
+ /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). + /// + /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and + /// returns an error if an internal error occurred. + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult>; + + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). + /// + /// Returns an error if an internal error occurred like a database error. + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult>; +} + +/// The outcome of a tree operation. +#[derive(Debug)] +pub struct TreeOutcome { + /// The outcome of the operation. + pub outcome: T, + /// An optional event to tell the caller to do something. + pub event: Option, +} + +impl TreeOutcome { + /// Create new tree outcome. + pub const fn new(outcome: T) -> Self { + Self { outcome, event: None } + } + + /// Set event on the outcome. + pub fn with_event(mut self, event: TreeEvent) -> Self { + self.event = Some(event); + self + } +} + +/// Events that can be emitted by the [`EngineApiTreeHandler`]. +#[derive(Debug)] +pub enum TreeEvent { + /// Tree action is needed. + TreeAction(TreeAction), + /// Backfill action is needed. + BackfillAction(BackfillAction), + /// Block download is needed. + Download(DownloadRequest), +} + +impl TreeEvent { + /// Returns true if the event is a backfill action. + const fn is_backfill_action(&self) -> bool { + matches!(self, Self::BackfillAction(_)) + } +} + +/// The actions that can be performed on the tree. +#[derive(Debug)] +pub enum TreeAction { + /// Make target canonical. + MakeCanonical(B256), +} + +/// The engine API tree handler implementation. +/// +/// This type is responsible for processing engine API requests, maintaining the canonical state and +/// emitting events. +#[derive(Debug)] +pub struct EngineApiTreeHandlerImpl { + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + /// Keeps track of internals such as executed and buffered blocks. + state: EngineApiTreeState, + /// Incoming engine API requests. + incoming: Receiver>>, + /// Outgoing events that are emitted to the handler. + outgoing: UnboundedSender, + /// Channels to the persistence layer. + persistence: PersistenceHandle, + /// Tracks the state changes of the persistence task. + persistence_state: PersistenceState, + /// Flag indicating the state of the node's backfill synchronization process. + backfill_sync_state: BackfillSyncState, + /// Keeps track of the state of the canonical chain that isn't persisted yet. + /// This is intended to be accessed from external sources, such as rpc. 
+
+impl<P, E, T> EngineApiTreeHandlerImpl<P, E, T>
+where
+ P: BlockReader + StateProviderFactory + Clone + 'static,
+ E: BlockExecutorProvider,
+ T: EngineTypes,
+{
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ provider: P,
+ executor_provider: E,
+ consensus: Arc<dyn Consensus>,
+ payload_validator: ExecutionPayloadValidator,
+ incoming: Receiver<FromEngine<BeaconEngineMessage<T>>>,
+ outgoing: UnboundedSender<EngineApiEvent>,
+ state: EngineApiTreeState,
+ canonical_in_memory_state: CanonicalInMemoryState,
+ persistence: PersistenceHandle,
+ payload_builder: PayloadBuilderHandle<T>,
+ ) -> Self {
+ Self {
+ provider,
+ executor_provider,
+ consensus,
+ payload_validator,
+ incoming,
+ outgoing,
+ persistence,
+ persistence_state: PersistenceState::default(),
+ backfill_sync_state: BackfillSyncState::Idle,
+ state,
+ canonical_in_memory_state,
+ payload_builder,
+ }
+ }
+
+ /// Creates a new `EngineApiTreeHandlerImpl` instance and spawns it in its
+ /// own thread. Returns the receiver end of an `EngineApiEvent` unbounded
+ /// channel to receive events from the engine.
+ #[allow(clippy::too_many_arguments)]
+ pub fn spawn_new(
+ provider: P,
+ executor_provider: E,
+ consensus: Arc<dyn Consensus>,
+ payload_validator: ExecutionPayloadValidator,
+ incoming: Receiver<FromEngine<BeaconEngineMessage<T>>>,
+ persistence: PersistenceHandle,
+ payload_builder: PayloadBuilderHandle<T>,
+ canonical_in_memory_state: CanonicalInMemoryState,
+ ) -> UnboundedReceiver<EngineApiEvent> {
+ let best_block_number = provider.best_block_number().unwrap_or(0);
+ let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default();
+
+ let (tx, outgoing) = tokio::sync::mpsc::unbounded_channel();
+ let state = EngineApiTreeState::new(
+ DEFAULT_BLOCK_BUFFER_LIMIT,
+ DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH,
+ header.num_hash(),
+ );
+
+ let task = Self::new(
+ provider,
+ executor_provider,
+ consensus,
+ payload_validator,
+ incoming,
+ tx,
+ state,
+ canonical_in_memory_state,
+ persistence,
+ payload_builder,
+ );
+ std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap();
+ outgoing
+ }
+
+ /// Run the engine API handler.
+ ///
+ /// This will block the current thread and process incoming messages.
+ pub fn run(mut self) {
+ while let Ok(msg) = self.incoming.recv() {
+ self.run_once(msg);
+ }
+ }
+
+ /// Run the engine API handler once.
+ fn run_once(&mut self, msg: FromEngine<BeaconEngineMessage<T>>) {
+ self.on_engine_message(msg);
+
+ if self.should_persist() && !self.persistence_state.in_progress() {
+ let blocks_to_persist = self.get_blocks_to_persist();
+ let (tx, rx) = oneshot::channel();
+ self.persistence.save_blocks(blocks_to_persist, tx);
+ self.persistence_state.start(rx);
+ }
+
+ if self.persistence_state.in_progress() {
+ let rx = self
+ .persistence_state
+ .rx
+ .as_mut()
+ .expect("if a persistence task is in progress Receiver must be Some");
+ // Check if persistence has completed
+ if let Ok(last_persisted_block_hash) = rx.try_recv() {
+ if let Some(block) = self.state.tree_state.block_by_hash(last_persisted_block_hash)
+ {
+ self.persistence_state.finish(last_persisted_block_hash, block.number);
+ self.on_new_persisted_block();
+ } else {
+ error!("could not find persisted block with hash {last_persisted_block_hash} in memory");
+ }
+ }
+ }
+ }
+
+ /// Handles a message from the engine.
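// A self-contained sketch of the threading/channel layout that `spawn_new` and
// `run` above set up: a blocking std mpsc channel feeding a dedicated tree
// thread, and a tokio unbounded channel carrying events back out. Message
// types are simplified stand-ins; tokio is assumed as a dependency, as in the
// diff itself.
use std::sync::mpsc;

#[derive(Debug)]
enum FromEngine {
    DownloadedBlocks(usize),
}

#[derive(Debug)]
enum EngineApiEvent {
    Processed(usize),
}

fn main() {
    let (incoming_tx, incoming) = mpsc::channel::<FromEngine>();
    let (outgoing, mut events) = tokio::sync::mpsc::unbounded_channel::<EngineApiEvent>();

    // the tree owns the receiving end and blocks on it, like `run` above
    let handle = std::thread::Builder::new()
        .name("Tree Task".to_string())
        .spawn(move || {
            while let Ok(msg) = incoming.recv() {
                let FromEngine::DownloadedBlocks(n) = msg;
                let _ = outgoing.send(EngineApiEvent::Processed(n));
            }
        })
        .unwrap();

    incoming_tx.send(FromEngine::DownloadedBlocks(3)).unwrap();
    drop(incoming_tx); // closing the channel ends the loop

    handle.join().unwrap();
    // events can now be drained from the tokio side (e.g. in an async task)
    assert!(matches!(events.try_recv(), Ok(EngineApiEvent::Processed(3))));
}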
+ fn on_engine_message(&mut self, msg: FromEngine>) { + match msg { + FromEngine::Event(event) => match event { + FromOrchestrator::BackfillSyncStarted => { + debug!(target: "consensus::engine", "received backfill sync started event"); + self.backfill_sync_state = BackfillSyncState::Active; + } + FromOrchestrator::BackfillSyncFinished(ctrl) => { + self.on_backfill_sync_finished(ctrl); + } + }, + FromEngine::Request(request) => match request { + BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + let mut output = self.on_forkchoice_updated(state, payload_attrs); + + if let Ok(res) = &mut output { + // emit an event about the handled FCU + self.emit_event(BeaconConsensusEngineEvent::ForkchoiceUpdated( + state, + res.outcome.forkchoice_status(), + )); + + // handle the event if any + self.on_maybe_tree_event(res.event.take()); + } + + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { + let output = self.on_new_payload(payload, cancun_fields); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { + reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new(e)) + })) { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::TransitionConfigurationExchanged => { + // triggering this hook will record that we received a request from the CL + self.canonical_in_memory_state.on_transition_configuration_exchanged(); + } + }, + FromEngine::DownloadedBlocks(blocks) => { + if let Some(event) = self.on_downloaded(blocks) { + self.on_tree_event(event); + } + } + } + } + + /// Invoked if the backfill sync has finished to target. + /// + /// Checks the tracked finalized block against the block on disk and restarts backfill if + /// needed. + /// + /// This will also try to connect the buffered blocks. + fn on_backfill_sync_finished(&mut self, ctrl: ControlFlow) { + debug!(target: "consensus::engine", "received backfill sync finished event"); + self.backfill_sync_state = BackfillSyncState::Idle; + + // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. + if let ControlFlow::Unwind { bad_block, .. } = ctrl { + warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + // update the `invalid_headers` cache with the new invalid header + self.state.invalid_headers.insert(*bad_block); + return + } + + let Some(sync_target_state) = self.state.forkchoice_state_tracker.sync_target_state() + else { + return + }; + + if sync_target_state.finalized_block_hash.is_zero() { + return + } + + // get the block number of the finalized block, if we have it + let newest_finalized = self + .state + .buffer + .block(&sync_target_state.finalized_block_hash) + .map(|block| block.number); + + // TODO(mattsse): state housekeeping, this needs to update the tracked canonical state and + // attempt to make the current target canonical if we have all the blocks buffered + + // The block number that the backfill finished at - if the progress or newest + // finalized is None then we can't check the distance anyways. 
+ // + // If both are Some, we perform another distance check and return the desired + // backfill target + let Some(backfill_target) = + ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { + // Determines whether or not we should run backfill again, in case + // the new gap is still large enough and requires running backfill again + self.backfill_sync_target(progress, finalized_number, None) + }) + else { + return + }; + + // request another backfill run + self.emit_event(EngineApiEvent::BackfillAction(BackfillAction::Start( + backfill_target.into(), + ))); + } + + /// Attempts to make the given target canonical. + /// + /// This will update the tracked canonical in memory state and do the necessary housekeeping. + const fn make_canonical(&self, target: B256) { + // TODO: implement state updates and shift canonical state + } + + /// Convenience function to handle an optional tree event. + fn on_maybe_tree_event(&self, event: Option) { + if let Some(event) = event { + self.on_tree_event(event); + } + } + + /// Handles a tree event. + fn on_tree_event(&self, event: TreeEvent) { + match event { + TreeEvent::TreeAction(action) => match action { + TreeAction::MakeCanonical(target) => { + self.make_canonical(target); + } + }, + TreeEvent::BackfillAction(action) => { + self.emit_event(EngineApiEvent::BackfillAction(action)); + } + TreeEvent::Download(action) => { + self.emit_event(EngineApiEvent::Download(action)); + } + } + } + + /// Emits an outgoing event to the engine. + fn emit_event(&self, event: impl Into) { + let _ = self + .outgoing + .send(event.into()) + .inspect_err(|err| error!("Failed to send internal event: {err:?}")); + } + + /// Returns true if the canonical chain length minus the last persisted + /// block is greater than or equal to the persistence threshold. + fn should_persist(&self) -> bool { + self.state.tree_state.max_block_number() - + self.persistence_state.last_persisted_block_number >= + PERSISTENCE_THRESHOLD + } + + fn get_blocks_to_persist(&self) -> Vec { + let start = self.persistence_state.last_persisted_block_number; + let end = start + PERSISTENCE_THRESHOLD; + + // NOTE: this is an exclusive range, to try to include exactly PERSISTENCE_THRESHOLD blocks + self.state + .tree_state + .blocks_by_number + .range(start..end) + .flat_map(|(_, blocks)| blocks.iter().cloned()) + .collect() + } + + /// This clears the blocks from the in-memory tree state that have been persisted to the + /// database. + /// + /// This also updates the canonical in-memory state to reflect the newest persisted block + /// height. + fn on_new_persisted_block(&mut self) { + self.remove_persisted_blocks_from_tree_state(); + self.canonical_in_memory_state + .remove_persisted_blocks(self.persistence_state.last_persisted_block_number); + } + + /// Clears persisted blocks from the in-memory tree state. + fn remove_persisted_blocks_from_tree_state(&mut self) { + let keys_to_remove: Vec = self + .state + .tree_state + .blocks_by_number + .range(..=self.persistence_state.last_persisted_block_number) + .map(|(&k, _)| k) + .collect(); + + for key in keys_to_remove { + if let Some(blocks) = self.state.tree_state.blocks_by_number.remove(&key) { + // Remove corresponding blocks from blocks_by_hash + for block in blocks { + self.state.tree_state.blocks_by_hash.remove(&block.block().hash()); + } + } + } + } + + /// Return block from database or in-memory state by hash. 
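// A small standalone sketch of the windowing done by `get_blocks_to_persist`
// above: an exclusive `start..end` range over a BTreeMap keyed by block number
// selects at most PERSISTENCE_THRESHOLD heights, starting at the last
// persisted block number. The map contents are placeholders.
use std::collections::BTreeMap;

const PERSISTENCE_THRESHOLD: u64 = 256;

fn blocks_to_persist(
    blocks_by_number: &BTreeMap<u64, Vec<&'static str>>,
    last_persisted: u64,
) -> Vec<&'static str> {
    let start = last_persisted;
    let end = start + PERSISTENCE_THRESHOLD;
    blocks_by_number
        .range(start..end) // exclusive upper bound, mirroring the code above
        .flat_map(|(_, blocks)| blocks.iter().copied())
        .collect()
}

fn main() {
    let mut map = BTreeMap::new();
    for n in 0..300u64 {
        map.insert(n, vec!["block"]);
    }
    // with nothing persisted yet, exactly the first 256 heights are selected
    assert_eq!(blocks_to_persist(&map, 0).len(), 256);
}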
+ fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Block>> {
+ // check database first
+ let mut block = self.provider.block_by_hash(hash)?;
+ if block.is_none() {
+ // Note: it's fine to return the unsealed block because the caller already has
+ // the hash
+ block = self
+ .state
+ .tree_state
+ .block_by_hash(hash)
+ // TODO: clone for compatibility. should we return an Arc here?
+ .map(|block| block.as_ref().clone().unseal());
+ }
+ Ok(block)
+ }
+
+ /// Return state provider with reference to in-memory blocks that overlay database state.
+ ///
+ /// This merges the state of all blocks that are part of the chain that the requested block is
+ /// the head of. This includes all blocks that connect back to the canonical block on disk.
+ // TODO: return error if the chain has gaps
+ fn state_provider(&self, hash: B256) -> ProviderResult<MemoryOverlayStateProvider> {
+ let mut in_memory = Vec::new();
+ let mut parent_hash = hash;
+ while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) {
+ parent_hash = executed.block.parent_hash;
+ in_memory.insert(0, executed.clone());
+ }
+
+ let historical = self.provider.state_by_block_hash(parent_hash)?;
+ Ok(MemoryOverlayStateProvider::new(in_memory, historical))
+ }
+
+ /// Return the parent hash of the lowest buffered ancestor for the requested block, if there
+ /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does
+ /// not exist in the buffer, this returns the hash that is passed in.
+ ///
+ /// Returns the parent hash of the block itself if the block is buffered and has no other
+ /// buffered ancestors.
+ fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 {
+ self.state
+ .buffer
+ .lowest_ancestor(&hash)
+ .map(|block| block.parent_hash)
+ .unwrap_or_else(|| hash)
+ }
+
+ /// If validation fails, the response MUST contain the latest valid hash:
+ ///
+ /// - The block hash of the ancestor of the invalid payload satisfying the following two
+ /// conditions:
+ /// - It is fully validated and deemed VALID
+ /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID
+ /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above
+ /// conditions are satisfied by a `PoW` block.
+ /// - null if client software cannot determine the ancestor of the invalid payload satisfying
+ /// the above conditions.
+ fn latest_valid_hash_for_invalid_payload(
+ &mut self,
+ parent_hash: B256,
+ ) -> ProviderResult<Option<B256>> {
+ // Check if parent exists in side chain or in canonical chain.
+ if self.block_by_hash(parent_hash)?.is_some() {
+ return Ok(Some(parent_hash))
+ }
+
+ // iterate over ancestors in the invalid cache
+ // until we encounter the first valid ancestor
+ let mut current_hash = parent_hash;
+ let mut current_header = self.state.invalid_headers.get(&current_hash);
+ while let Some(header) = current_header {
+ current_hash = header.parent_hash;
+ current_header = self.state.invalid_headers.get(&current_hash);
+
+ // If current_header is None, then the current_hash does not have an invalid
+ // ancestor in the cache, check its presence in blockchain tree
+ if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() {
+ return Ok(Some(current_hash))
+ }
+ }
+ Ok(None)
+ }
+
+ /// Prepares the invalid payload response for the given hash, checking the
+ /// database for the parent hash and populating the payload status with the latest valid hash
+ /// according to the engine api spec.
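// Sketch of the parent-hash walk performed by `state_provider` above: collect
// in-memory blocks from the requested hash back to the first ancestor that is
// no longer in memory, keeping them ordered oldest-first. Hashes are
// simplified to u64 for illustration.
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct ExecutedBlock {
    hash: u64,
    parent_hash: u64,
}

fn in_memory_overlay(
    blocks_by_hash: &HashMap<u64, ExecutedBlock>,
    hash: u64,
) -> (Vec<ExecutedBlock>, u64) {
    let mut in_memory = Vec::new();
    let mut parent_hash = hash;
    while let Some(executed) = blocks_by_hash.get(&parent_hash) {
        parent_hash = executed.parent_hash;
        in_memory.insert(0, executed.clone());
    }
    // `parent_hash` is now the first block that must be resolved from the database
    (in_memory, parent_hash)
}

fn main() {
    let mut blocks = HashMap::new();
    blocks.insert(2, ExecutedBlock { hash: 2, parent_hash: 1 });
    blocks.insert(3, ExecutedBlock { hash: 3, parent_hash: 2 });
    let (overlay, historical) = in_memory_overlay(&blocks, 3);
    assert_eq!(overlay.len(), 2);
    assert_eq!(historical, 1); // block 1 must come from disk
}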
+ fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { + // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal + // PoW block, which we need to identify by looking at the parent's block difficulty + if let Some(parent) = self.block_by_hash(parent_hash)? { + if !parent.is_zero_difficulty() { + parent_hash = B256::ZERO; + } + } + + let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; + Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }) + .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) + } + + /// Returns true if the given hash is the last received sync target block. + /// + /// See [`ForkchoiceStateTracker::sync_target_state`] + fn is_sync_target_head(&self, block_hash: B256) -> bool { + if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { + return target.head_block_hash == block_hash + } + false + } + + /// Checks if the given `check` hash points to an invalid header, inserting the given `head` + /// block into the invalid header cache if the `check` hash has a known invalid ancestor. + /// + /// Returns a payload status response according to the engine API spec if the block is known to + /// be invalid. + fn check_invalid_ancestor_with_head( + &mut self, + check: B256, + head: B256, + ) -> ProviderResult> { + // check if the check hash was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; + + // populate the latest valid hash field + let status = self.prepare_invalid_response(header.parent_hash)?; + + // insert the head block into the invalid header cache + self.state.invalid_headers.insert_with_invalid_ancestor(head, header); + + Ok(Some(status)) + } + + /// Checks if the given `head` points to an invalid header, which requires a specific response + /// to a forkchoice update. + fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { + // check if the head was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; + // populate the latest valid hash field + Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + } + + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. 
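// A simplified model of the invalid-header bookkeeping used by
// `check_invalid_ancestor_with_head` above: once a hash is known-invalid, any
// descendant that builds on it is recorded as invalid too. u64 stands in for
// B256, and this toy cache is an assumption, not reth's `InvalidHeaderCache`.
use std::collections::HashMap;

#[derive(Default)]
struct InvalidHeaderCache {
    // invalid block hash -> parent hash of its first invalid ancestor
    invalid: HashMap<u64, u64>,
}

impl InvalidHeaderCache {
    fn insert(&mut self, hash: u64, parent: u64) {
        self.invalid.insert(hash, parent);
    }

    // mirrors `insert_with_invalid_ancestor`: the head inherits the ancestor's parent
    fn check(&mut self, check: u64, head: u64) -> Option<u64> {
        let parent = *self.invalid.get(&check)?;
        self.invalid.insert(head, parent);
        Some(parent) // caller would derive latestValidHash from this parent
    }
}

fn main() {
    let mut cache = InvalidHeaderCache::default();
    cache.insert(10, 9); // block 10 was invalid; its parent is 9
    // a new head 11 descending from 10 is marked invalid as well
    assert_eq!(cache.check(10, 11), Some(9));
    assert!(cache.invalid.contains_key(&11));
}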
+ fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { + error!( + ?block, + "Failed to validate total difficulty for block {}: {e}", + block.header.hash() + ); + return Err(e) + } + + if let Err(e) = self.consensus.validate_header(block) { + error!(?block, "Failed to validate header {}: {e}", block.header.hash()); + return Err(e) + } + + if let Err(e) = self.consensus.validate_block_pre_execution(block) { + error!(?block, "Failed to validate block {}: {e}", block.header.hash()); + return Err(e) + } + + Ok(()) + } + + fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> { + match block.try_seal_with_senders() { + Ok(block) => self.buffer_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { + if let Err(err) = self.validate_block(&block) { + return Err(InsertBlockError::consensus_error(err, block.block)) + } + self.state.buffer.insert_block(block); + Ok(()) + } + + /// Returns true if the distance from the local tip to the block is greater than the configured + /// threshold. + /// + /// If the `local_tip` is greater than the `block`, then this will return false. + #[inline] + const fn exceeds_backfill_run_threshold(&self, local_tip: u64, block: u64) -> bool { + block > local_tip && block - local_tip > MIN_BLOCKS_FOR_PIPELINE_RUN + } + + /// Returns how far the local tip is from the given block. If the local tip is at the same + /// height or its block number is greater than the given block, this returns None. + #[inline] + const fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option { + if block > local_tip { + Some(block - local_tip) + } else { + None + } + } + + /// Returns the target hash to sync to if the distance from the local tip to the block is + /// greater than the threshold and we're not synced to the finalized block yet (if we've seen + /// that block already). + /// + /// If this is invoked after a new block has been downloaded, the downloaded block could be the + /// (missing) finalized block. 
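// The distance checks used by `exceeds_backfill_run_threshold` and
// `distance_from_local_tip` above, reproduced standalone. The constant's value
// here is illustrative; the real MIN_BLOCKS_FOR_PIPELINE_RUN comes from reth's
// sync configuration.
const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = 1_000; // illustrative value

const fn exceeds_backfill_run_threshold(local_tip: u64, block: u64) -> bool {
    block > local_tip && block - local_tip > MIN_BLOCKS_FOR_PIPELINE_RUN
}

const fn distance_from_local_tip(local_tip: u64, block: u64) -> Option<u64> {
    if block > local_tip {
        Some(block - local_tip)
    } else {
        None
    }
}

fn main() {
    // a small gap is downloaded block-by-block; a large one triggers backfill
    assert!(!exceeds_backfill_run_threshold(100, 600));
    assert!(exceeds_backfill_run_threshold(100, 2_000));
    assert_eq!(distance_from_local_tip(100, 600), Some(500));
    assert_eq!(distance_from_local_tip(600, 100), None);
}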
+ fn backfill_sync_target( + &self, + canonical_tip_num: u64, + target_block_number: u64, + downloaded_block: Option, + ) -> Option { + let sync_target_state = self.state.forkchoice_state_tracker.sync_target_state(); + + // check if the distance exceeds the threshold for backfill sync + let mut exceeds_backfill_threshold = + self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number); + + // check if the downloaded block is the tracked finalized block + if let Some(buffered_finalized) = sync_target_state + .as_ref() + .and_then(|state| self.state.buffer.block(&state.finalized_block_hash)) + { + // if we have buffered the finalized block, we should check how far + // we're off + exceeds_backfill_threshold = + self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number); + } + + // If this is invoked after we downloaded a block we can check if this block is the + // finalized block + if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) { + if downloaded_block.hash == state.finalized_block_hash { + // we downloaded the finalized block and can now check how far we're off + exceeds_backfill_threshold = + self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number); + } + } + + // if the number of missing blocks is greater than the max, trigger backfill + if exceeds_backfill_threshold { + if let Some(state) = sync_target_state { + // if we have already canonicalized the finalized block, we should skip backfill + match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) { + Err(err) => { + warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); + } + Ok(None) => { + // ensure the finalized block is known (not the zero hash) + if !state.finalized_block_hash.is_zero() { + // we don't have the block yet and the distance exceeds the allowed + // threshold + return Some(state.finalized_block_hash) + } + + // OPTIMISTIC SYNCING + // + // It can happen when the node is doing an + // optimistic sync, where the CL has no knowledge of the finalized hash, + // but is expecting the EL to sync as high + // as possible before finalizing. + // + // This usually doesn't happen on ETH mainnet since CLs use the more + // secure checkpoint syncing. + // + // However, optimism chains will do this. The risk of a reorg is however + // low. + debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target."); + return Some(state.head_block_hash) + } + Ok(Some(_)) => { + // we're fully synced to the finalized block + } + } + } + } + + None + } + + /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. + /// + /// This mainly compares the missing parent of the downloaded block with the current canonical + /// tip, and decides whether or not backfill sync should be triggered. 
+ fn on_disconnected_downloaded_block( + &self, + downloaded_block: BlockNumHash, + missing_parent: BlockNumHash, + head: BlockNumHash, + ) -> Option { + // compare the missing parent with the canonical tip + if let Some(target) = + self.backfill_sync_target(head.number, missing_parent.number, Some(downloaded_block)) + { + return Some(TreeEvent::BackfillAction(BackfillAction::Start(target.into()))); + } + + // continue downloading the missing parent + // + // this happens if either: + // * the missing parent block num < canonical tip num + // * this case represents a missing block on a fork that is shorter than the canonical + // chain + // * the missing parent block num >= canonical tip num, but the number of missing blocks is + // less than the backfill threshold + // * this case represents a potentially long range of blocks to download and execute + let request = if let Some(distance) = + self.distance_from_local_tip(head.number, missing_parent.number) + { + DownloadRequest::BlockRange(missing_parent.hash, distance) + } else { + // This happens when the missing parent is on an outdated + // sidechain and we can only download the missing block itself + DownloadRequest::single_block(missing_parent.hash) + }; + + Some(TreeEvent::Download(request)) + } + + /// Invoked with a block downloaded from the network + /// + /// Returns an event with the appropriate action to take, such as: + /// - download more missing blocks + /// - try to canonicalize the target if the `block` is the tracked target (head) block. + fn on_downloaded_block(&mut self, block: SealedBlockWithSenders) -> Option { + let block_num_hash = block.num_hash(); + let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); + if self + .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_num_hash.hash) + .ok()? 
+ .is_some() + { + return None + } + + if !self.backfill_sync_state.is_idle() { + return None + } + + // try to append the block + match self.insert_block(block) { + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(_))) => { + if self.is_sync_target_head(block_num_hash.hash) { + return Some(TreeEvent::TreeAction(TreeAction::MakeCanonical( + block_num_hash.hash, + ))) + } + } + Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head, missing_ancestor })) => { + // block is not connected to the canonical head, we need to download + // its missing branch first + return self.on_disconnected_downloaded_block(block_num_hash, missing_ancestor, head) + } + _ => {} + } + None + } + + fn insert_block_without_senders( + &mut self, + block: SealedBlock, + ) -> Result { + match block.try_seal_with_senders() { + Ok(block) => self.insert_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn insert_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + self.insert_block_inner(block.clone()) + .map_err(|kind| InsertBlockError::new(block.block, kind)) + } + + fn insert_block_inner( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + if self.block_by_hash(block.hash())?.is_some() { + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment))) + } + + // validate block consensus rules + self.validate_block(&block)?; + + let state_provider = self.state_provider(block.parent_hash).unwrap(); + let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); + + let block_number = block.number; + let block_hash = block.hash(); + let block = block.unseal(); + let output = executor.execute((&block, U256::MAX).into()).unwrap(); + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + // TODO: change StateRootProvider API to accept hashed post state + let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + + let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?; + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + let executed = ExecutedBlock { + block: Arc::new(block.block.seal(block_hash)), + senders: Arc::new(block.senders), + execution_output: Arc::new(ExecutionOutcome::new( + output.state, + Receipts::from(output.receipts), + block_number, + vec![Requests::from(output.requests)], + )), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; + self.state.tree_state.insert_executed(executed); + + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment))) + } + + /// Attempts to find the header for the given block hash if it is canonical. + pub fn find_canonical_header(&self, hash: B256) -> Result, ProviderError> { + let mut canonical = self.canonical_in_memory_state.header_by_hash(hash); + + if canonical.is_none() { + canonical = self.provider.header(&hash)?.map(|header| header.seal(hash)); + } + + Ok(canonical) + } + + /// Updates the tracked finalized block if we have it. 
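// Sketch of the tiered-lookup pattern behind `find_canonical_header` above:
// unpersisted canonical headers are served from memory first, with the
// database as fallback (note that `block_by_hash` earlier checks the database
// first, i.e. the opposite order). The two maps are stand-ins for the
// in-memory state and the provider.
use std::collections::HashMap;

fn find_header(
    in_memory: &HashMap<u64, &'static str>,
    on_disk: &HashMap<u64, &'static str>,
    hash: u64,
) -> Option<&'static str> {
    in_memory
        .get(&hash)
        .copied()
        // only fall back to the (slower) database when memory misses
        .or_else(|| on_disk.get(&hash).copied())
}

fn main() {
    let mut memory = HashMap::new();
    memory.insert(2, "header-2 (in memory)");
    let mut disk = HashMap::new();
    disk.insert(1, "header-1 (on disk)");

    assert_eq!(find_header(&memory, &disk, 2), Some("header-2 (in memory)"));
    assert_eq!(find_header(&memory, &disk, 1), Some("header-1 (on disk)"));
    assert_eq!(find_header(&memory, &disk, 9), None);
}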
+ fn update_finalized_block( + &self, + finalized_block_hash: B256, + ) -> Result<(), OnForkChoiceUpdated> { + if finalized_block_hash.is_zero() { + return Ok(()) + } + + match self.find_canonical_header(finalized_block_hash) { + Ok(None) => { + debug!(target: "engine", "Finalized block not found in canonical chain"); + // if the finalized block is not known, we can't update the finalized block + return Err(OnForkChoiceUpdated::invalid_state()) + } + Ok(Some(finalized)) => { + self.canonical_in_memory_state.set_finalized(finalized); + } + Err(err) => { + error!(target: "engine", %err, "Failed to fetch finalized block header"); + } + } + + Ok(()) + } + + /// Updates the tracked safe block if we have it + fn update_safe_block(&self, safe_block_hash: B256) -> Result<(), OnForkChoiceUpdated> { + if safe_block_hash.is_zero() { + return Ok(()) + } + + match self.find_canonical_header(safe_block_hash) { + Ok(None) => { + debug!(target: "engine", "Safe block not found in canonical chain"); + // if the safe block is not known, we can't update the safe block + return Err(OnForkChoiceUpdated::invalid_state()) + } + Ok(Some(finalized)) => { + self.canonical_in_memory_state.set_safe(finalized); + } + Err(err) => { + error!(target: "engine", %err, "Failed to fetch safe block header"); + } + } + + Ok(()) + } + + /// Ensures that the given forkchoice state is consistent, assuming the head block has been + /// made canonical. + /// + /// If the forkchoice state is consistent, this will return Ok(()). Otherwise, this will + /// return an instance of [`OnForkChoiceUpdated`] that is INVALID. + /// + /// This also updates the safe and finalized blocks in the [`CanonicalInMemoryState`], if they + /// are consistent with the head block. + fn ensure_consistent_forkchoice_state( + &self, + state: ForkchoiceState, + ) -> Result<(), OnForkChoiceUpdated> { + // Ensure that the finalized block, if not zero, is known and in the canonical chain + // after the head block is canonicalized. + // + // This ensures that the finalized block is consistent with the head block, i.e. the + // finalized block is an ancestor of the head block. + self.update_finalized_block(state.finalized_block_hash)?; + + // Also ensure that the safe block, if not zero, is known and in the canonical chain + // after the head block is canonicalized. + // + // This ensures that the safe block is consistent with the head block, i.e. the safe + // block is an ancestor of the head block. + self.update_safe_block(state.safe_block_hash) + } + + /// Pre-validate forkchoice update and check whether it can be processed. + /// + /// This method returns the update outcome if validation fails or + /// the node is syncing and the update cannot be processed at the moment. + fn pre_validate_forkchoice_update( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())) + } + + // check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? 
{ + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) + } + + if !self.backfill_sync_state.is_idle() { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())) + } + + Ok(None) + } + + /// Validates the payload attributes with respect to the header and fork choice state. + /// + /// Note: At this point, the fork choice update is considered to be VALID, however, we can still + /// return an error if the payload attributes are invalid. + fn process_payload_attributes( + &self, + attrs: T::PayloadAttributes, + head: &Header, + state: ForkchoiceState, + ) -> OnForkChoiceUpdated { + // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp + // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held + // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT + // begin a payload build process. In such an event, the forkchoiceState update MUST NOT + // be rolled back. + if attrs.timestamp() <= head.timestamp { + return OnForkChoiceUpdated::invalid_payload_attributes() + } + + // 8. Client software MUST begin a payload build process building on top of + // forkchoiceState.headBlockHash and identified via buildProcessId value if + // payloadAttributes is not null and the forkchoice state has been updated successfully. + // The build process is specified in the Payload building section. + match ::try_new( + state.head_block_hash, + attrs, + ) { + Ok(attributes) => { + // send the payload to the builder and return the receiver for the pending payload + // id, initiating payload job is handled asynchronously + let pending_payload_id = self.payload_builder.send_new_payload(attributes); + + // Client software MUST respond to this method call in the following way: + // { + // payloadStatus: { + // status: VALID, + // latestValidHash: forkchoiceState.headBlockHash, + // validationError: null + // }, + // payloadId: buildProcessId + // } + // + // if the payload is deemed VALID and the build process has begun. 
+ OnForkChoiceUpdated::updated_with_pending_payload_id( + PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)), + pending_payload_id, + ) + } + Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(), + } + } +} + +impl EngineApiTreeHandler for EngineApiTreeHandlerImpl +where + P: BlockReader + StateProviderFactory + Clone + 'static, + E: BlockExecutorProvider, + T: EngineTypes, +{ + type Engine = T; + + fn on_downloaded(&mut self, blocks: Vec) -> Option { + for block in blocks { + if let Some(event) = self.on_downloaded_block(block) { + let needs_backfill = event.is_backfill_action(); + self.on_tree_event(event); + if needs_backfill { + // can exit early if backfill is needed + break + } + } + } + None + } + + #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine")] + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult> { + trace!(target: "engine", "invoked new payload"); + // Ensures that the given payload does not violate any consensus rules that concern the + // block's layout, like: + // - missing or invalid base fee + // - invalid extra data + // - invalid transactions + // - incorrect hash + // - the versioned hashes passed with the payload do not exactly match transaction + // versioned hashes + // - the block does not contain blob transactions if it is pre-cancun + // + // This validates the following engine API rule: + // + // 3. Given the expected array of blob versioned hashes client software **MUST** run its + // validation by taking the following steps: + // + // 1. Obtain the actual array by concatenating blob versioned hashes lists + // (`tx.blob_versioned_hashes`) of each [blob + // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included + // in the payload, respecting the order of inclusion. If the payload has no blob + // transactions the expected array **MUST** be `[]`. + // + // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | + // null}` if the expected and the actual arrays don't match. + // + // This validation **MUST** be instantly run in all cases even during active sync process. + let parent_hash = payload.parent_hash(); + let block = match self + .payload_validator + .ensure_well_formed_payload(payload, cancun_fields.into()) + { + Ok(block) => block, + Err(error) => { + error!(target: "engine::tree", %error, "Invalid payload"); + // we need to convert the error to a payload status (response to the CL) + + let latest_valid_hash = + if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { + // Engine-API rules: + // > `latestValidHash: null` if the blockHash validation has failed () + // > `latestValidHash: null` if the expected and the actual arrays don't match () + None + } else { + self.latest_valid_hash_for_invalid_payload(parent_hash)? + }; + + let status = PayloadStatusEnum::from(error); + return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) + } + }; + + let block_hash = block.hash(); + let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); + if lowest_buffered_ancestor == block_hash { + lowest_buffered_ancestor = block.parent_hash; + } + + // now check the block itself + if let Some(status) = + self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)? 
+ { + return Ok(TreeOutcome::new(status)) + } + + let status = if !self.backfill_sync_state.is_idle() { + self.buffer_block_without_senders(block).unwrap(); + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + let mut latest_valid_hash = None; + let status = match self.insert_block_without_senders(block).unwrap() { + InsertPayloadOk::Inserted(BlockStatus::Valid(_)) | + InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { + latest_valid_hash = Some(block_hash); + PayloadStatusEnum::Valid + } + InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | + InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { + // not known to be invalid, but we don't know anything else + PayloadStatusEnum::Syncing + } + }; + PayloadStatus::new(status, latest_valid_hash) + }; + + let mut outcome = TreeOutcome::new(status); + if outcome.outcome.is_valid() && self.is_sync_target_head(block_hash) { + // if the block is valid and it is the sync target head, make it canonical + outcome = + outcome.with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash))); + } + + Ok(outcome) + } + + #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine")] + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult> { + trace!(target: "engine", ?attrs, "invoked forkchoice update"); + self.canonical_in_memory_state.on_forkchoice_update_received(); + + if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { + self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status()); + return Ok(TreeOutcome::new(on_updated)) + } + + let valid_outcome = |head| { + TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( + PayloadStatusEnum::Valid, + Some(head), + ))) + }; + + // Process the forkchoice update by trying to make the head block canonical + // + // We can only process this forkchoice update if: + // - we have the `head` block + // - the head block is part of a chain that is connected to the canonical chain. This + // includes reorgs. + // + // Performing a FCU involves: + // - marking the FCU's head block as canonical + // - updating in memory state to reflect the new canonical chain + // - updating canonical state trackers + // - emitting a canonicalization event for the new chain (including reorg) + // - if we have payload attributes, delegate them to the payload service + + // 1. ensure we have a new head block + if self.state.tree_state.canonical_block_hash() == state.head_block_hash { + trace!(target: "engine", "fcu head hash is already canonical"); + // the head block is already canonical + return Ok(valid_outcome(state.head_block_hash)) + } + + // 2. 
ensure we can apply a new chain update for the head block + if let Some(chain_update) = self.state.tree_state.on_new_head(state.head_block_hash) { + trace!(target: "engine", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count() ,"applying new chain update"); + // update the tracked canonical head + self.state.tree_state.set_canonical_head(chain_update.tip().num_hash()); + + let tip = chain_update.tip().header.clone(); + let notification = chain_update.to_chain_notification(); + + // update the tracked in-memory state with the new chain + self.canonical_in_memory_state.update_chain(chain_update); + self.canonical_in_memory_state.set_canonical_head(tip.clone()); + + // sends an event to all active listeners about the new canonical chain + self.canonical_in_memory_state.notify_canon_state(notification); + + // update the safe and finalized blocks and ensure their values are valid, but only + // after the head block is made canonical + if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { + // safe or finalized hashes are invalid + return Ok(TreeOutcome::new(outcome)) + } + + if let Some(attr) = attrs { + let updated = self.process_payload_attributes(attr, &tip, state); + return Ok(TreeOutcome::new(updated)) + } + + return Ok(valid_outcome(state.head_block_hash)) + } + + // 3. we don't have the block to perform the update + let target = self.lowest_buffered_ancestor_or(state.head_block_hash); + + Ok(TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::from_status( + PayloadStatusEnum::Syncing, + ))) + .with_event(TreeEvent::Download(DownloadRequest::single_block(target)))) + } +} + +/// The state of the persistence task. +#[derive(Default, Debug)] +struct PersistenceState { + /// Hash of the last block persisted. + last_persisted_block_hash: B256, + /// Receiver end of channel where the result of the persistence task will be + /// sent when done. A None value means there's no persistence task in progress. + rx: Option>, + /// The last persisted block number. + last_persisted_block_number: u64, +} + +impl PersistenceState { + /// Determines if there is a persistence task in progress by checking if the + /// receiver is set. + const fn in_progress(&self) -> bool { + self.rx.is_some() + } + + /// Sets state for a started persistence task. + fn start(&mut self, rx: oneshot::Receiver) { + self.rx = Some(rx); + } + + /// Sets state for a finished persistence task. 
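// A compact, runnable model of the `PersistenceState` lifecycle above:
// `start` stores the oneshot receiver, `in_progress` is simply "receiver
// present", and `finish` clears it while recording the persisted block.
// `[u8; 32]` stands in for B256; tokio's oneshot is assumed, as in the diff.
use tokio::sync::oneshot;

#[derive(Default)]
struct PersistenceState {
    last_persisted_block_hash: [u8; 32],
    rx: Option<oneshot::Receiver<[u8; 32]>>,
    last_persisted_block_number: u64,
}

impl PersistenceState {
    const fn in_progress(&self) -> bool {
        self.rx.is_some()
    }
    fn start(&mut self, rx: oneshot::Receiver<[u8; 32]>) {
        self.rx = Some(rx);
    }
    fn finish(&mut self, hash: [u8; 32], number: u64) {
        self.rx = None;
        self.last_persisted_block_hash = hash;
        self.last_persisted_block_number = number;
    }
}

fn main() {
    let mut state = PersistenceState::default();
    let (tx, rx) = oneshot::channel();
    state.start(rx);
    assert!(state.in_progress());

    // the persistence task reports the last persisted block hash when done
    tx.send([1u8; 32]).unwrap();
    let hash = state.rx.take().unwrap().try_recv().unwrap();
    state.finish(hash, 42);
    assert!(!state.in_progress());
    assert_eq!(state.last_persisted_block_number, 42);
}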
+ fn finish(&mut self, last_persisted_block_hash: B256, last_persisted_block_number: u64) { + self.rx = None; + self.last_persisted_block_number = last_persisted_block_number; + self.last_persisted_block_hash = last_persisted_block_hash; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::persistence::PersistenceAction; + use alloy_rlp::Decodable; + use reth_beacon_consensus::EthBeaconConsensus; + use reth_chain_state::{ + test_utils::{get_executed_block_with_number, get_executed_blocks}, + BlockState, + }; + use reth_chainspec::{ChainSpecBuilder, HOLESKY, MAINNET}; + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_evm::test_utils::MockExecutorProvider; + use reth_payload_builder::PayloadServiceCommand; + use reth_primitives::Bytes; + use reth_provider::test_utils::MockEthProvider; + use reth_rpc_types_compat::engine::block_to_payload_v1; + use std::{ + str::FromStr, + sync::mpsc::{channel, Sender}, + }; + use tokio::sync::mpsc::unbounded_channel; + + struct TestHarness { + tree: EngineApiTreeHandlerImpl, + to_tree_tx: Sender>>, + blocks: Vec, + action_rx: Receiver, + payload_command_rx: UnboundedReceiver>, + } + + impl TestHarness { + fn holesky() -> Self { + let (action_tx, action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + let chain_spec = HOLESKY.clone(); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); + + let provider = MockEthProvider::default(); + let executor_factory = MockExecutorProvider::default(); + + let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + + let (to_tree_tx, to_tree_rx) = channel(); + let (from_tree_tx, from_tree_rx) = unbounded_channel(); + + let header = chain_spec.genesis_header().seal_slow(); + let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); + let canonical_in_memory_state = CanonicalInMemoryState::with_head(header); + + let (to_payload_service, payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + let tree = EngineApiTreeHandlerImpl::new( + provider, + executor_factory, + consensus, + payload_validator, + to_tree_rx, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + payload_builder, + ); + + Self { tree, to_tree_tx, blocks: vec![], action_rx, payload_command_rx } + } + } + + fn get_default_test_harness(number_of_blocks: u64) -> TestHarness { + let blocks: Vec<_> = get_executed_blocks(0..number_of_blocks).collect(); + + let mut blocks_by_hash = HashMap::new(); + let mut blocks_by_number = BTreeMap::new(); + let mut state_by_hash = HashMap::new(); + let mut hash_by_number = HashMap::new(); + for block in &blocks { + let sealed_block = block.block(); + let hash = sealed_block.hash(); + let number = sealed_block.number; + blocks_by_hash.insert(hash, block.clone()); + blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone()); + state_by_hash.insert(hash, Arc::new(BlockState::new(block.clone()))); + hash_by_number.insert(number, hash); + } + let tree_state = TreeState { blocks_by_hash, blocks_by_number, ..Default::default() }; + + let (action_tx, action_rx) = channel(); + let persistence_handle = PersistenceHandle::new(action_tx); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); + + let provider = 
MockEthProvider::default(); + let executor_factory = MockExecutorProvider::default(); + executor_factory.extend(vec![ExecutionOutcome::default()]); + + let payload_validator = ExecutionPayloadValidator::new(chain_spec); + + let (to_tree_tx, to_tree_rx) = channel(); + let (from_tree_tx, from_tree_rx) = unbounded_channel(); + + let engine_api_tree_state = EngineApiTreeState { + invalid_headers: InvalidHeaderCache::new(10), + buffer: BlockBuffer::new(10), + tree_state, + forkchoice_state_tracker: ForkchoiceStateTracker::default(), + }; + + let header = blocks.first().unwrap().block().header.clone(); + let canonical_in_memory_state = CanonicalInMemoryState::with_head(header); + + let (to_payload_service, payload_command_rx) = unbounded_channel(); + let payload_builder = PayloadBuilderHandle::new(to_payload_service); + let mut tree = EngineApiTreeHandlerImpl::new( + provider, + executor_factory, + consensus, + payload_validator, + to_tree_rx, + from_tree_tx, + engine_api_tree_state, + canonical_in_memory_state, + persistence_handle, + payload_builder, + ); + let last_executed_block = blocks.last().unwrap().clone(); + let pending = Some(BlockState::new(last_executed_block)); + tree.canonical_in_memory_state = + CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending); + + TestHarness { tree, to_tree_tx, blocks, action_rx, payload_command_rx } + } + + #[tokio::test] + async fn test_tree_persist_blocks() { + // we need more than PERSISTENCE_THRESHOLD blocks to trigger the + // persistence task. + let TestHarness { tree, to_tree_tx, action_rx, mut blocks, payload_command_rx } = + get_default_test_harness(PERSISTENCE_THRESHOLD + 1); + std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| tree.run()).unwrap(); + + // send a message to the tree to enter the main loop. 
+ to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); + + let received_action = action_rx.recv().expect("Failed to receive saved blocks"); + if let PersistenceAction::SaveBlocks((saved_blocks, _)) = received_action { + // only PERSISTENCE_THRESHOLD will be persisted + blocks.pop(); + assert_eq!(saved_blocks.len() as u64, PERSISTENCE_THRESHOLD); + assert_eq!(saved_blocks, blocks); + } else { + panic!("unexpected action received {received_action:?}"); + } + } + + #[tokio::test] + async fn test_in_memory_state_trait_impl() { + let TestHarness { tree, to_tree_tx, action_rx, blocks, payload_command_rx } = + get_default_test_harness(10); + + let head_block = blocks.last().unwrap().block(); + let first_block = blocks.first().unwrap().block(); + + for executed_block in blocks { + let sealed_block = executed_block.block(); + + let expected_state = BlockState::new(executed_block.clone()); + + let actual_state_by_hash = + tree.canonical_in_memory_state.state_by_hash(sealed_block.hash()).unwrap(); + assert_eq!(expected_state, *actual_state_by_hash); + + let actual_state_by_number = + tree.canonical_in_memory_state.state_by_number(sealed_block.number).unwrap(); + assert_eq!(expected_state, *actual_state_by_number); + } + } + + #[tokio::test] + async fn test_engine_request_during_backfill() { + let TestHarness { mut tree, to_tree_tx, action_rx, blocks, payload_command_rx } = + get_default_test_harness(PERSISTENCE_THRESHOLD); + + // set backfill active + tree.backfill_sync_state = BackfillSyncState::Active; + + let (tx, rx) = oneshot::channel(); + tree.on_engine_message(FromEngine::Request(BeaconEngineMessage::ForkchoiceUpdated { + state: ForkchoiceState { + head_block_hash: B256::random(), + safe_block_hash: B256::random(), + finalized_block_hash: B256::random(), + }, + payload_attrs: None, + tx, + })); + + let resp = rx.await.unwrap().unwrap().await.unwrap(); + assert!(resp.payload_status.is_syncing()); + } + + #[tokio::test] + async fn test_holesky_payload() { + let s = include_str!("../test-data/holesky/1.rlp"); + let data = Bytes::from_str(s).unwrap(); + let block = Block::decode(&mut data.as_ref()).unwrap(); + let sealed = block.seal_slow(); + let payload = block_to_payload_v1(sealed); + + let TestHarness { mut tree, to_tree_tx, action_rx, blocks, payload_command_rx } = + TestHarness::holesky(); + + // set backfill active + tree.backfill_sync_state = BackfillSyncState::Active; + + let (tx, rx) = oneshot::channel(); + tree.on_engine_message(FromEngine::Request(BeaconEngineMessage::NewPayload { + payload: payload.clone().into(), + cancun_fields: None, + tx, + })); + + let resp = rx.await.unwrap().unwrap(); + assert!(resp.is_syncing()); + } + + #[tokio::test] + async fn test_tree_state_insert_executed() { + let mut tree_state = TreeState::new(BlockNumHash::default()); + let blocks: Vec<_> = get_executed_blocks(1..4).collect(); + + tree_state.insert_executed(blocks[0].clone()); + tree_state.insert_executed(blocks[1].clone()); + + assert_eq!( + tree_state.parent_to_child.get(&blocks[0].block.hash()), + Some(&HashSet::from([blocks[1].block.hash()])) + ); + + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + + tree_state.insert_executed(blocks[2].clone()); + + assert_eq!( + tree_state.parent_to_child.get(&blocks[1].block.hash()), + Some(&HashSet::from([blocks[2].block.hash()])) + ); + assert!(tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + + assert!(!tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); + } + + #[tokio::test] + 
async fn test_tree_state_insert_executed_with_reorg() { + let mut tree_state = TreeState::new(BlockNumHash::default()); + let blocks: Vec<_> = get_executed_blocks(1..6).collect(); + + for block in &blocks { + tree_state.insert_executed(block.clone()); + } + assert_eq!(tree_state.blocks_by_hash.len(), 5); + + let fork_block_3 = get_executed_block_with_number(3, blocks[1].block.hash()); + let fork_block_4 = get_executed_block_with_number(4, fork_block_3.block.hash()); + let fork_block_5 = get_executed_block_with_number(5, fork_block_4.block.hash()); + + tree_state.insert_executed(fork_block_3.clone()); + tree_state.insert_executed(fork_block_4.clone()); + tree_state.insert_executed(fork_block_5.clone()); + + assert_eq!(tree_state.blocks_by_hash.len(), 8); + assert_eq!(tree_state.blocks_by_number[&3].len(), 2); // two blocks at height 3 (original and fork) + assert_eq!(tree_state.parent_to_child[&blocks[1].block.hash()].len(), 2); // block 2 should have two children + + // verify that we can insert the same block again without issues + tree_state.insert_executed(fork_block_4.clone()); + assert_eq!(tree_state.blocks_by_hash.len(), 8); + + assert!(tree_state.parent_to_child[&fork_block_3.block.hash()] + .contains(&fork_block_4.block.hash())); + assert!(tree_state.parent_to_child[&fork_block_4.block.hash()] + .contains(&fork_block_5.block.hash())); + + assert_eq!(tree_state.blocks_by_number[&4].len(), 2); + assert_eq!(tree_state.blocks_by_number[&5].len(), 2); + } + + #[tokio::test] + async fn test_tree_state_remove_before() { + let mut tree_state = TreeState::new(BlockNumHash::default()); + let blocks: Vec<_> = get_executed_blocks(1..6).collect(); + + for block in &blocks { + tree_state.insert_executed(block.clone()); + } + + tree_state.remove_before(3); + + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_number.contains_key(&1)); + assert!(!tree_state.blocks_by_number.contains_key(&2)); + + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_number.contains_key(&3)); + assert!(tree_state.blocks_by_number.contains_key(&4)); + assert!(tree_state.blocks_by_number.contains_key(&5)); + + assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + + assert_eq!( + tree_state.parent_to_child.get(&blocks[2].block.hash()), + Some(&HashSet::from([blocks[3].block.hash()])) + ); + assert_eq!( + tree_state.parent_to_child.get(&blocks[3].block.hash()), + Some(&HashSet::from([blocks[4].block.hash()])) + ); + } + + #[tokio::test] + async fn test_tree_state_on_new_head() { + let mut tree_state = TreeState::new(BlockNumHash::default()); + let blocks: Vec<_> = get_executed_blocks(1..6).collect(); + + for block in &blocks { + tree_state.insert_executed(block.clone()); + } + + // set block 3 as the current canonical head + tree_state.set_canonical_head(blocks[2].block.num_hash()); + + // create a fork from block 2 + let fork_block_3 = 
get_executed_block_with_number(3, blocks[1].block.hash()); + let fork_block_4 = get_executed_block_with_number(4, fork_block_3.block.hash()); + let fork_block_5 = get_executed_block_with_number(5, fork_block_4.block.hash()); + + tree_state.insert_executed(fork_block_3.clone()); + tree_state.insert_executed(fork_block_4.clone()); + tree_state.insert_executed(fork_block_5.clone()); + + // normal (non-reorg) case + let result = tree_state.on_new_head(blocks[4].block.hash()); + assert!(matches!(result, Some(NewCanonicalChain::Commit { .. }))); + if let Some(NewCanonicalChain::Commit { new }) = result { + assert_eq!(new.len(), 2); + assert_eq!(new[0].block.hash(), blocks[3].block.hash()); + assert_eq!(new[1].block.hash(), blocks[4].block.hash()); + } + + // reorg case + let result = tree_state.on_new_head(fork_block_5.block.hash()); + assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. }))); + if let Some(NewCanonicalChain::Reorg { new, old }) = result { + assert_eq!(new.len(), 3); + assert_eq!(new[0].block.hash(), fork_block_3.block.hash()); + assert_eq!(new[1].block.hash(), fork_block_4.block.hash()); + assert_eq!(new[2].block.hash(), fork_block_5.block.hash()); + + assert_eq!(old.len(), 1); + assert_eq!(old[0].block.hash(), blocks[2].block.hash()); + } + } +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs deleted file mode 100644 index 0ab987a33096..000000000000 --- a/crates/engine/tree/src/tree/mod.rs +++ /dev/null @@ -1,1328 +0,0 @@ -use crate::{ - backfill::BackfillAction, - chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, - persistence::PersistenceHandle, -}; -use parking_lot::RwLock; -use reth_beacon_consensus::{ - BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, OnForkChoiceUpdated, -}; -use reth_blockchain_tree::{ - error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus, -}; -use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk}; -use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_errors::{ConsensusError, ProviderResult}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_payload_primitives::PayloadTypes; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - Address, Block, BlockNumber, GotExpected, Receipts, Requests, SealedBlock, - SealedBlockWithSenders, SealedHeader, B256, U256, -}; -use reth_provider::{ - providers::ChainInfoTracker, BlockReader, ExecutionOutcome, StateProvider, - StateProviderFactory, StateRootProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_rpc_types::{ - engine::{ - CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, - }, - ExecutionPayload, -}; -use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, - sync::{mpsc::Receiver, Arc}, -}; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tracing::*; - -mod memory_overlay; -pub use memory_overlay::MemoryOverlayStateProvider; - -/// Maximum number of blocks to be kept only in memory without triggering persistence. -const PERSISTENCE_THRESHOLD: u64 = 256; - -/// Represents an executed block stored in-memory. 
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct ExecutedBlock {
-    block: Arc<SealedBlock>,
-    senders: Arc<Vec<Address>>,
-    execution_output: Arc<ExecutionOutcome>,
-    hashed_state: Arc<HashedPostState>,
-    trie: Arc<TrieUpdates>,
-}
-
-impl ExecutedBlock {
-    pub(crate) const fn new(
-        block: Arc<SealedBlock>,
-        senders: Arc<Vec<Address>>,
-        execution_output: Arc<ExecutionOutcome>,
-        hashed_state: Arc<HashedPostState>,
-        trie: Arc<TrieUpdates>,
-    ) -> Self {
-        Self { block, senders, execution_output, hashed_state, trie }
-    }
-
-    /// Returns a reference to the executed block.
-    pub(crate) fn block(&self) -> &SealedBlock {
-        &self.block
-    }
-
-    /// Returns a reference to the block's senders
-    pub(crate) fn senders(&self) -> &Vec<Address> {
-        &self.senders
-    }
-
-    /// Returns a reference to the block's execution outcome
-    pub(crate) fn execution_outcome(&self) -> &ExecutionOutcome {
-        &self.execution_output
-    }
-
-    /// Returns a reference to the hashed state result of the execution outcome
-    pub(crate) fn hashed_state(&self) -> &HashedPostState {
-        &self.hashed_state
-    }
-
-    /// Returns a reference to the trie updates for the block
-    pub(crate) fn trie_updates(&self) -> &TrieUpdates {
-        &self.trie
-    }
-}
-
-/// Keeps track of the state of the tree.
-#[derive(Debug, Default)]
-pub struct TreeState {
-    /// All executed blocks by hash.
-    blocks_by_hash: HashMap<B256, ExecutedBlock>,
-    /// Executed blocks grouped by their respective block number.
-    blocks_by_number: BTreeMap<BlockNumber, Vec<ExecutedBlock>>,
-    /// Pending state not yet applied
-    pending: Option<Arc<State>>,
-    /// Block number and hash of the current head.
-    current_head: Option<(BlockNumber, B256)>,
-}
-
-impl TreeState {
-    fn block_by_hash(&self, hash: B256) -> Option<Arc<SealedBlock>> {
-        self.blocks_by_hash.get(&hash).map(|b| b.block.clone())
-    }
-
-    fn block_by_number(&self, number: BlockNumber) -> Option<Arc<SealedBlock>> {
-        self.blocks_by_number
-            .get(&number)
-            .and_then(|blocks| blocks.last())
-            .map(|executed_block| executed_block.block.clone())
-    }
-
-    /// Insert executed block into the state.
-    fn insert_executed(&mut self, executed: ExecutedBlock) {
-        self.blocks_by_number.entry(executed.block.number).or_default().push(executed.clone());
-        let existing = self.blocks_by_hash.insert(executed.block.hash(), executed);
-        debug_assert!(existing.is_none(), "inserted duplicate block");
-    }
-
-    /// Remove blocks before specified block number.
-    pub(crate) fn remove_before(&mut self, block_number: BlockNumber) {
-        while self
-            .blocks_by_number
-            .first_key_value()
-            .map(|entry| entry.0 < &block_number)
-            .unwrap_or_default()
-        {
-            let (_, to_remove) = self.blocks_by_number.pop_first().unwrap();
-            for block in to_remove {
-                let block_hash = block.block.hash();
-                let removed = self.blocks_by_hash.remove(&block_hash);
-                debug_assert!(
-                    removed.is_some(),
-                    "attempted to remove non-existing block {block_hash}"
-                );
-            }
-        }
-    }
-
-    /// Returns the maximum block number stored.
-    pub(crate) fn max_block_number(&self) -> BlockNumber {
-        *self.blocks_by_number.last_key_value().unwrap_or((&BlockNumber::default(), &vec![])).0
-    }
-}
-
-/// Container type for in memory state data.
-#[derive(Debug, Default)]
-pub struct InMemoryStateImpl {
-    blocks: RwLock<HashMap<B256, Arc<State>>>,
-    numbers: RwLock<HashMap<u64, B256>>,
-    pending: RwLock<Option<State>>,
-}
-
-impl InMemoryStateImpl {
-    const fn new(
-        blocks: HashMap<B256, Arc<State>>,
-        numbers: HashMap<u64, B256>,
-        pending: Option<State>,
-    ) -> Self {
-        Self {
-            blocks: RwLock::new(blocks),
-            numbers: RwLock::new(numbers),
-            pending: RwLock::new(pending),
-        }
-    }
-}
-
-impl InMemoryState for InMemoryStateImpl {
-    fn state_by_hash(&self, hash: B256) -> Option<Arc<State>> {
-        self.blocks.read().get(&hash).cloned()
-    }
-
-    fn state_by_number(&self, number: u64) -> Option<Arc<State>> {
-        self.numbers.read().get(&number).and_then(|hash| self.blocks.read().get(hash).cloned())
-    }
-
-    fn head_state(&self) -> Option<Arc<State>> {
-        self.numbers
-            .read()
-            .iter()
-            .max_by_key(|(&number, _)| number)
-            .and_then(|(_, hash)| self.blocks.read().get(hash).cloned())
-    }
-
-    fn pending_state(&self) -> Option<Arc<State>> {
-        self.pending.read().as_ref().map(|state| Arc::new(State(state.0.clone())))
-    }
-}
-
-/// Inner type to provide in memory state. It includes a chain tracker to be
-/// advanced internally by the tree.
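The `remove_before` loop above leans on `BTreeMap` keeping its keys ordered: keep popping the smallest entry while it is below the cutoff. A minimal standalone sketch of the same idiom (illustrative only):

```rust
use std::collections::BTreeMap;

/// Drop every entry whose key is strictly below `threshold`.
fn remove_before(map: &mut BTreeMap<u64, Vec<&'static str>>, threshold: u64) {
    while map.first_key_value().map(|(key, _)| *key < threshold).unwrap_or_default() {
        let (key, removed) = map.pop_first().expect("non-empty: checked above");
        println!("pruned {key}: {removed:?}");
    }
}

fn main() {
    let mut map = BTreeMap::from([(1, vec!["a"]), (2, vec!["b"]), (3, vec!["c"])]);
    remove_before(&mut map, 3);
    assert_eq!(map.len(), 1); // only entry 3 remains
}
```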
-#[derive(Debug)] -struct CanonicalInMemoryStateInner { - chain_info_tracker: ChainInfoTracker, - in_memory_state: InMemoryStateImpl, -} - -/// This type is responsible for providing the blocks, receipts, and state for -/// all canonical blocks not on disk yet and keeps track of the block range that -/// is in memory. -#[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - inner: Arc, -} - -impl CanonicalInMemoryState { - fn new( - blocks: HashMap>, - numbers: HashMap, - pending: Option, - ) -> Self { - let in_memory_state = InMemoryStateImpl::new(blocks, numbers, pending); - let head_state = in_memory_state.head_state(); - let header = match head_state { - Some(state) => state.block().block().header.clone(), - None => SealedHeader::default(), - }; - let chain_info_tracker = ChainInfoTracker::new(header); - let inner = CanonicalInMemoryStateInner { chain_info_tracker, in_memory_state }; - - Self { inner: Arc::new(inner) } - } - - fn with_header(header: SealedHeader) -> Self { - let chain_info_tracker = ChainInfoTracker::new(header); - let in_memory_state = InMemoryStateImpl::default(); - let inner = CanonicalInMemoryStateInner { chain_info_tracker, in_memory_state }; - - Self { inner: Arc::new(inner) } - } -} - -impl InMemoryState for CanonicalInMemoryState { - fn state_by_hash(&self, hash: B256) -> Option> { - self.inner.in_memory_state.state_by_hash(hash) - } - - fn state_by_number(&self, number: u64) -> Option> { - self.inner.in_memory_state.state_by_number(number) - } - - fn head_state(&self) -> Option> { - self.inner.in_memory_state.head_state() - } - - fn pending_state(&self) -> Option> { - self.inner.in_memory_state.pending_state() - } -} - -/// Tracks the state of the engine api internals. -/// -/// This type is shareable. -#[derive(Debug)] -pub struct EngineApiTreeState { - /// Tracks the state of the blockchain tree. - tree_state: TreeState, - /// Tracks the forkchoice state updates received by the CL. - forkchoice_state_tracker: ForkchoiceStateTracker, - /// Buffer of detached blocks. - buffer: BlockBuffer, - /// Tracks the header of invalid payloads that were rejected by the engine because they're - /// invalid. - invalid_headers: InvalidHeaderCache, -} - -impl EngineApiTreeState { - fn new(block_buffer_limit: u32, max_invalid_header_cache_length: u32) -> Self { - Self { - invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length), - buffer: BlockBuffer::new(block_buffer_limit), - tree_state: TreeState::default(), - forkchoice_state_tracker: ForkchoiceStateTracker::default(), - } - } -} - -/// The type responsible for processing engine API requests. -/// -/// TODO: design: should the engine handler functions also accept the response channel or return the -/// result and the caller redirects the response -pub trait EngineApiTreeHandler { - /// The engine type that this handler is for. - type Engine: EngineTypes; - - /// Invoked when previously requested blocks were downloaded. - fn on_downloaded(&mut self, blocks: Vec) -> Option; - - /// When the Consensus layer receives a new block via the consensus gossip protocol, - /// the transactions in the block are sent to the execution layer in the form of a - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to Consensus layer, that - /// adds the block to the head of its own blockchain and attests to it. The block is then - /// broadcast over the consensus p2p network in the form of a "Beacon block". 
- /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). - /// - /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and - /// returns an error if an internal error occurred. - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - cancun_fields: Option, - ) -> ProviderResult>; - - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree - /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid - /// chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - ) -> ProviderResult>; -} - -/// The outcome of a tree operation. -#[derive(Debug)] -pub struct TreeOutcome { - /// The outcome of the operation. - pub outcome: T, - /// An optional event to tell the caller to do something. - pub event: Option, -} - -impl TreeOutcome { - /// Create new tree outcome. - pub const fn new(outcome: T) -> Self { - Self { outcome, event: None } - } - - /// Set event on the outcome. - pub fn with_event(mut self, event: TreeEvent) -> Self { - self.event = Some(event); - self - } -} - -/// Events that can be emitted by the [`EngineApiTreeHandler`]. -#[derive(Debug)] -pub enum TreeEvent { - /// Tree action is needed. - TreeAction(TreeAction), - /// Backfill action is needed. - BackfillAction(BackfillAction), - /// Block download is needed. - Download(DownloadRequest), -} - -/// The actions that can be performed on the tree. -#[derive(Debug)] -pub enum TreeAction { - /// Make target canonical. - MakeCanonical(B256), -} - -#[derive(Debug)] -pub struct EngineApiTreeHandlerImpl { - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - state: EngineApiTreeState, - incoming: Receiver>>, - outgoing: UnboundedSender, - persistence: PersistenceHandle, - persistence_state: PersistenceState, - /// (tmp) The flag indicating whether the pipeline is active. 
- is_pipeline_active: bool, - canonical_in_memory_state: CanonicalInMemoryState, - _marker: PhantomData, -} - -impl EngineApiTreeHandlerImpl -where - P: BlockReader + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider, - T: EngineTypes + 'static, -{ - #[allow(clippy::too_many_arguments)] - fn new( - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - incoming: Receiver>>, - outgoing: UnboundedSender, - state: EngineApiTreeState, - header: SealedHeader, - persistence: PersistenceHandle, - ) -> Self { - Self { - provider, - executor_provider, - consensus, - payload_validator, - incoming, - outgoing, - persistence, - persistence_state: PersistenceState::default(), - is_pipeline_active: false, - state, - canonical_in_memory_state: CanonicalInMemoryState::with_header(header), - _marker: PhantomData, - } - } - - #[allow(clippy::too_many_arguments)] - fn spawn_new( - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - incoming: Receiver>>, - state: EngineApiTreeState, - header: SealedHeader, - persistence: PersistenceHandle, - ) -> UnboundedSender { - let (outgoing, rx) = tokio::sync::mpsc::unbounded_channel(); - let task = Self::new( - provider, - executor_provider, - consensus, - payload_validator, - incoming, - outgoing.clone(), - state, - header, - persistence, - ); - std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); - outgoing - } - - fn run(mut self) { - while let Ok(msg) = self.incoming.recv() { - match msg { - FromEngine::Event(event) => match event { - FromOrchestrator::BackfillSyncFinished => { - todo!() - } - FromOrchestrator::BackfillSyncStarted => { - todo!() - } - }, - FromEngine::Request(request) => match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let output = self.on_forkchoice_updated(state, payload_attrs); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { - error!("Failed to send event: {err:?}"); - } - } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - let output = self.on_new_payload(payload, cancun_fields); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { - reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new(e)) - })) { - error!("Failed to send event: {err:?}"); - } - } - BeaconEngineMessage::TransitionConfigurationExchanged => { - todo!() - } - }, - FromEngine::DownloadedBlocks(blocks) => { - if let Some(event) = self.on_downloaded(blocks) { - if let Err(err) = self.outgoing.send(EngineApiEvent::FromTree(event)) { - error!("Failed to send event: {err:?}"); - } - } - } - } - - if self.should_persist() && !self.persistence_state.in_progress() { - let blocks_to_persist = self.get_blocks_to_persist(); - let (tx, rx) = oneshot::channel(); - self.persistence.save_blocks(blocks_to_persist, tx); - self.persistence_state.start(rx); - } - - if self.persistence_state.in_progress() { - let rx = self - .persistence_state - .rx - .as_mut() - .expect("if a persistence task is in progress Receiver must be Some"); - // Check if persistence has completed - if let Ok(last_persisted_block_hash) = rx.try_recv() { - if let Some(block) = - self.state.tree_state.block_by_hash(last_persisted_block_hash) - { - self.persistence_state.finish(last_persisted_block_hash, block.number); - self.remove_persisted_blocks_from_memory(); - } else { - error!("could not find persisted block with hash {last_persisted_block_hash} in 
memory"); - } - } - } - } - } - - /// Returns true if the canonical chain length minus the last persisted - /// block is greater than or equal to the persistence threshold. - fn should_persist(&self) -> bool { - self.state.tree_state.max_block_number() - - self.persistence_state.last_persisted_block_number >= - PERSISTENCE_THRESHOLD - } - - fn get_blocks_to_persist(&self) -> Vec { - let start = self.persistence_state.last_persisted_block_number; - let end = start + PERSISTENCE_THRESHOLD; - - // NOTE: this is an exclusive range, to try to include exactly PERSISTENCE_THRESHOLD blocks - self.state - .tree_state - .blocks_by_number - .range(start..end) - .flat_map(|(_, blocks)| blocks.iter().cloned()) - .collect() - } - - fn remove_persisted_blocks_from_memory(&mut self) { - let keys_to_remove: Vec = self - .state - .tree_state - .blocks_by_number - .range(..=self.persistence_state.last_persisted_block_number) - .map(|(&k, _)| k) - .collect(); - - for key in keys_to_remove { - if let Some(blocks) = self.state.tree_state.blocks_by_number.remove(&key) { - // Remove corresponding blocks from blocks_by_hash - for block in blocks { - self.state.tree_state.blocks_by_hash.remove(&block.block().hash()); - } - } - } - } - - /// Return block from database or in-memory state by hash. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { - // check database first - let mut block = self.provider.block_by_hash(hash)?; - if block.is_none() { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - block = self - .state - .tree_state - .block_by_hash(hash) - // TODO: clone for compatibility. should we return an Arc here? - .map(|block| block.as_ref().clone().unseal()); - } - Ok(block) - } - - /// Return state provider with reference to in-memory blocks that overlay database state. - fn state_provider( - &self, - hash: B256, - ) -> ProviderResult>> { - let mut in_memory = Vec::new(); - let mut parent_hash = hash; - while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; - in_memory.insert(0, executed.clone()); - } - - let historical = self.provider.state_by_block_hash(parent_hash)?; - Ok(MemoryOverlayStateProvider::new(in_memory, historical)) - } - - /// Return the parent hash of the lowest buffered ancestor for the requested block, if there - /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does - /// not exist in the buffer, this returns the hash that is passed in. - /// - /// Returns the parent hash of the block itself if the block is buffered and has no other - /// buffered ancestors. - fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { - self.state - .buffer - .lowest_ancestor(&hash) - .map(|block| block.parent_hash) - .unwrap_or_else(|| hash) - } - - /// If validation fails, the response MUST contain the latest valid hash: - /// - /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: - /// - It is fully validated and deemed VALID - /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID - /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a `PoW` block. - /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. 
- fn latest_valid_hash_for_invalid_payload( - &mut self, - parent_hash: B256, - ) -> ProviderResult> { - // Check if parent exists in side chain or in canonical chain. - if self.block_by_hash(parent_hash)?.is_some() { - return Ok(Some(parent_hash)) - } - - // iterate over ancestors in the invalid cache - // until we encounter the first valid ancestor - let mut current_hash = parent_hash; - let mut current_header = self.state.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.state.invalid_headers.get(¤t_hash); - - // If current_header is None, then the current_hash does not have an invalid - // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { - return Ok(Some(current_hash)) - } - } - Ok(None) - } - - /// Prepares the invalid payload response for the given hash, checking the - /// database for the parent hash and populating the payload status with the latest valid hash - /// according to the engine api spec. - fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { - // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal - // PoW block, which we need to identify by looking at the parent's block difficulty - if let Some(parent) = self.block_by_hash(parent_hash)? { - if !parent.is_zero_difficulty() { - parent_hash = B256::ZERO; - } - } - - let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), - }) - .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) - } - - /// Checks if the given `check` hash points to an invalid header, inserting the given `head` - /// block into the invalid header cache if the `check` hash has a known invalid ancestor. - /// - /// Returns a payload status response according to the engine API spec if the block is known to - /// be invalid. - fn check_invalid_ancestor_with_head( - &mut self, - check: B256, - head: B256, - ) -> ProviderResult> { - // check if the check hash was previously marked as invalid - let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; - - // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; - - // insert the head block into the invalid header cache - self.state.invalid_headers.insert_with_invalid_ancestor(head, header); - - Ok(Some(status)) - } - - /// Checks if the given `head` points to an invalid header, which requires a specific response - /// to a forkchoice update. - fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { - // check if the head was previously marked as invalid - let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) - } - - /// Validate if block is correct and satisfies all the consensus rules that concern the header - /// and block body itself. 
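A self-contained sketch (editor's illustration) of the cache walk `latest_valid_hash_for_invalid_payload` performs: follow parent hashes through the invalid-header cache and report the first ancestor that is no longer known-invalid, provided the node actually knows that block:

```rust
use std::collections::{HashMap, HashSet};

fn latest_valid_hash(
    invalid_parents: &HashMap<u64, u64>, // invalid block hash -> its parent hash
    known_blocks: &HashSet<u64>,         // blocks present in the tree or database
    parent_hash: u64,
) -> Option<u64> {
    // The parent itself is valid and known: report it directly.
    if known_blocks.contains(&parent_hash) {
        return Some(parent_hash);
    }
    let mut current = parent_hash;
    while let Some(&parent) = invalid_parents.get(&current) {
        current = parent;
        // First ancestor outside the invalid cache: valid only if we know it.
        if !invalid_parents.contains_key(&current) && known_blocks.contains(&current) {
            return Some(current);
        }
    }
    None
}

fn main() {
    let invalid_parents = HashMap::from([(7, 6), (6, 5)]); // 7 and 6 are invalid
    let known_blocks = HashSet::from([5]);
    assert_eq!(latest_valid_hash(&invalid_parents, &known_blocks, 7), Some(5));
    assert_eq!(latest_valid_hash(&invalid_parents, &known_blocks, 9), None);
}
```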
- fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { - if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { - error!( - ?block, - "Failed to validate total difficulty for block {}: {e}", - block.header.hash() - ); - return Err(e) - } - - if let Err(e) = self.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.header.hash()); - return Err(e) - } - - if let Err(e) = self.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.header.hash()); - return Err(e) - } - - Ok(()) - } - - fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> { - match block.try_seal_with_senders() { - Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)) - } - self.state.buffer.insert_block(block); - Ok(()) - } - - fn insert_block_without_senders( - &mut self, - block: SealedBlock, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - fn insert_block( - &mut self, - block: SealedBlockWithSenders, - ) -> Result { - self.insert_block_inner(block.clone()) - .map_err(|kind| InsertBlockError::new(block.block, kind)) - } - - fn insert_block_inner( - &mut self, - block: SealedBlockWithSenders, - ) -> Result { - if self.block_by_hash(block.hash())?.is_some() { - let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment - return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment))) - } - - // validate block consensus rules - self.validate_block(&block)?; - - let state_provider = self.state_provider(block.parent_hash).unwrap(); - let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - - let block_number = block.number; - let block_hash = block.hash(); - let block = block.unseal(); - let output = executor.execute((&block, U256::MAX).into()).unwrap(); - self.consensus.validate_block_post_execution( - &block, - PostExecutionInput::new(&output.receipts, &output.requests), - )?; - - // TODO: change StateRootProvider API to accept hashed post state - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); - - let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?; - if state_root != block.state_root { - return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), - ) - .into()) - } - - let executed = ExecutedBlock { - block: Arc::new(block.block.seal(block_hash)), - senders: Arc::new(block.senders), - execution_output: Arc::new(ExecutionOutcome::new( - output.state, - Receipts::from(output.receipts), - block_number, - vec![Requests::from(output.requests)], - )), - hashed_state: Arc::new(hashed_state), - trie: Arc::new(trie_output), - }; - self.state.tree_state.insert_executed(executed); - - let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment - Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment))) - } - - /// Pre-validate forkchoice update and check whether it can be processed. 
- /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if self.is_pipeline_active { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } -} - -impl EngineApiTreeHandler for EngineApiTreeHandlerImpl -where - P: BlockReader + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider, - T: EngineTypes + 'static, -{ - type Engine = T; - - fn on_downloaded(&mut self, _blocks: Vec) -> Option { - debug!("not implemented"); - None - } - - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - cancun_fields: Option, - ) -> ProviderResult> { - // Ensures that the given payload does not violate any consensus rules that concern the - // block's layout, like: - // - missing or invalid base fee - // - invalid extra data - // - invalid transactions - // - incorrect hash - // - the versioned hashes passed with the payload do not exactly match transaction - // versioned hashes - // - the block does not contain blob transactions if it is pre-cancun - // - // This validates the following engine API rule: - // - // 3. Given the expected array of blob versioned hashes client software **MUST** run its - // validation by taking the following steps: - // - // 1. Obtain the actual array by concatenating blob versioned hashes lists - // (`tx.blob_versioned_hashes`) of each [blob - // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included - // in the payload, respecting the order of inclusion. If the payload has no blob - // transactions the expected array **MUST** be `[]`. - // - // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | - // null}` if the expected and the actual arrays don't match. - // - // This validation **MUST** be instantly run in all cases even during active sync process. - let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { - Ok(block) => block, - Err(error) => { - error!(target: "engine::tree", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash)? 
- }; - - let status = PayloadStatusEnum::from(error); - return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) - } - }; - - let block_hash = block.hash(); - let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); - if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash; - } - - // now check the block itself - if let Some(status) = - self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)? - { - return Ok(TreeOutcome::new(status)) - } - - let status = if self.is_pipeline_active { - self.buffer_block_without_senders(block).unwrap(); - PayloadStatus::from_status(PayloadStatusEnum::Syncing) - } else { - let mut latest_valid_hash = None; - let status = match self.insert_block_without_senders(block).unwrap() { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) | - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { - latest_valid_hash = Some(block_hash); - PayloadStatusEnum::Valid - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | - InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { - // TODO: isn't this check redundant? - // check if the block's parent is already marked as invalid - // if let Some(status) = self - // .check_invalid_ancestor_with_head(block.parent_hash, block.hash()) - // .map_err(|error| { - // InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)) - // })? - // { - // return Ok(status) - // } - - // not known to be invalid, but we don't know anything else - PayloadStatusEnum::Syncing - } - }; - PayloadStatus::new(status, latest_valid_hash) - }; - - let mut outcome = TreeOutcome::new(status); - if outcome.outcome.is_valid() { - if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { - if target.head_block_hash == block_hash { - outcome = outcome - .with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash))); - } - } - } - Ok(outcome) - } - - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - ) -> ProviderResult> { - if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { - self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status()); - return Ok(TreeOutcome::new(on_updated)) - } - - todo!() - } -} - -/// The state of the persistence task. -#[derive(Default, Debug)] -struct PersistenceState { - /// Hash of the last block persisted. - last_persisted_block_hash: B256, - /// Receiver end of channel where the result of the persistence task will be - /// sent when done. A None value means there's no persistence task in progress. - rx: Option>, - /// The last persisted block number. - last_persisted_block_number: u64, -} - -impl PersistenceState { - /// Determines if there is a persistence task in progress by checking if the - /// receiver is set. - const fn in_progress(&self) -> bool { - self.rx.is_some() - } - - /// Sets state for a started persistence task. - fn start(&mut self, rx: oneshot::Receiver) { - self.rx = Some(rx); - } - - /// Sets state for a finished persistence task. - fn finish(&mut self, last_persisted_block_hash: B256, last_persisted_block_number: u64) { - self.rx = None; - self.last_persisted_block_number = last_persisted_block_number; - self.last_persisted_block_hash = last_persisted_block_hash; - } -} - -/// Represents the tree state kept in memory. -trait InMemoryState: Send + Sync { - /// Returns the state for a given block hash. 
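The EIP-4844 rule quoted in `on_new_payload` above reduces to concatenating each blob transaction's `blob_versioned_hashes` in inclusion order and comparing against the expected array; a hedged sketch with simplified 32-byte hashes:

```rust
/// The expected array must equal the per-transaction hashes concatenated in
/// inclusion order; a payload without blob transactions expects `[]`.
fn blob_versioned_hashes_match(expected: &[[u8; 32]], per_tx: &[Vec<[u8; 32]>]) -> bool {
    let actual: Vec<[u8; 32]> = per_tx.iter().flatten().copied().collect();
    actual.as_slice() == expected
}

fn main() {
    let h = |b: u8| [b; 32];
    assert!(blob_versioned_hashes_match(&[h(1), h(2)], &[vec![h(1)], vec![h(2)]]));
    assert!(!blob_versioned_hashes_match(&[h(2), h(1)], &[vec![h(1)], vec![h(2)]])); // order matters
    assert!(blob_versioned_hashes_match(&[], &[]));
}
```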
- fn state_by_hash(&self, hash: B256) -> Option>; - /// Returns the state for a given block number. - fn state_by_number(&self, number: u64) -> Option>; - /// Returns the current chain head state. - fn head_state(&self) -> Option>; - /// Returns the pending state corresponding to the current head plus one, - /// from the payload received in newPayload that does not have a FCU yet. - fn pending_state(&self) -> Option>; -} - -/// State after applying the given block. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct State(ExecutedBlock); - -impl State { - const fn new(executed_block: ExecutedBlock) -> Self { - Self(executed_block) - } - - fn block(&self) -> ExecutedBlock { - self.0.clone() - } - - fn hash(&self) -> B256 { - self.0.block().hash() - } - - fn number(&self) -> u64 { - self.0.block().number - } - - fn state_root(&self) -> B256 { - self.0.block().header.state_root - } - - fn receipts(&self) -> &Receipts { - &self.0.execution_outcome().receipts - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - static_files::StaticFileAction, - test_utils::{ - get_executed_block_with_number, get_executed_block_with_receipts, get_executed_blocks, - }, - }; - use rand::Rng; - use reth_beacon_consensus::EthBeaconConsensus; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::Receipt; - use reth_provider::test_utils::MockEthProvider; - use std::sync::mpsc::{channel, Sender}; - use tokio::sync::mpsc::unbounded_channel; - - struct TestHarness { - tree: EngineApiTreeHandlerImpl, - to_tree_tx: Sender>>, - blocks: Vec, - sf_action_rx: Receiver, - } - - fn get_default_test_harness(number_of_blocks: u64) -> TestHarness { - let blocks: Vec<_> = get_executed_blocks(0..number_of_blocks).collect(); - - let mut blocks_by_hash = HashMap::new(); - let mut blocks_by_number = BTreeMap::new(); - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - for block in &blocks { - let sealed_block = block.block(); - let hash = sealed_block.hash(); - let number = sealed_block.number; - blocks_by_hash.insert(hash, block.clone()); - blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone()); - state_by_hash.insert(hash, Arc::new(State(block.clone()))); - hash_by_number.insert(number, hash); - } - let tree_state = TreeState { blocks_by_hash, blocks_by_number, ..Default::default() }; - - let (action_tx, action_rx) = channel(); - let (sf_action_tx, sf_action_rx) = channel(); - let persistence_handle = PersistenceHandle::new(action_tx, sf_action_tx); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); - - let provider = MockEthProvider::default(); - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(vec![ExecutionOutcome::default()]); - - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - - let (to_tree_tx, to_tree_rx) = channel(); - let (from_tree_tx, from_tree_rx) = unbounded_channel(); - - let engine_api_tree_state = EngineApiTreeState { - invalid_headers: InvalidHeaderCache::new(10), - buffer: BlockBuffer::new(10), - tree_state, - forkchoice_state_tracker: ForkchoiceStateTracker::default(), - }; - - let header = blocks.first().unwrap().block().header.clone(); - let mut tree = EngineApiTreeHandlerImpl::new( - 
provider, - executor_factory, - consensus, - payload_validator, - to_tree_rx, - from_tree_tx, - engine_api_tree_state, - header, - persistence_handle, - ); - let last_executed_block = blocks.last().unwrap().clone(); - let pending = Some(State::new(last_executed_block)); - tree.canonical_in_memory_state = - CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending); - - TestHarness { tree, to_tree_tx, blocks, sf_action_rx } - } - - fn create_mock_state(block_number: u64) -> State { - State::new(get_executed_block_with_number(block_number)) - } - - #[tokio::test] - async fn test_tree_persist_blocks() { - // we need more than PERSISTENCE_THRESHOLD blocks to trigger the - // persistence task. - let TestHarness { tree, to_tree_tx, sf_action_rx, mut blocks } = - get_default_test_harness(PERSISTENCE_THRESHOLD + 1); - std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| tree.run()).unwrap(); - - // send a message to the tree to enter the main loop. - to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); - - let received_action = sf_action_rx.recv().expect("Failed to receive saved blocks"); - if let StaticFileAction::WriteExecutionData((saved_blocks, _)) = received_action { - // only PERSISTENCE_THRESHOLD will be persisted - blocks.pop(); - assert_eq!(saved_blocks.len() as u64, PERSISTENCE_THRESHOLD); - assert_eq!(saved_blocks, blocks); - } else { - panic!("unexpected action received {received_action:?}"); - } - } - - #[tokio::test] - async fn test_in_memory_state_trait_impl() { - let TestHarness { tree, to_tree_tx, sf_action_rx, blocks } = get_default_test_harness(10); - - let head_block = blocks.last().unwrap().block(); - let first_block = blocks.first().unwrap().block(); - - for executed_block in blocks { - let sealed_block = executed_block.block(); - - let expected_state = State::new(executed_block.clone()); - - let actual_state_by_hash = tree - .canonical_in_memory_state - .inner - .in_memory_state - .state_by_hash(sealed_block.hash()) - .unwrap(); - assert_eq!(expected_state, *actual_state_by_hash); - - let actual_state_by_number = tree - .canonical_in_memory_state - .inner - .in_memory_state - .state_by_number(sealed_block.number) - .unwrap(); - assert_eq!(expected_state, *actual_state_by_number); - } - } - - #[tokio::test] - async fn test_in_memory_state_impl_state_by_hash() { - let mut state_by_hash = HashMap::new(); - let number = rand::thread_rng().gen::(); - let state = Arc::new(create_mock_state(number)); - state_by_hash.insert(state.hash(), state.clone()); - - let in_memory_state = InMemoryStateImpl::new(state_by_hash, HashMap::new(), None); - - assert_eq!(in_memory_state.state_by_hash(state.hash()), Some(state)); - assert_eq!(in_memory_state.state_by_hash(B256::random()), None); - } - - #[tokio::test] - async fn test_in_memory_state_impl_state_by_number() { - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - - let number = rand::thread_rng().gen::(); - let state = Arc::new(create_mock_state(number)); - let hash = state.hash(); - - state_by_hash.insert(hash, state.clone()); - hash_by_number.insert(number, hash); - - let in_memory_state = InMemoryStateImpl::new(state_by_hash, hash_by_number, None); - - assert_eq!(in_memory_state.state_by_number(number), Some(state)); - assert_eq!(in_memory_state.state_by_number(number + 1), None); - } - - #[tokio::test] - async fn test_in_memory_state_impl_head_state() { - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - let state1 = 
Arc::new(create_mock_state(1)); - let state2 = Arc::new(create_mock_state(2)); - let hash1 = state1.hash(); - let hash2 = state2.hash(); - hash_by_number.insert(1, hash1); - hash_by_number.insert(2, hash2); - state_by_hash.insert(hash1, state1); - state_by_hash.insert(hash2, state2); - - let in_memory_state = InMemoryStateImpl::new(state_by_hash, hash_by_number, None); - let head_state = in_memory_state.head_state().unwrap(); - - assert_eq!(head_state.hash(), hash2); - assert_eq!(head_state.number(), 2); - } - - #[tokio::test] - async fn test_in_memory_state_impl_pending_state() { - let pending_number = rand::thread_rng().gen::(); - let pending_state = create_mock_state(pending_number); - let pending_hash = pending_state.hash(); - - let in_memory_state = - InMemoryStateImpl::new(HashMap::new(), HashMap::new(), Some(pending_state)); - - let result = in_memory_state.pending_state(); - assert!(result.is_some()); - let actual_pending_state = result.unwrap(); - assert_eq!(actual_pending_state.0.block().hash(), pending_hash); - assert_eq!(actual_pending_state.0.block().number, pending_number); - } - - #[tokio::test] - async fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryStateImpl::new(HashMap::new(), HashMap::new(), None); - - assert_eq!(in_memory_state.pending_state(), None); - } - - #[tokio::test] - async fn test_state_new() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number); - - let state = State::new(block.clone()); - - assert_eq!(state.0, block); - } - - #[tokio::test] - async fn test_state_block() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number); - - let state = State::new(block.clone()); - - assert_eq!(state.block(), block); - } - - #[tokio::test] - async fn test_state_hash() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number); - - let state = State::new(block.clone()); - - assert_eq!(state.hash(), block.block().hash()); - } - - #[tokio::test] - async fn test_state_number() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number); - - let state = State::new(block); - - assert_eq!(state.number(), number); - } - - #[tokio::test] - async fn test_state_state_root() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number); - - let state = State::new(block.clone()); - - assert_eq!(state.state_root(), block.block().state_root); - } - - #[tokio::test] - async fn test_state_receipts() { - let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - - let block = get_executed_block_with_receipts(receipts.clone()); - - let state = State::new(block); - - assert_eq!(state.receipts(), &receipts); - } -} diff --git a/crates/engine/tree/test-data/holesky/1.rlp b/crates/engine/tree/test-data/holesky/1.rlp new file mode 100644 index 000000000000..454e6b04ca2c --- /dev/null +++ b/crates/engine/tree/test-data/holesky/1.rlp @@ -0,0 +1 @@ 
+f90218f90213a0b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a069d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800184017dd79d808465156ad899d883010d02846765746888676f312e32312e31856c696e7578a0b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde488000000000000000084342770c0c0c0 \ No newline at end of file diff --git a/crates/engine/tree/test-data/holesky/2.rlp b/crates/engine/tree/test-data/holesky/2.rlp new file mode 100644 index 000000000000..1b7d04893c0c --- /dev/null +++ b/crates/engine/tree/test-data/holesky/2.rlp @@ -0,0 +1 @@ +f90218f90213a0e9011e6d15a0d0c16f65a38f84375bf1a6b88201b0ad75a2660df0bb8d1ac381a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794c6e2459991bfe27cca6d86722f35da23a1e4cb97a069d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800284017e3711808465156af099d883010d02846765746888676f312e32312e31856c696e7578a0b212724aac0df10d75b1b6d795fd4cd17e4ca4f9ee1bfe97871a16a3af64eed1880000000000000000842da282a8c0c0 \ No newline at end of file diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index a1d25b5713ec..c0c0f83fe756 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -36,7 +36,6 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive.workspace = true [features] default = ["std", "serde", "rustc-hash"] diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs index 1abc7e75ecb1..6ba54a421770 100644 --- a/crates/ethereum-forks/src/hardfork/dev.rs +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -31,5 +31,7 @@ pub static DEV_HARDFORKS: Lazy = Lazy::new(|| { (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), #[cfg(feature = "optimism")] (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), ]) }); diff --git a/crates/ethereum/engine/Cargo.toml b/crates/ethereum/engine/Cargo.toml index 
492eb16bb54d..732d1f40bd84 100644 --- a/crates/ethereum/engine/Cargo.toml +++ b/crates/ethereum/engine/Cargo.toml @@ -17,18 +17,30 @@ reth-chainspec.workspace = true reth-db-api.workspace = true reth-engine-tree.workspace = true reth-ethereum-engine-primitives.workspace = true +reth-evm-ethereum.workspace = true reth-network-p2p.workspace = true +reth-payload-validator.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true +reth-payload-builder.workspace = true # async futures.workspace = true pin-project.workspace = true -tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true # misc thiserror.workspace = true [dev-dependencies] +reth-blockchain-tree.workspace = true +reth-consensus.workspace = true reth-engine-tree = { workspace = true, features = ["test-utils"] } +reth-evm.workspace = true +reth-exex-types.workspace = true +reth-primitives.workspace = true +reth-prune-types.workspace = true + +tokio = { workspace = true, features = ["sync"] } \ No newline at end of file diff --git a/crates/ethereum/engine/src/service.rs b/crates/ethereum/engine/src/service.rs index bb7e8b06bb9a..b4f63aa7254a 100644 --- a/crates/ethereum/engine/src/service.rs +++ b/crates/ethereum/engine/src/service.rs @@ -1,25 +1,33 @@ -use futures::{ready, StreamExt}; +use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconEngineMessage, EthBeaconConsensus}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EthBeaconConsensus}; use reth_chainspec::ChainSpec; use reth_db_api::database::Database; use reth_engine_tree::{ backfill::PipelineSync, - chain::ChainOrchestrator, download::BasicBlockDownloader, - engine::{EngineApiEvent, EngineApiRequestHandler, EngineHandler, FromEngine}, + engine::{EngineApiRequestHandler, EngineHandler}, + persistence::PersistenceHandle, + tree::EngineApiTreeHandlerImpl, +}; +pub use reth_engine_tree::{ + chain::{ChainEvent, ChainOrchestrator}, + engine::EngineApiEvent, }; use reth_ethereum_engine_primitives::EthEngineTypes; +use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; +use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; +use reth_prune::Pruner; use reth_stages_api::Pipeline; use reth_tasks::TaskSpawner; use std::{ - future::Future, pin::Pin, - sync::{mpsc::Sender, Arc}, + sync::{mpsc::channel, Arc}, task::{Context, Poll}, }; -use tokio::sync::mpsc::UnboundedReceiver; use tokio_stream::wrappers::UnboundedReceiverStream; /// Alias for Ethereum chain orchestrator. @@ -49,43 +57,64 @@ where Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { /// Constructor for `EthService`. 
+ #[allow(clippy::too_many_arguments)] pub fn new( chain_spec: Arc, client: Client, - to_tree: Sender>>, - from_tree: UnboundedReceiver, incoming_requests: UnboundedReceiverStream>, pipeline: Pipeline, pipeline_task_spawner: Box, + provider: ProviderFactory, + blockchain_db: BlockchainProvider2, + pruner: Pruner>, + payload_builder: PayloadBuilderHandle, ) -> Self { - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); - let downloader = BasicBlockDownloader::new(client, consensus); + let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); + let downloader = BasicBlockDownloader::new(client, consensus.clone()); + + let (to_tree_tx, to_tree_rx) = channel(); + + let persistence_handle = PersistenceHandle::spawn_service(provider, pruner); + let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + let executor_factory = EthExecutorProvider::ethereum(chain_spec); + + let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); + + let from_tree = EngineApiTreeHandlerImpl::spawn_new( + blockchain_db, + executor_factory, + consensus, + payload_validator, + to_tree_rx, + persistence_handle, + payload_builder, + canonical_in_memory_state, + ); - let engine_handler = EngineApiRequestHandler::new(to_tree, from_tree); + let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree); let handler = EngineHandler::new(engine_handler, downloader, incoming_requests); let backfill_sync = PipelineSync::new(pipeline, pipeline_task_spawner); Self { orchestrator: ChainOrchestrator::new(handler, backfill_sync) } } + + /// Returns a mutable reference to the orchestrator. + pub fn orchestrator_mut(&mut self) -> &mut EthServiceType { + &mut self.orchestrator + } } -impl Future for EthService +impl Stream for EthService where DB: Database + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { - type Output = Result<(), EthServiceError>; + type Item = ChainEvent; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - // Call poll on the inner orchestrator. 
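The diff above turns the service from a `Future` that resolves once into a `Stream` of chain events, so the launcher can react to every event as the chain advances. A minimal, hedged sketch of that consumption pattern (names are illustrative; assumes the `futures` and `tokio` crates):

```rust
use futures::{stream, Stream, StreamExt};

#[derive(Debug)]
enum ChainEvent {
    BackfillSyncStarted,
    BackfillSyncFinished,
}

async fn drive(mut service: impl Stream<Item = ChainEvent> + Unpin) {
    // The caller owns the loop and decides how each event is handled,
    // rather than the service swallowing events internally.
    while let Some(event) = service.next().await {
        println!("chain event: {event:?}");
    }
}

#[tokio::main]
async fn main() {
    let events = stream::iter([ChainEvent::BackfillSyncStarted, ChainEvent::BackfillSyncFinished]);
    drive(events).await;
}
```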
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut orchestrator = self.project().orchestrator; - loop { - match ready!(StreamExt::poll_next_unpin(&mut orchestrator, cx)) { - Some(_event) => continue, - None => return Poll::Ready(Ok(())), - } - } + StreamExt::poll_next_unpin(&mut orchestrator, cx) } } @@ -100,10 +129,13 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_tree::test_utils::TestPipelineBuilder; use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; + use reth_primitives::SealedHeader; + use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; use reth_tasks::TokioTaskExecutor; - use std::sync::{mpsc::channel, Arc}; - use tokio::sync::mpsc::unbounded_channel; + use std::sync::Arc; + use tokio::sync::{mpsc::unbounded_channel, watch}; #[test] fn eth_chain_orchestrator_build() { @@ -122,18 +154,26 @@ mod tests { let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); let pipeline_task_spawner = Box::::default(); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + + let blockchain_db = + BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()); - let (to_tree_tx, _to_tree_rx) = channel(); - let (_from_tree_tx, from_tree_rx) = unbounded_channel(); + let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); + let pruner = + Pruner::<_, ProviderFactory<_>>::new(provider_factory.clone(), vec![], 0, 0, None, rx); - let _eth_chain_orchestrator = EthService::new( + let (tx, _rx) = unbounded_channel(); + let _eth_service = EthService::new( chain_spec, client, - to_tree_tx, - from_tree_rx, incoming_requests, pipeline, pipeline_task_spawner, + provider_factory, + blockchain_db, + pruner, + PayloadBuilderHandle::new(tx), ); } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index cfee186c6334..ee77ee0db4e9 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,6 +4,7 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; +use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ @@ -33,8 +34,11 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; +#[cfg(not(feature = "std"))] +use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; #[cfg(feature = "std")] -use std::{fmt::Display, sync::Arc, vec, vec::Vec}; +use std::sync::Arc; + /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] pub struct EthExecutorProvider { @@ -145,7 +149,7 @@ where ) -> Result where DB: Database, - DB::Error: Into + std::fmt::Display, + DB::Error: Into + Display, { // apply pre execution changes apply_beacon_root_contract_call( @@ -363,7 +367,7 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database + std::fmt::Display>, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index cd8398ebe963..fdb121ef8496 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -18,6 +18,9 @@ use reth_primitives::{transaction::FillTxEnv, Address, Header, TransactionSigned use 
reth_revm::{Database, EvmBuilder}; use revm_primitives::{AnalysisKind, Bytes, CfgEnvWithHandlerCfg, Env, TxEnv, TxKind}; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; @@ -107,10 +110,10 @@ impl ConfigureEvmEnv for EthEvmConfig { impl ConfigureEvm for EthEvmConfig { type DefaultExternalContext<'a> = (); - fn evm<'a, DB: Database + 'a>( + fn evm( &self, db: DB, - ) -> reth_revm::Evm<'a, Self::DefaultExternalContext<'a>, DB> { + ) -> reth_revm::Evm<'_, Self::DefaultExternalContext<'_>, DB> { EvmBuilder::default().with_db(db).build() } } diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 2cce8650d153..f22490859a95 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -35,6 +35,7 @@ reth-tokio-util.workspace = true reth-node-events.workspace = true reth-node-core.workspace = true reth-exex.workspace = true +reth-blockchain-tree.workspace = true # misc eyre.workspace = true diff --git a/crates/ethereum/node/src/launch.rs b/crates/ethereum/node/src/launch.rs index eb699cea2d58..898b376025fb 100644 --- a/crates/ethereum/node/src/launch.rs +++ b/crates/ethereum/node/src/launch.rs @@ -2,13 +2,14 @@ use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, + hooks::{EngineHooks, StaticFileHook}, BeaconConsensusEngineHandle, }; -use reth_ethereum_engine::service::EthService; +use reth_blockchain_tree::BlockchainTreeConfig; +use reth_ethereum_engine::service::{ChainEvent, EthService}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_exex::ExExManagerHandle; -use reth_network::NetworkEvents; +use reth_network::{NetworkEvents, NetworkSyncUpdater, SyncState}; use reth_node_api::{FullNodeTypes, NodeAddOns}; use reth_node_builder::{ hooks::NodeHooks, @@ -19,17 +20,17 @@ use reth_node_builder::{ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, + primitives::Head, rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::BlockchainProvider2; use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; -use reth_tracing::tracing::{debug, info}; -use std::sync::mpsc::channel; +use reth_tracing::tracing::{debug, error, info}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -50,7 +51,7 @@ impl EthNodeLauncher { impl LaunchNode> for EthNodeLauncher where T: FullNodeTypes< - Provider = BlockchainProvider<::DB>, + Provider = BlockchainProvider2<::DB>, Engine = EthEngineTypes, >, CB: NodeComponentsBuilder, @@ -73,6 +74,15 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; + // TODO: move tree_config and canon_state_notification_sender + // initialization to with_blockchain_db once the engine revamp is done + // https://github.com/paradigmxyz/reth/issues/8742 + let tree_config = BlockchainTreeConfig::default(); + + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); + // setup the launch context let ctx = ctx .with_configured_globals() @@ -89,7 +99,7 @@ where .inspect(|_| { info!(target: "reth::cli", "Database opened"); }) - .with_prometheus().await? + .with_prometheus_server().await? .inspect(|this| { debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); }) @@ -97,10 +107,12 @@ where .inspect(|this| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); }) - .with_metrics() + .with_metrics_task() // passing FullNodeTypes as type parameter here so that we can build // later the components. - .with_blockchain_db::()? + .with_blockchain_db::(move |provider_factory| { + Ok(BlockchainProvider2::new(provider_factory)?) + }, tree_config, canon_state_notification_sender)? .with_components(components_builder, on_component_initialized).await?; // spawn exexs @@ -147,8 +159,7 @@ where let pipeline_events = pipeline.events(); - // TODO: support --debug.tip - let _initial_target = ctx.node_config().debug.tip; + let initial_target = ctx.node_config().debug.tip; let mut pruner_builder = ctx.pruner_builder(); if let Some(exex_manager_handle) = &exex_manager_handle { @@ -159,34 +170,30 @@ where let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); - hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); - - let (to_tree_tx, _to_tree_rx) = channel(); - let (_from_tree_tx, from_tree_rx) = unbounded_channel(); // Configure the consensus engine - let eth_service = EthService::new( + let mut eth_service = EthService::new( ctx.chain_spec(), network_client.clone(), - // to tree - to_tree_tx, - // from tree - from_tree_rx, UnboundedReceiverStream::new(consensus_engine_rx), pipeline, Box::new(ctx.task_executor().clone()), + ctx.provider_factory().clone(), + ctx.blockchain_db().clone(), + pruner, + ctx.components().payload_builder().clone(), ); let event_sender = EventSender::default(); let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender); + BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( ctx.components().network().event_listener().map(Into::into), - // TODO get engine events + beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( @@ -240,11 +247,52 @@ where .await?; // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); + let network_handle = ctx.components().network().clone(); + let chainspec = ctx.chain_spec(); + let (exit, rx) = oneshot::channel(); info!(target: "reth::cli", "Starting consensus engine"); 
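The canon-state notification channel set up above is a `tokio::sync::broadcast` channel whose capacity is derived from the tree's maximum reorg depth; a small sketch of that sizing choice (editor's illustration, the depth value is assumed):

```rust
use tokio::sync::broadcast;

fn canon_state_notification_channel(
    max_reorg_depth: u64,
) -> (broadcast::Sender<&'static str>, broadcast::Receiver<&'static str>) {
    // Buffer roughly two reorg windows of notifications before lagging
    // receivers start missing messages.
    broadcast::channel(max_reorg_depth as usize * 2)
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = canon_state_notification_channel(64);
    tx.send("canonical chain committed").unwrap();
    assert_eq!(rx.recv().await.unwrap(), "canonical chain committed");
}
```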
ctx.task_executor().spawn_critical_blocking("consensus engine", async move { - let res = eth_service.await; - let _ = tx.send(res); + if let Some(initial_target) = initial_target { + debug!(target: "reth::cli", %initial_target, "start backfill sync"); + eth_service.orchestrator_mut().start_backfill_sync(initial_target); + } + + let mut res = Ok(()); + + // advance the chain and handle events + while let Some(event) = eth_service.next().await { + debug!(target: "reth::cli", "Event: {event:?}"); + match event { + ChainEvent::BackfillSyncFinished => { + network_handle.update_sync_state(SyncState::Idle); + } + ChainEvent::BackfillSyncStarted => { + network_handle.update_sync_state(SyncState::Syncing); + } + ChainEvent::FatalError => { + error!(target: "reth::cli", "Fatal error in consensus engine"); + res = Err(eyre::eyre!("Fatal error in consensus engine")); + break + } + ChainEvent::Handler(ev) => { + if let Some(head) = ev.canonical_header() { + let head_block = Head { + number: head.number, + hash: head.hash(), + difficulty: head.difficulty, + timestamp: head.timestamp, + total_difficulty: chainspec + .final_paris_total_difficulty(head.number) + .unwrap_or_default(), + }; + network_handle.update_status(head_block); + } + event_sender.notify(ev); + } + } + } + + let _ = exit.send(res); }); let full_node = FullNode { @@ -265,7 +313,7 @@ where let handle = NodeHandle { node_exit_future: NodeExitFuture::new( - async { Ok(rx.await??) }, + async { rx.await? }, full_node.config.debug.terminate, ), node: full_node, diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 32ebf2d22b23..fe2ff7effe41 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -1,11 +1,17 @@ //! Node builder setup tests. -use reth_db::test_utils::create_test_rw_db; +use std::sync::Arc; + +use reth_db::{ + test_utils::{create_test_rw_db, TempDatabase}, + DatabaseEnv, +}; use reth_node_builder::{FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::{ launch::EthNodeLauncher, node::{EthereumAddOns, EthereumNode}, }; +use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskManager; #[test] @@ -45,7 +51,7 @@ async fn test_eth_launcher() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types::() + .with_types_and_provider::>>>() .with_components(EthereumNode::components()) .with_add_ons::() .launch_with_fn(|builder| { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index c7b491243864..e853cb1ad12c 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -196,25 +196,24 @@ where } // Calculate the requests and the requests root. - let (requests, requests_root) = if chain_spec - .is_prague_active_at_timestamp(attributes.timestamp) - { - // We do not calculate the EIP-6110 deposit requests because there are no - // transactions in an empty payload. 
- let withdrawal_requests = post_block_withdrawal_requests_contract_call::( - &self.evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - - let requests = withdrawal_requests; - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) - } else { - (None, None) - }; + let (requests, requests_root) = + if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { + // We do not calculate the EIP-6110 deposit requests because there are no + // transactions in an empty payload. + let withdrawal_requests = post_block_withdrawal_requests_contract_call( + &self.evm_config, + &mut db, + &initialized_cfg, + &initialized_block_env, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + + let requests = withdrawal_requests; + let requests_root = calculate_requests_root(&requests); + (Some(requests.into()), Some(requests_root)) + } else { + (None, None) + }; let header = Header { parent_hash: parent_block.hash(), diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index 8ec3a7024cb5..b60067dfd7d4 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -17,6 +17,7 @@ reth-storage-errors.workspace = true reth-prune-types.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true alloy-eips.workspace = true revm-primitives.workspace = true diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 1fdee985606b..5d8ec12bdbf3 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -23,7 +23,7 @@ use revm_primitives::EVMError; use alloc::{boxed::Box, string::String}; pub mod trie; -pub use trie::{StateRootError, StorageRootError}; +pub use trie::*; /// Transaction validation errors #[derive(thiserror_no_std::Error, Debug, Clone, PartialEq, Eq)] diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index fd3533977ab2..5690bc97e3aa 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,14 +1,34 @@ //! Errors when computing the state root. -use reth_storage_errors::db::DatabaseError; +use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use thiserror_no_std::Error; +/// State proof errors. +#[derive(Error, Debug, PartialEq, Eq, Clone)] +pub enum StateProofError { + /// Internal database error. + #[error(transparent)] + Database(#[from] DatabaseError), + /// RLP decoding error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} + +impl From<StateProofError> for ProviderError { + fn from(value: StateProofError) -> Self { + match value { + StateProofError::Database(error) => Self::Database(error), + StateProofError::Rlp(error) => Self::Rlp(error), + } + } +} + /// State root errors. #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum StateRootError { /// Internal database error. #[error(transparent)] - DB(#[from] DatabaseError), + Database(#[from] DatabaseError), /// Storage root error.
#[error(transparent)] StorageRootError(#[from] StorageRootError), @@ -17,8 +37,8 @@ impl From<StateRootError> for DatabaseError { fn from(err: StateRootError) -> Self { match err { - StateRootError::DB(err) | - StateRootError::StorageRootError(StorageRootError::DB(err)) => err, + StateRootError::Database(err) | + StateRootError::StorageRootError(StorageRootError::Database(err)) => err, } } } @@ -28,5 +48,5 @@ impl From<StateRootError> for DatabaseError { pub enum StorageRootError { /// Internal database error. #[error(transparent)] - DB(#[from] DatabaseError), + Database(#[from] DatabaseError), } diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs new file mode 100644 index 000000000000..2c132576415d --- /dev/null +++ b/crates/evm/execution-types/src/execute.rs @@ -0,0 +1,41 @@ +use reth_primitives::{Request, U256}; +use revm::db::BundleState; + +/// A helper type for ethereum block inputs that consists of a block and the total difficulty. +#[derive(Debug)] +pub struct BlockExecutionInput<'a, Block> { + /// The block to execute. + pub block: &'a Block, + /// The total difficulty of the block. + pub total_difficulty: U256, +} + +impl<'a, Block> BlockExecutionInput<'a, Block> { + /// Creates a new input. + pub const fn new(block: &'a Block, total_difficulty: U256) -> Self { + Self { block, total_difficulty } + } +} + +impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { + fn from((block, total_difficulty): (&'a Block, U256)) -> Self { + Self::new(block, total_difficulty) + } +} + +/// The output of an ethereum block. +/// +/// Contains the state changes, transaction receipts, and total gas used in the block. +/// +/// TODO(mattsse): combine with `ExecutionOutcome` +#[derive(Debug, PartialEq, Eq)] +pub struct BlockExecutionOutput<T> { + /// The changed state of the block after execution. + pub state: BundleState, + /// All the receipts of the transactions in the block. + pub receipts: Vec<T>, + /// All the EIP-7685 requests of the transactions in the block. + pub requests: Vec<Request>, + /// The total gas used by the block. + pub gas_used: u64, +} diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 0692fa57eb94..881b2a33dad0 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -8,8 +8,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod execution_outcome; -pub use execution_outcome::*; - mod chain; pub use chain::*; + +mod execute; +pub use execute::*; + +mod execution_outcome; +pub use execution_outcome::*; diff --git a/crates/evm/src/builder.rs b/crates/evm/src/builder.rs new file mode 100644 index 000000000000..019e7d9a6be1 --- /dev/null +++ b/crates/evm/src/builder.rs @@ -0,0 +1,150 @@ +//! Builder for creating an EVM with a database and environment. + +use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +use revm_primitives::{Env, EnvWithHandlerCfg}; + +/// Builder for creating an EVM with a database and environment. +/// +/// Wrapper around [`EvmBuilder`] that allows for setting the database and environment for the EVM. +/// +/// This is useful for creating an EVM with a custom database and environment without having to +/// necessarily rely on Revm inspector. +#[derive(Debug)] +pub struct RethEvmBuilder<DB: Database, EXT = ()> { + /// The database to use for the EVM. + db: DB, + /// The environment to use for the EVM. + env: Option<Box<Env>>, + /// The external context for the EVM.
+ external_context: EXT, +} + +impl RethEvmBuilder +where + DB: Database, +{ + /// Create a new EVM builder with the given database. + pub const fn new(db: DB, external_context: EXT) -> Self { + Self { db, env: None, external_context } + } + + /// Set the environment for the EVM. + pub fn with_env(mut self, env: Box) -> Self { + self.env = Some(env); + self + } + + /// Set the external context for the EVM. + pub fn with_external_context(self, external_context: EXT1) -> RethEvmBuilder { + RethEvmBuilder { db: self.db, env: self.env, external_context } + } + + /// Build the EVM with the given database and environment. + pub fn build<'a>(self) -> Evm<'a, EXT, DB> { + let mut builder = + EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); + if let Some(env) = self.env { + builder = builder.with_env(env); + } + + builder.build() + } + + /// Build the EVM with the given database and environment, using the given inspector. + pub fn build_with_inspector<'a, I>(self, inspector: I) -> Evm<'a, I, DB> + where + I: GetInspector, + EXT: 'a, + { + let mut builder = + EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); + if let Some(env) = self.env { + builder = builder.with_env(env); + } + builder + .with_external_context(inspector) + .append_handler_register(inspector_handle_register) + .build() + } +} + +/// Trait for configuring an EVM builder. +pub trait ConfigureEvmBuilder { + /// The type of EVM builder that this trait can configure. + type Builder<'a, DB: Database>: EvmFactory; +} + +/// Trait for configuring the EVM for executing full blocks. +pub trait EvmFactory { + /// Associated type for the default external context that should be configured for the EVM. + type DefaultExternalContext<'a>; + + /// Provides the default external context. + fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a>; + + /// Returns new EVM with the given database + /// + /// This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] methods. It is + /// up to the caller to call an appropriate method to fill the transaction and block + /// environment before executing any transactions using the provided EVM. + fn evm(self, db: DB) -> Evm<'static, Self::DefaultExternalContext<'static>, DB> + where + Self: Sized, + { + RethEvmBuilder::new(db, self.default_external_context()).build() + } + + /// Returns a new EVM with the given database configured with the given environment settings, + /// including the spec id. + /// + /// This will preserve any handler modifications + fn evm_with_env<'a, DB: Database + 'a>( + &self, + db: DB, + env: EnvWithHandlerCfg, + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { + RethEvmBuilder::new(db, self.default_external_context()).with_env(env.env).build() + } + + /// Returns a new EVM with the given database configured with the given environment settings, + /// including the spec id. + /// + /// This will use the given external inspector as the EVM external context. + /// + /// This will preserve any handler modifications + fn evm_with_env_and_inspector( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> Evm<'_, I, DB> + where + DB: Database, + I: GetInspector, + { + RethEvmBuilder::new(db, self.default_external_context()) + .with_env(env.env) + .build_with_inspector(inspector) + } + + /// Returns a new EVM with the given inspector. + /// + /// Caution: This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] + /// methods. 
It is up to the caller to call an appropriate method to fill the transaction + /// and block environment before executing any transactions using the provided EVM. + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + where + DB: Database, + I: GetInspector, + { + RethEvmBuilder::new(db, self.default_external_context()).build_with_inspector(inspector) + } +} + +impl EvmFactory for RethEvmBuilder { + type DefaultExternalContext<'a> = EXT; + + fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> { + self.external_context.clone() + } +} diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index f6af36d2eb63..0a490b813596 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -2,11 +2,9 @@ use std::fmt::Display; -use crate::execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, -}; +use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 9d3fd0a5e824..68c398506120 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,9 +1,10 @@ //! Traits for execution. -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt, Request, U256}; +// Re-export execution types +pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; + +use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; -use revm::db::BundleState; use revm_primitives::db::Database; use std::fmt::Display; @@ -96,45 +97,6 @@ pub trait BatchExecutor { fn size_hint(&self) -> Option; } -/// The output of an ethereum block. -/// -/// Contains the state changes, transaction receipts, and total gas used in the block. -/// -/// TODO(mattsse): combine with `ExecutionOutcome` -#[derive(Debug, PartialEq, Eq)] -pub struct BlockExecutionOutput { - /// The changed state of the block after execution. - pub state: BundleState, - /// All the receipts of the transactions in the block. - pub receipts: Vec, - /// All the EIP-7685 requests of the transactions in the block. - pub requests: Vec, - /// The total gas used by the block. - pub gas_used: u64, -} - -/// A helper type for ethereum block inputs that consists of a block and the total difficulty. -#[derive(Debug)] -pub struct BlockExecutionInput<'a, Block> { - /// The block to execute. - pub block: &'a Block, - /// The total difficulty of the block. - pub total_difficulty: U256, -} - -impl<'a, Block> BlockExecutionInput<'a, Block> { - /// Creates a new input. - pub const fn new(block: &'a Block, total_difficulty: U256) -> Self { - Self { block, total_difficulty } - } -} - -impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { - fn from((block, total_difficulty): (&'a Block, U256)) -> Self { - Self::new(block, total_difficulty) - } -} - /// A type that can create a new executor for block execution. pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a single block given a database. 
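Since `BlockExecutionInput` and `BlockExecutionOutput` now live in `reth-execution-types` and are only re-exported from `reth_evm::execute`, downstream call sites keep compiling unchanged. A minimal sketch of such a call site, assuming the executor bounds as `BlockExecutorProvider` declares them at this commit (the helper itself is illustrative, not part of the patch):

```rust
use std::fmt::Display;

use reth_evm::execute::{
    BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor,
};
use reth_primitives::{BlockWithSenders, Receipt, U256};
use reth_storage_errors::provider::ProviderError;
use revm::Database;

/// Runs one block through any executor. The `(&block, total_difficulty)` tuple
/// converts into a `BlockExecutionInput` via the relocated `From` impl above.
fn execute_one<P, DB>(
    provider: &P,
    db: DB,
    block: &BlockWithSenders,
    total_difficulty: U256,
) -> Result<BlockExecutionOutput<Receipt>, BlockExecutionError>
where
    P: BlockExecutorProvider,
    DB: Database<Error: Into<ProviderError> + Display>,
{
    provider.executor(db).execute((block, total_difficulty).into())
}
```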
@@ -184,6 +146,7 @@ mod tests { use super::*; use reth_primitives::Block; use revm::db::{CacheDB, EmptyDBTyped}; + use revm_primitives::U256; use std::marker::PhantomData; #[derive(Clone, Default)] diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 27eeb42ec4bd..c92eb8e923be 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -21,6 +21,7 @@ use revm_primitives::{ BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, }; +pub mod builder; pub mod either; pub mod execute; pub mod noop; @@ -42,17 +43,17 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; + fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications - fn evm_with_env<'a, DB: Database + 'a>( + fn evm_with_env( &self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { + ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -65,12 +66,12 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This will use the given external inspector as the EVM external context. /// /// This will preserve any handler modifications - fn evm_with_env_and_inspector<'a, DB, I>( + fn evm_with_env_and_inspector( &self, db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> Evm<'a, I, DB> + ) -> Evm<'_, I, DB> where DB: Database, I: GetInspector, @@ -86,9 +87,9 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Caution: This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It /// is up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. 
- fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> where - DB: Database + 'a, + DB: Database, I: GetInspector, { EvmBuilder::default() diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 80a2b76de834..beac15be1662 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -3,15 +3,13 @@ use std::fmt::Display; use reth_execution_errors::BlockExecutionError; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; -use crate::execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, -}; +use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 2e73ff2fa985..b847a0665a35 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -36,7 +36,7 @@ pub trait EvmEnvProvider: Send + Sync { { let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); - self.fill_env_with_header::(&mut cfg, &mut block_env, header, evm_config)?; + self.fill_env_with_header(&mut cfg, &mut block_env, header, evm_config)?; Ok((cfg, block_env)) } diff --git a/crates/evm/src/system_calls.rs b/crates/evm/src/system_calls.rs index 9d493f51795e..58759a866556 100644 --- a/crates/evm/src/system_calls.rs +++ b/crates/evm/src/system_calls.rs @@ -90,7 +90,7 @@ where // if the block number is zero (genesis block) then the parent beacon block root must // be 0x0 and no system transaction may occur as per EIP-4788 if block_number == 0 { - if parent_beacon_block_root != B256::ZERO { + if !parent_beacon_block_root.is_zero() { return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { parent_beacon_block_root, } @@ -162,7 +162,7 @@ where .build(); // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call::(evm_config, &mut evm_post_block) + apply_withdrawal_requests_contract_call(evm_config, &mut evm_post_block) } /// Applies the post-block call to the EIP-7002 withdrawal requests contract. @@ -256,11 +256,8 @@ where let amount = data.get_u64(); - withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { - source_address, - validator_pubkey, - amount, - })); + withdrawal_requests + .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); } Ok(withdrawal_requests) @@ -295,7 +292,7 @@ where .build(); // initialize a block from the env, because the post block call needs the block itself - apply_consolidation_requests_contract_call::(evm_config, &mut evm_post_block) + apply_consolidation_requests_contract_call(evm_config, &mut evm_post_block) } /// Applies the post-block call to the EIP-7251 consolidation requests contract. 
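The builder added above is the intended replacement for ad-hoc `EvmBuilder` calls. A hedged sketch of its usage against `revm`'s `EmptyDB` (illustrative only; `NoOpInspector` stands in for a real inspector):

```rust
use reth_evm::builder::RethEvmBuilder;
use revm::{db::EmptyDB, inspectors::NoOpInspector};

fn build_evms() {
    // Plain EVM over an empty database, unit external context, default env.
    let evm = RethEvmBuilder::new(EmptyDB::default(), ()).build();
    drop(evm);

    // The inspector variant also registers the inspector handle, so the
    // inspector's hooks actually fire during execution.
    let evm = RethEvmBuilder::new(EmptyDB::default(), ()).build_with_inspector(NoOpInspector);
    drop(evm);
}
```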
diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 58a2695b89e0..48e658c408de 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -14,39 +14,40 @@ workspace = true [dependencies] ## reth reth-config.workspace = true +reth-evm.workspace = true reth-exex-types.workspace = true reth-metrics.workspace = true +reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-primitives.workspace = true +reth-payload-builder.workspace = true reth-primitives-traits.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true -reth-tasks.workspace = true -reth-tracing.workspace = true -reth-network.workspace = true -reth-payload-builder.workspace = true -reth-evm.workspace = true reth-prune-types.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true +reth-tasks.workspace = true +reth-tracing.workspace = true ## async -tokio.workspace = true +futures.workspace = true tokio-util.workspace = true +tokio.workspace = true ## misc eyre.workspace = true metrics.workspace = true [dev-dependencies] -reth-chainspec.workspace = true -reth-evm-ethereum.workspace = true -reth-testing-utils.workspace = true reth-blockchain-tree.workspace = true +reth-chainspec.workspace = true +reth-db-api.workspace = true reth-db-common.workspace = true +reth-evm-ethereum.workspace = true reth-node-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } -reth-db-api.workspace = true +reth-testing-utils.workspace = true secp256k1.workspace = true diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs new file mode 100644 index 000000000000..6e845e240954 --- /dev/null +++ b/crates/exex/exex/src/backfill/factory.rs @@ -0,0 +1,79 @@ +use crate::BackfillJob; +use std::ops::RangeInclusive; + +use reth_node_api::FullNodeComponents; +use reth_primitives::BlockNumber; +use reth_prune_types::PruneModes; +use reth_stages_api::ExecutionStageThresholds; + +use super::stream::DEFAULT_PARALLELISM; + +/// Factory for creating new backfill jobs. +#[derive(Debug, Clone)] +pub struct BackfillJobFactory { + executor: E, + provider: P, + prune_modes: PruneModes, + thresholds: ExecutionStageThresholds, + stream_parallelism: usize, +} + +impl BackfillJobFactory { + /// Creates a new [`BackfillJobFactory`]. + pub fn new(executor: E, provider: P) -> Self { + Self { + executor, + provider, + prune_modes: PruneModes::none(), + thresholds: ExecutionStageThresholds::default(), + stream_parallelism: DEFAULT_PARALLELISM, + } + } + + /// Sets the prune modes + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } + + /// Sets the thresholds + pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self { + self.thresholds = thresholds; + self + } + + /// Sets the stream parallelism. + /// + /// Configures the [`BackFillJobStream`](super::stream::BackFillJobStream) created via + /// [`BackfillJob::into_stream`]. + pub const fn with_stream_parallelism(mut self, stream_parallelism: usize) -> Self { + self.stream_parallelism = stream_parallelism; + self + } +} + +impl BackfillJobFactory { + /// Creates a new backfill job for the given range. 
+ pub fn backfill(&self, range: RangeInclusive) -> BackfillJob { + BackfillJob { + executor: self.executor.clone(), + provider: self.provider.clone(), + prune_modes: self.prune_modes.clone(), + range, + thresholds: self.thresholds.clone(), + stream_parallelism: self.stream_parallelism, + } + } +} + +impl BackfillJobFactory<(), ()> { + /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`]. + pub fn new_from_components( + components: Node, + ) -> BackfillJobFactory { + BackfillJobFactory::<_, _>::new( + components.block_executor().clone(), + components.provider().clone(), + ) + } +} diff --git a/crates/exex/exex/src/backfill.rs b/crates/exex/exex/src/backfill/job.rs similarity index 57% rename from crates/exex/exex/src/backfill.rs rename to crates/exex/exex/src/backfill/job.rs index 36f00573437c..e3c04815bb2f 100644 --- a/crates/exex/exex/src/backfill.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -1,7 +1,12 @@ +use crate::BackFillJobStream; +use std::{ + ops::RangeInclusive, + time::{Duration, Instant}, +}; + use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_node_api::FullNodeComponents; use reth_primitives::{Block, BlockNumber, BlockWithSenders, Receipt}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ @@ -11,68 +16,6 @@ use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ExecutionStageThresholds; use reth_tracing::tracing::{debug, trace}; -use std::{ - ops::RangeInclusive, - time::{Duration, Instant}, -}; - -/// Factory for creating new backfill jobs. -#[derive(Debug, Clone)] -pub struct BackfillJobFactory { - executor: E, - provider: P, - prune_modes: PruneModes, - thresholds: ExecutionStageThresholds, -} - -impl BackfillJobFactory { - /// Creates a new [`BackfillJobFactory`]. - pub fn new(executor: E, provider: P) -> Self { - Self { - executor, - provider, - prune_modes: PruneModes::none(), - thresholds: ExecutionStageThresholds::default(), - } - } - - /// Sets the prune modes - pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { - self.prune_modes = prune_modes; - self - } - - /// Sets the thresholds - pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self { - self.thresholds = thresholds; - self - } -} - -impl BackfillJobFactory { - /// Creates a new backfill job for the given range. - pub fn backfill(&self, range: RangeInclusive) -> BackfillJob { - BackfillJob { - executor: self.executor.clone(), - provider: self.provider.clone(), - prune_modes: self.prune_modes.clone(), - range, - thresholds: self.thresholds.clone(), - } - } -} - -impl BackfillJobFactory<(), ()> { - /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`]. - pub fn new_from_components( - components: Node, - ) -> BackfillJobFactory { - BackfillJobFactory::<_, _>::new( - components.block_executor().clone(), - components.provider().clone(), - ) - } -} /// Backfill job started for a specific range. 
/// @@ -80,11 +23,12 @@ impl BackfillJobFactory<(), ()> { /// and yields [`Chain`] #[derive(Debug)] pub struct BackfillJob { - executor: E, - provider: P, - prune_modes: PruneModes, - thresholds: ExecutionStageThresholds, - range: RangeInclusive, + pub(crate) executor: E, + pub(crate) provider: P, + pub(crate) prune_modes: PruneModes, + pub(crate) thresholds: ExecutionStageThresholds, + pub(crate) range: RangeInclusive, + pub(crate) stream_parallelism: usize, } impl Iterator for BackfillJob @@ -198,11 +142,15 @@ impl BackfillJob { pub fn into_single_blocks(self) -> SingleBlockBackfillJob { self.into() } -} -impl From> for SingleBlockBackfillJob { - fn from(value: BackfillJob) -> Self { - Self { executor: value.executor, provider: value.provider, range: value.range } + /// Converts the backfill job into a backfill job stream. + pub fn into_stream(self) -> BackFillJobStream + where + E: BlockExecutorProvider + Clone + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + 'static, + { + let parallelism = self.stream_parallelism; + BackFillJobStream::new(self.into_single_blocks()).with_parallelism(parallelism) } } @@ -210,11 +158,11 @@ impl From> for SingleBlockBackfillJob { /// /// It implements [`Iterator`] which executes a block each time the /// iterator is advanced and yields ([`BlockWithSenders`], [`BlockExecutionOutput`]) -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct SingleBlockBackfillJob { executor: E, provider: P, - range: RangeInclusive, + pub(crate) range: RangeInclusive, } impl Iterator for SingleBlockBackfillJob @@ -234,7 +182,7 @@ where E: BlockExecutorProvider, P: HeaderProvider + BlockReader + StateProviderFactory, { - fn execute_block( + pub(crate) fn execute_block( &self, block_number: u64, ) -> Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError> { @@ -262,176 +210,29 @@ where } } +impl From> for SingleBlockBackfillJob { + fn from(value: BackfillJob) -> Self { + Self { executor: value.executor, provider: value.provider, range: value.range } + } +} + #[cfg(test)] mod tests { - use crate::BackfillJobFactory; - use eyre::OptionExt; + use std::sync::Arc; + + use crate::{ + backfill::test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome}, + BackfillJobFactory, + }; use reth_blockchain_tree::noop::NoopBlockchainTree; - use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET}; use reth_db_common::init::init_genesis; - use reth_evm::execute::{ - BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, - }; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::{ - b256, constants::ETH_TO_WEI, public_key_to_address, Address, Block, BlockWithSenders, - Genesis, GenesisAccount, Header, Receipt, Requests, SealedBlockWithSenders, Transaction, - TxEip2930, TxKind, U256, - }; + use reth_primitives::public_key_to_address; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, - BlockWriter, ExecutionOutcome, LatestStateProviderRef, ProviderFactory, }; - use reth_revm::database::StateProviderDatabase; - use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; + use reth_testing_utils::generators; use secp256k1::Keypair; - use std::sync::Arc; - - fn to_execution_outcome( - block_number: u64, - block_execution_output: &BlockExecutionOutput, - ) -> ExecutionOutcome { - ExecutionOutcome { - bundle: block_execution_output.state.clone(), - receipts: block_execution_output.receipts.clone().into(), - 
first_block: block_number, - requests: vec![Requests(block_execution_output.requests.clone())], - } - } - - fn chain_spec(address: Address) -> Arc { - // Create a chain spec with a genesis state that contains the - // provided sender - Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(Genesis { - alloc: [( - address, - GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() }, - )] - .into(), - ..MAINNET.genesis.clone() - }) - .paris_activated() - .build(), - ) - } - - fn execute_block_and_commit_to_database( - provider_factory: &ProviderFactory, - chain_spec: Arc, - block: &BlockWithSenders, - ) -> eyre::Result> - where - DB: reth_db_api::database::Database, - { - let provider = provider_factory.provider()?; - - // Execute the block to produce a block execution output - let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider().clone(), - ))) - .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; - block_execution_output.state.reverts.sort(); - - // Convert the block execution output to an execution outcome for committing to the database - let execution_outcome = to_execution_outcome(block.number, &block_execution_output); - - // Commit the block's execution outcome to the database - let provider_rw = provider_factory.provider_rw()?; - let block = block.clone().seal_slow(); - provider_rw.append_blocks_with_state( - vec![block], - execution_outcome, - Default::default(), - Default::default(), - )?; - provider_rw.commit()?; - - Ok(block_execution_output) - } - - fn blocks_and_execution_outputs( - provider_factory: ProviderFactory, - chain_spec: Arc, - key_pair: Keypair, - ) -> eyre::Result)>> - where - DB: reth_db_api::database::Database, - { - // First block has a transaction that transfers some ETH to zero address - let block1 = Block { - header: Header { - parent_hash: chain_spec.genesis_hash(), - receipts_root: b256!( - "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" - ), - difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), - number: 1, - gas_limit: 21000, - gas_used: 21000, - ..Default::default() - }, - body: vec![sign_tx_with_key_pair( - key_pair, - Transaction::Eip2930(TxEip2930 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21000, - gas_price: 1_500_000_000, - to: TxKind::Call(Address::ZERO), - value: U256::from(0.1 * ETH_TO_WEI as f64), - ..Default::default() - }), - )], - ..Default::default() - } - .with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - // Second block resends the same transaction with increased nonce - let block2 = Block { - header: Header { - parent_hash: block1.header.hash_slow(), - receipts_root: b256!( - "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" - ), - difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), - number: 2, - gas_limit: 21000, - gas_used: 21000, - ..Default::default() - }, - body: vec![sign_tx_with_key_pair( - key_pair, - Transaction::Eip2930(TxEip2930 { - chain_id: chain_spec.chain.id(), - nonce: 1, - gas_limit: 21000, - gas_price: 1_500_000_000, - to: TxKind::Call(Address::ZERO), - value: U256::from(0.1 * ETH_TO_WEI as f64), - ..Default::default() - }), - )], - ..Default::default() - } - .with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - let block_output1 = - 
execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?; - let block_output2 = - execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?; - - let block1 = block1.seal_slow(); - let block2 = block2.seal_slow(); - - Ok(vec![(block1, block_output1), (block2, block_output2)]) - } #[test] fn test_backfill() -> eyre::Result<()> { diff --git a/crates/exex/exex/src/backfill/mod.rs b/crates/exex/exex/src/backfill/mod.rs new file mode 100644 index 000000000000..51f126223fdf --- /dev/null +++ b/crates/exex/exex/src/backfill/mod.rs @@ -0,0 +1,9 @@ +mod factory; +mod job; +mod stream; +#[cfg(test)] +mod test_utils; + +pub use factory::BackfillJobFactory; +pub use job::{BackfillJob, SingleBlockBackfillJob}; +pub use stream::BackFillJobStream; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs new file mode 100644 index 000000000000..5529301bccc6 --- /dev/null +++ b/crates/exex/exex/src/backfill/stream.rs @@ -0,0 +1,161 @@ +use crate::SingleBlockBackfillJob; +use std::{ + ops::RangeInclusive, + pin::Pin, + task::{ready, Context, Poll}, +}; + +use futures::{ + stream::{FuturesOrdered, Stream}, + StreamExt, +}; +use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; +use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_provider::{BlockReader, HeaderProvider, StateProviderFactory}; +use tokio::task::JoinHandle; + +type BackfillTasks = FuturesOrdered< + JoinHandle<Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError>>, +>; + +/// The default parallelism for active tasks in [`BackFillJobStream`]. +pub(crate) const DEFAULT_PARALLELISM: usize = 4; + +/// Stream for processing backfill jobs asynchronously. +/// +/// This struct manages the execution of [`SingleBlockBackfillJob`] tasks, allowing blocks to be +/// processed asynchronously but in order within a specified range. +#[derive(Debug)] +pub struct BackFillJobStream<E, P> { + job: SingleBlockBackfillJob<E, P>, + tasks: BackfillTasks, + range: RangeInclusive<BlockNumber>, + parallelism: usize, +} + +impl<E, P> BackFillJobStream<E, P> +where + E: BlockExecutorProvider + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + 'static, +{ + /// Creates a new [`BackFillJobStream`] with the default parallelism. + /// + /// # Parameters + /// - `job`: The [`SingleBlockBackfillJob`] to be executed asynchronously. + /// + /// # Returns + /// A new instance of [`BackFillJobStream`] with the default parallelism. + pub fn new(job: SingleBlockBackfillJob<E, P>) -> Self { + let range = job.range.clone(); + Self { job, tasks: FuturesOrdered::new(), range, parallelism: DEFAULT_PARALLELISM } + } + + /// Configures the parallelism of the [`BackFillJobStream`] to handle active tasks. + /// + /// # Parameters + /// - `parallelism`: The parallelism to handle active tasks. + /// + /// # Returns + /// The modified instance of [`BackFillJobStream`] with the specified parallelism.
+ pub const fn with_parallelism(mut self, parallelism: usize) -> Self { + self.parallelism = parallelism; + self + } + + fn spawn_task( + &self, + block_number: BlockNumber, + ) -> JoinHandle<Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError>> + { + let job = self.job.clone(); + tokio::task::spawn_blocking(move || job.execute_block(block_number)) + } +} + +impl<E, P> Stream for BackFillJobStream<E, P> +where + E: BlockExecutorProvider + Clone + Send + 'static, + P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + 'static + Unpin, +{ + type Item = Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { + let this = self.get_mut(); + + // Spawn new tasks only if we are below the parallelism configured. + while this.tasks.len() < this.parallelism { + if let Some(block_number) = this.range.next() { + let task = this.spawn_task(block_number); + this.tasks.push_back(task); + } else { + break; + } + } + + match ready!(this.tasks.poll_next_unpin(cx)) { + Some(res) => Poll::Ready(Some(res.map_err(|e| BlockExecutionError::Other(e.into()))?)), + None => Poll::Ready(None), + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use crate::{ + backfill::test_utils::{blocks_and_execution_outputs, chain_spec}, + BackfillJobFactory, + }; + use futures::StreamExt; + use reth_blockchain_tree::noop::NoopBlockchainTree; + use reth_db_common::init::init_genesis; + use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_primitives::public_key_to_address; + use reth_provider::{ + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, + }; + use reth_testing_utils::generators; + use secp256k1::Keypair; + + #[tokio::test] + async fn test_async_backfill() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = Keypair::new_global(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthExecutorProvider::ethereum(chain_spec.clone()); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new( + provider_factory.clone(), + Arc::new(NoopBlockchainTree::default()), + )?; + + // Create first 2 blocks + let blocks_and_execution_outcomes = + blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; + + // Backfill the first block + let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone()); + let mut backfill_stream = factory.backfill(1..=1).into_stream(); + + // execute first block + let (block, mut execution_output) = backfill_stream.next().await.unwrap().unwrap(); + execution_output.state.reverts.sort(); + let sealed_block_with_senders = blocks_and_execution_outcomes[0].0.clone(); + let expected_block = sealed_block_with_senders.unseal(); + let expected_output = &blocks_and_execution_outcomes[0].1; + assert_eq!(block, expected_block); + assert_eq!(&execution_output, expected_output); + + // expect no more blocks + assert!(backfill_stream.next().await.is_none()); + + Ok(()) + } +} diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs new file mode 100644 index 000000000000..05b41cd2b998 --- /dev/null +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -0,0 +1,162 @@ +use std::sync::Arc; + +use eyre::OptionExt; +use reth_chainspec::{ChainSpec,
ChainSpecBuilder, EthereumHardfork, MAINNET}; +use reth_evm::execute::{ + BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, +}; +use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_primitives::{ + b256, constants::ETH_TO_WEI, Address, Block, BlockWithSenders, Genesis, GenesisAccount, Header, + Receipt, Requests, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, +}; +use reth_provider::{BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, ProviderFactory}; +use reth_revm::database::StateProviderDatabase; +use reth_testing_utils::generators::sign_tx_with_key_pair; +use secp256k1::Keypair; + +pub(crate) fn to_execution_outcome( + block_number: u64, + block_execution_output: &BlockExecutionOutput, +) -> ExecutionOutcome { + ExecutionOutcome { + bundle: block_execution_output.state.clone(), + receipts: block_execution_output.receipts.clone().into(), + first_block: block_number, + requests: vec![Requests(block_execution_output.requests.clone())], + } +} + +pub(crate) fn chain_spec(address: Address) -> Arc { + // Create a chain spec with a genesis state that contains the + // provided sender + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis { + alloc: [( + address, + GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() }, + )] + .into(), + ..MAINNET.genesis.clone() + }) + .paris_activated() + .build(), + ) +} + +pub(crate) fn execute_block_and_commit_to_database( + provider_factory: &ProviderFactory, + chain_spec: Arc, + block: &BlockWithSenders, +) -> eyre::Result> +where + DB: reth_db_api::database::Database, +{ + let provider = provider_factory.provider()?; + + // Execute the block to produce a block execution output + let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) + .executor(StateProviderDatabase::new(LatestStateProviderRef::new( + provider.tx_ref(), + provider.static_file_provider().clone(), + ))) + .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; + block_execution_output.state.reverts.sort(); + + // Convert the block execution output to an execution outcome for committing to the database + let execution_outcome = to_execution_outcome(block.number, &block_execution_output); + + // Commit the block's execution outcome to the database + let provider_rw = provider_factory.provider_rw()?; + let block = block.clone().seal_slow(); + provider_rw.append_blocks_with_state( + vec![block], + execution_outcome, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + Ok(block_execution_output) +} + +pub(crate) fn blocks_and_execution_outputs( + provider_factory: ProviderFactory, + chain_spec: Arc, + key_pair: Keypair, +) -> eyre::Result)>> +where + DB: reth_db_api::database::Database, +{ + // First block has a transaction that transfers some ETH to zero address + let block1 = Block { + header: Header { + parent_hash: chain_spec.genesis_hash(), + receipts_root: b256!( + "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: 1, + gas_limit: 21000, + gas_used: 21000, + ..Default::default() + }, + body: vec![sign_tx_with_key_pair( + key_pair, + Transaction::Eip2930(TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce: 0, + gas_limit: 21000, + gas_price: 1_500_000_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(0.1 * ETH_TO_WEI as f64), + ..Default::default() + }), + )], + ..Default::default() + } + 
.with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + // Second block resends the same transaction with increased nonce + let block2 = Block { + header: Header { + parent_hash: block1.header.hash_slow(), + receipts_root: b256!( + "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" + ), + difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), + number: 2, + gas_limit: 21000, + gas_used: 21000, + ..Default::default() + }, + body: vec![sign_tx_with_key_pair( + key_pair, + Transaction::Eip2930(TxEip2930 { + chain_id: chain_spec.chain.id(), + nonce: 1, + gas_limit: 21000, + gas_price: 1_500_000_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(0.1 * ETH_TO_WEI as f64), + ..Default::default() + }), + )], + ..Default::default() + } + .with_recovered_senders() + .ok_or_eyre("failed to recover senders")?; + + let block_output1 = + execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?; + let block_output2 = + execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?; + + let block1 = block1.seal_slow(); + let block2 = block2.seal_slow(); + + Ok(vec![(block1, block_output1), (block2, block_output2)]) +} diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index 2a18fbde866d..c32a8903e0a9 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -232,7 +232,7 @@ mod test { use tokio_util::codec::Decoder; #[derive(Debug, PartialEq, Eq, RlpDecodable)] - pub struct MockReceipt { + struct MockReceipt { tx_type: u8, status: u64, cumulative_gas_used: u64, diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 671883dae68e..a2df9896541d 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -38,7 +38,6 @@ alloy-chains = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -proptest-derive.workspace = true rand.workspace = true [features] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 2846c0f7cf02..355491783b61 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -56,7 +56,6 @@ secp256k1 = { workspace = true, features = [ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -proptest-derive.workspace = true async-stream.workspace = true serde.workspace = true diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index aa8770d058c6..466987768ead 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -613,19 +613,24 @@ where /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. 
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); - loop { - match ready!(this.inner.as_mut().poll_flush(cx)) { - Err(err) => return Poll::Ready(Err(err.into())), - Ok(()) => { + let poll_res = loop { + match this.inner.as_mut().poll_ready(cx) { + Poll::Pending => break Poll::Pending, + Poll::Ready(Err(err)) => break Poll::Ready(Err(err.into())), + Poll::Ready(Ok(())) => { let Some(message) = this.outgoing_messages.pop_front() else { - return Poll::Ready(Ok(())) + break Poll::Ready(Ok(())) }; if let Err(err) = this.inner.as_mut().start_send(message) { - return Poll::Ready(Err(err.into())) + break Poll::Ready(Err(err.into())) } } } - } + }; + + ready!(this.inner.as_mut().poll_flush(cx))?; + + poll_res } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 8efaec5f0fb7..1e23390fd1e2 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -18,7 +18,6 @@ pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; use reth_network_peers::NodeRecord; -use serde::{Deserialize, Serialize}; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; /// The `PeerId` type. @@ -246,7 +245,8 @@ impl std::fmt::Display for Direction { } /// The status of the network being ran by the local node. -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NetworkStatus { /// The local node client version. pub client_version: String, diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index a22dbd532fb7..d37319723e28 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -149,12 +149,12 @@ pub use session::{ SessionManager, }; -pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; - pub use flattened_response::FlattenedResponse; pub use manager::DiscoveredEvent; pub use metrics::TxTypesCounter; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; pub use reth_network_types::{PeersConfig, SessionsConfig}; pub use session::EthRlpxConnection; pub use swarm::NetworkConnectionState; +pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 22db838c8a66..f0d9eef1beba 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -47,7 +47,7 @@ impl AnyNodeTypes { impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, - E: EngineTypes + Send + Sync + Unpin + 'static, + E: EngineTypes + Send + Sync + Unpin, { type Primitives = P; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 5fc3da56a7d2..bb77e419c29a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -29,6 +29,7 @@ reth-rpc-builder.workspace = true reth-rpc-layer.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true +reth-node-metrics.workspace = true reth-network.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 
f3c8889ea348..9c146be0827a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -29,7 +29,7 @@ use reth_node_core::{ rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, }; use reth_primitives::revm_primitives::EnvKzgSettings; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; +use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use secp256k1::SecretKey; @@ -40,7 +40,7 @@ use crate::{ components::NodeComponentsBuilder, node::FullNode, rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, - DefaultNodeLauncher, Node, NodeHandle, + DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; /// The adapter type for a reth node with the builtin provider type @@ -207,6 +207,17 @@ where pub fn with_types(self) -> NodeBuilderWithTypes> where T: NodeTypes, + { + self.with_types_and_provider() + } + + /// Configures the types of the node and the provider type that will be used by the node. + pub fn with_types_and_provider( + self, + ) -> NodeBuilderWithTypes> + where + T: NodeTypes, + P: FullProvider, { NodeBuilderWithTypes::new(self.config, self.database) } @@ -258,6 +269,20 @@ where WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } + /// Configures the types of the node and the provider type that will be used by the node. + pub fn with_types_and_provider( + self, + ) -> WithLaunchContext>> + where + T: NodeTypes, + P: FullProvider, + { + WithLaunchContext { + builder: self.builder.with_types_and_provider(), + task_executor: self.task_executor, + } + } + /// Preconfigures the node with a specific node implementation. /// /// This is a convenience method that sets the node's types and components in one call. @@ -308,18 +333,14 @@ where } } -impl WithLaunchContext>> -where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, -{ +impl WithLaunchContext> { /// Advances the state of the node builder to the next state where all components are configured pub fn with_components( self, components_builder: CB, - ) -> WithLaunchContext, CB, ()>> + ) -> WithLaunchContext> where - CB: NodeComponentsBuilder>, + CB: NodeComponentsBuilder, { WithLaunchContext { builder: self.builder.with_components(components_builder), @@ -328,20 +349,16 @@ where } } -impl WithLaunchContext, CB, ()>> +impl WithLaunchContext> where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, - CB: NodeComponentsBuilder>, + T: FullNodeTypes, + CB: NodeComponentsBuilder, { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. - pub fn with_add_ons( - self, - ) -> WithLaunchContext, CB, AO>> + pub fn with_add_ons(self) -> WithLaunchContext> where - CB: NodeComponentsBuilder>, - AO: NodeAddOns, CB::Components>>, + AO: NodeAddOns>, { WithLaunchContext { builder: self.builder.with_add_ons::(), @@ -350,20 +367,22 @@ where } } -impl WithLaunchContext, CB, AO>> +impl WithLaunchContext> where - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypes, - CB: NodeComponentsBuilder>, - AO: NodeAddOns, CB::Components>>, + T: FullNodeTypes, + CB: NodeComponentsBuilder, + AO: NodeAddOns>, AO::EthApi: FullEthApiServer + AddDevSigners, { + /// Returns a reference to the node builder's config. 
+ pub const fn config(&self) -> &NodeConfig { + &self.builder.config + } + /// Sets the hook that is run once the node's components are initialized. pub fn on_component_initialized(self, hook: F) -> Self where - F: FnOnce(NodeAdapter, CB::Components>) -> eyre::Result<()> - + Send - + 'static, + F: FnOnce(NodeAdapter) -> eyre::Result<()> + Send + 'static, { Self { builder: self.builder.on_component_initialized(hook), @@ -374,9 +393,7 @@ where /// Sets the hook that is run once the node has started. pub fn on_node_started(self, hook: F) -> Self where - F: FnOnce( - FullNode, CB::Components>, AO>, - ) -> eyre::Result<()> + F: FnOnce(FullNode, AO>) -> eyre::Result<()> + Send + 'static, { @@ -387,7 +404,7 @@ where pub fn on_rpc_started(self, hook: F) -> Self where F: FnOnce( - RpcContext<'_, NodeAdapter, CB::Components>, AO::EthApi>, + RpcContext<'_, NodeAdapter, AO::EthApi>, RethRpcServerHandles, ) -> eyre::Result<()> + Send @@ -399,9 +416,7 @@ where /// Sets the hook that is run to configure the rpc modules. pub fn extend_rpc_modules(self, hook: F) -> Self where - F: FnOnce( - RpcContext<'_, NodeAdapter, CB::Components>, AO::EthApi>, - ) -> eyre::Result<()> + F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> + Send + 'static, { @@ -415,9 +430,7 @@ where /// The `ExEx` ID must be unique. pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self where - F: FnOnce(ExExContext, CB::Components>>) -> R - + Send - + 'static, + F: FnOnce(ExExContext>) -> R + Send + 'static, R: Future> + Send, E: Future> + Send, { @@ -427,6 +440,22 @@ where } } + /// Launches the node with the given launcher. + pub async fn launch_with(self, launcher: L) -> eyre::Result + where + L: LaunchNode>, + { + launcher.launch_node(self.builder).await + } + + /// Launches the node with the given closure. + pub fn launch_with_fn(self, launcher: L) -> R + where + L: FnOnce(Self) -> R, + { + launcher(self) + } + /// Check that the builder can be launched /// /// This is useful when writing tests to ensure that the builder is configured correctly. 
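The reworked launch path is easiest to see end to end. The sketch below mirrors the `test_eth_launcher` test from earlier in this patch, with the generics restored; the exact `EthNodeLauncher::new` arguments are an assumption based on the default launcher's signature:

```rust
use std::sync::Arc;

use reth_db::{
    test_utils::{create_test_rw_db, TempDatabase},
    DatabaseEnv,
};
use reth_node_builder::{NodeBuilder, NodeConfig};
use reth_node_ethereum::{
    launch::EthNodeLauncher,
    node::{EthereumAddOns, EthereumNode},
};
use reth_provider::providers::BlockchainProvider2;
use reth_tasks::TaskManager;

async fn launch_eth_node() {
    let tasks = TaskManager::current();
    let config = NodeConfig::test();
    let db = create_test_rw_db();
    let _builder = NodeBuilder::new(config)
        .with_database(db)
        // Select the provider type explicitly instead of the default
        // `BlockchainProvider`:
        .with_types_and_provider::<EthereumNode, BlockchainProvider2<Arc<TempDatabase<DatabaseEnv>>>>()
        .with_components(EthereumNode::components())
        .with_add_ons::<EthereumAddOns>()
        .launch_with_fn(|builder| {
            // Assumed constructor shape: an executor plus the node's datadir.
            let launcher = EthNodeLauncher::new(tasks.executor(), builder.config().datadir());
            builder.launch_with(launcher)
        });
}
```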
diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index b141768d5f49..bdad79c92dba 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -11,8 +11,7 @@ use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ - noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, + BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; use reth_chainspec::{Chain, ChainSpec}; use reth_config::{config::EtlConfig, PruneConfig}; @@ -26,11 +25,21 @@ use reth_node_api::FullNodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, + version::{ + BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, + VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, + }, +}; +use reth_node_metrics::{ + hooks::Hooks, + server::{MetricServer, MetricServerConfig}, + version::VersionInfo, }; use reth_primitives::{BlockNumber, Head, B256}; use reth_provider::{ - providers::{BlockchainProvider, StaticFileProvider}, - CanonStateNotificationSender, ProviderFactory, StaticFileProviderFactory, + providers::{BlockchainProvider, BlockchainProvider2, StaticFileProvider}, + CanonStateNotificationSender, FullProvider, ProviderFactory, StaticFileProviderFactory, + TreeViewer, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -45,6 +54,27 @@ use tokio::sync::{ oneshot, watch, }; +/// Allows to set a tree viewer for a configured blockchain provider. +// TODO: remove this helper trait once the engine revamp is done, the new +// blockchain provider won't require a TreeViewer. +// https://github.com/paradigmxyz/reth/issues/8742 +pub trait WithTree { + /// Setter for tree viewer. + fn set_tree(self, tree: Arc) -> Self; +} + +impl WithTree for BlockchainProvider { + fn set_tree(self, tree: Arc) -> Self { + self.with_tree(tree) + } +} + +impl WithTree for BlockchainProvider2 { + fn set_tree(self, _tree: Arc) -> Self { + self + } +} + /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -371,8 +401,6 @@ where let has_receipt_pruning = self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); - info!(target: "reth::cli", "Verifying storage consistency."); - // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. if let Some(unwind_target) = factory @@ -454,23 +482,37 @@ where self.right().static_file_provider() } + /// This launches the prometheus endpoint. + /// /// Convenience function to [`Self::start_prometheus_endpoint`] - pub async fn with_prometheus(self) -> eyre::Result { + pub async fn with_prometheus_server(self) -> eyre::Result { self.start_prometheus_endpoint().await?; Ok(self) } /// Starts the prometheus endpoint. 
pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { - let prometheus_handle = self.node_config().install_prometheus_recorder()?; - self.node_config() - .start_metrics_endpoint( - prometheus_handle, - self.database().clone(), - self.static_file_provider(), + let listen_addr = self.node_config().metrics; + if let Some(addr) = listen_addr { + info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); + let config = MetricServerConfig::new( + addr, + VersionInfo { + version: CARGO_PKG_VERSION, + build_timestamp: VERGEN_BUILD_TIMESTAMP, + cargo_features: VERGEN_CARGO_FEATURES, + git_sha: VERGEN_GIT_SHA, + target_triple: VERGEN_CARGO_TARGET_TRIPLE, + build_profile: BUILD_PROFILE_NAME, + }, self.task_executor().clone(), - ) - .await + Hooks::new(self.database().clone(), self.static_file_provider()), + ); + + MetricServer::new(config).serve().await?; + } + + Ok(()) } /// Convenience function to [`Self::init_genesis`] @@ -486,7 +528,12 @@ where /// Creates a new `WithMeteredProvider` container and attaches it to the /// launch context. - pub fn with_metrics(self) -> LaunchContextWith>> { + /// + /// This spawns a metrics task that listens for metrics related events and updates metrics for + /// prometheus. + pub fn with_metrics_task( + self, + ) -> LaunchContextWith>> { let (metrics_sender, metrics_receiver) = unbounded_channel(); let with_metrics = @@ -518,24 +565,18 @@ where } /// Creates a `BlockchainProvider` and attaches it to the launch context. - pub fn with_blockchain_db( + pub fn with_blockchain_db( self, + create_blockchain_provider: F, + tree_config: BlockchainTreeConfig, + canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result>>> where - T: FullNodeTypes::DB>>, + T: FullNodeTypes, + T::Provider: FullProvider, + F: FnOnce(ProviderFactory) -> eyre::Result, { - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - - let blockchain_db = BlockchainProvider::new( - self.provider_factory().clone(), - Arc::new(NoopBlockchainTree::with_canon_state_notifications( - canon_state_notification_sender.clone(), - )), - )?; + let blockchain_db = create_blockchain_provider(self.provider_factory().clone())?; let metered_providers = WithMeteredProviders { db_provider_container: WithMeteredProvider { @@ -561,7 +602,8 @@ where impl LaunchContextWith>> where DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes>, + T: FullNodeTypes, + T::Provider: FullProvider + WithTree, { /// Returns access to the underlying database. pub fn database(&self) -> &DB { @@ -587,8 +629,8 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the `BlockchainProvider`. - pub const fn blockchain_db(&self) -> &BlockchainProvider { + /// Returns a reference to the blockchain provider. 
+ pub const fn blockchain_db(&self) -> &T::Provider { &self.right().blockchain_db } @@ -643,7 +685,7 @@ where let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().with_tree(blockchain_tree); + let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); debug!(target: "reth::cli", "configured blockchain tree"); @@ -680,7 +722,8 @@ where impl LaunchContextWith>> where DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes>, + T: FullNodeTypes, + T::Provider: FullProvider + WithTree, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -717,8 +760,8 @@ where &self.right().node_adapter } - /// Returns a reference to the `BlockchainProvider`. - pub const fn blockchain_db(&self) -> &BlockchainProvider { + /// Returns a reference to the blockchain provider. + pub const fn blockchain_db(&self) -> &T::Provider { &self.right().blockchain_db } @@ -814,9 +857,14 @@ pub struct WithMeteredProvider { /// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] /// and a metrics sender. #[allow(missing_debug_implementations)] -pub struct WithMeteredProviders { +pub struct WithMeteredProviders +where + DB: Database, + T: FullNodeTypes, + T::Provider: FullProvider, +{ db_provider_container: WithMeteredProvider, - blockchain_db: BlockchainProvider, + blockchain_db: T::Provider, canon_state_notification_sender: CanonStateNotificationSender, tree_config: BlockchainTreeConfig, // this field is used to store a reference to the FullNodeTypes so that we @@ -828,12 +876,14 @@ pub struct WithMeteredProviders { #[allow(missing_debug_implementations)] pub struct WithComponents where - T: FullNodeTypes>, + DB: Database, + T: FullNodeTypes, + T::Provider: FullProvider, CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, tree_config: BlockchainTreeConfig, - blockchain_db: BlockchainProvider, + blockchain_db: T::Provider, node_adapter: NodeAdapter, head: Head, consensus: Arc, diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index fb22e7908404..138403ed1012 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -6,13 +6,12 @@ mod exex; pub use common::LaunchContext; pub use exex::ExExLauncher; -use std::{future::Future, sync::Arc}; - use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; +use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; @@ -32,6 +31,7 @@ use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; +use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -119,6 +119,19 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; + // TODO: remove tree and move tree_config and canon_state_notification_sender + // initialization to with_blockchain_db once the engine revamp is done + // https://github.com/paradigmxyz/reth/issues/8742 + let tree_config = BlockchainTreeConfig::default(); + + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); + + let tree = Arc::new(NoopBlockchainTree::with_canon_state_notifications( + canon_state_notification_sender.clone(), + )); + // setup the launch context let ctx = ctx .with_configured_globals() @@ -135,7 +148,7 @@ where .inspect(|_| { info!(target: "reth::cli", "Database opened"); }) - .with_prometheus().await? + .with_prometheus_server().await? .inspect(|this| { debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); }) @@ -143,10 +156,12 @@ where .inspect(|this| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); }) - .with_metrics() + .with_metrics_task() // passing FullNodeTypes as type parameter here so that we can build // later the components. - .with_blockchain_db::()? + .with_blockchain_db::(move |provider_factory| { + Ok(BlockchainProvider::new(provider_factory, tree)?) + }, tree_config, canon_state_notification_sender)? .with_components(components_builder, on_component_initialized).await?; // spawn exexs diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 42673c13e0e4..b4317c6b1a95 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -35,7 +35,6 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true -reth-tasks.workspace = true reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true @@ -44,15 +43,6 @@ reth-stages-types.workspace = true alloy-genesis.workspace = true alloy-rpc-types-engine.workspace = true -# async -tokio.workspace = true - -# metrics -reth-metrics.workspace = true -metrics.workspace = true -metrics-exporter-prometheus.workspace = true -metrics-process.workspace = true -metrics-util.workspace = true # misc eyre.workspace = true @@ -61,17 +51,13 @@ humantime.workspace = true const_format.workspace = true rand.workspace = true derive_more.workspace = true -once_cell.workspace = true # io dirs-next = "2.0.0" shellexpand.workspace = true serde_json.workspace = true -# http/rpc -http.workspace = true -jsonrpsee.workspace = true -tower.workspace = true + # tracing tracing.workspace = true @@ -86,15 +72,11 @@ secp256k1 = { workspace = true, features = [ # async futures.workspace = true -[target.'cfg(unix)'.dependencies] -tikv-jemalloc-ctl = { version = "0.5.0", optional = true } - -[target.'cfg(target_os = "linux")'.dependencies] -procfs = "0.16.0" [dev-dependencies] # test vectors generation proptest.workspace = true +tokio.workspace = true [features] optimism = [ @@ -102,10 +84,9 @@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc-eth-api/optimism", - "reth-rpc-eth-types/optimism", ] -jemalloc = ["dep:tikv-jemalloc-ctl"] + [build-dependencies] vergen = { version = "8.0.0", features = ["build", 
"cargo", "git", "gitcl"] } diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 7d1f61903ffb..5aa5c58633fc 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -22,7 +22,7 @@ pub use database::DatabaseArgs; /// LogArgs struct for configuring the logger mod log; -pub use log::{ColorMode, LogArgs}; +pub use log::{ColorMode, LogArgs, Verbosity}; /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 1621f2d8ed8b..620ae3e8050a 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -21,15 +21,18 @@ impl PruningArgs { if !self.full { return None } + Some(PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: None, + // prune all receipts if chain doesn't have deposit contract specified in chain spec receipts: chain_spec .deposit_contract .as_ref() - .map(|contract| PruneMode::Before(contract.block)), + .map(|contract| PruneMode::Before(contract.block)) + .or(Some(PruneMode::Full)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), receipts_log_filter: ReceiptsLogPruneConfig( diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 27a81cc26e7c..52286bea5091 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -12,14 +12,9 @@ pub mod args; pub mod cli; pub mod dirs; pub mod exit; -pub mod metrics; pub mod node_config; pub mod utils; pub mod version; - -// Re-export for backwards compatibility. -pub use metrics::prometheus_exporter; - /// Re-exported from `reth_primitives`. pub mod primitives { pub use reth_primitives::*; diff --git a/crates/node/core/src/metrics/mod.rs b/crates/node/core/src/metrics/mod.rs deleted file mode 100644 index 109c59c9f858..000000000000 --- a/crates/node/core/src/metrics/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Metrics utilities for the node. - -pub mod prometheus_exporter; -pub mod version_metrics; diff --git a/crates/node/core/src/metrics/prometheus_exporter.rs b/crates/node/core/src/metrics/prometheus_exporter.rs deleted file mode 100644 index f19a0e15bc85..000000000000 --- a/crates/node/core/src/metrics/prometheus_exporter.rs +++ /dev/null @@ -1,317 +0,0 @@ -//! Prometheus exporter - -use crate::metrics::version_metrics::VersionInfo; -use eyre::WrapErr; -use http::Response; -use metrics::describe_gauge; -use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; -use metrics_util::layers::{PrefixLayer, Stack}; -use reth_db_api::database_metrics::DatabaseMetrics; -use reth_metrics::metrics::Unit; -use reth_provider::providers::StaticFileProvider; -use reth_tasks::TaskExecutor; -use std::{convert::Infallible, net::SocketAddr, sync::Arc}; - -pub(crate) trait Hook: Fn() + Send + Sync {} -impl Hook for T {} - -/// Installs Prometheus as the metrics recorder. -pub fn install_recorder() -> eyre::Result { - let recorder = PrometheusBuilder::new().build_recorder(); - let handle = recorder.handle(); - - // Build metrics stack - Stack::new(recorder) - .push(PrefixLayer::new("reth")) - .install() - .wrap_err("Couldn't set metrics recorder.")?; - - Ok(handle) -} - -/// Serves Prometheus metrics over HTTP with hooks. 
-/// -/// The hooks are called every time the metrics are requested at the given endpoint, and can be used -/// to record values for pull-style metrics, i.e. metrics that are not automatically updated. -pub(crate) async fn serve_with_hooks( - listen_addr: SocketAddr, - handle: PrometheusHandle, - hooks: impl IntoIterator, - task_executor: TaskExecutor, -) -> eyre::Result<()> { - let hooks: Vec<_> = hooks.into_iter().collect(); - - // Start endpoint - start_endpoint( - listen_addr, - handle, - Arc::new(move || hooks.iter().for_each(|hook| hook())), - task_executor, - ) - .await - .wrap_err("Could not start Prometheus endpoint")?; - - Ok(()) -} - -/// Starts an endpoint at the given address to serve Prometheus metrics. -async fn start_endpoint( - listen_addr: SocketAddr, - handle: PrometheusHandle, - hook: Arc, - task_executor: TaskExecutor, -) -> eyre::Result<()> { - let listener = - tokio::net::TcpListener::bind(listen_addr).await.wrap_err("Could not bind to address")?; - - task_executor.spawn_with_graceful_shutdown_signal(|mut signal| async move { - loop { - let io = tokio::select! { - _ = &mut signal => break, - io = listener.accept() => { - match io { - Ok((stream, _remote_addr)) => stream, - Err(err) => { - tracing::error!(%err, "failed to accept connection"); - continue; - } - } - } - }; - - let handle = handle.clone(); - let hook = hook.clone(); - let service = tower::service_fn(move |_| { - (hook)(); - let metrics = handle.render(); - async move { Ok::<_, Infallible>(Response::new(metrics)) } - }); - - let mut shutdown = signal.clone().ignore_guard(); - tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) - .await - { - tracing::debug!(%error, "failed to serve request") - } - }); - } - }); - - Ok(()) -} - -/// Serves Prometheus metrics over HTTP with database and process metrics. -pub async fn serve( - listen_addr: SocketAddr, - handle: PrometheusHandle, - db: Metrics, - static_file_provider: StaticFileProvider, - process: metrics_process::Collector, - task_executor: TaskExecutor, -) -> eyre::Result<()> -where - Metrics: DatabaseMetrics + 'static + Send + Sync, -{ - let db_metrics_hook = move || db.report_metrics(); - let static_file_metrics_hook = move || { - let _ = static_file_provider.report_metrics().map_err( - |error| tracing::error!(%error, "Failed to report static file provider metrics"), - ); - }; - - // Clone `process` to move it into the hook and use the original `process` for describe below. 
- let cloned_process = process.clone(); - let hooks: Vec>> = vec![ - Box::new(db_metrics_hook), - Box::new(static_file_metrics_hook), - Box::new(move || cloned_process.collect()), - Box::new(collect_memory_stats), - Box::new(collect_io_stats), - ]; - serve_with_hooks(listen_addr, handle, hooks, task_executor).await?; - - // We describe the metrics after the recorder is installed, otherwise this information is not - // registered - describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); - describe_gauge!("db.table_pages", "The number of database pages for a table"); - describe_gauge!("db.table_entries", "The number of entries for a table"); - describe_gauge!("db.freelist", "The number of pages on the freelist"); - describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)"); - describe_gauge!( - "db.timed_out_not_aborted_transactions", - "Number of timed out transactions that were not aborted by the user yet" - ); - - describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment"); - describe_gauge!("static_files.segment_files", "The number of files for a static file segment"); - describe_gauge!( - "static_files.segment_entries", - "The number of entries for a static file segment" - ); - - process.describe(); - describe_memory_stats(); - describe_io_stats(); - VersionInfo::default().register_version_metrics(); - - Ok(()) -} - -#[cfg(all(feature = "jemalloc", unix))] -fn collect_memory_stats() { - use metrics::gauge; - use tikv_jemalloc_ctl::{epoch, stats}; - use tracing::error; - - if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err() - { - return - } - - if let Ok(value) = stats::active::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.active")) - { - gauge!("jemalloc.active").set(value as f64); - } - - if let Ok(value) = stats::allocated::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated")) - { - gauge!("jemalloc.allocated").set(value as f64); - } - - if let Ok(value) = stats::mapped::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped")) - { - gauge!("jemalloc.mapped").set(value as f64); - } - - if let Ok(value) = stats::metadata::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata")) - { - gauge!("jemalloc.metadata").set(value as f64); - } - - if let Ok(value) = stats::resident::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident")) - { - gauge!("jemalloc.resident").set(value as f64); - } - - if let Ok(value) = stats::retained::read() - .map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained")) - { - gauge!("jemalloc.retained").set(value as f64); - } -} - -#[cfg(all(feature = "jemalloc", unix))] -fn describe_memory_stats() { - describe_gauge!( - "jemalloc.active", - Unit::Bytes, - "Total number of bytes in active pages allocated by the application" - ); - describe_gauge!( - "jemalloc.allocated", - Unit::Bytes, - "Total number of bytes allocated by the application" - ); - describe_gauge!( - "jemalloc.mapped", - Unit::Bytes, - "Total number of bytes in active extents mapped by the allocator" - ); - describe_gauge!( - "jemalloc.metadata", - Unit::Bytes, - "Total number of bytes dedicated to jemalloc metadata" - ); - describe_gauge!( - "jemalloc.resident", - Unit::Bytes, - "Total number of bytes in physically resident data pages mapped by the allocator" - ); - describe_gauge!( - "jemalloc.retained", - 
Unit::Bytes, - "Total number of bytes in virtual memory mappings that were retained rather than \ - being returned to the operating system via e.g. munmap(2)" - ); -} - -#[cfg(not(all(feature = "jemalloc", unix)))] -fn collect_memory_stats() {} - -#[cfg(not(all(feature = "jemalloc", unix)))] -fn describe_memory_stats() {} - -#[cfg(target_os = "linux")] -fn collect_io_stats() { - use metrics::counter; - use tracing::error; - - let Ok(process) = procfs::process::Process::myself() - .map_err(|error| error!(%error, "Failed to get currently running process")) - else { - return - }; - - let Ok(io) = process.io().map_err( - |error| error!(%error, "Failed to get IO stats for the currently running process"), - ) else { - return - }; - - counter!("io.rchar").absolute(io.rchar); - counter!("io.wchar").absolute(io.wchar); - counter!("io.syscr").absolute(io.syscr); - counter!("io.syscw").absolute(io.syscw); - counter!("io.read_bytes").absolute(io.read_bytes); - counter!("io.write_bytes").absolute(io.write_bytes); - counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes); -} - -#[cfg(target_os = "linux")] -fn describe_io_stats() { - use metrics::describe_counter; - - describe_counter!("io.rchar", "Characters read"); - describe_counter!("io.wchar", "Characters written"); - describe_counter!("io.syscr", "Read syscalls"); - describe_counter!("io.syscw", "Write syscalls"); - describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read"); - describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written"); - describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes"); -} - -#[cfg(not(target_os = "linux"))] -const fn collect_io_stats() {} - -#[cfg(not(target_os = "linux"))] -const fn describe_io_stats() {} - -#[cfg(test)] -mod tests { - use crate::node_config::PROMETHEUS_RECORDER_HANDLE; - - // Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22) - // may not be able to communicate with each other through the global recorder. - // - // This test ensures that `metrics-process` dependency plays well with the current - // `metrics-exporter-prometheus` dependency version. 
- #[test] - fn process_metrics() { - // initialize the lazy handle - let _ = &*PROMETHEUS_RECORDER_HANDLE; - - let process = metrics_process::Collector::default(); - process.describe(); - process.collect(); - - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); - assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); - } -} diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 1f5bea21beb8..82ed8b660623 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -6,39 +6,27 @@ use crate::{ PruningArgs, RpcServerArgs, TxPoolArgs, }, dirs::{ChainPath, DataDirPath}, - metrics::prometheus_exporter, utils::get_single_header, }; -use metrics_exporter_prometheus::PrometheusHandle; -use once_cell::sync::Lazy; use reth_chainspec::{ChainSpec, MAINNET}; use reth_config::config::PruneConfig; -use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; +use reth_db_api::database::Database; use reth_network_p2p::headers::client::HeadersClient; + use reth_primitives::{ revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256, }; -use reth_provider::{ - providers::StaticFileProvider, BlockHashReader, HeaderProvider, ProviderFactory, - StageCheckpointReader, -}; +use reth_provider::{BlockHashReader, HeaderProvider, ProviderFactory, StageCheckpointReader}; use reth_stages_types::StageId; use reth_storage_errors::provider::ProviderResult; -use reth_tasks::TaskExecutor; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; -/// The default prometheus recorder handle. We use a global static to ensure that it is only -/// installed once. -pub static PROMETHEUS_RECORDER_HANDLE: Lazy = - Lazy::new(|| prometheus_exporter::install_recorder().unwrap()); - /// This includes all necessary configuration to launch the node. /// The individual configuration options can be overwritten before launching the node. /// /// # Example /// ```rust -/// # use reth_tasks::{TaskManager, TaskSpawner}; /// # use reth_node_core::{ /// # node_config::NodeConfig, /// # args::RpcServerArgs, @@ -47,10 +35,6 @@ pub static PROMETHEUS_RECORDER_HANDLE: Lazy = /// # use tokio::runtime::Handle; /// /// async fn t() { -/// let handle = Handle::current(); -/// let manager = TaskManager::new(handle); -/// let executor = manager.executor(); -/// /// // create the builder /// let builder = NodeConfig::default(); /// @@ -66,7 +50,6 @@ pub static PROMETHEUS_RECORDER_HANDLE: Lazy = /// /// # Example /// ```rust -/// # use reth_tasks::{TaskManager, TaskSpawner}; /// # use reth_node_core::{ /// # node_config::NodeConfig, /// # args::RpcServerArgs, @@ -75,10 +58,6 @@ pub static PROMETHEUS_RECORDER_HANDLE: Lazy = /// # use tokio::runtime::Handle; /// /// async fn t() { -/// let handle = Handle::current(); -/// let manager = TaskManager::new(handle); -/// let executor = manager.executor(); -/// /// // create the builder with a test database, using the `test` method /// let builder = NodeConfig::test(); /// @@ -284,38 +263,6 @@ impl NodeConfig { Ok(EnvKzgSettings::Default) } - /// Installs the prometheus recorder. - pub fn install_prometheus_recorder(&self) -> eyre::Result { - Ok(PROMETHEUS_RECORDER_HANDLE.clone()) - } - - /// Serves the prometheus endpoint over HTTP with the given database and prometheus handle. 
- pub async fn start_metrics_endpoint( - &self, - prometheus_handle: PrometheusHandle, - db: Metrics, - static_file_provider: StaticFileProvider, - task_executor: TaskExecutor, - ) -> eyre::Result<()> - where - Metrics: DatabaseMetrics + 'static + Send + Sync, - { - if let Some(listen_addr) = self.metrics { - info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint"); - prometheus_exporter::serve( - listen_addr, - prometheus_handle, - db, - static_file_provider, - metrics_process::Collector::default(), - task_executor, - ) - .await?; - } - - Ok(()) - } - /// Fetches the head block from the database. /// /// If the database is empty, returns the genesis block. diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index adc922787189..78dbcfbcf5e9 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -20,6 +20,12 @@ pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, . /// The build timestamp. pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); +/// The target triple. +pub const VERGEN_CARGO_TARGET_TRIPLE: &str = env!("VERGEN_CARGO_TARGET_TRIPLE"); + +/// The build features. +pub const VERGEN_CARGO_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); + /// The short version information for reth. /// /// - The latest version from Cargo.toml @@ -73,7 +79,8 @@ pub const LONG_VERSION: &str = const_format::concatcp!( BUILD_PROFILE_NAME ); -pub(crate) const BUILD_PROFILE_NAME: &str = { +/// The build profile name. +pub const BUILD_PROFILE_NAME: &str = { // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime // We split on the path separator of the *host* machine, which may be different from // `std::path::MAIN_SEPARATOR_STR`. 
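With the recorder handle and `start_metrics_endpoint` removed from `node_config.rs`, the version constants above are made `pub` so that launch code (or anything else) can assemble the metrics payload itself. A small sketch of the pattern the `start_prometheus_endpoint` hunk earlier relies on; nothing here is new API beyond what this patch exposes:

```rust
use reth_node_core::version::{
    BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES,
    VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA,
};
use reth_node_metrics::version::VersionInfo;

// Assemble the static build metadata once; `register_version_metrics`
// publishes it as an `info` gauge with one label per field.
let version_info = VersionInfo {
    version: CARGO_PKG_VERSION,
    build_timestamp: VERGEN_BUILD_TIMESTAMP,
    cargo_features: VERGEN_CARGO_FEATURES,
    git_sha: VERGEN_GIT_SHA,
    target_triple: VERGEN_CARGO_TARGET_TRIPLE,
    build_profile: BUILD_PROFILE_NAME,
};
version_info.register_version_metrics();
```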
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index bc0cfb1373a9..3fe989ab0b54 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -74,28 +74,30 @@ impl NodeState { self.peers_info.as_ref().map(|info| info.num_connected_peers()).unwrap_or_default() } + fn build_current_stage( + &self, + stage_id: StageId, + checkpoint: StageCheckpoint, + target: Option, + ) -> CurrentStage { + let (eta, entities_checkpoint) = self + .current_stage + .as_ref() + .filter(|current_stage| current_stage.stage_id == stage_id) + .map_or_else( + || (Eta::default(), None), + |current_stage| (current_stage.eta, current_stage.entities_checkpoint), + ); + + CurrentStage { stage_id, eta, checkpoint, entities_checkpoint, target } + } + /// Processes an event emitted by the pipeline fn handle_pipeline_event(&mut self, event: PipelineEvent) { match event { PipelineEvent::Prepare { pipeline_stages_progress, stage_id, checkpoint, target } => { let checkpoint = checkpoint.unwrap_or_default(); - let current_stage = CurrentStage { - stage_id, - eta: match &self.current_stage { - Some(current_stage) if current_stage.stage_id == stage_id => { - current_stage.eta - } - _ => Eta::default(), - }, - checkpoint, - entities_checkpoint: match &self.current_stage { - Some(current_stage) if current_stage.stage_id == stage_id => { - current_stage.entities_checkpoint - } - _ => None, - }, - target, - }; + let current_stage = self.build_current_stage(stage_id, checkpoint, target); info!( pipeline_stages = %pipeline_stages_progress, @@ -109,23 +111,7 @@ impl NodeState { } PipelineEvent::Run { pipeline_stages_progress, stage_id, checkpoint, target } => { let checkpoint = checkpoint.unwrap_or_default(); - let current_stage = CurrentStage { - stage_id, - eta: match &self.current_stage { - Some(current_stage) if current_stage.stage_id == stage_id => { - current_stage.eta - } - _ => Eta::default(), - }, - checkpoint, - entities_checkpoint: match &self.current_stage { - Some(current_stage) if current_stage.stage_id == stage_id => { - current_stage.entities_checkpoint - } - _ => None, - }, - target, - }; + let current_stage = self.build_current_stage(stage_id, checkpoint, target); if let Some(stage_eta) = current_stage.eta.fmt_for_stage(stage_id) { info!( diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml new file mode 100644 index 000000000000..aaa2cb3fcd86 --- /dev/null +++ b/crates/node/metrics/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "reth-node-metrics" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +reth-db-api.workspace = true +reth-provider.workspace = true +reth-metrics.workspace = true +reth-tasks.workspace = true + +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +metrics-process.workspace = true +metrics-util.workspace = true + +tokio.workspace = true + +once_cell.workspace = true + +jsonrpsee = { workspace = true, features = ["server"] } +http.workspace = true +tower.workspace = true + +tracing.workspace = true +eyre.workspace = true + +[target.'cfg(unix)'.dependencies] +tikv-jemalloc-ctl = { version = "0.5.0", optional = true } + +[target.'cfg(target_os = "linux")'.dependencies] +procfs = "0.16.0" + +[dev-dependencies] +reth-db = { workspace = true, features = ["test-utils"] } +reqwest.workspace = true +reth-chainspec.workspace = true +socket2 = { version = "0.4", default-features = 
false } + +[lints] +workspace = true + +[features] +jemalloc = ["dep:tikv-jemalloc-ctl"] + + +[build-dependencies] +vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs new file mode 100644 index 000000000000..18755717667c --- /dev/null +++ b/crates/node/metrics/src/hooks.rs @@ -0,0 +1,126 @@ +use metrics_process::Collector; +use reth_db_api::database_metrics::DatabaseMetrics; +use reth_provider::providers::StaticFileProvider; +use std::{fmt, sync::Arc}; +pub(crate) trait Hook: Fn() + Send + Sync {} +impl Hook for T {} + +impl fmt::Debug for Hooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hooks_len = self.inner.len(); + f.debug_struct("Hooks") + .field("inner", &format!("Arc>>, len: {}", hooks_len)) + .finish() + } +} + +/// Helper type for managing hooks +#[derive(Clone)] +pub struct Hooks { + inner: Arc>>>, +} + +impl Hooks { + /// Create a new set of hooks + pub fn new( + db: Metrics, + static_file_provider: StaticFileProvider, + ) -> Self { + let hooks: Vec>> = vec![ + Box::new(move || db.report_metrics()), + Box::new(move || { + let _ = static_file_provider.report_metrics().map_err( + |error| tracing::error!(%error, "Failed to report static file provider metrics"), + ); + }), + Box::new(move || Collector::default().collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ]; + Self { inner: Arc::new(hooks) } + } + + pub(crate) fn iter(&self) -> impl Iterator>> { + self.inner.iter() + } +} + +#[cfg(all(feature = "jemalloc", unix))] +fn collect_memory_stats() { + use metrics::gauge; + use tikv_jemalloc_ctl::{epoch, stats}; + use tracing::error; + + if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err() + { + return + } + + if let Ok(value) = stats::active::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.active")) + { + gauge!("jemalloc.active").set(value as f64); + } + + if let Ok(value) = stats::allocated::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated")) + { + gauge!("jemalloc.allocated").set(value as f64); + } + + if let Ok(value) = stats::mapped::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped")) + { + gauge!("jemalloc.mapped").set(value as f64); + } + + if let Ok(value) = stats::metadata::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata")) + { + gauge!("jemalloc.metadata").set(value as f64); + } + + if let Ok(value) = stats::resident::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident")) + { + gauge!("jemalloc.resident").set(value as f64); + } + + if let Ok(value) = stats::retained::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained")) + { + gauge!("jemalloc.retained").set(value as f64); + } +} + +#[cfg(not(all(feature = "jemalloc", unix)))] +const fn collect_memory_stats() {} + +#[cfg(target_os = "linux")] +fn collect_io_stats() { + use metrics::counter; + use tracing::error; + + let Ok(process) = procfs::process::Process::myself() + .map_err(|error| error!(%error, "Failed to get currently running process")) + else { + return + }; + + let Ok(io) = process.io().map_err( + |error| error!(%error, "Failed to get IO stats for the currently running process"), + ) else { + return + }; + + counter!("io.rchar").absolute(io.rchar); + counter!("io.wchar").absolute(io.wchar); + counter!("io.syscr").absolute(io.syscr); + 
counter!("io.syscw").absolute(io.syscw); + counter!("io.read_bytes").absolute(io.read_bytes); + counter!("io.write_bytes").absolute(io.write_bytes); + counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes); +} + +#[cfg(not(target_os = "linux"))] +const fn collect_io_stats() {} diff --git a/crates/node/metrics/src/lib.rs b/crates/node/metrics/src/lib.rs new file mode 100644 index 000000000000..4abc39a32dc3 --- /dev/null +++ b/crates/node/metrics/src/lib.rs @@ -0,0 +1,18 @@ +//! Metrics utilities for the node. +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// The metrics hooks for prometheus. +pub mod hooks; +pub mod recorder; +/// The metric server serving the metrics. +pub mod server; +pub mod version; + +pub use metrics_exporter_prometheus::*; +pub use metrics_process::*; diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs new file mode 100644 index 000000000000..05047992faae --- /dev/null +++ b/crates/node/metrics/src/recorder.rs @@ -0,0 +1,58 @@ +//! Prometheus recorder + +use eyre::WrapErr; +use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; +use metrics_util::layers::{PrefixLayer, Stack}; +use once_cell::sync::Lazy; + +/// Installs the Prometheus recorder as the global recorder. +pub fn install_prometheus_recorder() -> &'static PrometheusHandle { + &PROMETHEUS_RECORDER_HANDLE +} + +/// The default Prometheus recorder handle. We use a global static to ensure that it is only +/// installed once. +static PROMETHEUS_RECORDER_HANDLE: Lazy = + Lazy::new(|| PrometheusRecorder::install().unwrap()); + +/// Prometheus recorder installer +#[derive(Debug)] +pub struct PrometheusRecorder; + +impl PrometheusRecorder { + /// Installs Prometheus as the metrics recorder. + pub fn install() -> eyre::Result { + let recorder = PrometheusBuilder::new().build_recorder(); + let handle = recorder.handle(); + + // Build metrics stack + Stack::new(recorder) + .push(PrefixLayer::new("reth")) + .install() + .wrap_err("Couldn't set metrics recorder.")?; + + Ok(handle) + } +} + +#[cfg(test)] +mod tests { + use super::*; + // Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22) + // may not be able to communicate with each other through the global recorder. + // + // This test ensures that `metrics-process` dependency plays well with the current + // `metrics-exporter-prometheus` dependency version. 
+ #[test] + fn process_metrics() { + // initialize the lazy handle + let _ = &*PROMETHEUS_RECORDER_HANDLE; + + let process = metrics_process::Collector::default(); + process.describe(); + process.collect(); + + let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); + } +} diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs new file mode 100644 index 000000000000..5eadaaab527a --- /dev/null +++ b/crates/node/metrics/src/server.rs @@ -0,0 +1,270 @@ +use crate::{ + hooks::{Hook, Hooks}, + recorder::install_prometheus_recorder, + version::VersionInfo, +}; +use eyre::WrapErr; +use http::{header::CONTENT_TYPE, HeaderValue, Response}; +use metrics::describe_gauge; +use metrics_process::Collector; +use reth_metrics::metrics::Unit; +use reth_tasks::TaskExecutor; +use std::{convert::Infallible, net::SocketAddr, sync::Arc}; +use tracing::info; + +/// Configuration for the [`MetricServer`] +#[derive(Debug)] +pub struct MetricServerConfig { + listen_addr: SocketAddr, + version_info: VersionInfo, + task_executor: TaskExecutor, + hooks: Hooks, +} + +impl MetricServerConfig { + /// Create a new [`MetricServerConfig`] with the given configuration + pub const fn new( + listen_addr: SocketAddr, + version_info: VersionInfo, + task_executor: TaskExecutor, + hooks: Hooks, + ) -> Self { + Self { listen_addr, hooks, task_executor, version_info } + } +} + +/// [`MetricServer`] responsible for serving the metrics endpoint +#[derive(Debug)] +pub struct MetricServer { + config: MetricServerConfig, +} + +impl MetricServer { + /// Create a new [`MetricServer`] with the given configuration + pub const fn new(config: MetricServerConfig) -> Self { + Self { config } + } + + /// Spawns the metrics server + pub async fn serve(&self) -> eyre::Result<()> { + let MetricServerConfig { listen_addr, hooks, task_executor, version_info } = &self.config; + + info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint"); + + let hooks = hooks.clone(); + self.start_endpoint( + *listen_addr, + Arc::new(move || hooks.iter().for_each(|hook| hook())), + task_executor.clone(), + ) + .await + .wrap_err("Could not start Prometheus endpoint")?; + + // Describe metrics after recorder installation + describe_db_metrics(); + describe_static_file_metrics(); + Collector::default().describe(); + describe_memory_stats(); + describe_io_stats(); + + version_info.register_version_metrics(); + + Ok(()) + } + + async fn start_endpoint( + &self, + listen_addr: SocketAddr, + hook: Arc, + task_executor: TaskExecutor, + ) -> eyre::Result<()> { + let listener = tokio::net::TcpListener::bind(listen_addr) + .await + .wrap_err("Could not bind to address")?; + + task_executor.spawn_with_graceful_shutdown_signal(|mut signal| async move { + loop { + let io = tokio::select! 
{ + _ = &mut signal => break, + io = listener.accept() => { + match io { + Ok((stream, _remote_addr)) => stream, + Err(err) => { + tracing::error!(%err, "failed to accept connection"); + continue; + } + } + } + }; + + let handle = install_prometheus_recorder(); + let hook = hook.clone(); + let service = tower::service_fn(move |_| { + (hook)(); + let metrics = handle.render(); + let mut response = Response::new(metrics); + response + .headers_mut() + .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); + async move { Ok::<_, Infallible>(response) } + }); + + let mut shutdown = signal.clone().ignore_guard(); + tokio::task::spawn(async move { + if let Err(error) = + jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + .await + { + tracing::debug!(%error, "failed to serve request") + } + }); + } + }); + + Ok(()) + } +} + +fn describe_db_metrics() { + describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); + describe_gauge!("db.table_pages", "The number of database pages for a table"); + describe_gauge!("db.table_entries", "The number of entries for a table"); + describe_gauge!("db.freelist", "The number of pages on the freelist"); + describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)"); + describe_gauge!( + "db.timed_out_not_aborted_transactions", + "Number of timed out transactions that were not aborted by the user yet" + ); +} + +fn describe_static_file_metrics() { + describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment"); + describe_gauge!("static_files.segment_files", "The number of files for a static file segment"); + describe_gauge!( + "static_files.segment_entries", + "The number of entries for a static file segment" + ); +} + +#[cfg(all(feature = "jemalloc", unix))] +fn describe_memory_stats() { + describe_gauge!( + "jemalloc.active", + Unit::Bytes, + "Total number of bytes in active pages allocated by the application" + ); + describe_gauge!( + "jemalloc.allocated", + Unit::Bytes, + "Total number of bytes allocated by the application" + ); + describe_gauge!( + "jemalloc.mapped", + Unit::Bytes, + "Total number of bytes in active extents mapped by the allocator" + ); + describe_gauge!( + "jemalloc.metadata", + Unit::Bytes, + "Total number of bytes dedicated to jemalloc metadata" + ); + describe_gauge!( + "jemalloc.resident", + Unit::Bytes, + "Total number of bytes in physically resident data pages mapped by the allocator" + ); + describe_gauge!( + "jemalloc.retained", + Unit::Bytes, + "Total number of bytes in virtual memory mappings that were retained rather than \ + being returned to the operating system via e.g. 
munmap(2)" + ); +} + +#[cfg(not(all(feature = "jemalloc", unix)))] +const fn describe_memory_stats() {} + +#[cfg(target_os = "linux")] +fn describe_io_stats() { + use metrics::describe_counter; + + describe_counter!("io.rchar", "Characters read"); + describe_counter!("io.wchar", "Characters written"); + describe_counter!("io.syscr", "Read syscalls"); + describe_counter!("io.syscw", "Write syscalls"); + describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read"); + describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written"); + describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes"); +} + +#[cfg(not(target_os = "linux"))] +const fn describe_io_stats() {} + +#[cfg(test)] +mod tests { + use super::*; + use reqwest::Client; + use reth_chainspec::MAINNET; + use reth_db::{ + test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, + DatabaseEnv, + }; + use reth_provider::{ + providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory, + }; + use reth_tasks::TaskManager; + use socket2::{Domain, Socket, Type}; + use std::net::{SocketAddr, TcpListener}; + + fn create_test_db() -> ProviderFactory>> { + let (_, static_dir_path) = create_test_static_files_dir(); + ProviderFactory::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path).unwrap(), + ) + } + + fn get_random_available_addr() -> SocketAddr { + let addr = &"127.0.0.1:0".parse::().unwrap().into(); + let socket = Socket::new(Domain::IPV4, Type::STREAM, None).unwrap(); + socket.set_reuse_address(true).unwrap(); + socket.bind(addr).unwrap(); + socket.listen(1).unwrap(); + let listener = TcpListener::from(socket); + listener.local_addr().unwrap() + } + + #[tokio::test] + async fn test_metrics_endpoint() { + let version_info = VersionInfo { + version: "test", + build_timestamp: "test", + cargo_features: "test", + git_sha: "test", + target_triple: "test", + build_profile: "test", + }; + + let tasks = TaskManager::current(); + let executor = tasks.executor(); + + let factory = create_test_db(); + let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); + + let listen_addr = get_random_available_addr(); + let config = MetricServerConfig::new(listen_addr, version_info, executor, hooks); + + MetricServer::new(config).serve().await.unwrap(); + + // Send request to the metrics endpoint + let url = format!("http://{}", listen_addr); + let response = Client::new().get(&url).send().await.unwrap(); + assert!(response.status().is_success()); + + // Check the response body + let body = response.text().await.unwrap(); + assert!(body.contains("reth_db_table_size")); + assert!(body.contains("reth_jemalloc_metadata")); + } +} diff --git a/crates/node/core/src/metrics/version_metrics.rs b/crates/node/metrics/src/version.rs similarity index 71% rename from crates/node/core/src/metrics/version_metrics.rs rename to crates/node/metrics/src/version.rs index 03769d990f35..6cd8df4320c0 100644 --- a/crates/node/core/src/metrics/version_metrics.rs +++ b/crates/node/metrics/src/version.rs @@ -1,6 +1,4 @@ //! This exposes reth's version information over prometheus. - -use crate::version::{BUILD_PROFILE_NAME, VERGEN_GIT_SHA}; use metrics::gauge; /// Contains version information for the application. 
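The new `reth-node-metrics` pieces above compose into a self-contained server: `Hooks` bundles the pull-style collectors, `MetricServerConfig` carries the address, version info, executor, and hooks, and `MetricServer::serve` binds the listener and registers the gauge descriptions. A sketch of standing it up outside the node launcher, assuming `db`, `static_file_provider`, and `task_executor` are already in scope (and reusing the `VersionInfo` sketch earlier):

```rust
use reth_node_metrics::{
    hooks::Hooks,
    server::{MetricServer, MetricServerConfig},
};

let config = MetricServerConfig::new(
    "127.0.0.1:9001".parse()?,            // listen address for scrapes
    version_info,                          // static build metadata labels
    task_executor,                         // owns the accept-loop task
    Hooks::new(db, static_file_provider),  // run on every scrape
);
// `serve` returns once the endpoint is spawned; each GET renders the
// recorder's state as `text/plain` after invoking every hook.
MetricServer::new(config).serve().await?;
```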
@@ -20,19 +18,6 @@ pub struct VersionInfo { pub build_profile: &'static str, } -impl Default for VersionInfo { - fn default() -> Self { - Self { - version: env!("CARGO_PKG_VERSION"), - build_timestamp: env!("VERGEN_BUILD_TIMESTAMP"), - cargo_features: env!("VERGEN_CARGO_FEATURES"), - git_sha: VERGEN_GIT_SHA, - target_triple: env!("VERGEN_CARGO_TARGET_TRIPLE"), - build_profile: BUILD_PROFILE_NAME, - } - } -} - impl VersionInfo { /// This exposes reth's version information over prometheus. pub fn register_version_metrics(&self) { diff --git a/crates/node/metrics/src/version_metrics.rs b/crates/node/metrics/src/version_metrics.rs new file mode 100644 index 000000000000..63b5009fa088 --- /dev/null +++ b/crates/node/metrics/src/version_metrics.rs @@ -0,0 +1,75 @@ +//! This exposes reth's version information over prometheus. +use metrics::gauge; + +/// The build timestamp. +pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); +/// The cargo features enabled for the build. +pub const VERGEN_CARGO_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); +/// The target triple for the build. +pub const VERGEN_CARGO_TARGET_TRIPLE: &str = env!("VERGEN_CARGO_TARGET_TRIPLE"); +/// The full SHA of the latest commit. +pub const VERGEN_GIT_SHA_LONG: &str = env!("VERGEN_GIT_SHA"); +/// The 8 character short SHA of the latest commit. +pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, ..8); + +/// The build profile name. +pub const BUILD_PROFILE_NAME: &str = { + // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // We split on the path separator of the *host* machine, which may be different from + // `std::path::MAIN_SEPARATOR_STR`. + const OUT_DIR: &str = env!("OUT_DIR"); + let unix_parts = const_format::str_split!(OUT_DIR, '/'); + if unix_parts.len() >= 4 { + unix_parts[unix_parts.len() - 4] + } else { + let win_parts = const_format::str_split!(OUT_DIR, '\\'); + win_parts[win_parts.len() - 4] + } +}; + +/// Contains version information for the application. +#[derive(Debug, Clone)] +pub struct VersionInfo { + /// The version of the application. + pub version: &'static str, + /// The build timestamp of the application. + pub build_timestamp: &'static str, + /// The cargo features enabled for the build. + pub cargo_features: &'static str, + /// The Git SHA of the build. + pub git_sha: &'static str, + /// The target triple for the build. + pub target_triple: &'static str, + /// The build profile (e.g., debug or release). + pub build_profile: &'static str, +} + +impl Default for VersionInfo { + fn default() -> Self { + Self { + version: env!("CARGO_PKG_VERSION"), + build_timestamp: VERGEN_BUILD_TIMESTAMP, + cargo_features: VERGEN_CARGO_FEATURES, + git_sha: VERGEN_GIT_SHA, + target_triple: VERGEN_CARGO_TARGET_TRIPLE, + build_profile: BUILD_PROFILE_NAME, + } + } +} + +impl VersionInfo { + /// This exposes reth's version information over prometheus. 
+ pub fn register_version_metrics(&self) { + let labels: [(&str, &str); 6] = [ + ("version", self.version), + ("build_timestamp", self.build_timestamp), + ("cargo_features", self.cargo_features), + ("git_sha", self.git_sha), + ("target_triple", self.target_triple), + ("build_profile", self.build_profile), + ]; + + let gauge = gauge!("info", &labels); + gauge.set(1) + } +} diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index fade01da44ca..f6b4a792ce24 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -16,8 +16,8 @@ use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_primitives::Receipts; use reth_provider::{ - OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, - StaticFileProviderFactory, StaticFileWriter, StatsReader, + writer::StorageWriter, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, + StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, }; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -140,7 +140,7 @@ where ); // We're reusing receipt writing code internal to - // `ExecutionOutcome::write_to_storage`, so we just use a default empty + // `StorageWriter::append_receipts_from_blocks`, so we just use a default empty // `BundleState`. let execution_outcome = ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); @@ -149,11 +149,8 @@ where static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; // finally, write the receipts - execution_outcome.write_to_storage( - &provider, - Some(static_file_producer), - OriginalValuesKnown::Yes, - )?; + let mut storage_writer = StorageWriter::new(Some(&provider), Some(static_file_producer)); + storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; } provider.commit()?; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 7c4371b699c0..f2b4a2b83cb6 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -111,13 +111,13 @@ impl ConfigureEvmEnv for OptimismEvmConfig { impl ConfigureEvm for OptimismEvmConfig { type DefaultExternalContext<'a> = (); - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { + fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { EvmBuilder::default().with_db(db).optimism().build() } - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> where - DB: Database + 'a, + DB: Database, I: GetInspector, { EvmBuilder::default() diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index ddbc4916671c..0b163a571bfb 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -80,7 +80,6 @@ optimism = [ "reth-beacon-consensus/optimism", "reth-revm/optimism", "reth-auto-seal-consensus/optimism", - "reth-rpc-eth-types/optimism", "reth-optimism-rpc/optimism" ] test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 26d1ab5779b8..1f0b15b6e38f 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -38,6 +38,7 @@ tokio.workspace = true # rpc jsonrpsee.workspace = true +jsonrpsee-types.workspace = true # misc 
thiserror.workspace = true @@ -59,6 +60,5 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", - "reth-rpc-eth-types/optimism", "revm/optimism" ] \ No newline at end of file diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 7b7d3bed92fa..29a348ab741d 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,31 +1,85 @@ //! RPC errors specific to OP. -use jsonrpsee::types::ErrorObject; +use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; +use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; -use reth_rpc_server_types::result::internal_rpc_err; -use reth_rpc_types::ToRpcError; +use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; +use reth_rpc_types::error::EthRpcErrorCode; /// Optimism specific errors, that extend [`EthApiError`]. #[derive(Debug, thiserror::Error)] pub enum OpEthApiError { + /// L1 ethereum error. + #[error(transparent)] + Eth(#[from] EthApiError), /// Thrown when calculating L1 gas fee. #[error("failed to calculate l1 gas fee")] L1BlockFeeError, /// Thrown when calculating L1 gas used #[error("failed to calculate l1 gas used")] L1BlockGasError, + /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). + #[error(transparent)] + InvalidTransaction(OptimismInvalidTransactionError), } -impl ToRpcError for OpEthApiError { - fn to_rpc_error(&self) -> ErrorObject<'static> { +impl AsEthApiError for OpEthApiError { + fn as_err(&self) -> Option<&EthApiError> { match self { - Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), + Self::Eth(err) => Some(err), + _ => None, } } } -impl From for EthApiError { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(err: OpEthApiError) -> Self { - Self::other(err) + match err { + OpEthApiError::Eth(err) => err.into(), + OpEthApiError::L1BlockFeeError | OpEthApiError::L1BlockGasError => { + internal_rpc_err(err.to_string()) + } + OpEthApiError::InvalidTransaction(err) => err.into(), + } + } +} + +/// Optimism specific invalid transaction errors +#[derive(thiserror::Error, Debug)] +pub enum OptimismInvalidTransactionError { + /// A deposit transaction was submitted as a system transaction post-regolith. 
+ #[error("no system transactions allowed after regolith")] + DepositSystemTxPostRegolith, + /// A deposit transaction halted post-regolith + #[error("deposit transaction halted after regolith")] + HaltedDepositPostRegolith, +} + +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OptimismInvalidTransactionError) -> Self { + match err { + OptimismInvalidTransactionError::DepositSystemTxPostRegolith | + OptimismInvalidTransactionError::HaltedDepositPostRegolith => { + rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) + } + } + } +} + +impl TryFrom for OptimismInvalidTransactionError { + type Error = InvalidTransaction; + + fn try_from(err: InvalidTransaction) -> Result { + match err { + InvalidTransaction::OptimismError(err) => match err { + OptimismInvalidTransaction::DepositSystemTxPostRegolith => { + Ok(Self::DepositSystemTxPostRegolith) + } + OptimismInvalidTransaction::HaltedDepositPostRegolith => { + Ok(Self::HaltedDepositPostRegolith) + } + }, + _ => Err(err), + } } } diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index c48d70907f10..c1bdc6098ccd 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -2,8 +2,11 @@ use reth_primitives::TransactionMeta; use reth_provider::{BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_api::helpers::{EthApiSpec, EthBlocks, LoadBlock, LoadReceipt, LoadTransaction}; -use reth_rpc_eth_types::{EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, EthBlocks, LoadBlock, LoadReceipt, LoadTransaction}, + FromEthApiError, +}; +use reth_rpc_eth_types::{EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, BlockId}; use crate::{op_receipt_fields, OpEthApi}; @@ -19,7 +22,7 @@ where async fn block_receipts( &self, block_id: BlockId, - ) -> EthResult>> + ) -> Result>, Self::Error> where Self: LoadReceipt, { @@ -52,11 +55,13 @@ where let optimism_tx_meta = self.build_op_tx_meta(tx, l1_block_info.clone(), timestamp)?; - ReceiptBuilder::new(tx, meta, receipt, &receipts).map(|builder| { - op_receipt_fields(builder, tx, receipt, optimism_tx_meta).build() - }) + ReceiptBuilder::new(tx, meta, receipt, &receipts) + .map(|builder| { + op_receipt_fields(builder, tx, receipt, optimism_tx_meta).build() + }) + .map_err(Self::Error::from_eth_err) }) - .collect::>>(); + .collect::, Self::Error>>(); return receipts.map(Some) } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 03aa9a1f40a5..d3bea8decdc1 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -3,13 +3,22 @@ use reth_primitives::{ revm_primitives::{BlockEnv, OptimismFields, TxEnv}, Bytes, }; -use reth_rpc_eth_api::helpers::Call; -use reth_rpc_eth_types::EthResult; +use reth_rpc_eth_api::{ + helpers::{Call, EthCall}, + EthApiTypes, FromEthApiError, +}; +use reth_rpc_eth_types::EthApiError; use reth_rpc_types::TransactionRequest; use crate::OpEthApi; -impl Call for OpEthApi { +impl EthCall for OpEthApi where EthApiError: From {} + +impl Call for OpEthApi +where + Eth: Call + EthApiTypes, + EthApiError: From, +{ fn call_gas_limit(&self) -> u64 { self.inner.call_gas_limit() } @@ -22,8 +31,9 @@ impl Call for OpEthApi { &self, block_env: &BlockEnv, request: TransactionRequest, - ) -> EthResult { - let mut env = Eth::create_txn_env(&self.inner, block_env, request)?; + ) -> Result { + let mut env = + self.inner.create_txn_env(block_env, 
request).map_err(Self::Error::from_eth_err)?; env.optimism = OptimismFields { enveloped_tx: Some(Bytes::new()), ..Default::default() }; diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 1178ac1a77da..1f2b27c86e8f 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -18,10 +18,10 @@ use reth_provider::{BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StatePr use reth_rpc::eth::DevSigner; use reth_rpc_eth_api::{ helpers::{ - AddDevSigners, EthApiSpec, EthCall, EthFees, EthSigner, EthState, LoadFee, LoadState, - SpawnBlocking, Trace, UpdateRawTxForwarder, + AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadFee, LoadState, SpawnBlocking, + Trace, UpdateRawTxForwarder, }, - RawTransactionForwarder, + EthApiTypes, RawTransactionForwarder, }; use reth_rpc_eth_types::EthStateCache; use reth_rpc_types::SyncStatus; @@ -29,6 +29,8 @@ use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; +use crate::OpEthApiError; + /// OP-Reth `Eth` API implementation. /// /// This type provides the functionality for handling `eth_` related requests. @@ -51,6 +53,13 @@ impl OpEthApi { } } +impl EthApiTypes for OpEthApi +where + Eth: Send + Sync, +{ + type Error = OpEthApiError; +} + impl EthApiSpec for OpEthApi { fn protocol_version(&self) -> impl Future> + Send { self.inner.protocol_version() @@ -142,8 +151,6 @@ impl EthState for OpEthApi { } } -impl EthCall for OpEthApi {} - impl EthFees for OpEthApi {} impl Trace for OpEthApi { diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f11771d615f6..bef18a716086 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,8 +1,11 @@ //! Loads and formats OP receipt RPC response. use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::helpers::{EthApiSpec, LoadReceipt, LoadTransaction}; -use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, LoadReceipt, LoadTransaction}, + FromEthApiError, +}; +use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, OptimismTransactionReceiptFields}; use crate::{OpEthApi, OptimismTxMeta}; @@ -21,17 +24,19 @@ where tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, - ) -> EthResult { + ) -> Result { let (block, receipts) = LoadReceipt::cache(self) .get_block_and_receipts(meta.block_hash) - .await? - .ok_or(EthApiError::UnknownBlockNumber)?; + .await + .map_err(Self::Error::from_eth_err)? 
+ .ok_or(Self::Error::from_eth_err(EthApiError::UnknownBlockNumber))?; let block = block.unseal(); let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; - let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts)?; + let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts) + .map_err(Self::Error::from_eth_err)?; let resp_builder = op_receipt_fields(resp_builder, &tx, &receipt, optimism_tx_meta); Ok(resp_builder.build()) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 326c3c73d1c8..6689e230f1ce 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -7,9 +7,9 @@ use reth_primitives::TransactionSigned; use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthSigner, EthTransactions, LoadTransaction}, - RawTransactionForwarder, + EthApiTypes, RawTransactionForwarder, }; -use reth_rpc_eth_types::{EthResult, EthStateCache}; +use reth_rpc_eth_types::EthStateCache; use revm::L1BlockInfo; use crate::{OpEthApi, OpEthApiError}; @@ -79,7 +79,7 @@ where tx: &TransactionSigned, l1_block_info: Option, block_timestamp: u64, - ) -> EthResult { + ) -> Result::Error> { let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; let (l1_fee, l1_data_gas) = if !tx.is_deposit() { diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 271eb2267ec4..6235ddf7fe22 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -20,11 +20,12 @@ pub enum Events { /// Represents a receiver for various payload events. #[derive(Debug)] pub struct PayloadEvents { + /// The receiver for the payload events. pub receiver: broadcast::Receiver>, } impl PayloadEvents { - // Convert this receiver into a stream of PayloadEvents. + /// Convert this receiver into a stream of `PayloadEvents`. 
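// Usage sketch for the stream conversion documented here and defined just
// below: `BroadcastStream` wraps the broadcast receiver and yields
// `Err(BroadcastStreamRecvError::Lagged(n))` when a slow subscriber misses
// `n` messages, hence the `Result` items. Assumes the `Engine: EngineTypes`
// bound carried by `PayloadEvents`, that `Events` implements `Debug`, and
// the `reth_node_api` re-export path:
use futures_util::StreamExt;
use reth_node_api::EngineTypes; // assumed path
use reth_payload_builder::PayloadEvents;

async fn print_payload_events<Engine: EngineTypes>(events: PayloadEvents<Engine>) {
    let mut stream = events.into_stream();
    while let Some(item) = stream.next().await {
        match item {
            Ok(event) => println!("payload event: {event:?}"),
            Err(lagged) => eprintln!("subscriber lagged: {lagged}"),
        }
    }
}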
pub fn into_stream(self) -> BroadcastStream> { BroadcastStream::new(self.receiver) } diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index b3baf11991de..2a29fe916ead 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -113,9 +113,11 @@ pub mod noop; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; -pub use events::Events; +pub use events::{Events, PayloadEvents}; pub use reth_rpc_types::engine::PayloadId; -pub use service::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; +pub use service::{ + PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, +}; pub use traits::{KeepPayloadJobAlive, PayloadJob, PayloadJobGenerator}; // re-export the Ethereum engine primitives for convenience diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index b7eb8515f26b..ede1af20c487 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -44,7 +44,6 @@ alloy-consensus = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -proptest-derive.workspace = true test-fuzz.workspace = true rand.workspace = true serde_json.workspace = true diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 8ecbf19a9159..6099e713118c 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -61,6 +61,10 @@ impl Bytecode { /// Create new bytecode from raw bytes. /// /// No analysis will be performed. + /// + /// # Panics + /// + /// Panics if bytecode is EOF and has incorrect format. pub fn new_raw(bytes: Bytes) -> Self { Self(RevmBytecode::new_raw(bytes)) } @@ -87,10 +91,10 @@ impl Compact for Bytecode { buf.put_slice(map); 1 + 8 + map.len() } - RevmBytecode::Eof(_) => { - // buf.put_u8(3); - // TODO(EOF) - todo!("EOF") + RevmBytecode::Eof(eof) => { + buf.put_u8(3); + buf.put_slice(eof.raw().as_ref()); + 1 + eof.raw().as_ref().len() } }; len + bytecode.len() + 4 @@ -114,8 +118,10 @@ impl Compact for Bytecode { JumpTable::from_slice(buf), ) }), - // TODO(EOF) - 3 => todo!("EOF"), + 3 => { + // EOF bytecode object will be decoded from the raw bytecode + Self(RevmBytecode::new_raw(bytes)) + } _ => unreachable!("Junk data in database: unknown Bytecode variant"), }; (decoded, &[]) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 6421e99b0d3c..c8c1cbd8dd95 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -25,7 +25,7 @@ pub mod account; pub use account::{Account, Bytecode}; mod integer_list; -pub use integer_list::IntegerList; +pub use integer_list::{IntegerList, RoaringBitmapError}; pub mod request; pub use request::{Request, Requests}; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3f44fc62c0c8..55ccf3b3325c 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -51,7 +51,6 @@ zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -# proptest-derive = { workspace = true, optional = true } [dev-dependencies] # eth @@ -67,7 +66,6 @@ assert_matches.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = 
true proptest-arbitrary-interop.workspace = true -proptest-derive.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index 234c558a1ee0..b035b396c099 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -5,6 +5,8 @@ use crate::{ }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use core::mem; + +#[cfg(any(test, feature = "reth-codec"))] use reth_codecs::Compact; /// To be used with `Option` to place or replace one bit on the bitflag struct. diff --git a/crates/primitives/src/transaction/eip7702.rs b/crates/primitives/src/transaction/eip7702.rs index 768d9cf28fb8..e98bd8344ad1 100644 --- a/crates/primitives/src/transaction/eip7702.rs +++ b/crates/primitives/src/transaction/eip7702.rs @@ -15,7 +15,10 @@ use reth_codecs::Compact; /// [EIP-7702 Set Code Transaction](https://eips.ethereum.org/EIPS/eip-7702) /// /// Set EOA account code for one transaction -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::reth_codec)] +#[cfg_attr( + any(test, feature = "reth-codec"), + reth_codecs::reth_codec(no_arbitrary, add_arbitrary_tests) +)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct TxEip7702 { /// Added as EIP-155: Simple replay attack protection @@ -245,6 +248,73 @@ impl TxEip7702 { } } +// TODO(onbjerg): This is temporary until we upstream `Arbitrary` to EIP-7702 types and `Signature` +// in alloy +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for TxEip7702 { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + use arbitrary::Arbitrary; + #[derive(Arbitrary)] + struct ArbitrarySignedAuth { + chain_id: ChainId, + address: alloy_primitives::Address, + nonce: Option, + parity: bool, + r: U256, + s: U256, + } + + let iter = u.arbitrary_iter::()?; + let mut authorization_list = Vec::new(); + for auth in iter { + let auth = auth?; + + let sig = alloy_primitives::Signature::from_rs_and_parity( + auth.r, + auth.s, + alloy_primitives::Parity::Parity(auth.parity), + ) + .unwrap_or_else(|_| { + // Give a default one if the randomly generated one failed + alloy_primitives::Signature::from_rs_and_parity( + alloy_primitives::b256!( + "1fd474b1f9404c0c5df43b7620119ffbc3a1c3f942c73b6e14e9f55255ed9b1d" + ) + .into(), + alloy_primitives::b256!( + "29aca24813279a901ec13b5f7bb53385fa1fc627b946592221417ff74a49600d" + ) + .into(), + false, + ) + .unwrap() + }); + + authorization_list.push( + alloy_eips::eip7702::Authorization { + chain_id: auth.chain_id, + address: auth.address, + nonce: auth.nonce.into(), + } + .into_signed(sig), + ); + } + + Ok(Self { + chain_id: Arbitrary::arbitrary(u)?, + nonce: Arbitrary::arbitrary(u)?, + gas_limit: Arbitrary::arbitrary(u)?, + max_fee_per_gas: Arbitrary::arbitrary(u)?, + max_priority_fee_per_gas: Arbitrary::arbitrary(u)?, + to: Arbitrary::arbitrary(u)?, + value: Arbitrary::arbitrary(u)?, + access_list: Arbitrary::arbitrary(u)?, + authorization_list, + input: Arbitrary::arbitrary(u)?, + }) + } +} + #[cfg(test)] mod tests { use super::TxEip7702; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index d31cba06071b..963d38ace9e2 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,6 +1,5 @@ use crate::{U64, U8}; use alloy_rlp::{Decodable, 
Encodable}; -use bytes::Buf; use serde::{Deserialize, Serialize}; #[cfg(test)] @@ -9,6 +8,7 @@ use reth_codecs::Compact; /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier /// parameter. In the case of a 3, the full transaction type is read from the buffer as a /// single byte. +#[cfg(any(test, feature = "reth-codec"))] const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Identifier for legacy transaction, however [`TxLegacy`](crate::TxLegacy) this is technically not @@ -166,6 +166,7 @@ impl reth_codecs::Compact for TxType { // parameter. In the case of a 3, the full transaction type is read from the buffer as a // single byte. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; ( match identifier { 0 => Self::Legacy, diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 4fd5b9336812..13def8eaa8b0 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -25,7 +25,6 @@ thiserror.workspace = true arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true -proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 02ffba017bdf..400a3044e1ae 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,12 +1,14 @@ //! Helper for handling execution of multiple blocks. -use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; +use crate::{ + precompile::{Address, HashSet}, + primitives::alloy_primitives::BlockNumber, +}; use core::time::Duration; use reth_execution_errors::BlockExecutionError; use reth_primitives::{Receipt, Receipts, Request, Requests}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; -use std::collections::HashSet; use tracing::debug; #[cfg(not(feature = "std"))] @@ -216,7 +218,12 @@ mod tests { use super::*; use reth_primitives::{Address, Log, Receipt}; use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; + #[cfg(feature = "std")] use std::collections::BTreeMap; + #[cfg(not(feature = "std"))] + extern crate alloc; + #[cfg(not(feature = "std"))] + use alloc::collections::BTreeMap; #[test] fn test_save_receipts_empty() { diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index f0ca1255771c..b8bd293de030 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,3 +1,4 @@ +use crate::precompile::HashMap; use alloy_eips::eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus_common::calc; @@ -9,15 +10,6 @@ use revm::{ Database, DatabaseCommit, }; -// reuse revm's hashbrown implementation for no-std -#[cfg(not(feature = "std"))] -use crate::precompile::HashMap; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; - -#[cfg(feature = "std")] -use std::collections::HashMap; - /// Collect all balance changes at the end of the block. 
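// Stepping back to the `TxType` Compact change earlier in this diff:
// identifiers 0..=2 encode Legacy/EIP-2930/EIP-1559 directly in the 2-bit
// field, while 3 (`COMPACT_EXTENDED_IDENTIFIER_FLAG`) means the real type
// byte follows in the buffer. An illustrative sketch of that scheme, not the
// crate's implementation; 0x03 and 0x7E are the EIP-4844 and OP deposit
// transaction type ids:
fn decode_type_identifier(identifier: usize, buf: &mut &[u8]) -> u8 {
    use bytes::Buf; // scoped import, mirroring the patch
    match identifier {
        0 | 1 | 2 => identifier as u8,
        3 => buf.get_u8(), // e.g. 0x03 (EIP-4844) or 0x7E (deposit)
        _ => unreachable!("only 2 bits are encoded in the identifier"),
    }
}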
/// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 09c66d588cb3..b55cd3fd0117 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::precompile::HashMap; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; @@ -6,7 +7,9 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; -use std::collections::HashMap; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 6dff8a8afae0..28c0f6e8cb4f 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -32,10 +32,7 @@ use tower::{layer::util::Identity, Layer, Service}; use tracing::{debug, instrument, trace, warn, Instrument}; // re-export so can be used during builder setup use crate::{ - server::{ - connection::IpcConnDriver, - rpc_service::{RpcService, RpcServiceCfg}, - }, + server::{connection::IpcConnDriver, rpc_service::RpcServiceCfg}, stream_codec::StreamCodec, }; use tokio::sync::mpsc; @@ -46,6 +43,8 @@ mod connection; mod ipc; mod rpc_service; +pub use rpc_service::RpcService; + /// Ipc Server implementation /// /// This is an adapted `jsonrpsee` Server, but for `Ipc` connections. diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index be904f6efc80..25626e4f12d3 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -198,7 +198,7 @@ impl AuthRpcModule { /// Create a new `AuthRpcModule` with the given `engine_api`. pub fn new(engine: EngineApi) -> Self where - EngineT: EngineTypes + 'static, + EngineT: EngineTypes, EngineApi: EngineApiServer, { let mut module = RpcModule::new(()); diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index 0d98b4411bfb..c68cf84942c0 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ b/crates/rpc/rpc-builder/src/cors.rs @@ -4,10 +4,19 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; /// Error thrown when parsing cors domains went wrong #[derive(Debug, thiserror::Error)] pub enum CorsDomainError { + /// Represents an invalid header value for a domain #[error("{domain} is an invalid header value")] - InvalidHeader { domain: String }, + InvalidHeader { + /// The domain that caused the invalid header + domain: String, + }, + + /// Indicates that a wildcard origin was used incorrectly in a list #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")] - WildCardNotAllowed { input: String }, + WildCardNotAllowed { + /// The input string containing the incorrectly used wildcard + input: String, + }, } /// Creates a [`CorsLayer`] from the given domains diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index e92067011344..d3346bcdc185 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -93,7 +93,7 @@ //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, -//! EngineT: EngineTypes + 'static, +//! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm, //! { //! 
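// On the collection changes in the revm crates above: instead of cfg-gating
// between `std::collections::HashMap` and hashbrown, the patch converges on
// the `HashMap` re-exported through revm's precompile module, which is
// hashbrown-backed and compiles identically with and without `std`. A hedged
// pattern sketch; the `crate::precompile` path assumes code living inside
// the reth-revm crate, and `u128` matches its balance-increment maps:
use crate::precompile::HashMap;
use reth_primitives::Address;

fn empty_balance_increments() -> HashMap<Address, u128> {
    // Same concrete type on std and no_std builds; call sites need no cfg.
    HashMap::default()
}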
// configure the rpc module per transport @@ -177,10 +177,9 @@ use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{ - auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, - metrics::RpcRequestMetrics, -}; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; + +pub use cors::CorsDomainError; // re-export for convenience pub use jsonrpsee::server::ServerBuilder; @@ -208,6 +207,7 @@ pub use eth::EthHandlers; // Rpc server metrics mod metrics; +pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; /// Convenience function for starting a server in one step. #[allow(clippy::too_many_arguments)] @@ -434,7 +434,7 @@ where RpcRegistryInner, ) where - EngineT: EngineTypes + 'static, + EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, { @@ -974,7 +974,7 @@ where /// Note: This does _not_ register the `engine_` in this registry. pub fn create_auth_module(&self, engine_api: EngineApi) -> AuthRpcModule where - EngineT: EngineTypes + 'static, + EngineT: EngineTypes, EngineApi: EngineApiServer, { let mut module = RpcModule::new(()); @@ -1141,7 +1141,6 @@ pub struct RpcServerConfig { /// JWT secret for authentication jwt_secret: Option, /// Configurable RPC middleware - #[allow(dead_code)] rpc_middleware: RpcServiceBuilder, } @@ -1337,8 +1336,9 @@ impl RpcServerConfig { /// Returns the [`RpcServerHandle`] with the handle to the started servers. pub async fn start(self, modules: &TransportRpcModules) -> Result where - RpcMiddleware: for<'a> Layer> + Clone + Send + 'static, - >::Service: Send + std::marker::Sync, + RpcMiddleware: Layer> + Clone + Send + 'static, + for<'a> >>::Service: + Send + Sync + 'static + RpcServiceT<'a>, { let mut http_handle = None; let mut ws_handle = None; @@ -1396,7 +1396,7 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - RpcServiceBuilder::new().layer( + self.rpc_middleware.clone().layer( modules .http .as_ref() @@ -1444,7 +1444,8 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - RpcServiceBuilder::new() + self.rpc_middleware + .clone() .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), ) .build(ws_socket_addr) @@ -1468,7 +1469,7 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - RpcServiceBuilder::new().layer( + self.rpc_middleware.clone().layer( modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), ), ) diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index 4b638a7b36d7..08fd38898558 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -81,9 +81,11 @@ struct RpcServerMetricsInner { /// A [`RpcServiceT`] middleware that captures RPC metrics for the server. /// /// This is created per connection and captures metrics for each request. -#[derive(Clone)] -pub(crate) struct RpcRequestMetricsService { +#[derive(Clone, Debug)] +pub struct RpcRequestMetricsService { + /// The metrics collector for RPC requests metrics: RpcRequestMetrics, + /// The inner service being wrapped inner: S, } @@ -125,7 +127,7 @@ impl Drop for RpcRequestMetricsService { /// Response future to update the metrics for a single request/response pair. 
#[pin_project::pin_project] -pub(crate) struct MeteredRequestFuture { +pub struct MeteredRequestFuture { #[pin] fut: F, /// time when the request started diff --git a/crates/rpc/rpc-builder/tests/it/main.rs b/crates/rpc/rpc-builder/tests/it/main.rs index 65ddebb3fd9c..a64ad1da2f54 100644 --- a/crates/rpc/rpc-builder/tests/it/main.rs +++ b/crates/rpc/rpc-builder/tests/it/main.rs @@ -1,5 +1,6 @@ mod auth; mod http; +mod middleware; mod serde; mod startup; pub mod utils; diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs new file mode 100644 index 000000000000..59cc86d4dc86 --- /dev/null +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -0,0 +1,80 @@ +use crate::utils::{test_address, test_rpc_builder}; +use jsonrpsee::{ + server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, + types::Request, + MethodResponse, +}; +use reth_rpc::EthApi; +use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; +use reth_rpc_eth_api::EthApiClient; +use reth_rpc_server_types::RpcModuleSelection; +use std::{ + future::Future, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; +use tower::Layer; + +#[derive(Clone, Default)] +struct MyMiddlewareLayer { + count: Arc, +} + +impl Layer for MyMiddlewareLayer { + type Service = MyMiddlewareService; + + fn layer(&self, inner: S) -> Self::Service { + MyMiddlewareService { service: inner, count: self.count.clone() } + } +} + +#[derive(Clone)] +struct MyMiddlewareService { + service: S, + count: Arc, +} + +impl<'a, S> RpcServiceT<'a> for MyMiddlewareService +where + S: RpcServiceT<'a> + Send + Sync + Clone + 'static, +{ + type Future = Pin + Send + 'a>>; + + fn call(&self, req: Request<'a>) -> Self::Future { + tracing::info!("MyMiddleware processed call {}", req.method); + let count = self.count.clone(); + let service = self.service.clone(); + Box::pin(async move { + let rp = service.call(req).await; + // Modify the state. + count.fetch_add(1, Ordering::Relaxed); + rp + }) + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_rpc_middleware() { + let builder = test_rpc_builder(); + let modules = builder.build( + TransportRpcModuleConfig::set_http(RpcModuleSelection::All), + Box::new(EthApi::with_spawner), + ); + + let mylayer = MyMiddlewareLayer::default(); + + let handle = RpcServerConfig::http(Default::default()) + .with_http_address(test_address()) + .set_rpc_middleware(RpcServiceBuilder::new().layer(mylayer.clone())) + .start(&modules) + .await + .unwrap(); + + let client = handle.http_client().unwrap(); + EthApiClient::protocol_version(&client).await.unwrap(); + let count = mylayer.count.load(Ordering::Relaxed); + assert_eq!(count, 1); +} diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 881413210ae4..862a8ca02c7f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -65,7 +65,7 @@ struct EngineApiInner { impl EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, - EngineT: EngineTypes + 'static, + EngineT: EngineTypes, { /// Create new instance of [`EngineApi`]. 
pub fn new( @@ -612,7 +612,7 @@ where impl EngineApiServer for EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, - EngineT: EngineTypes + 'static, + EngineT: EngineTypes, { /// Handler for `engine_newPayloadV1` /// See also diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index b1295d69e5f6..20e73908be69 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -35,6 +35,7 @@ alloy-dyn-abi = { workspace = true, features = ["eip712"] } # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } +jsonrpsee-types.workspace = true # async async-trait.workspace = true @@ -53,5 +54,4 @@ optimism = [ "reth-primitives/optimism", "revm/optimism", "reth-provider/optimism", - "reth-rpc-eth-types/optimism" ] diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 3ba0a59e1000..a86b5c956f16 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -3,7 +3,7 @@ use alloy_dyn_abi::TypedData; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; +use reth_primitives::{Account, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ serde_helpers::JsonStorageKey, @@ -245,6 +245,10 @@ pub trait EthApi { #[method(name = "gasPrice")] async fn gas_price(&self) -> RpcResult; + /// Returns the account details by specifying an address and a block number/tag + #[method(name = "getAccount")] + async fn get_account(&self, address: Address, block: BlockId) -> RpcResult; + /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. #[method(name = "maxPriorityFeePerGas")] async fn max_priority_fee_per_gas(&self) -> RpcResult; @@ -330,7 +334,8 @@ pub trait EthApi { #[async_trait::async_trait] impl EthApiServer for T where - Self: FullEthApi, + T: FullEthApi, + jsonrpsee_types::error::ErrorObject<'static>: From, { /// Handler for: `eth_protocolVersion` async fn protocol_version(&self) -> RpcResult { @@ -621,6 +626,11 @@ where return Ok(EthFees::gas_price(self).await?) 
} + /// Handler for: `eth_getAccount` + async fn get_account(&self, _address: Address, _block: BlockId) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + /// Handler for: `eth_maxPriorityFeePerGas` async fn max_priority_fee_per_gas(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 78f1ef9da66b..837006a970ef 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -5,10 +5,12 @@ use std::sync::Arc; use futures::Future; use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, Header, Index, RichBlock}; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; +use crate::FromEthApiError; + use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the @@ -23,7 +25,7 @@ pub trait EthBlocks: LoadBlock { fn rpc_block_header( &self, block_id: BlockId, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -38,7 +40,7 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, full: bool, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -49,10 +51,11 @@ pub trait EthBlocks: LoadBlock { }; let block_hash = block.hash(); let total_difficulty = EthBlocks::provider(self) - .header_td_by_number(block.number)? + .header_td_by_number(block.number) + .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::UnknownBlockNumber)?; - let block = - from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; + let block = from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash)) + .map_err(Self::Error::from_eth_err)?; Ok(Some(block.into())) } } @@ -63,19 +66,30 @@ pub trait EthBlocks: LoadBlock { fn block_transaction_count( &self, block_id: BlockId, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(LoadBlock::provider(self).pending_block()?.map(|block| block.body.len())) + return Ok(LoadBlock::provider(self) + .pending_block() + .map_err(Self::Error::from_eth_err)? + .map(|block| block.body.len())) } - let block_hash = match LoadBlock::provider(self).block_hash_for_id(block_id)? { + let block_hash = match LoadBlock::provider(self) + .block_hash_for_id(block_id) + .map_err(Self::Error::from_eth_err)? + { Some(block_hash) => block_hash, None => return Ok(None), }; - Ok(self.cache().get_block_transactions(block_hash).await?.map(|txs| txs.len())) + Ok(self + .cache() + .get_block_transactions(block_hash) + .await + .map_err(Self::Error::from_eth_err)? 
+ .map(|txs| txs.len())) } } @@ -85,7 +99,7 @@ pub trait EthBlocks: LoadBlock { fn block_receipts( &self, block_id: BlockId, - ) -> impl Future>>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadReceipt, { @@ -116,8 +130,9 @@ pub trait EthBlocks: LoadBlock { ReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) + .map_err(Self::Error::from_eth_err) }) - .collect::>>(); + .collect::, Self::Error>>(); return receipts.map(Some) } @@ -129,19 +144,26 @@ pub trait EthBlocks: LoadBlock { fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future>)>>> + Send + ) -> impl Future>)>, Self::Error>> + Send where Self: LoadReceipt, { async move { if block_id.is_pending() { return Ok(LoadBlock::provider(self) - .pending_block_and_receipts()? + .pending_block_and_receipts() + .map_err(Self::Error::from_eth_err)? .map(|(sb, receipts)| (sb, Arc::new(receipts)))) } - if let Some(block_hash) = LoadBlock::provider(self).block_hash_for_id(block_id)? { - return Ok(LoadReceipt::cache(self).get_block_and_receipts(block_hash).await?) + if let Some(block_hash) = LoadBlock::provider(self) + .block_hash_for_id(block_id) + .map_err(Self::Error::from_eth_err)? + { + return LoadReceipt::cache(self) + .get_block_and_receipts(block_hash) + .await + .map_err(Self::Error::from_eth_err) } Ok(None) @@ -151,8 +173,11 @@ pub trait EthBlocks: LoadBlock { /// Returns uncle headers of given block. /// /// Returns an empty vec if there are none. - fn ommers(&self, block_id: BlockId) -> EthResult>> { - Ok(LoadBlock::provider(self).ommers_by_id(block_id)?) + fn ommers( + &self, + block_id: BlockId, + ) -> Result>, Self::Error> { + LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } /// Returns uncle block at given index in given block. @@ -162,13 +187,18 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, index: Index, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - LoadBlock::provider(self).pending_block()?.map(|block| block.ommers) + LoadBlock::provider(self) + .pending_block() + .map_err(Self::Error::from_eth_err)? + .map(|block| block.ommers) } else { - LoadBlock::provider(self).ommers_by_id(block_id)? + LoadBlock::provider(self) + .ommers_by_id(block_id) + .map_err(Self::Error::from_eth_err)? } .unwrap_or_default(); @@ -198,7 +228,7 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { fn block( &self, block_id: BlockId, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { self.block_with_senders(block_id) .await @@ -210,12 +240,13 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = - LoadPendingBlock::provider(self).pending_block_with_senders()?; + let maybe_pending = LoadPendingBlock::provider(self) + .pending_block_with_senders() + .map_err(Self::Error::from_eth_err)?; return if maybe_pending.is_some() { Ok(maybe_pending) } else { @@ -223,12 +254,18 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { } } - let block_hash = match LoadPendingBlock::provider(self).block_hash_for_id(block_id)? 
{ + let block_hash = match LoadPendingBlock::provider(self) + .block_hash_for_id(block_id) + .map_err(Self::Error::from_eth_err)? + { Some(block_hash) => block_hash, None => return Ok(None), }; - Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) + self.cache() + .get_sealed_block_with_senders(block_hash) + .await + .map_err(Self::Error::from_eth_err) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs index 4a2c81b0fdfe..d23453b5ed83 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs @@ -2,12 +2,14 @@ //! are executed on the `tokio` runtime. use futures::Future; -use reth_rpc_eth_types::{EthApiError, EthResult}; +use reth_rpc_eth_types::EthApiError; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; +use crate::EthApiTypes; + /// Executes code on a blocking thread. -pub trait SpawnBlocking: Clone + Send + Sync + 'static { +pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { /// Returns a handle for spawning IO heavy blocking tasks. /// /// Runtime access in default trait method implementations. @@ -33,9 +35,9 @@ pub trait SpawnBlocking: Clone + Send + Sync + 'static { /// /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing). - fn spawn_blocking_io(&self, f: F) -> impl Future> + Send + fn spawn_blocking_io(&self, f: F) -> impl Future> + Send where - F: FnOnce(Self) -> EthResult + Send + 'static, + F: FnOnce(Self) -> Result + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -53,9 +55,9 @@ pub trait SpawnBlocking: Clone + Send + Sync + 'static { /// Note: This is expected for futures that are predominantly CPU bound, as it uses `rayon` /// under the hood, for blocking IO futures use [`spawn_blocking`](Self::spawn_blocking_io). See /// . 
- fn spawn_tracing(&self, f: F) -> impl Future> + Send + fn spawn_tracing(&self, f: F) -> impl Future> + Send where - F: FnOnce(Self) -> EthResult + Send + 'static, + F: FnOnce(Self) -> Result + Send + 'static, R: Send + 'static, { let this = self.clone(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index a8fc6d8e2305..aaf75a827f79 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -19,9 +19,11 @@ use reth_rpc_eth_types::{ apply_block_overrides, apply_state_overrides, caller_gas_allowance, cap_tx_gas_limit_with_caller_allowance, get_precompiles, CallFees, }, - EthApiError, EthResult, RevertError, RpcInvalidTransactionError, StateCacheDb, + EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, +}; +use reth_rpc_server_types::constants::gas_oracle::{ + CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS, }; -use reth_rpc_server_types::constants::gas_oracle::{ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS}; use reth_rpc_types::{ state::{EvmOverrides, StateOverride}, AccessListWithGasUsed, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, @@ -31,6 +33,8 @@ use revm::{Database, DatabaseCommit}; use revm_inspectors::access_list::AccessListInspector; use tracing::trace; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; + use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -42,7 +46,7 @@ pub trait EthCall: Call + LoadPendingBlock { request: TransactionRequest, at: BlockId, state_override: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { Call::estimate_gas_at(self, request, at, state_override) } @@ -52,12 +56,12 @@ pub trait EthCall: Call + LoadPendingBlock { request: TransactionRequest, block_number: Option, overrides: EvmOverrides, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let (res, _env) = self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; - ensure_success(res.result) + ensure_success(res.result).map_err(Self::Error::from_eth_err) } } @@ -68,14 +72,16 @@ pub trait EthCall: Call + LoadPendingBlock { bundle: Bundle, state_context: Option, mut state_override: Option, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadBlock, { async move { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { - return Err(EthApiError::InvalidParams(String::from("transactions are empty."))) + return Err( + EthApiError::InvalidParams(String::from("transactions are empty.")).into() + ) } let StateContext { transaction_index, block_number } = @@ -90,7 +96,7 @@ pub trait EthCall: Call + LoadPendingBlock { self.block_with_senders(target_block) )?; - let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber) }; + let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber.into()) }; let gas_limit = self.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the @@ -136,14 +142,16 @@ pub trait EthCall: Call + LoadPendingBlock { let state_overrides = state_override.take(); let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - let env = this.prepare_call_env( - cfg.clone(), - block_env.clone(), - tx, - gas_limit, - &mut db, - overrides, - )?; + let env = this + .prepare_call_env( + 
cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides, + ) + .map(Into::into)?; let (res, _) = this.transact(&mut db, env)?; match ensure_success(res.result) { @@ -177,7 +185,7 @@ pub trait EthCall: Call + LoadPendingBlock { &self, request: TransactionRequest, block_number: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: Trace, { @@ -200,7 +208,7 @@ pub trait EthCall: Call + LoadPendingBlock { block: BlockEnv, at: BlockId, mut request: TransactionRequest, - ) -> EthResult + ) -> Result where Self: Trace, { @@ -228,7 +236,8 @@ pub trait EthCall: Call + LoadPendingBlock { let to = if let Some(TxKind::Call(to)) = request.to { to } else { - let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; + let nonce = + db.basic_ref(from).map_err(Self::Error::from_eth_err)?.unwrap_or_default().nonce; from.create(nonce) }; @@ -248,7 +257,8 @@ pub trait EthCall: Call + LoadPendingBlock { Err(RpcInvalidTransactionError::Revert(RevertError::new(output))) } ExecutionResult::Success { .. } => Ok(()), - }?; + } + .map_err(Self::Error::from_eth_err)?; let access_list = inspector.into_access_list(); @@ -277,9 +287,9 @@ pub trait Call: LoadState + SpawnBlocking { fn evm_config(&self) -> &impl ConfigureEvm; /// Executes the closure with the state that corresponds to the given [`BlockId`]. - fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult + fn with_state_at_block(&self, at: BlockId, f: F) -> Result where - F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult, + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result, { let state = self.state_at_block_id(at)?; f(StateProviderTraitObjWrapper(&state)) @@ -291,13 +301,13 @@ pub trait Call: LoadState + SpawnBlocking { &self, db: DB, env: EnvWithHandlerCfg, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> where DB: Database, - ::Error: Into, + EthApiError: From, { let mut evm = self.evm_config().evm_with_env(db, env); - let res = evm.transact()?; + let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) } @@ -308,7 +318,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, at: BlockId, overrides: EvmOverrides, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, { @@ -317,14 +327,14 @@ pub trait Call: LoadState + SpawnBlocking { } /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task - fn spawn_with_state_at_block( + fn spawn_with_state_at_block( &self, at: BlockId, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult + Send + 'static, - T: Send + 'static, + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result + Send + 'static, + R: Send + 'static, { self.spawn_tracing(move |this| { let state = this.state_at_block_id(at)?; @@ -343,10 +353,10 @@ pub trait Call: LoadState + SpawnBlocking { at: BlockId, overrides: EvmOverrides, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, - F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> EthResult + F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> Result + Send + 'static, R: Send + 'static, @@ -371,7 +381,7 @@ pub trait Call: LoadState + SpawnBlocking { f(StateCacheDbRefMutWrapper(&mut db), env) }) .await - .map_err(|_| EthApiError::InternalBlockingTaskError) + .map_err(|_| 
EthApiError::InternalBlockingTaskError.into()) } } @@ -388,10 +398,10 @@ pub trait Call: LoadState + SpawnBlocking { &self, hash: B256, f: F, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadBlock + LoadPendingBlock + LoadTransaction, - F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> EthResult + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> Result + Send + 'static, R: Send + 'static, @@ -451,10 +461,10 @@ pub trait Call: LoadState + SpawnBlocking { block_env: BlockEnv, transactions: impl IntoIterator, target_tx_hash: B256, - ) -> Result + ) -> Result where DB: DatabaseRef, - EthApiError: From<::Error>, + EthApiError: From, { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); @@ -468,7 +478,7 @@ pub trait Call: LoadState + SpawnBlocking { let sender = tx.signer(); self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); - evm.transact_commit()?; + evm.transact_commit().map_err(Self::Error::from_evm_err)?; index += 1; } Ok(index) @@ -480,7 +490,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, at: BlockId, state_override: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, { @@ -505,7 +515,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, state: S, state_override: Option, - ) -> EthResult + ) -> Result where S: StateProvider, { @@ -535,7 +545,7 @@ pub trait Call: LoadState + SpawnBlocking { // Apply any state overrides if specified. if let Some(state_override) = state_override { - apply_state_overrides(state_override, &mut db)?; + apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; } // Optimize for simple transfer transactions, potentially reducing the gas estimate. @@ -566,7 +576,8 @@ pub trait Call: LoadState + SpawnBlocking { // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` if env.tx.gas_price > U256::ZERO { // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit.min(caller_gas_allowance(&mut db, &env.tx)?); + highest_gas_limit = highest_gas_limit + .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); } // We can now normalize the highest gas limit to a u64 @@ -584,8 +595,9 @@ pub trait Call: LoadState + SpawnBlocking { // If the gas price or gas limit was specified in the request, retry the transaction // with the block's gas limit to determine if the failure was due to // insufficient gas. - Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) - if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() => + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => { return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) } @@ -598,7 +610,7 @@ pub trait Call: LoadState + SpawnBlocking { ExecutionResult::Halt { reason, gas_used } => { // here we don't check for invalid opcode because already executed with highest gas // limit - return Err(RpcInvalidTransactionError::halt(reason, gas_used).into()) + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) } ExecutionResult::Revert { output, .. 
} => { // if price or limit was included in the request then we can execute the request @@ -607,14 +619,18 @@ pub trait Call: LoadState + SpawnBlocking { Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) } else { // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into()) + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) } } }; // At this point we know the call succeeded but want to find the _best_ (lowest) gas the // transaction succeeds with. We find this by doing a binary search over the possible range. - // + + // we know the tx succeeded with the configured gas limit, so we can use that as the + // highest, in case we applied a gas cap due to caller allowance above + highest_gas_limit = env.tx.gas_limit; + // NOTE: this is the gas the transaction used, which is less than the // transaction requires to succeed. let mut gas_used = res.result.gas_used(); @@ -627,7 +643,7 @@ pub trait Call: LoadState + SpawnBlocking { // // Calculate the optimistic gas limit by adding gas used and gas refund, // then applying a 64/63 multiplier to account for gas forwarding rules. - let optimistic_gas_limit = (gas_used + gas_refund) * 64 / 63; + let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63; if optimistic_gas_limit < highest_gas_limit { // Set the transaction's gas limit to the calculated optimistic gas limit. env.tx.gas_limit = optimistic_gas_limit; @@ -669,8 +685,7 @@ pub trait Call: LoadState + SpawnBlocking { // Execute transaction and handle potential gas errors, adjusting limits accordingly. match self.transact(&mut db, env.clone()) { - // Check if the error is due to gas being too high. - Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) => { + Err(err) if err.is_gas_too_high() => { // Increase the lowest gas limit if gas is too high lowest_gas_limit = mid_gas_limit; } @@ -707,7 +722,7 @@ pub trait Call: LoadState + SpawnBlocking { tx_gas_limit: u64, highest_gas_limit: &mut u64, lowest_gas_limit: &mut u64, - ) -> EthResult<()> { + ) -> Result<(), Self::Error> { match result { ExecutionResult::Success { .. } => { // Cap the highest gas limit with the succeeding gas limit. @@ -735,7 +750,7 @@ pub trait Call: LoadState + SpawnBlocking { // These cases should be unreachable because we know the transaction // succeeds, but if they occur, treat them as an // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into()) + return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) } } } @@ -752,7 +767,7 @@ pub trait Call: LoadState + SpawnBlocking { env_gas_limit: U256, mut env: EnvWithHandlerCfg, db: &mut CacheDB>, - ) -> EthApiError + ) -> Self::Error where S: StateProvider, { @@ -766,14 +781,14 @@ pub trait Call: LoadState + SpawnBlocking { ExecutionResult::Success { .. } => { // transaction succeeded by manually increasing the gas limit to // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into() + RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() } ExecutionResult::Revert { output, .. } => { // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into() + RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() } ExecutionResult::Halt { reason, .. 
} => { - RpcInvalidTransactionError::EvmHalt(reason).into() + RpcInvalidTransactionError::EvmHalt(reason).into_eth_err() } } } @@ -786,10 +801,10 @@ pub trait Call: LoadState + SpawnBlocking { &self, block_env: &BlockEnv, request: TransactionRequest, - ) -> EthResult { + ) -> Result { // Ensure that if versioned hashes are set, they're not empty if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()) + return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } let TransactionRequest { @@ -827,14 +842,18 @@ pub trait Call: LoadState + SpawnBlocking { let env = TxEnv { gas_limit: gas_limit .try_into() - .map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, + .map_err(|_| RpcInvalidTransactionError::GasUintOverflow) + .map_err(Self::Error::from_eth_err)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, transact_to: to.unwrap_or(TxKind::Create), value: value.unwrap_or_default(), - data: input.try_into_unique_input()?.unwrap_or_default(), + data: input + .try_into_unique_input() + .map_err(Self::Error::from_eth_err)? + .unwrap_or_default(), chain_id, access_list: access_list.unwrap_or_default().into(), // EIP-4844 fields @@ -857,7 +876,7 @@ pub trait Call: LoadState + SpawnBlocking { cfg: CfgEnvWithHandlerCfg, block: BlockEnv, request: TransactionRequest, - ) -> EthResult { + ) -> Result { let tx = self.create_txn_env(&block, request)?; Ok(EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx)) } @@ -879,7 +898,7 @@ pub trait Call: LoadState + SpawnBlocking { gas_limit: u64, db: &mut CacheDB, overrides: EvmOverrides, - ) -> EthResult + ) -> Result where DB: DatabaseRef, EthApiError: From<::Error>, diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-api/src/helpers/error.rs new file mode 100644 index 000000000000..041a019052bd --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/error.rs @@ -0,0 +1,88 @@ +//! Helper traits to wrap generic l1 errors, in network specific error type configured in +//! [`EthApiTypes`](crate::EthApiTypes). + +use reth_rpc_eth_types::EthApiError; +use revm_primitives::EVMError; + +/// Helper trait to wrap core [`EthApiError`]. +pub trait FromEthApiError: From { + /// Converts from error via [`EthApiError`]. + fn from_eth_err(err: E) -> Self + where + EthApiError: From; +} + +impl FromEthApiError for T +where + T: From, +{ + fn from_eth_err(err: E) -> Self + where + EthApiError: From, + { + T::from(EthApiError::from(err)) + } +} + +/// Helper trait to wrap core [`EthApiError`]. +pub trait IntoEthApiError: Into { + /// Converts into error via [`EthApiError`]. + fn into_eth_err(self) -> E + where + E: FromEthApiError; +} + +impl IntoEthApiError for T +where + EthApiError: From, +{ + fn into_eth_err(self) -> E + where + E: FromEthApiError, + { + E::from_eth_err(self) + } +} + +/// Helper trait to access wrapped core error. +pub trait AsEthApiError { + /// Returns reference to [`EthApiError`], if this an error variant inherited from core + /// functionality. + fn as_err(&self) -> Option<&EthApiError>; + + /// Returns `true` if error is + /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). 
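// A worked number for the optimistic first probe in `estimate_gas_with`
// earlier in this diff, which now adds the call stipend before applying the
// EIP-150 63/64 forwarding correction. Assuming `CALL_STIPEND_GAS` is the
// classic 2_300 stipend (hedged, value not re-verified here): a probe run
// that used 21_000 gas with a 4_800 refund yields a next candidate limit of
// (21_000 + 4_800 + 2_300) * 64 / 63 = 28_546.
#[test]
fn optimistic_gas_limit_example() {
    const CALL_STIPEND_GAS: u64 = 2_300; // assumed value of the new constant
    let (gas_used, gas_refund) = (21_000u64, 4_800u64);
    let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
    assert_eq!(optimistic_gas_limit, 28_546);
}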
+ fn is_gas_too_high(&self) -> bool { + if let Some(err) = self.as_err() { + return err.is_gas_too_high() + } + + false + } +} + +impl AsEthApiError for EthApiError { + fn as_err(&self) -> Option<&EthApiError> { + Some(self) + } +} + +/// Helper trait to convert from revm errors. +pub trait FromEvmError: From { + /// Converts from a revm error. + fn from_evm_err(err: EVMError) -> Self + where + EthApiError: From; +} + +impl FromEvmError for T +where + T: From, +{ + fn from_evm_err(err: EVMError) -> Self + where + EthApiError: From, + { + err.into_eth_err() + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 54c577ea2504..290833eeca6e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -4,12 +4,14 @@ use futures::Future; use reth_primitives::U256; use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthResult, EthStateCache, + fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use reth_rpc_types::{BlockNumberOrTag, FeeHistory}; use tracing::debug; +use crate::FromEthApiError; + use super::LoadBlock; /// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the @@ -18,7 +20,7 @@ pub trait EthFees: LoadFee { /// Returns a suggestion for a gas price for legacy transactions. /// /// See also: - fn gas_price(&self) -> impl Future> + Send + fn gas_price(&self) -> impl Future> + Send where Self: LoadBlock, { @@ -26,7 +28,7 @@ pub trait EthFees: LoadFee { } /// Returns a suggestion for a base fee for blob transactions. - fn blob_base_fee(&self) -> impl Future> + Send + fn blob_base_fee(&self) -> impl Future> + Send where Self: LoadBlock, { @@ -34,7 +36,7 @@ pub trait EthFees: LoadFee { } /// Returns a suggestion for the priority fee (the tip) - fn suggested_priority_fee(&self) -> impl Future> + Send + fn suggested_priority_fee(&self) -> impl Future> + Send where Self: 'static, { @@ -50,7 +52,7 @@ pub trait EthFees: LoadFee { mut block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { if block_count == 0 { return Ok(FeeHistory::default()) @@ -72,10 +74,11 @@ pub trait EthFees: LoadFee { block_count = max_fee_history } - let Some(end_block) = - LoadFee::provider(self).block_number_for_id(newest_block.into())? + let Some(end_block) = LoadFee::provider(self) + .block_number_for_id(newest_block.into()) + .map_err(Self::Error::from_eth_err)? else { - return Err(EthApiError::UnknownBlockNumber) + return Err(EthApiError::UnknownBlockNumber.into()) }; // need to add 1 to the end block to get the correct (inclusive) range @@ -91,7 +94,7 @@ pub trait EthFees: LoadFee { // Note: The types used ensure that the percentiles are never < 0 if let Some(percentiles) = &reward_percentiles { if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
{ - return Err(EthApiError::InvalidRewardPercentiles) + return Err(EthApiError::InvalidRewardPercentiles.into()) } } @@ -116,7 +119,7 @@ pub trait EthFees: LoadFee { if let Some(fee_entries) = fee_entries { if fee_entries.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange) + return Err(EthApiError::InvalidBlockRange.into()) } for entry in &fee_entries { @@ -144,9 +147,9 @@ pub trait EthFees: LoadFee { base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block)?; + let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block).map_err(Self::Error::from_eth_err)?; if headers.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange) + return Err(EthApiError::InvalidBlockRange.into()) } for header in &headers { @@ -162,7 +165,7 @@ pub trait EthFees: LoadFee { if let Some(percentiles) = &reward_percentiles { let (transactions, receipts) = LoadFee::cache(self) .get_transactions_and_receipts(header.hash()) - .await? + .await.map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::InvalidBlockRange)?; rewards.push( calculate_reward_percentiles_for_block( @@ -251,7 +254,7 @@ pub trait LoadFee: LoadBlock { fn legacy_gas_price( &self, gas_price: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { match gas_price { Some(gas_price) => Ok(gas_price), @@ -271,7 +274,7 @@ pub trait LoadFee: LoadBlock { &self, max_fee_per_gas: Option, max_priority_fee_per_gas: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let max_fee_per_gas = match max_fee_per_gas { Some(max_fee_per_gas) => max_fee_per_gas, @@ -303,7 +306,7 @@ pub trait LoadFee: LoadBlock { fn eip4844_blob_fee( &self, blob_fee: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { match blob_fee { Some(blob_fee) => Ok(blob_fee), @@ -315,7 +318,7 @@ pub trait LoadFee: LoadBlock { /// Returns a suggestion for a gas price for legacy transactions. /// /// See also: - fn gas_price(&self) -> impl Future> + Send { + fn gas_price(&self) -> impl Future> + Send { let header = self.block(BlockNumberOrTag::Latest.into()); let suggested_tip = self.suggested_priority_fee(); async move { @@ -326,21 +329,21 @@ pub trait LoadFee: LoadBlock { } /// Returns a suggestion for a base fee for blob transactions. - fn blob_base_fee(&self) -> impl Future> + Send { + fn blob_base_fee(&self) -> impl Future> + Send { async move { self.block(BlockNumberOrTag::Latest.into()) .await? 
.and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) - .ok_or(EthApiError::ExcessBlobGasNotSet) + .ok_or(EthApiError::ExcessBlobGasNotSet.into()) .map(U256::from) } } /// Returns a suggestion for the priority fee (the tip) - fn suggested_priority_fee(&self) -> impl Future> + Send + fn suggested_priority_fee(&self) -> impl Future> + Send where Self: 'static, { - self.gas_oracle().suggest_tip_cap() + async move { self.gas_oracle().suggest_tip_cap().await.map_err(Self::Error::from_eth_err) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index b82a621acaf4..ecfd63388e3b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,6 +17,7 @@ pub mod block; pub mod blocking_task; pub mod call; +pub mod error; pub mod fee; pub mod pending_block; pub mod receipt; @@ -25,6 +26,7 @@ pub mod spec; pub mod state; pub mod trace; pub mod transaction; +pub mod types; pub use block::{EthBlocks, LoadBlock}; pub use blocking_task::SpawnBlocking; @@ -38,6 +40,8 @@ pub use state::{EthState, LoadState}; pub use trace::Trace; pub use transaction::{EthTransactions, LoadTransaction, UpdateRawTxForwarder}; +use crate::EthApiTypes; + /// Extension trait that bundles traits needed for tracing transactions. pub trait TraceExt: LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call @@ -50,12 +54,21 @@ impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + /// /// This trait is automatically implemented for any type that implements all the `Eth` traits. pub trait FullEthApi: - EthApiSpec + EthTransactions + EthBlocks + EthState + EthCall + EthFees + Trace + LoadReceipt + EthApiTypes + + EthApiSpec + + EthTransactions + + EthBlocks + + EthState + + EthCall + + EthFees + + Trace + + LoadReceipt { } impl FullEthApi for T where - T: EthApiSpec + T: EthApiTypes + + EthApiSpec + EthTransactions + EthBlocks + EthState diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index a17fbb43f2fe..183b1c791401 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -19,27 +19,29 @@ use reth_primitives::{ EMPTY_OMMER_ROOT_HASH, U256, }; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, + StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, }; use reth_rpc_eth_types::{ - pending_block::pre_block_blockhashes_update, EthApiError, EthResult, PendingBlock, - PendingBlockEnv, PendingBlockEnvOrigin, + pending_block::pre_block_blockhashes_update, EthApiError, PendingBlock, PendingBlockEnv, + PendingBlockEnvOrigin, }; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use tokio::sync::Mutex; use tracing::debug; +use crate::{EthApiTypes, FromEthApiError, FromEvmError}; + use super::SpawnBlocking; /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -#[auto_impl::auto_impl(&, Arc)] -pub trait LoadPendingBlock { +pub trait LoadPendingBlock: EthApiTypes { /// Returns a handle for reading data from disk. 
/// /// Data access in default (L1) trait method implementations. @@ -65,16 +67,19 @@ pub trait LoadPendingBlock { /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - fn pending_block_env_and_cfg(&self) -> EthResult { + fn pending_block_env_and_cfg(&self) -> Result { let origin: PendingBlockEnvOrigin = if let Some(pending) = - self.provider().pending_block_with_senders()? + self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? { PendingBlockEnvOrigin::ActualPending(pending) } else { // no pending block from the CL yet, so we use the latest block and modify the env // values that we can - let latest = - self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let latest = self + .provider() + .latest_header() + .map_err(Self::Error::from_eth_err)? + .ok_or_else(|| EthApiError::UnknownBlockNumber)?; let (mut latest_header, block_hash) = latest.split(); // child block @@ -102,12 +107,14 @@ pub trait LoadPendingBlock { let mut block_env = BlockEnv::default(); // Note: for the PENDING block we assume it is past the known merge block and thus this will // not fail when looking up the total difficulty value for the blockenv. - self.provider().fill_env_with_header( - &mut cfg, - &mut block_env, - origin.header(), - self.evm_config().clone(), - )?; + self.provider() + .fill_env_with_header( + &mut cfg, + &mut block_env, + origin.header(), + self.evm_config().clone(), + ) + .map_err(Self::Error::from_eth_err)?; Ok(PendingBlockEnv::new(cfg, block_env, origin)) } @@ -115,7 +122,7 @@ pub trait LoadPendingBlock { /// Returns the locally built pending block fn local_pending_block( &self, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: SpawnBlocking, { @@ -197,11 +204,17 @@ pub trait LoadPendingBlock { /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. - fn build_block(&self, env: PendingBlockEnv) -> EthResult { + fn build_block(&self, env: PendingBlockEnv) -> Result + where + EthApiError: From, + { let PendingBlockEnv { cfg, block_env, origin } = env; let parent_hash = origin.build_target_hash(); - let state_provider = self.provider().history_by_block_hash(parent_hash)?; + let state_provider = self + .provider() + .history_by_block_hash(parent_hash) + .map_err(Self::Error::from_eth_err)?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database(state).with_bundle_update().build(); @@ -316,7 +329,7 @@ pub trait LoadPendingBlock { } err => { // this is an error that we should treat as fatal for this attempt - return Err(err.into()) + return Err(Self::Error::from_evm_err(err)) } } } @@ -359,7 +372,7 @@ pub trait LoadPendingBlock { ); // increment account balances for withdrawals - db.increment_balances(balance_increments)?; + db.increment_balances(balance_increments).map_err(Self::Error::from_eth_err)?; // merge all transitions into bundle state. 
db.merge_transitions(BundleRetention::PlainState); @@ -378,7 +391,9 @@ pub trait LoadPendingBlock { // calculate the state root let state_provider = &db.database; - let state_root = state_provider.state_root(execution_outcome.state())?; + let state_root = state_provider + .state_root(execution_outcome.state()) + .map_err(Self::Error::from_eth_err)?; // create the block header let transactions_root = calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 5cd6c03c4d9f..63016e3d2ec8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,14 +3,15 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; use reth_rpc_types::AnyTransactionReceipt; +use crate::{EthApiTypes, FromEthApiError}; + /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -#[auto_impl::auto_impl(&, Arc)] -pub trait LoadReceipt: Send + Sync { +pub trait LoadReceipt: EthApiTypes + Send + Sync { /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. @@ -22,12 +23,17 @@ pub trait LoadReceipt: Send + Sync { tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { // get all receipts for the block - let all_receipts = match self.cache().get_receipts(meta.block_hash).await? { + let all_receipts = match self + .cache() + .get_receipts(meta.block_hash) + .await + .map_err(Self::Error::from_eth_err)? + { Some(recpts) => recpts, - None => return Err(EthApiError::UnknownBlockNumber), + None => return Err(EthApiError::UnknownBlockNumber.into()), }; Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 0da2a49c3b18..48d350e0e72c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -8,15 +8,15 @@ use reth_primitives::{Address, BlockId, Bytes, Header, B256, U256}; use reth_provider::{ BlockIdReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; -use reth_rpc_eth_types::{ - EthApiError, EthResult, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError, -}; +use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::db::BundleState; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; +use crate::{EthApiTypes, FromEthApiError}; + use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; /// Helper methods for `eth_` methods relating to state (accounts). 
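The `state.rs` hunks below repeat one mechanical pattern: fallible provider calls are lifted into the generic `Self::Error` with the new `FromEthApiError` helper, while bare `EthApiError` variants only need an `.into()`. A minimal sketch of that pattern, assuming a hypothetical wrapper error `MyEthError` (not part of this patch) and relying on the blanket impl from `helpers/error.rs`:

```rust
use reth_rpc_eth_api::FromEthApiError;
use reth_rpc_eth_types::EthApiError;

// Hypothetical network-specific error; any type with `From<EthApiError>`
// gets `from_eth_err` for free via the blanket impl.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
struct MyEthError(#[from] EthApiError);

// Any error `E` that converts into the core `EthApiError` is lifted into
// the network error in a single call, exactly as the hunks below do with
// `.map_err(Self::Error::from_eth_err)`.
fn lift<E>(res: Result<u64, E>) -> Result<u64, MyEthError>
where
    EthApiError: From<E>,
{
    res.map_err(MyEthError::from_eth_err)
}
```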
@@ -32,7 +32,7 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { LoadState::transaction_count(self, address, block_id) } @@ -41,11 +41,12 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(this .state_at_block_id_or_latest(block_id)? - .account_code(address)? + .account_code(address) + .map_err(Self::Error::from_eth_err)? .unwrap_or_default() .original_bytes()) }) @@ -56,11 +57,12 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(this .state_at_block_id_or_latest(block_id)? - .account_balance(address)? + .account_balance(address) + .map_err(Self::Error::from_eth_err)? .unwrap_or_default()) }) } @@ -71,11 +73,12 @@ pub trait EthState: LoadState + SpawnBlocking { address: Address, index: JsonStorageKey, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(B256::new( this.state_at_block_id_or_latest(block_id)? - .storage(address, index.0)? + .storage(address, index.0) + .map_err(Self::Error::from_eth_err)? .unwrap_or_default() .to_be_bytes(), )) @@ -88,21 +91,25 @@ pub trait EthState: LoadState + SpawnBlocking { address: Address, keys: Vec, block_id: Option, - ) -> EthResult> + Send> + ) -> Result< + impl Future> + Send, + Self::Error, + > where Self: EthApiSpec, { - let chain_info = self.chain_info()?; + let chain_info = self.chain_info().map_err(Self::Error::from_eth_err)?; let block_id = block_id.unwrap_or_default(); // Check whether the distance to the block exceeds the maximum configured window. let block_number = self .provider() - .block_number_for_id(block_id)? + .block_number_for_id(block_id) + .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::UnknownBlockNumber)?; let max_window = self.max_proof_window(); if chain_info.best_number.saturating_sub(block_number) > max_window { - return Err(EthApiError::ExceedsMaxProofWindow) + return Err(EthApiError::ExceedsMaxProofWindow.into()) } Ok(async move { @@ -113,7 +120,9 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; let storage_keys = keys.iter().map(|key| key.0).collect::>(); - let proof = state.proof(&BundleState::default(), address, &storage_keys)?; + let proof = state + .proof(&BundleState::default(), address, &storage_keys) + .map_err(Self::Error::from_eth_err)?; Ok(from_primitive_account_proof(proof)) }) .await @@ -124,7 +133,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. -pub trait LoadState { +pub trait LoadState: EthApiTypes { /// Returns a handle for reading state from database. /// /// Data access in default trait method implementations. @@ -141,21 +150,21 @@ pub trait LoadState { fn pool(&self) -> impl TransactionPool; /// Returns the state at the given block number - fn state_at_hash(&self, block_hash: B256) -> EthResult { - Ok(self.provider().history_by_block_hash(block_hash)?) 
+ fn state_at_hash(&self, block_hash: B256) -> Result { + self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) } /// Returns the state at the given [`BlockId`] enum. /// /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this /// will only return canonical state. See also - fn state_at_block_id(&self, at: BlockId) -> EthResult { - Ok(self.provider().state_by_block_id(at)?) + fn state_at_block_id(&self, at: BlockId) -> Result { + self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err) } /// Returns the _latest_ state - fn latest_state(&self) -> EthResult { - Ok(self.provider().latest()?) + fn latest_state(&self) -> Result { + self.provider().latest().map_err(Self::Error::from_eth_err) } /// Returns the state at the given [`BlockId`] enum or the latest. @@ -164,7 +173,7 @@ pub trait LoadState { fn state_at_block_id_or_latest( &self, block_id: Option, - ) -> EthResult { + ) -> Result { if let Some(block_id) = block_id { self.state_at_block_id(block_id) } else { @@ -181,7 +190,7 @@ pub trait LoadState { fn evm_env_at( &self, at: BlockId, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -193,9 +202,14 @@ pub trait LoadState { } else { // Use cached values if there is no pending block let block_hash = LoadPendingBlock::provider(self) - .block_hash_for_id(at)? + .block_hash_for_id(at) + .map_err(Self::Error::from_eth_err)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let (cfg, env) = self.cache().get_evm_env(block_hash).await?; + let (cfg, env) = self + .cache() + .get_evm_env(block_hash) + .await + .map_err(Self::Error::from_eth_err)?; Ok((cfg, env, block_hash.into())) } } @@ -207,7 +221,7 @@ pub trait LoadState { fn evm_env_for_raw_block( &self, header: &Header, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -230,7 +244,7 @@ pub trait LoadState { &self, address: Address, block_id: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: SpawnBlocking, { @@ -240,15 +254,20 @@ pub trait LoadState { if let Some(highest_nonce) = address_txs.iter().map(|item| item.transaction.nonce()).max() { - let tx_count = highest_nonce - .checked_add(1) - .ok_or(RpcInvalidTransactionError::NonceMaxValue)?; + let tx_count = highest_nonce.checked_add(1).ok_or(Self::Error::from( + EthApiError::InvalidTransaction(RpcInvalidTransactionError::NonceMaxValue), + ))?; return Ok(U256::from(tx_count)) } } let state = this.state_at_block_id_or_latest(block_id)?; - Ok(U256::from(state.account_nonce(address)?.unwrap_or_default())) + Ok(U256::from( + state + .account_nonce(address) + .map_err(Self::Error::from_eth_err)? 
+ .unwrap_or_default(), + )) }) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index d48e566ed51d..09ad7f22fa21 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -6,13 +6,15 @@ use reth_primitives::B256; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, - EthApiError, EthResult, + EthApiError, }; use reth_rpc_types::{BlockId, TransactionInfo}; use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; +use crate::FromEvmError; + use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. @@ -29,10 +31,10 @@ pub trait Trace: LoadState { db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> where DB: Database, - ::Error: Into, + EthApiError: From, I: GetInspector, { self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) @@ -48,14 +50,15 @@ pub trait Trace: LoadState { db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)> + ) -> Result<(ResultAndState, EnvWithHandlerCfg, DB), Self::Error> where DB: Database, - ::Error: Into, + EthApiError: From, + I: GetInspector, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact()?; + let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (db, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env, db)) } @@ -73,10 +76,10 @@ pub trait Trace: LoadState { config: TracingInspectorConfig, at: BlockId, f: F, - ) -> EthResult + ) -> Result where Self: Call, - F: FnOnce(TracingInspector, ResultAndState) -> EthResult, + F: FnOnce(TracingInspector, ResultAndState) -> Result, { self.with_state_at_block(at, |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); @@ -99,10 +102,10 @@ pub trait Trace: LoadState { config: TracingInspectorConfig, at: BlockId, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + Call, - F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> EthResult + F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> Result + Send + 'static, R: Send + 'static, @@ -130,7 +133,7 @@ pub trait Trace: LoadState { hash: B256, config: TracingInspectorConfig, f: F, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadPendingBlock + LoadTransaction + Call, F: FnOnce( @@ -138,7 +141,7 @@ pub trait Trace: LoadState { TracingInspector, ResultAndState, StateCacheDb<'_>, - ) -> EthResult + ) -> Result + Send + 'static, R: Send + 'static, @@ -160,10 +163,15 @@ pub trait Trace: LoadState { hash: B256, mut inspector: Insp, f: F, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadPendingBlock + LoadTransaction + Call, - F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDb<'_>) -> EthResult + F: FnOnce( + TransactionInfo, + Insp, + ResultAndState, + StateCacheDb<'_>, + ) -> Result + Send + 'static, Insp: for<'a, 'b> Inspector> + Send + 'static, @@ -222,7 +230,7 @@ pub trait Trace: LoadState { highest_index: Option, config: 
TracingInspectorConfig, f: F, - ) -> impl Future>>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadBlock, F: Fn( @@ -231,7 +239,7 @@ pub trait Trace: LoadState { ExecutionResult, &EvmState, &StateCacheDb<'_>, - ) -> EthResult + ) -> Result + Send + 'static, R: Send + 'static, @@ -260,10 +268,16 @@ pub trait Trace: LoadState { highest_index: Option, mut inspector_setup: Setup, f: F, - ) -> impl Future>>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadBlock, - F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + F: Fn( + TransactionInfo, + Insp, + ExecutionResult, + &EvmState, + &StateCacheDb<'_>, + ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, @@ -360,7 +374,7 @@ pub trait Trace: LoadState { block_id: BlockId, config: TracingInspectorConfig, f: F, - ) -> impl Future>>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadBlock, // This is the callback that's invoked for each transaction with the inspector, the result, @@ -371,7 +385,7 @@ pub trait Trace: LoadState { ExecutionResult, &EvmState, &StateCacheDb<'_>, - ) -> EthResult + ) -> Result + Send + 'static, R: Send + 'static, @@ -398,12 +412,18 @@ pub trait Trace: LoadState { block_id: BlockId, insp_setup: Setup, f: F, - ) -> impl Future>>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: LoadBlock, // This is the callback that's invoked for each transaction with the inspector, the result, // state and db - F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + F: Fn( + TransactionInfo, + Insp, + ExecutionResult, + &EvmState, + &StateCacheDb<'_>, + ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index fa4c9be30787..bd2b2ffd55f0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -24,9 +24,11 @@ use reth_rpc_types::{ use reth_rpc_types_compat::transaction::from_recovered_with_block_context; use reth_transaction_pool::{TransactionOrigin, TransactionPool}; -use super::EthSigner; +use crate::{FromEthApiError, IntoEthApiError}; -use super::{Call, EthApiSpec, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking}; +use super::{ + Call, EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking, +}; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. @@ -75,7 +77,7 @@ pub trait EthTransactions: LoadTransaction { fn transaction_by_hash( &self, hash: B256, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { LoadTransaction::transaction_by_hash(self, hash) } @@ -85,8 +87,10 @@ pub trait EthTransactions: LoadTransaction { fn transactions_by_block( &self, block: B256, - ) -> impl Future>>> + Send { - async move { Ok(self.cache().get_block_transactions(block).await?) } + ) -> impl Future>, Self::Error>> + Send { + async move { + self.cache().get_block_transactions(block).await.map_err(Self::Error::from_eth_err) + } } /// Returns the EIP-2718 encoded transaction by hash. 
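The `trace.rs` changes above thread the same generic error through the inspector plumbing; the one new requirement is `FromEvmError`, which lifts revm's `EVMError<DbErr>` into the network error whenever the database error converts into `EthApiError`. A sketch of that single conversion point, with illustrative type parameters:

```rust
use reth_rpc_eth_api::FromEvmError;
use reth_rpc_eth_types::EthApiError;
use revm_primitives::EVMError;

// Mirrors `evm.transact().map_err(Self::Error::from_evm_err)` in
// `Trace::inspect_and_return_db`: `E` is the network error and `DbErr`
// the error type of the `Database` impl being traced.
fn lift_evm_err<E, DbErr>(err: EVMError<DbErr>) -> E
where
    E: FromEvmError,
    EthApiError: From<DbErr>,
{
    E::from_evm_err(err)
}
```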
@@ -99,7 +103,7 @@ pub trait EthTransactions: LoadTransaction { fn raw_transaction_by_hash( &self, hash: B256, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { // Note: this is mostly used to fetch pooled transactions so we check the pool first if let Some(tx) = @@ -110,7 +114,8 @@ pub trait EthTransactions: LoadTransaction { self.spawn_blocking_io(move |ref this| { Ok(LoadTransaction::provider(this) - .transaction_by_hash(hash)? + .transaction_by_hash(hash) + .map_err(Self::Error::from_eth_err)? .map(|tx| tx.envelope_encoded())) }) .await @@ -121,7 +126,7 @@ pub trait EthTransactions: LoadTransaction { fn historical_transaction_by_hash_at( &self, hash: B256, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { match self.transaction_by_hash_at(hash).await? { None => Ok(None), @@ -137,7 +142,7 @@ pub trait EthTransactions: LoadTransaction { fn transaction_receipt( &self, hash: B256, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadReceipt + 'static, { @@ -157,19 +162,26 @@ pub trait EthTransactions: LoadTransaction { fn load_transaction_and_receipt( &self, hash: TxHash, - ) -> impl Future>> + Send + ) -> impl Future< + Output = Result, Self::Error>, + > + Send where Self: 'static, { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = - match LoadTransaction::provider(&this).transaction_by_hash_with_meta(hash)? { - Some((tx, meta)) => (tx, meta), - None => return Ok(None), - }; + let (tx, meta) = match LoadTransaction::provider(&this) + .transaction_by_hash_with_meta(hash) + .map_err(Self::Error::from_eth_err)? + { + Some((tx, meta)) => (tx, meta), + None => return Ok(None), + }; - let receipt = match EthTransactions::provider(&this).receipt_by_hash(hash)? { + let receipt = match EthTransactions::provider(&this) + .receipt_by_hash(hash) + .map_err(Self::Error::from_eth_err)? + { Some(recpt) => recpt, None => return Ok(None), }; @@ -185,7 +197,7 @@ pub trait EthTransactions: LoadTransaction { &self, block_id: BlockId, index: usize, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadBlock, { @@ -216,7 +228,7 @@ pub trait EthTransactions: LoadTransaction { &self, block_id: BlockId, index: usize, - ) -> impl Future>> + Send + ) -> impl Future, Self::Error>> + Send where Self: LoadBlock, { @@ -234,7 +246,10 @@ pub trait EthTransactions: LoadTransaction { /// Decodes and recovers the transaction and submits it to the pool. /// /// Returns the hash of the transaction. - fn send_raw_transaction(&self, tx: Bytes) -> impl Future> + Send { + fn send_raw_transaction( + &self, + tx: Bytes, + ) -> impl Future> + Send { async move { // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. 
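The `send_raw_transaction`/`send_transaction` hunks below also use the mirror-image helper, `IntoEthApiError`, which pushes a core error value into the caller's generic error at the call site, as in `SignError::NoAccount.into_eth_err()`. A minimal sketch, generic over any conforming error type:

```rust
use reth_rpc_eth_api::{FromEthApiError, IntoEthApiError};
use reth_rpc_eth_types::EthApiError;

// Anything convertible into `EthApiError` (including `EthApiError` itself,
// via the identity `From` impl) can be pushed into the generic network
// error `E` in one call.
fn missing_block<E: FromEthApiError>() -> Result<(), E> {
    Err(EthApiError::UnknownBlockNumber.into_eth_err())
}
```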
@@ -250,8 +265,11 @@ pub trait EthTransactions: LoadTransaction { ); // submit the transaction to the pool with a `Local` origin - let hash = - self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; + let hash = self + .pool() + .add_transaction(TransactionOrigin::Local, pool_transaction) + .await + .map_err(Self::Error::from_eth_err)?; Ok(hash) } @@ -262,18 +280,18 @@ pub trait EthTransactions: LoadTransaction { fn send_transaction( &self, mut request: TransactionRequest, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: EthApiSpec + LoadBlock + LoadPendingBlock + LoadFee + Call, { async move { let from = match request.from { Some(from) => from, - None => return Err(SignError::NoAccount.into()), + None => return Err(SignError::NoAccount.into_eth_err()), }; if self.find_signer(&from).is_err() { - return Err(SignError::NoAccount.into()); + return Err(SignError::NoAccount.into_eth_err()); } // set nonce if not already set before @@ -447,7 +465,7 @@ pub trait EthTransactions: LoadTransaction { TypedTransactionRequest::EIP4844(req) } - None => return Err(EthApiError::ConflictingFeeFieldsInRequest), + None => return Err(EthApiError::ConflictingFeeFieldsInRequest.into()), }; let signed_tx = self.sign_request(&from, transaction)?; @@ -457,13 +475,14 @@ pub trait EthTransactions: LoadTransaction { let pool_transaction = match recovered.try_into() { Ok(converted) => <::Pool as TransactionPool>::Transaction::from_recovered_pooled_transaction(converted), - Err(_) => return Err(EthApiError::TransactionConversionError), + Err(_) => return Err(EthApiError::TransactionConversionError.into()), }; // submit the transaction to the pool with a `Local` origin let hash = LoadTransaction::pool(self) .add_transaction(TransactionOrigin::Local, pool_transaction) - .await?; + .await + .map_err(Self::Error::from_eth_err)?; Ok(hash) } @@ -474,16 +493,16 @@ pub trait EthTransactions: LoadTransaction { &self, from: &Address, request: TypedTransactionRequest, - ) -> EthResult { + ) -> Result { for signer in self.signers().read().iter() { if signer.is_signer_for(from) { return match signer.sign_transaction(request, from) { Ok(tx) => Ok(tx), - Err(e) => Err(e.into()), + Err(e) => Err(e.into_eth_err()), } } } - Err(EthApiError::InvalidTransactionSignature) + Err(EthApiError::InvalidTransactionSignature.into()) } /// Signs given message. Returns the signature. @@ -491,23 +510,37 @@ pub trait EthTransactions: LoadTransaction { &self, account: Address, message: Bytes, - ) -> impl Future> + Send { - async move { Ok(self.find_signer(&account)?.sign(account, &message).await?.to_hex_bytes()) } + ) -> impl Future> + Send { + async move { + Ok(self + .find_signer(&account)? + .sign(account, &message) + .await + .map_err(Self::Error::from_eth_err)? + .to_hex_bytes()) + } } /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. - fn sign_typed_data(&self, data: &TypedData, account: Address) -> EthResult { - Ok(self.find_signer(&account)?.sign_typed_data(account, data)?.to_hex_bytes()) + fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result { + Ok(self + .find_signer(&account)? + .sign_typed_data(account, data) + .map_err(Self::Error::from_eth_err)? + .to_hex_bytes()) } /// Returns the signer for the given account, if found in configured signers. 
- fn find_signer(&self, account: &Address) -> Result, SignError> { + fn find_signer( + &self, + account: &Address, + ) -> Result, Self::Error> { self.signers() .read() .iter() .find(|signer| signer.is_signer_for(account)) .map(|signer| dyn_clone::clone_box(&**signer)) - .ok_or(SignError::NoAccount) + .ok_or_else(|| SignError::NoAccount.into_eth_err()) } } @@ -543,12 +576,16 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_by_hash( &self, hash: B256, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send { async move { // Try to find the transaction on disk let mut resp = self .spawn_blocking_io(move |this| { - match this.provider().transaction_by_hash_with_meta(hash)? { + match this + .provider() + .transaction_by_hash_with_meta(hash) + .map_err(Self::Error::from_eth_err)? + { None => Ok(None), Some((tx, meta)) => { // Note: we assume this transaction is valid, because it's mined (or @@ -590,7 +627,8 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_by_hash_at( &self, transaction_hash: B256, - ) -> impl Future>> + Send { + ) -> impl Future, Self::Error>> + Send + { async move { match self.transaction_by_hash(transaction_hash).await? { None => Ok(None), @@ -625,8 +663,8 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_and_block( &self, hash: B256, - ) -> impl Future>> + Send - { + ) -> impl Future, Self::Error>> + + Send { async move { let (transaction, at) = match self.transaction_by_hash_at(hash).await? { None => return Ok(None), @@ -638,7 +676,11 @@ pub trait LoadTransaction: SpawnBlocking { BlockId::Hash(hash) => hash.block_hash, _ => return Ok(None), }; - let block = self.cache().get_block_with_senders(block_hash).await?; + let block = self + .cache() + .get_block_with_senders(block_hash) + .await + .map_err(Self::Error::from_eth_err)?; Ok(block.map(|block| (transaction, block.seal(block_hash)))) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/types.rs b/crates/rpc/rpc-eth-api/src/helpers/types.rs new file mode 100644 index 000000000000..088f9d9b69e9 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/types.rs @@ -0,0 +1,17 @@ +//! Trait for specifying `eth` API types that may be network dependent. + +use std::error::Error; + +use crate::{AsEthApiError, FromEthApiError, FromEvmError}; + +/// Network specific `eth` API types. +pub trait EthApiTypes: Send + Sync { + /// Extension of [`EthApiError`](reth_rpc_eth_types::EthApiError), with network specific errors. 
+ type Error: Into> + + FromEthApiError + + AsEthApiError + + FromEvmError + + Error + + Send + + Sync; +} diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index 1aed94d5cc6e..c707a94eef4f 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -21,6 +21,10 @@ pub mod pubsub; pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; +pub use helpers::{ + error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}, + types::EthApiTypes, +}; pub use pubsub::EthPubSubApiServer; pub use helpers::transaction::RawTransactionForwarder; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index b1c307191025..3fb20836e46f 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -55,14 +55,3 @@ tracing.workspace = true [dev-dependencies] serde_json.workspace = true - -[features] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-revm/optimism", - "reth-chainspec/optimism", - "reth-execution-types/optimism", - "reth-revm/optimism", - "revm/optimism" -] \ No newline at end of file diff --git a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs index 77d861343307..8ca7208d22bf 100644 --- a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs +++ b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs @@ -61,15 +61,12 @@ where } } - /// Remove consumers for a given key. + /// Remove consumers for a given key, this will also remove the key from the cache. pub fn remove(&mut self, key: &K) -> Option> { - match self.queued.remove(key) { - Some(removed) => { - self.metrics.queued_consumers_count.decrement(removed.len() as f64); - Some(removed) - } - None => None, - } + let _ = self.cache.remove(key); + self.queued + .remove(key) + .inspect(|removed| self.metrics.queued_consumers_count.decrement(removed.len() as f64)) } /// Returns a reference to the value for a given key and promotes that element to be the most diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index 67fd11eae2c0..62e5d9d97ce9 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -17,6 +17,7 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; +use tracing::error; /// Result alias pub type EthResult = Result; @@ -137,6 +138,11 @@ impl EthApiError { pub fn other(err: E) -> Self { Self::Other(Box::new(err)) } + + /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`] + pub const fn is_gas_too_high(&self) -> bool { + matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { @@ -372,6 +378,11 @@ pub enum RpcInvalidTransactionError { /// Any other error #[error("{0}")] Other(Box), + /// Unexpected [`InvalidTransaction`](revm::primitives::InvalidTransaction) error, Optimism + /// errors should not be handled on this level. 
+ // TODO: Remove when optimism feature removed in revm + #[error("unexpected transaction error")] + UnexpectedTransactionError, } impl RpcInvalidTransactionError { @@ -381,29 +392,6 @@ impl RpcInvalidTransactionError { } } -/// Optimism specific invalid transaction errors -#[cfg(feature = "optimism")] -#[derive(thiserror::Error, Debug)] -pub enum OptimismInvalidTransactionError { - /// A deposit transaction was submitted as a system transaction post-regolith. - #[error("no system transactions allowed after regolith")] - DepositSystemTxPostRegolith, - /// A deposit transaction halted post-regolith - #[error("deposit transaction halted after regolith")] - HaltedDepositPostRegolith, -} - -#[cfg(feature = "optimism")] -impl ToRpcError for OptimismInvalidTransactionError { - fn to_rpc_error(&self) -> jsonrpsee_types::error::ErrorObject<'static> { - match self { - Self::DepositSystemTxPostRegolith | Self::HaltedDepositPostRegolith => { - rpc_err(EthRpcErrorCode::TransactionRejected.code(), self.to_string(), None) - } - } - } -} - impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. const fn error_code(&self) -> i32 { @@ -462,7 +450,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::InvalidChainId => Self::InvalidChainId, InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap, InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow, - InvalidTransaction::CallerGasLimitMoreThanBlock => Self::GasTooHigh, + InvalidTransaction::CallerGasLimitMoreThanBlock | InvalidTransaction::CallGasCostMoreThanGasLimit => Self::GasTooHigh, InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA, InvalidTransaction::LackOfFundForMaxFee { .. } => Self::InsufficientFunds, @@ -488,17 +476,15 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::AuthorizationListInvalidFields => { Self::AuthorizationListInvalidFields } - #[cfg(feature = "optimism")] - InvalidTransaction::OptimismError(err) => match err { - revm_primitives::OptimismInvalidTransaction::DepositSystemTxPostRegolith => { - Self::other(OptimismInvalidTransactionError::DepositSystemTxPostRegolith) - } - revm_primitives::OptimismInvalidTransaction::HaltedDepositPostRegolith => { - Self::Other(Box::new( - OptimismInvalidTransactionError::HaltedDepositPostRegolith, - )) - } - }, + #[allow(unreachable_patterns)] + _ => { + error!(target: "rpc", + ?err, + "unexpected transaction error" + ); + + Self::UnexpectedTransactionError + } } } } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 255273194a37..41ebce32dfb3 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -102,9 +102,11 @@ where } } +/// A future representing the response of an RPC request #[pin_project] #[allow(missing_debug_implementations)] pub struct ResponseFuture { + /// The kind of response future, error or pending #[pin] kind: Kind, } diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index e4f7dbe06f16..8387bb160e8b 100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -15,6 +15,8 @@ mod auth_client_layer; mod auth_layer; mod jwt_validator; +pub use auth_layer::{AuthService, ResponseFuture}; + // Export alloy JWT types pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index e3c129bf6e28..e433bda0d4a7 100644 --- 
a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -87,6 +87,9 @@ pub mod gas_oracle { /// Taken from Geth's implementation in order to pass the hive tests /// pub const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015; + + /// Gas required at the beginning of a call. + pub const CALL_STIPEND_GAS: u64 = 2_300; } /// Cache specific constants diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 2f52b907e144..46f957d25d1a 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -33,7 +33,6 @@ jsonrpsee-types = { workspace = true, optional = true } alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde", "arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive.workspace = true rand.workspace = true similar-asserts.workspace = true bytes.workspace = true @@ -41,4 +40,4 @@ serde_json.workspace = true [features] default = ["jsonrpsee-types"] -arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] \ No newline at end of file +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 6a73c740213a..47c10e881579 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -11,8 +11,11 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #[allow(hidden_glob_reexports)] mod eth; -mod peer; +/// Alias for a peer identifier +pub type PeerId = B512; + +use alloy_primitives::B512; // re-export for convenience pub use alloy_rpc_types::serde_helpers; @@ -51,5 +54,3 @@ pub use eth::{ error::ToRpcError, transaction::{self, TransactionRequest, TypedTransactionRequest}, }; - -pub use peer::*; diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs deleted file mode 100644 index eb77ac7922d4..000000000000 --- a/crates/rpc/rpc-types/src/net.rs +++ /dev/null @@ -1,13 +0,0 @@ -use alloy_rpc_types_admin::EthProtocolInfo; -use serde::{Deserialize, Serialize}; - -/// The status of the network being ran by the local node. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct NetworkStatus { - /// The local node client version. - pub client_version: String, - /// The current ethereum protocol version - pub protocol_version: u64, - /// Information about the Ethereum Wire Protocol. 
- pub eth_protocol_info: EthProtocolInfo, -} diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs deleted file mode 100644 index a07e61d00285..000000000000 --- a/crates/rpc/rpc-types/src/peer.rs +++ /dev/null @@ -1,4 +0,0 @@ -use alloy_primitives::B512; - -/// Alias for a peer identifier -pub type PeerId = B512; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5c2ebaa2357a..78db90e81a09 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -90,5 +90,4 @@ optimism = [ "reth-provider/optimism", "reth-rpc-eth-api/optimism", "reth-revm/optimism", - "reth-rpc-eth-types/optimism", ] diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 847ab6ae5a33..cbf35a7d51c5 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -14,8 +14,11 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; -use reth_rpc_eth_api::helpers::{Call, EthApiSpec, EthTransactions, TraceExt}; -use reth_rpc_eth_types::{EthApiError, EthResult, StateCacheDb}; +use reth_rpc_eth_api::{ + helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, FromEthApiError, +}; +use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ state::EvmOverrides, @@ -68,7 +71,7 @@ where + StateProviderFactory + EvmEnvProvider + 'static, - Eth: TraceExt + 'static, + Eth: EthApiTypes + TraceExt + 'static, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -83,7 +86,7 @@ where cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, - ) -> EthResult> { + ) -> Result, Eth::Error> { if transactions.is_empty() { // nothing to trace return Ok(Vec::new()) @@ -141,9 +144,10 @@ where &self, rlp_block: Bytes, opts: GethDebugTracingOptions, - ) -> EthResult> { - let block = - Block::decode(&mut rlp_block.as_ref()).map_err(BlockError::RlpDecodeRawBlock)?; + ) -> Result, Eth::Error> { + let block = Block::decode(&mut rlp_block.as_ref()) + .map_err(BlockError::RlpDecodeRawBlock) + .map_err(Eth::Error::from_eth_err)?; let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; // we trace on top the block's parent block @@ -158,8 +162,9 @@ where .map(|tx| { tx.into_ecrecovered() .ok_or_else(|| EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) }) - .collect::>>()? + .collect::, Eth::Error>>()? } else { block .body @@ -167,8 +172,9 @@ where .map(|tx| { tx.into_ecrecovered_unchecked() .ok_or_else(|| EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) }) - .collect::>>()? + .collect::, Eth::Error>>()? }; self.trace_block(parent.into(), transactions, cfg, block_env, opts).await @@ -179,11 +185,12 @@ where &self, block_id: BlockId, opts: GethDebugTracingOptions, - ) -> EthResult> { + ) -> Result, Eth::Error> { let block_hash = self .inner .provider - .block_hash_for_id(block_id)? + .block_hash_for_id(block_id) + .map_err(Eth::Error::from_eth_err)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; let ((cfg, block_env, _), block) = futures::try_join!( @@ -213,9 +220,9 @@ where &self, tx_hash: B256, opts: GethDebugTracingOptions, - ) -> EthResult { + ) -> Result { let (transaction, block) = match self.inner.eth_api.transaction_and_block(tx_hash).await? 
{ - None => return Err(EthApiError::TransactionNotFound), + None => return Err(EthApiError::TransactionNotFound.into()), Some(res) => res, }; let (cfg, block_env, _) = self.inner.eth_api.evm_env_at(block.hash().into()).await?; @@ -277,7 +284,7 @@ where call: TransactionRequest, block_id: Option, opts: GethDebugTracingCallOptions, - ) -> EthResult { + ) -> Result { let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = opts; @@ -313,8 +320,9 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _) = this.eth_api().inspect(db, env, &mut inspector)?; + let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; let frame = inspector + .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() .geth_call_traces(call_config, res.result.gas_used()); Ok(frame.into()) @@ -330,22 +338,24 @@ where TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let frame = - self.inner - .eth_api - .spawn_with_call_at(call, at, overrides, move |db, env| { - // wrapper is hack to get around 'higher-ranked lifetime error', - // see - let db = db.0; - - let (res, _) = - this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector - .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, db)?; - Ok(frame) - }) - .await?; + let frame = self + .inner + .eth_api + .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', + // see + let db = db.0; + + let (res, env) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let frame = inspector + .with_transaction_gas_limit(env.tx.gas_limit) + .into_geth_builder() + .geth_prestate_traces(&res, prestate_config, db) + .map_err(Eth::Error::from_eth_err)?; + Ok(frame) + }) + .await?; return Ok(frame.into()) } GethDebugBuiltInTracerType::NoopTracer => Ok(NoopFrame::default().into()), @@ -354,7 +364,8 @@ where .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = MuxInspector::try_from_config(mux_config)?; + let mut inspector = MuxInspector::try_from_config(mux_config) + .map_err(Eth::Error::from_eth_err)?; let frame = self .inner @@ -366,7 +377,9 @@ where let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector.try_into_mux_frame(&res, db)?; + let frame = inspector + .try_into_mux_frame(&res, db) + .map_err(Eth::Error::from_eth_err)?; Ok(frame.into()) }) .await?; @@ -386,10 +399,11 @@ where // let db = db.0; - let mut inspector = JsInspector::new(code, config)?; + let mut inspector = + JsInspector::new(code, config).map_err(Eth::Error::from_eth_err)?; let (res, _) = this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; - Ok(inspector.json_result(res, &env, db)?) 
+ inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err) }) .await?; @@ -403,17 +417,20 @@ where let mut inspector = TracingInspector::new(inspector_config); - let (res, inspector) = self + let (res, tx_gas_limit, inspector) = self .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, _) = this.eth_api().inspect(db, env, &mut inspector)?; - Ok((res, inspector)) + let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; + Ok((res, env.tx.gas_limit, inspector)) }) .await?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); + let frame = inspector + .with_transaction_gas_limit(tx_gas_limit) + .into_geth_builder() + .geth_traces(gas_used, return_value, config); Ok(frame.into()) } @@ -426,9 +443,9 @@ where bundles: Vec, state_context: Option, opts: Option, - ) -> EthResult>> { + ) -> Result>, Eth::Error> { if bundles.is_empty() { - return Err(EthApiError::InvalidParams(String::from("bundles are empty."))) + return Err(EthApiError::InvalidParams(String::from("bundles are empty.")).into()) } let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); @@ -546,7 +563,7 @@ where env: EnvWithHandlerCfg, db: &mut StateCacheDb<'_>, transaction_context: Option, - ) -> EthResult<(GethTrace, revm_primitives::EvmState)> { + ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; if let Some(tracer) = tracer { @@ -566,9 +583,10 @@ where TracingInspectorConfig::from_geth_call_config(&call_config), ); - let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let frame = inspector + .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() .geth_call_traces(call_config, res.result.gas_used()); @@ -582,13 +600,13 @@ where let mut inspector = TracingInspector::new( TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector.into_geth_builder().geth_prestate_traces( - &res, - prestate_config, - db, - )?; + let frame = inspector + .with_transaction_gas_limit(env.tx.gas_limit) + .into_geth_builder() + .geth_prestate_traces(&res, prestate_config, db) + .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } @@ -600,10 +618,13 @@ where .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = MuxInspector::try_from_config(mux_config)?; + let mut inspector = MuxInspector::try_from_config(mux_config) + .map_err(Eth::Error::from_eth_err)?; let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector.try_into_mux_frame(&res, db)?; + let frame = inspector + .try_into_mux_frame(&res, db) + .map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } }, @@ -613,11 +634,13 @@ where code, config, transaction_context.unwrap_or_default(), - )?; + ) + .map_err(Eth::Error::from_eth_err)?; let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let state = res.state.clone(); - let result = inspector.json_result(res, &env, db)?; + let result = + inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err)?; Ok((GethTrace::JS(result), 
state)) } } @@ -628,10 +651,13 @@ where let mut inspector = TracingInspector::new(inspector_config); - let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); + let frame = inspector + .with_transaction_gas_limit(env.tx.gas_limit) + .into_geth_builder() + .geth_traces(gas_used, return_value, config); Ok((frame.into(), res.state)) } @@ -690,7 +716,7 @@ where /// /// Returns the bytes of the transaction for the given hash. async fn raw_transaction(&self, hash: B256) -> RpcResult> { - Ok(self.inner.eth_api.raw_transaction_by_hash(hash).await?) + self.inner.eth_api.raw_transaction_by_hash(hash).await.map_err(Into::into) } /// Handler for `debug_getRawTransactions` @@ -739,7 +765,9 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()).await?) + Self::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()) + .await + .map_err(Into::into) } /// Handler for `debug_traceBlockByHash` @@ -749,7 +777,9 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) + Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()) + .await + .map_err(Into::into) } /// Handler for `debug_traceBlockByNumber` @@ -759,7 +789,9 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) + Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()) + .await + .map_err(Into::into) } /// Handler for `debug_traceTransaction` @@ -769,7 +801,9 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()).await?) + Self::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()) + .await + .map_err(Into::into) } /// Handler for `debug_traceCall` @@ -780,7 +814,9 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()).await?) + Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()) + .await + .map_err(Into::into) } async fn debug_trace_call_many( @@ -790,7 +826,7 @@ where opts: Option, ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::debug_trace_call_many(self, bundles, state_context, opts).await?) 
+ Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into) } async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index d28013822ee1..9cabc1f6f5fd 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -10,6 +10,7 @@ use reth_primitives::{ PooledTransactionsElement, U256, }; use reth_revm::database::StateProviderDatabase; +use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; use reth_rpc_types::mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -23,9 +24,7 @@ use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, EthCallBundleApiServer, }; -use reth_rpc_eth_types::{ - utils::recover_raw_transaction, EthApiError, EthResult, RpcInvalidTransactionError, -}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; /// `Eth` bundle implementation. pub struct EthBundle { @@ -48,7 +47,10 @@ where /// another (or the same) block. This can be used to simulate future blocks with the current /// state, or it can be used to simulate a past block. The sender is responsible for signing the /// transactions and using the correct nonce and ensuring validity - pub async fn call_bundle(&self, bundle: EthCallBundle) -> EthResult { + pub async fn call_bundle( + &self, + bundle: EthCallBundle, + ) -> Result { let EthCallBundle { txs, block_number, @@ -61,12 +63,14 @@ where if txs.is_empty() { return Err(EthApiError::InvalidParams( EthBundleError::EmptyBundleTransactions.to_string(), - )) + ) + .into()) } if block_number == 0 { return Err(EthApiError::InvalidParams( EthBundleError::BundleMissingBlockNumber.to_string(), - )) + ) + .into()) } let transactions = txs @@ -93,7 +97,8 @@ where { return Err(EthApiError::InvalidParams( EthBundleError::Eip4844BlobGasExceeded.to_string(), - )) + ) + .into()) } let block_id: reth_rpc_types::BlockId = state_block_number.into(); @@ -121,7 +126,8 @@ where let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block let parent = LoadPendingBlock::provider(&self.inner.eth_api) - .header_by_number(parent_block)? + .header_by_number(parent_block) + .map_err(Eth::Error::from_eth_err)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; if let Some(base_fee) = parent.next_block_base_fee( LoadPendingBlock::provider(&self.inner.eth_api) @@ -146,7 +152,8 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); let db = CacheDB::new(StateProviderDatabase::new(state)); - let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase)? + let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase) + .map_err(Eth::Error::from_eth_err)? .map(|acc| acc.balance) .unwrap_or_default(); let mut coinbase_balance_before_tx = initial_coinbase; @@ -164,8 +171,9 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. 
if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(EnvKzgSettings::Default.get()) - .map_err(|e| EthApiError::InvalidParams(e.to_string()))?; + tx.validate(EnvKzgSettings::Default.get()).map_err(|e| { + Eth::Error::from_eth_err(EthApiError::InvalidParams(e.to_string())) + })?; } let tx = tx.into_transaction(); @@ -173,9 +181,11 @@ where hash_bytes.extend_from_slice(tx.hash().as_slice()); let gas_price = tx .effective_tip_per_gas(basefee) - .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)?; + .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) + .map_err(Eth::Error::from_eth_err)?; Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); - let ResultAndState { result, state } = evm.transact()?; + let ResultAndState { result, state } = + evm.transact().map_err(Eth::Error::from_evm_err)?; let gas_used = result.gas_used(); total_gas_used += gas_used; @@ -254,7 +264,7 @@ where Eth: EthTransactions + LoadPendingBlock + Call + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Ok(Self::call_bundle(self, request).await?) + Self::call_bundle(self, request).await.map_err(Into::into) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 09065dfc1a12..590d73f7f7e4 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,10 +10,11 @@ use reth_primitives::{BlockNumberOrTag, U256}; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{transaction::UpdateRawTxForwarder, EthSigner, SpawnBlocking}, - RawTransactionForwarder, + EthApiTypes, RawTransactionForwarder, }; use reth_rpc_eth_types::{ - EthApiBuilderCtx, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, + EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, + PendingBlock, }; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, @@ -114,6 +115,13 @@ where } } +impl EthApiTypes for EthApi +where + Self: Send + Sync, +{ + type Error = EthApiError; +} + impl std::fmt::Debug for EthApi { @@ -131,7 +139,7 @@ impl Clone for EthApi SpawnBlocking for EthApi where - Self: Clone + Send + Sync + 'static, + Self: EthApiTypes + Clone + Send + Sync + 'static, { #[inline] fn io_task_spawner(&self) -> impl reth_tasks::TaskSpawner { diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index db1fee781fd3..eb4483705f73 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,13 +1,13 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
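Most of this patch swaps the concrete `EthResult` alias for a result over `Eth::Error`, where the error is an associated type on the new `EthApiTypes` trait and concrete errors are lifted into it via `From` conversions (`.into()`, `from_eth_err`, `from_evm_err`). A compressed sketch of the pattern, with illustrative names that are not the reth API:

```rust
use std::fmt::Debug;

// Stand-in for a concrete error such as `EthApiError`.
#[derive(Debug)]
struct CoreError(String);

// Stand-in for `EthApiTypes`: each API implementation chooses its own error
// type, constrained so core errors can always be converted into it.
trait ApiTypes {
    type Error: From<CoreError> + Debug;
}

// A helper that used to return the concrete error now returns the
// caller-chosen one, converting at the boundary with `.into()`, exactly as
// the diff does for `EthApiError::InvalidParams(..).into()`.
fn validate_bundles<T: ApiTypes>(bundles: &[u32]) -> Result<(), T::Error> {
    if bundles.is_empty() {
        return Err(CoreError("bundles are empty.".to_string()).into());
    }
    Ok(())
}

struct DefaultApi;

impl ApiTypes for DefaultApi {
    // The default implementation keeps the concrete error, matching
    // `type Error = EthApiError` in `core.rs` above.
    type Error = CoreError;
}

fn main() {
    let err = validate_bundles::<DefaultApi>(&[]).unwrap_err();
    println!("{err:?}");
}
```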
-use reth_rpc_eth_api::helpers::LoadReceipt; +use reth_rpc_eth_api::{helpers::LoadReceipt, EthApiTypes}; use reth_rpc_eth_types::EthStateCache; use crate::EthApi; impl LoadReceipt for EthApi where - Self: Send + Sync, + Self: EthApiTypes, { #[inline] fn cache(&self) -> &EthStateCache { diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index b291eb8a2016..f76be9d88758 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -3,7 +3,10 @@ use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; +use reth_rpc_eth_api::{ + helpers::{EthState, LoadState, SpawnBlocking}, + EthApiTypes, +}; use reth_rpc_eth_types::EthStateCache; use crate::EthApi; @@ -19,6 +22,7 @@ where impl LoadState for EthApi where + Self: EthApiTypes, Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, { diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 872af0cee451..635281c08e76 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -1,5 +1,7 @@ //! Contains RPC handler implementations specific to transactions +use std::sync::Arc; + use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -23,7 +25,7 @@ where } #[inline] - fn raw_tx_forwarder(&self) -> Option> { + fn raw_tx_forwarder(&self) -> Option> { self.inner.raw_tx_forwarder() } @@ -43,7 +45,7 @@ where type Pool = Pool; #[inline] - fn provider(&self) -> impl reth_provider::TransactionsProvider { + fn provider(&self) -> impl TransactionsProvider { self.inner.provider() } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 320c6856de06..bf8279719fec 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -85,7 +85,8 @@ where TransferInspector::new(false), |_tx_info, inspector, _, _| Ok(inspector.into_transfers()), ) - .await? + .await + .map_err(Into::into)? .map(|transfer_operations| { transfer_operations .iter() @@ -115,7 +116,8 @@ where _ => Ok(None), }) .await - .map(Option::flatten)?; + .map(Option::flatten) + .map_err(Into::into)?; Ok(maybe_revert) } @@ -128,7 +130,8 @@ where TracingInspectorConfig::default_parity(), move |_tx_info, inspector, _, _| Ok(inspector.into_traces().into_nodes()), ) - .await? + .await + .map_err(Into::into)? .map(|traces| { traces .into_iter() @@ -325,7 +328,8 @@ where Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) }, ) - .await? + .await + .map_err(Into::into)? 
.map(|traces| { traces .into_iter() diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index fd0174a4e174..461a1cad5fcd 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,7 +1,7 @@ use std::{collections::HashSet, sync::Arc}; use async_trait::async_trait; -use jsonrpsee::core::RpcResult as Result; +use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, @@ -11,11 +11,11 @@ use reth_primitives::{BlockId, Bytes, Header, B256, U256}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::helpers::{Call, TraceExt}; -use reth_rpc_eth_types::{ - error::{EthApiError, EthResult}, - utils::recover_raw_transaction, +use reth_rpc_eth_api::{ + helpers::{Call, TraceExt}, + FromEthApiError, }; +use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_rpc_types::{ state::{EvmOverrides, StateOverride}, trace::{ @@ -79,7 +79,10 @@ where Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. - pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { + pub async fn trace_call( + &self, + trace_request: TraceCallRequest, + ) -> Result { let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); let overrides = @@ -93,11 +96,10 @@ where let db = db.0; let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let trace_res = inspector.into_parity_builder().into_trace_results_with_state( - &res, - &trace_request.trace_types, - &db, - )?; + let trace_res = inspector + .into_parity_builder() + .into_trace_results_with_state(&res, &trace_request.trace_types, &db) + .map_err(Eth::Error::from_eth_err)?; Ok(trace_res) }) .await @@ -109,7 +111,7 @@ where tx: Bytes, trace_types: HashSet, block_id: Option, - ) -> EthResult { + ) -> Result { let tx = recover_raw_transaction(tx)?; let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; @@ -125,11 +127,10 @@ where self.inner .eth_api .spawn_trace_at_with_state(env, config, at, move |inspector, res, db| { - Ok(inspector.into_parity_builder().into_trace_results_with_state( - &res, - &trace_types, - &db, - )?) 
+ inspector + .into_parity_builder() + .into_trace_results_with_state(&res, &trace_types, &db) + .map_err(Eth::Error::from_eth_err) }) .await } @@ -142,7 +143,7 @@ where &self, calls: Vec<(TransactionRequest, HashSet)>, block_id: Option, - ) -> EthResult> { + ) -> Result, Eth::Error> { let at = block_id.unwrap_or(BlockId::pending()); let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; @@ -169,11 +170,10 @@ where let mut inspector = TracingInspector::new(config); let (res, _) = this.eth_api().inspect(&mut db, env, &mut inspector)?; - let trace_res = inspector.into_parity_builder().into_trace_results_with_state( - &res, - &trace_types, - &db, - )?; + let trace_res = inspector + .into_parity_builder() + .into_trace_results_with_state(&res, &trace_types, &db) + .map_err(Eth::Error::from_eth_err)?; results.push(trace_res); @@ -196,16 +196,15 @@ where &self, hash: B256, trace_types: HashSet, - ) -> EthResult { + ) -> Result { let config = TracingInspectorConfig::from_parity_config(&trace_types); self.inner .eth_api .spawn_trace_transaction_in_block(hash, config, move |_, inspector, res, db| { - let trace_res = inspector.into_parity_builder().into_trace_results_with_state( - &res, - &trace_types, - &db, - )?; + let trace_res = inspector + .into_parity_builder() + .into_trace_results_with_state(&res, &trace_types, &db) + .map_err(Eth::Error::from_eth_err)?; Ok(trace_res) }) .await @@ -223,7 +222,7 @@ where &self, hash: B256, indices: Vec, - ) -> EthResult> { + ) -> Result, Eth::Error> { if indices.len() != 1 { // The OG impl failed if it gets more than a single index return Ok(None) @@ -238,7 +237,7 @@ where &self, hash: B256, index: usize, - ) -> EthResult> { + ) -> Result, Eth::Error> { Ok(self.trace_transaction(hash).await?.and_then(|traces| traces.into_iter().nth(index))) } @@ -249,20 +248,21 @@ where pub async fn trace_filter( &self, filter: TraceFilter, - ) -> EthResult> { + ) -> Result, Eth::Error> { let matcher = filter.matcher(); - let TraceFilter { from_block, to_block, .. } = filter; + let TraceFilter { from_block, to_block, after, count, .. } = filter; let start = from_block.unwrap_or(0); let end = if let Some(to_block) = to_block { to_block } else { - self.provider().best_block_number()? + self.provider().best_block_number().map_err(Eth::Error::from_eth_err)? 
}; if start > end { return Err(EthApiError::InvalidParams( "invalid parameters: fromBlock cannot be greater than toBlock".to_string(), - )) + ) + .into()) } // ensure that the range is not too large, since we need to fetch all blocks in the range @@ -270,11 +270,12 @@ where if distance > 100 { return Err(EthApiError::InvalidParams( "Block range too large; currently limited to 100 blocks".to_string(), - )) + ) + .into()) } // fetch all blocks in that range - let blocks = self.provider().block_range(start..=end)?; + let blocks = self.provider().block_range(start..=end).map_err(Eth::Error::from_eth_err)?; // find relevant blocks to trace let mut target_blocks = Vec::new(); @@ -282,7 +283,10 @@ where let mut transaction_indices = HashSet::new(); let mut highest_matching_index = 0; for (tx_idx, tx) in block.body.iter().enumerate() { - let from = tx.recover_signer_unchecked().ok_or(BlockError::InvalidSignature)?; + let from = tx + .recover_signer_unchecked() + .ok_or(BlockError::InvalidSignature) + .map_err(Eth::Error::from_eth_err)?; let to = tx.to(); if matcher.matches(from, to) { let idx = tx_idx as u64; @@ -302,17 +306,15 @@ where num.into(), Some(highest_idx), TracingInspectorConfig::default_parity(), - move |tx_info, inspector, res, _, _| { + move |tx_info, inspector, _, _, _| { if let Some(idx) = tx_info.index { if !indices.contains(&idx) { // only record traces for relevant transactions return Ok(None) } } - let traces = inspector - .with_transaction_gas_used(res.gas_used()) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); + let traces = + inspector.into_parity_builder().into_localized_transaction_traces(tx_info); Ok(Some(traces)) }, ); @@ -341,6 +343,20 @@ where } } + // apply after and count to traces if specified, this allows for a pagination style. 
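The added lines that follow implement this pagination over the collected traces. The same `after`/`count` semantics as a standalone sketch (placeholder element type; `split_off(after)` keeps the suffix starting at that index, `truncate(count)` caps the length):

```rust
// `after` skips that many leading traces; `count` caps how many are returned.
fn paginate<Trace>(
    mut all_traces: Vec<Trace>,
    after: Option<u64>,
    count: Option<u64>,
) -> Vec<Trace> {
    // Only consider traces after the offset, if it is in range.
    if let Some(after) = after.map(|a| a as usize).filter(|a| *a < all_traces.len()) {
        all_traces = all_traces.split_off(after);
    }
    // At most, return `count` traces.
    if let Some(count) = count.map(|c| c as usize) {
        if count < all_traces.len() {
            all_traces.truncate(count);
        }
    }
    all_traces
}

fn main() {
    let traces = vec![0, 1, 2, 3, 4, 5];
    assert_eq!(paginate(traces, Some(2), Some(3)), vec![2, 3, 4]);
}
```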
+ // only consider traces after + if let Some(after) = after.map(|a| a as usize).filter(|a| *a < all_traces.len()) { + all_traces = all_traces.split_off(after); + } + + // at most, return count of traces + if let Some(count) = count { + let count = count as usize; + if count < all_traces.len() { + all_traces.truncate(count); + } + }; + Ok(all_traces) } @@ -348,17 +364,15 @@ where pub async fn trace_transaction( &self, hash: B256, - ) -> EthResult>> { + ) -> Result>, Eth::Error> { self.inner .eth_api .spawn_trace_transaction_in_block( hash, TracingInspectorConfig::default_parity(), - move |tx_info, inspector, res, _| { - let traces = inspector - .with_transaction_gas_used(res.result.gas_used()) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); + move |tx_info, inspector, _, _| { + let traces = + inspector.into_parity_builder().into_localized_transaction_traces(tx_info); Ok(traces) }, ) @@ -369,15 +383,13 @@ where pub async fn trace_block( &self, block_id: BlockId, - ) -> EthResult>> { + ) -> Result>, Eth::Error> { let traces = self.inner.eth_api.trace_block_with( block_id, TracingInspectorConfig::default_parity(), - |tx_info, inspector, res, _, _| { - let traces = inspector - .with_transaction_gas_used(res.gas_used()) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); + |tx_info, inspector, _, _, _| { + let traces = + inspector.into_parity_builder().into_localized_transaction_traces(tx_info); Ok(traces) }, ); @@ -406,7 +418,7 @@ where &self, block_id: BlockId, trace_types: HashSet, - ) -> EthResult>> { + ) -> Result>, Eth::Error> { self.inner .eth_api .trace_block_with( @@ -419,7 +431,8 @@ where // If statediffs were requested, populate them with the account balance and // nonce from pre-state if let Some(ref mut state_diff) = full_trace.state_diff { - populate_state_diff(state_diff, db, state.iter())?; + populate_state_diff(state_diff, db, state.iter()) + .map_err(Eth::Error::from_eth_err)?; } let trace = TraceResultsWithTransactionHash { @@ -437,7 +450,7 @@ where pub async fn trace_transaction_opcode_gas( &self, tx_hash: B256, - ) -> EthResult> { + ) -> Result, Eth::Error> { self.inner .eth_api .spawn_trace_transaction_in_block_with_inspector( @@ -461,7 +474,7 @@ where pub async fn trace_block_opcode_gas( &self, block_id: BlockId, - ) -> EthResult> { + ) -> Result, Eth::Error> { let res = self .inner .eth_api @@ -494,7 +507,7 @@ where /// - if Paris hardfork is activated, no block rewards are given /// - if Paris hardfork is not activated, calculate block rewards with block number only /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd - fn calculate_base_block_reward(&self, header: &Header) -> EthResult> { + fn calculate_base_block_reward(&self, header: &Header) -> Result, Eth::Error> { let chain_spec = self.provider().chain_spec(); let is_paris_activated = chain_spec.is_paris_active_at_block(header.number); @@ -504,7 +517,11 @@ where None => { // if Paris hardfork is unknown, we need to fetch the total difficulty at the // block's height and check if it is pre-merge to calculate the base block reward - if let Some(header_td) = self.provider().header_td_by_number(header.number)? { + if let Some(header_td) = self + .provider() + .header_td_by_number(header.number) + .map_err(Eth::Error::from_eth_err)? 
+ { base_block_reward( chain_spec.as_ref(), header.number, @@ -570,11 +587,11 @@ where block_id: Option, state_overrides: Option, block_overrides: Option>, - ) -> Result { + ) -> RpcResult { let _permit = self.acquire_trace_permit().await; let request = TraceCallRequest { call, trace_types, block_id, state_overrides, block_overrides }; - Ok(Self::trace_call(self, request).await?) + Ok(Self::trace_call(self, request).await.map_err(Into::into)?) } /// Handler for `trace_callMany` @@ -582,9 +599,9 @@ where &self, calls: Vec<(TransactionRequest, HashSet)>, block_id: Option, - ) -> Result> { + ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_call_many(self, calls, block_id).await?) + Ok(Self::trace_call_many(self, calls, block_id).await.map_err(Into::into)?) } /// Handler for `trace_rawTransaction` @@ -593,9 +610,11 @@ where data: Bytes, trace_types: HashSet, block_id: Option, - ) -> Result { + ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_raw_transaction(self, data, trace_types, block_id).await?) + Ok(Self::trace_raw_transaction(self, data, trace_types, block_id) + .await + .map_err(Into::into)?) } /// Handler for `trace_replayBlockTransactions` @@ -603,9 +622,11 @@ where &self, block_id: BlockId, trace_types: HashSet, - ) -> Result>> { + ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::replay_block_transactions(self, block_id, trace_types).await?) + Ok(Self::replay_block_transactions(self, block_id, trace_types) + .await + .map_err(Into::into)?) } /// Handler for `trace_replayTransaction` @@ -613,18 +634,18 @@ where &self, transaction: B256, trace_types: HashSet, - ) -> Result { + ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Ok(Self::replay_transaction(self, transaction, trace_types).await?) + Ok(Self::replay_transaction(self, transaction, trace_types).await.map_err(Into::into)?) } /// Handler for `trace_block` async fn trace_block( &self, block_id: BlockId, - ) -> Result>> { + ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_block(self, block_id).await?) + Ok(Self::trace_block(self, block_id).await.map_err(Into::into)?) } /// Handler for `trace_filter` @@ -633,8 +654,8 @@ where /// /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. - async fn trace_filter(&self, filter: TraceFilter) -> Result> { - Ok(Self::trace_filter(self, filter).await?) + async fn trace_filter(&self, filter: TraceFilter) -> RpcResult> { + Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?) } /// Returns transaction trace at given index. @@ -643,33 +664,35 @@ where &self, hash: B256, indices: Vec, - ) -> Result> { + ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect()).await?) + Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect()) + .await + .map_err(Into::into)?) } /// Handler for `trace_transaction` async fn trace_transaction( &self, hash: B256, - ) -> Result>> { + ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_transaction(self, hash).await?) + Ok(Self::trace_transaction(self, hash).await.map_err(Into::into)?) 
} /// Handler for `trace_transactionOpcodeGas` async fn trace_transaction_opcode_gas( &self, tx_hash: B256, - ) -> Result> { + ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await?) + Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await.map_err(Into::into)?) } /// Handler for `trace_blockOpcodeGas` - async fn trace_block_opcode_gas(&self, block_id: BlockId) -> Result> { + async fn trace_block_opcode_gas(&self, block_id: BlockId) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_block_opcode_gas(self, block_id).await?) + Ok(Self::trace_block_opcode_gas(self, block_id).await.map_err(Into::into)?) } } diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 8fc64c2ab708..161857552451 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -4,7 +4,7 @@ use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. /// /// See [`Pipeline::run_loop`](crate::Pipeline::run_loop) for more information. -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] pub enum ControlFlow { /// An unwind was requested and must be performed before continuing. Unwind { diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index 99de4a06b278..baa9b0f3fcda 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -190,20 +190,22 @@ where /// # Panics /// /// Panics if the stage is not in this set. + #[track_caller] pub fn disable(mut self, stage_id: StageId) -> Self { - let entry = - self.stages.get_mut(&stage_id).expect("Cannot disable a stage that is not in the set."); + let entry = self + .stages + .get_mut(&stage_id) + .unwrap_or_else(|| panic!("Cannot disable a stage that is not in the set: {stage_id}")); entry.enabled = false; self } /// Disables all given stages. See [`disable`](Self::disable). + /// + /// If any of the stages is not in this set, it is ignored. pub fn disable_all(mut self, stages: &[StageId]) -> Self { for stage_id in stages { - let entry = self - .stages - .get_mut(stage_id) - .expect("Cannot disable a stage that is not in the set."); + let Some(entry) = self.stages.get_mut(stage_id) else { continue }; entry.enabled = false; } self @@ -212,6 +214,7 @@ where /// Disables the given stage if the given closure returns true. /// /// See [`Self::disable`] + #[track_caller] pub fn disable_if(self, stage_id: StageId, f: F) -> Self where F: FnOnce() -> bool, @@ -225,6 +228,7 @@ where /// Disables all given stages if the given closure returns true. 
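The `#[track_caller]` attribute added to `disable` and its `disable_*` wrappers makes the panic location point at the external call site rather than at the stage-set internals, which is what makes the new `unwrap_or_else(|| panic!(..))` message actionable. A minimal sketch of the behaviour with a simplified stage set:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct StageSetSketch {
    // Simplified: stage id -> enabled flag.
    stages: HashMap<String, bool>,
}

impl StageSetSketch {
    fn with_stage(mut self, id: &str) -> Self {
        self.stages.insert(id.to_string(), true);
        self
    }

    // `#[track_caller]` propagates the caller's location into the panic, so
    // the report names the misconfigured pipeline line, not this body.
    #[track_caller]
    fn disable(mut self, stage_id: &str) -> Self {
        let entry = self.stages.get_mut(stage_id).unwrap_or_else(|| {
            panic!("Cannot disable a stage that is not in the set: {stage_id}")
        });
        *entry = false;
        self
    }
}

fn main() {
    let set = StageSetSketch::default().with_stage("Headers");
    // Panics, and the reported location is this line:
    let _ = set.disable("Bodies");
}
```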
/// /// See [`Self::disable`] + #[track_caller] pub fn disable_all_if(self, stages: &[StageId], f: F) -> Self where F: FnOnce() -> bool, diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 757f4dcaf26a..fce1df25cf20 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -33,6 +33,7 @@ reth-storage-errors.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } +reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 0f2dd2acf692..e5ec504ecd4b 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -6,6 +6,7 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, Address, SealedBlock, B256, U256}; +use reth_provider::TrieWriter; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -26,6 +27,7 @@ mod constants; mod account_hashing; pub use account_hashing::*; use reth_stages_api::{ExecInput, Stage, UnwindInput}; +use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); @@ -138,12 +140,10 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let offset = transitions.len() as u64; + let provider_rw = db.factory.provider_rw().unwrap(); db.insert_changesets(transitions, None).unwrap(); - db.commit(|tx| { - updates.write_to_database(tx)?; - Ok(()) - }) - .unwrap(); + provider_rw.write_trie_updates(&updates).unwrap(); + provider_rw.commit().unwrap(); let (transitions, final_state) = random_changeset_range( &mut rng, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 6776597a6065..cd42dd12601d 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -206,7 +206,7 @@ impl Stage for BodyStage { // Write transactions for transaction in block.body { let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, transaction.into())?; + .append_transaction(next_tx_num, &transaction.into())?; if appended_tx_number != next_tx_num { // This scenario indicates a critical error in the logic of adding new @@ -740,7 +740,7 @@ mod tests { body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); static_file_producer - .append_transaction(tx_num, transaction.into()) + .append_transaction(tx_num, &transaction.into()) .map(drop) })?; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index f526a030a198..43eaf45d5745 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -10,6 +10,7 @@ use reth_primitives::{BlockNumber, Header, StaticFileSegment}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + writer::StorageWriter, BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, TransactionVariant, }; @@ -358,8 +359,11 @@ where } let time = Instant::now(); + // write output - state.write_to_storage(provider, static_file_producer, OriginalValuesKnown::Yes)?; + let mut writer = 
StorageWriter::new(Some(provider), static_file_producer); + writer.write_to_storage(state, OriginalValuesKnown::Yes)?; + let db_write_duration = time.elapsed(); debug!( target: "sync::stages::execution", diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 662f1d1a7728..dbefa4b0e483 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -535,7 +535,7 @@ mod tests { storage_cursor.delete_current()?; } - if entry.value != U256::ZERO { + if !entry.value.is_zero() { storage_cursor.upsert(bn_address.address(), entry)?; } } diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 6b326034f7d2..46130d76013d 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -138,7 +138,7 @@ where })?; // Append to Headers segment - writer.append_header(header, td, header_hash)?; + writer.append_header(&header, td, &header_hash)?; } info!(target: "sync::stages::headers", total = total_headers, "Writing headers hash index"); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 9bbd68ed3515..f85ef565f4bf 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -7,14 +7,15 @@ use reth_db_api::{ }; use reth_primitives::{BlockNumber, GotExpected, SealedHeader, B256}; use reth_provider::{ - DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, - StageCheckpointWriter, StatsReader, + writer::StorageWriter, DatabaseProviderRW, HeaderProvider, ProviderError, + StageCheckpointReader, StageCheckpointWriter, StatsReader, }; use reth_stages_api::{ BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress, StoredSubNode}; +use reth_trie_db::DatabaseStateRoot; use std::fmt::Debug; use tracing::*; @@ -217,7 +218,8 @@ impl Stage for MerkleStage { })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - updates.write_to_database(tx)?; + let writer = StorageWriter::new(Some(provider), None); + writer.write_trie_updates(&updates)?; let checkpoint = MerkleCheckpoint::new( to_block, @@ -237,7 +239,8 @@ impl Stage for MerkleStage { }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - updates.write_to_database(tx)?; + let writer = StorageWriter::new(Some(provider), None); + writer.write_trie_updates(&updates)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -252,7 +255,8 @@ impl Stage for MerkleStage { error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - updates.write_to_database(provider.tx_ref())?; + let writer = StorageWriter::new(Some(provider), None); + writer.write_trie_updates(&updates)?; let total_hashed_entries = (provider.count_entries::()? + provider.count_entries::()?) @@ -325,7 +329,8 @@ impl Stage for MerkleStage { validate_state_root(block_root, target.seal_slow(), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
- updates.write_to_database(provider.tx_ref())?; + let writer = StorageWriter::new(Some(provider), None); + writer.write_trie_updates(&updates)?; // TODO(alexey): update entities checkpoint } else { @@ -562,7 +567,7 @@ mod tests { } let storage = storage_entries .into_iter() - .filter(|v| v.value != U256::ZERO) + .filter(|v| !v.value.is_zero()) .map(|v| (v.key, v.value)) .collect::>(); accounts.insert(key, (account, storage)); @@ -580,7 +585,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); writer.commit().unwrap(); - writer.append_header(last_header, U256::ZERO, hash).unwrap(); + writer.append_header(&last_header, U256::ZERO, &hash).unwrap(); writer.commit().unwrap(); Ok(blocks) @@ -636,7 +641,7 @@ mod tests { storage_cursor.delete_current().unwrap(); } - if value != U256::ZERO { + if !value.is_zero() { let storage_entry = StorageEntry { key: hashed_slot, value }; storage_cursor.upsert(hashed_address, storage_entry).unwrap(); } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 3b623c358e55..15e88a284011 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -186,7 +186,7 @@ where Ok(()) } -/// Shard and insert the indice list according to [`LoadMode`] and its length. +/// Shard and insert the indices list according to [`LoadMode`] and its length. pub(crate) fn load_indices( cursor: &mut C, partial_key: P, diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 8f72b5aab225..0ee61355e64c 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -156,11 +156,11 @@ impl TestStageDB { for block_number in 0..header.number { let mut prev = header.clone().unseal(); prev.number = block_number; - writer.append_header(prev, U256::ZERO, B256::ZERO)?; + writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } } - writer.append_header(header.header().clone(), td, header.hash())?; + writer.append_header(header.header(), td, &header.hash())?; } else { tx.put::(header.number, header.hash())?; tx.put::(header.number, td.into())?; @@ -266,7 +266,7 @@ impl TestStageDB { let res = block.body.iter().try_for_each(|body_tx| { if let Some(txs_writer) = &mut txs_writer { - txs_writer.append_transaction(next_tx_num, body_tx.clone().into())?; + txs_writer.append_transaction(next_tx_num, &body_tx.clone().into())?; } else { tx.put::(next_tx_num, body_tx.clone().into())? } @@ -386,7 +386,7 @@ impl TestStageDB { tx.put::(hashed_address, account)?; // Insert into storage tables. 
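A pattern visible in the merkle and execution stage hunks above: data types such as `TrieUpdates` no longer persist themselves against a raw transaction (`updates.write_to_database(tx)`); instead a `StorageWriter` is built over an optional database provider and an optional static-file producer and owns the write. A rough sketch of that inversion under placeholder types, not the reth API:

```rust
// Placeholder destinations standing in for a database provider and a
// static-file producer; either may be absent depending on the call site.
#[derive(Default)]
struct Db(Vec<String>);
#[derive(Default)]
struct StaticFiles(Vec<String>);

struct TrieUpdates(Vec<String>);

struct StorageWriterSketch<'a> {
    database: Option<&'a mut Db>,
    static_files: Option<&'a mut StaticFiles>,
}

impl<'a> StorageWriterSketch<'a> {
    fn new(database: Option<&'a mut Db>, static_files: Option<&'a mut StaticFiles>) -> Self {
        Self { database, static_files }
    }

    // The data type no longer knows how to write itself; the writer decides
    // which destination receives it and reports how much was flushed. Other
    // payloads (e.g. receipts) would target `static_files` when present.
    fn write_trie_updates(&mut self, updates: &TrieUpdates) -> usize {
        if let Some(db) = self.database.as_mut() {
            db.0.extend(updates.0.iter().cloned());
        }
        updates.0.len()
    }
}

fn main() {
    let mut db = Db::default();
    let updates = TrieUpdates(vec!["node-a".into(), "node-b".into()]);
    let mut writer = StorageWriterSketch::new(Some(&mut db), None);
    assert_eq!(writer.write_trie_updates(&updates), 2);
    println!("db now holds {:?}", db.0);
}
```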
- storage.into_iter().filter(|e| e.value != U256::ZERO).try_for_each(|entry| { + storage.into_iter().filter(|e| !e.value.is_zero()).try_for_each(|entry| { let hashed_entry = StorageEntry { key: keccak256(entry.key), ..entry }; let mut cursor = tx.cursor_dup_write::()?; diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 76bb9f4292c2..54b14b335cb9 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -23,7 +23,6 @@ serde.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 5824d1d1ac7d..3212c0cd8894 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -50,7 +50,7 @@ impl Segment for Headers { debug_assert_eq!(header_td_block, canonical_header_block); let _static_file_block = - static_file_writer.append_header(header, header_td.0, canonical_header)?; + static_file_writer.append_header(&header, header_td.0, &canonical_header)?; debug_assert_eq!(_static_file_block, header_block); } diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 4361f8ca661e..19b6aeb579a8 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -47,7 +47,7 @@ impl Segment for Transactions { for entry in transactions_walker { let (tx_number, transaction) = entry?; - static_file_writer.append_transaction(tx_number, transaction)?; + static_file_writer.append_transaction(tx_number, &transaction)?; } } diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 4789ff6e1971..dea9972816a8 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -38,7 +38,6 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true [features] diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 7286e03f2da4..2dd7b8713ca4 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives.workspace = true +reth-primitives = { workspace = true, features = ["reth-codec"] } reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-storage-errors.workspace = true @@ -58,7 +58,6 @@ iai-callgrind.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -proptest-derive.workspace = true paste.workspace = true diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index d80236defd32..5c453df1cf67 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -16,6 +16,7 @@ reth-db-api.workspace = true reth-provider.workspace = true reth-config.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true diff --git 
a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index bbaf61cf3e64..63a1760ea5c0 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -11,15 +11,17 @@ use reth_primitives::{ Account, Address, Bytecode, Receipts, StaticFileSegment, StorageEntry, B256, U256, }; use reth_provider::{ - bundle_state::{BundleStateInit, RevertsInit}, errors::provider::ProviderResult, providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, BlockNumReader, ChainSpecProvider, DatabaseProviderRW, ExecutionOutcome, - HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, ProviderFactory, - StageCheckpointWriter, StateWriter, StaticFileProviderFactory, + writer::StorageWriter, + BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DatabaseProviderRW, + ExecutionOutcome, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, + ProviderFactory, RevertsInit, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, + TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; +use reth_trie_db::DatabaseStateRoot; use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, @@ -64,7 +66,7 @@ pub enum InitDatabaseError { #[error( "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" )] - SateRootMismatch { + StateRootMismatch { /// Expected state root. expected_state_root: B256, /// Actual state root. @@ -201,7 +203,8 @@ pub fn insert_state<'a, 'b, DB: Database>( Vec::new(), ); - execution_outcome.write_to_storage(provider, None, OriginalValuesKnown::Yes)?; + let mut storage_writer = StorageWriter::new(Some(provider), None); + storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; trace!(target: "reth::cli", "Inserted state"); @@ -282,7 +285,7 @@ pub fn insert_genesis_header( Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { let (difficulty, hash) = (header.difficulty, block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(header, difficulty, hash)?; + writer.append_header(&header, difficulty, &hash)?; } Ok(Some(_)) => {} Err(e) => return Err(e), @@ -333,7 +336,7 @@ pub fn init_from_state_dump( "Computed state root does not match state root in state dump" ); - Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? + Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })? } else { info!(target: "reth::cli", ?computed_state_root, @@ -461,7 +464,7 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: .root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updated_len = updates.write_to_database(tx)?; + let updated_len = provider.write_trie_updates(&updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", @@ -481,7 +484,7 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: } } StateRootProgress::Complete(root, _, updates) => { - let updated_len = updates.write_to_database(tx)?; + let updated_len = provider.write_trie_updates(&updates)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 117ec5ccc7b6..619942f38ae1 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-db-api.workspace = true -reth-primitives.workspace = true +reth-primitives = { workspace = true, features = ["reth-codec"] } reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true @@ -35,21 +35,21 @@ eyre = { workspace = true, optional = true } serde = { workspace = true, default-features = false } # metrics -reth-metrics.workspace = true -metrics.workspace = true +reth-metrics = { workspace = true, optional = true } +metrics = { workspace = true, optional = true } # misc bytes.workspace = true -page_size = "0.6.0" +page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true paste.workspace = true -rustc-hash.workspace = true +rustc-hash = { workspace = true, optional = true } sysinfo = { version = "0.30", default-features = false } # arbitrary utils -strum = { workspace = true, features = ["derive"] } +strum = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # reth libs with arbitrary @@ -77,7 +77,15 @@ assert_matches.workspace = true [features] default = ["mdbx"] -mdbx = ["dep:reth-libmdbx", "dep:eyre"] +mdbx = [ + "dep:reth-libmdbx", + "dep:eyre", + "dep:page_size", + "reth-metrics", + "dep:metrics", + "dep:strum", + "dep:rustc-hash", +] test-utils = ["dep:tempfile", "arbitrary"] bench = [] arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index e5414b574328..c16f2b73c4e9 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -17,9 +17,11 @@ mod implementation; pub mod lockfile; +#[cfg(feature = "mdbx")] mod metrics; pub mod static_file; pub mod tables; +#[cfg(feature = "mdbx")] mod utils; pub mod version; @@ -28,6 +30,7 @@ pub mod mdbx; pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation}; pub use tables::*; +#[cfg(feature = "mdbx")] pub use utils::is_database_empty; #[cfg(feature = "mdbx")] diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index c3c0d0b3f8ae..fb64fa86fcf8 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -16,6 +16,7 @@ pub mod codecs; mod raw; pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; +#[cfg(feature = "mdbx")] pub(crate) mod utils; use reth_db_api::{ diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index d8e699f8df40..5ef6f15771ef 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true workspace = true [dependencies] +alloy-rlp.workspace = true reth-primitives.workspace = 
true reth-fs-util.workspace = true diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index c3d47aa0bd1b..0979156ca042 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -21,6 +21,9 @@ pub enum ProviderError { /// Database error. #[error(transparent)] Database(#[from] crate::db::DatabaseError), + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), /// Filesystem path error. #[error("{0}")] FsPathError(String), diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index bca77d0c5486..48058084e7af 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true -reth-primitives.workspace = true +reth-primitives = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -27,9 +27,11 @@ reth-db-api.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-trie = { workspace = true, features = ["metrics"] } +reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true reth-evm.workspace = true +reth-chain-state.workspace = true # ethereum alloy-rpc-types-engine.workspace = true @@ -37,7 +39,6 @@ revm.workspace = true # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } -tokio-stream = { workspace = true, features = ["sync"] } # tracing tracing.workspace = true @@ -48,9 +49,7 @@ metrics.workspace = true # misc auto_impl.workspace = true -derive_more.workspace = true itertools.workspace = true -pin-project.workspace = true parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true @@ -76,4 +75,4 @@ rand.workspace = true [features] optimism = ["reth-primitives/optimism", "reth-execution-types/optimism"] serde = ["reth-execution-types/serde"] -test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] +test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils", "reth-trie/test-utils", "reth-chain-state/test-utils", "reth-db/test-utils"] diff --git a/crates/storage/provider/src/bundle_state/execution_outcome.rs b/crates/storage/provider/src/bundle_state/execution_outcome.rs deleted file mode 100644 index ebb69201ea89..000000000000 --- a/crates/storage/provider/src/bundle_state/execution_outcome.rs +++ /dev/null @@ -1,1036 +0,0 @@ -use crate::{ - providers::StaticFileProviderRWRefMut, writer::StorageWriter, DatabaseProviderRW, StateChanges, - StateReverts, StateWriter, -}; -use reth_db::Database; -pub use reth_execution_types::*; -use reth_storage_errors::provider::ProviderResult; -pub use revm::db::states::OriginalValuesKnown; - -impl StateWriter for ExecutionOutcome { - fn write_to_storage( - self, - provider_rw: &DatabaseProviderRW, - static_file_producer: Option>, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> - where - DB: Database, - { - let (plain_state, reverts) = self.bundle.into_plain_state_and_reverts(is_value_known); - - StateReverts(reverts).write_to_db(provider_rw, self.first_block)?; - - StorageWriter::new(Some(provider_rw), static_file_producer) - .append_receipts_from_blocks(self.first_block, self.receipts.into_iter())?; - - 
StateChanges(plain_state).write_to_db(provider_rw)?; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{test_utils::create_test_provider_factory, AccountReader}; - use reth_db::{tables, test_utils::create_test_rw_db}; - use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO}, - database::Database, - models::{AccountBeforeTx, BlockNumberAddress}, - transaction::{DbTx, DbTxMut}, - }; - use reth_primitives::{ - keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, - }; - use reth_trie::{test_utils::state_root, StateRoot}; - use revm::{ - db::{ - states::{ - bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, - }, - BundleState, EmptyDB, - }, - primitives::{ - Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, EvmStorageSlot, - }, - DatabaseCommit, State, - }; - use std::collections::{BTreeMap, HashMap}; - - #[test] - fn write_to_db_account_info() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address_a = Address::ZERO; - let address_b = Address::repeat_byte(0xff); - - let account_a = RevmAccountInfo { balance: U256::from(1), nonce: 1, ..Default::default() }; - let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; - let account_b_changed = - RevmAccountInfo { balance: U256::from(3), nonce: 3, ..Default::default() }; - - let mut state = State::builder().with_bundle_update().build(); - state.insert_not_existing(address_a); - state.insert_account(address_b, account_b.clone()); - - // 0x00.. is created - state.commit(HashMap::from([( - address_a, - RevmAccount { - info: account_a.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - storage: HashMap::default(), - }, - )])); - - // 0xff.. is changed (balance + 1, nonce + 1) - state.commit(HashMap::from([( - address_b, - RevmAccount { - info: account_b_changed.clone(), - status: AccountStatus::Touched, - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - let mut revm_bundle_state = state.take_bundle(); - - // Write plain state and reverts separately. 
- let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); - assert!(plain_state.storage.is_empty()); - assert!(plain_state.contracts.is_empty()); - StateChanges(plain_state) - .write_to_db(&provider) - .expect("Could not write plain state to DB"); - - assert_eq!(reverts.storage, [[]]); - StateReverts(reverts).write_to_db(&provider, 1).expect("Could not write reverts to DB"); - - let reth_account_a = account_a.into(); - let reth_account_b = account_b.into(); - let reth_account_b_changed = account_b_changed.clone().into(); - - // Check plain state - assert_eq!( - provider.basic_account(address_a).expect("Could not read account state"), - Some(reth_account_a), - "Account A state is wrong" - ); - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - Some(reth_account_b_changed), - "Account B state is wrong" - ); - - // Check change set - let mut changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(1).expect("Could not read account change set"), - Some((1, AccountBeforeTx { address: address_a, info: None })), - "Account A changeset is wrong" - ); - assert_eq!( - changeset_cursor.next_dup().expect("Changeset table is malformed"), - Some((1, AccountBeforeTx { address: address_b, info: Some(reth_account_b) })), - "Account B changeset is wrong" - ); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account(address_b, account_b_changed.clone()); - - // 0xff.. is destroyed - state.commit(HashMap::from([( - address_b, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_b_changed, - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - let mut revm_bundle_state = state.take_bundle(); - - // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); - // Account B selfdestructed so flag for it should be present. 
- assert_eq!( - plain_state.storage, - [PlainStorageChangeset { address: address_b, wipe_storage: true, storage: vec![] }] - ); - assert!(plain_state.contracts.is_empty()); - StateChanges(plain_state) - .write_to_db(&provider) - .expect("Could not write plain state to DB"); - - assert_eq!( - reverts.storage, - [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]] - ); - StateReverts(reverts).write_to_db(&provider, 2).expect("Could not write reverts to DB"); - - // Check new plain state for account B - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - None, - "Account B should be deleted" - ); - - // Check change set - assert_eq!( - changeset_cursor.seek_exact(2).expect("Could not read account change set"), - Some((2, AccountBeforeTx { address: address_b, info: Some(reth_account_b_changed) })), - "Account B changeset is wrong after deletion" - ); - } - - #[test] - fn write_to_db_storage() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address_a = Address::ZERO; - let address_b = Address::repeat_byte(0xff); - - let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; - - let mut state = State::builder().with_bundle_update().build(); - state.insert_not_existing(address_a); - state.insert_account_with_storage( - address_b, - account_b.clone(), - HashMap::from([(U256::from(1), U256::from(1))]), - ); - - state.commit(HashMap::from([ - ( - address_a, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: RevmAccountInfo::default(), - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::from(0), - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - ), - ( - address_b, - RevmAccount { - status: AccountStatus::Touched, - info: account_b, - // 0x01 => 1 => 2 - storage: HashMap::from([( - U256::from(1), - EvmStorageSlot { - present_value: U256::from(2), - original_value: U256::from(1), - ..Default::default() - }, - )]), - }, - ), - ])); - - state.merge_transitions(BundleRetention::Reverts); - - ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()) - .write_to_storage(&provider, None, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - // Check plain storage state - let mut storage_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - Some((address_a, StorageEntry { key: B256::ZERO, value: U256::from(1) })), - "Slot 0 for account A should be 1" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - Some(( - address_a, - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account A should only have 2 storage slots" - ); - - assert_eq!( - storage_cursor.seek_exact(address_b).unwrap(), - Some(( - address_b, - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account B should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account B should only have 1 storage slot" - ); - - // Check change set - let mut changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - 
.expect("Could not open storage changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: B256::ZERO, value: U256::from(0) } - )), - "Slot 0 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(0) } - )), - "Slot 1 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(), - Some(( - BlockNumberAddress((1, address_b)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(1) } - )), - "Slot 1 for account B should have changed from 1" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account B should only be in the changeset 1 time" - ); - - // Delete account A - let mut state = State::builder().with_bundle_update().build(); - state.insert_account(address_a, RevmAccountInfo::default()); - - state.commit(HashMap::from([( - address_a, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: RevmAccountInfo::default(), - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()) - .write_to_storage(&provider, None, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - None, - "Account A should have no storage slots after deletion" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: B256::ZERO, value: U256::from(1) } - )), - "Slot 0 for account A should have changed from 1 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should have changed from 2 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times on deletion" - ); - } - - #[test] - fn write_to_db_multiple_selfdestructs() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address1 = Address::random(); - let account_info = RevmAccountInfo { nonce: 1, ..Default::default() }; - - // Block #0: initial state. 
- let mut init_state = State::builder().with_bundle_update().build(); - init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( - address1, - RevmAccount { - info: account_info.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - )])); - init_state.merge_transitions(BundleRetention::Reverts); - ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()) - .write_to_storage(&provider, None, OriginalValuesKnown::Yes) - .expect("Could not write init bundle state to DB"); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account_with_storage( - address1, - account_info.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), - ); - - // Block #1: change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info.clone(), - // 0x00 => 1 => 2 - storage: HashMap::from([( - U256::ZERO, - EvmStorageSlot { - original_value: U256::from(1), - present_value: U256::from(2), - ..Default::default() - }, - )]), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #2: destroy account. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #3: re-create account and change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #4: change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info.clone(), - // 0x00 => 0 => 2 - // 0x02 => 0 => 4 - // 0x06 => 0 => 6 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ( - U256::from(2), - EvmStorageSlot { present_value: U256::from(4), ..Default::default() }, - ), - ( - U256::from(6), - EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, - ), - ]), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #5: Destroy account again. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #6: Create, change, destroy and re-create in the same block. 
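The four `state.commit` calls that open block #6 below are merged by a single `merge_transitions` call: `commit` records one transition, and `merge_transitions` is what closes a block. A minimal sketch of that per-block pattern, written against the same revm types these tests use (the import paths are assumptions, not taken from this patch):

    use revm::db::{states::bundle_state::BundleRetention, State};
    use revm::primitives::{Account, AccountInfo, AccountStatus, Address, HashMap};
    use revm::DatabaseCommit;

    fn main() {
        let address = Address::ZERO;
        let mut state = State::builder().with_bundle_update().build();
        state.insert_not_existing(address);

        // Two commits before the merge: both transitions land in the same block.
        state.commit(HashMap::from([(
            address,
            Account {
                status: AccountStatus::Touched | AccountStatus::Created,
                info: AccountInfo::default(),
                storage: HashMap::default(),
            },
        )]));
        state.commit(HashMap::from([(
            address,
            Account {
                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
                info: AccountInfo::default(),
                storage: HashMap::default(),
            },
        )]));
        state.merge_transitions(BundleRetention::Reverts); // closes the block

        // Exactly one block's worth of reverts was retained.
        assert_eq!(state.take_bundle().reverts.len(), 1);
    }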
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched | AccountStatus::Created,
-                info: account_info.clone(),
-                storage: HashMap::default(),
-            },
-        )]));
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched,
-                info: account_info.clone(),
-                // 0x00 => 0 => 2
-                storage: HashMap::from([(
-                    U256::ZERO,
-                    EvmStorageSlot { present_value: U256::from(2), ..Default::default() },
-                )]),
-            },
-        )]));
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
-                info: account_info.clone(),
-                storage: HashMap::default(),
-            },
-        )]));
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched | AccountStatus::Created,
-                info: account_info.clone(),
-                storage: HashMap::default(),
-            },
-        )]));
-        state.merge_transitions(BundleRetention::Reverts);
-
-        // Block #7: Change storage.
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched,
-                info: account_info,
-                // 0x00 => 0 => 9
-                storage: HashMap::from([(
-                    U256::ZERO,
-                    EvmStorageSlot { present_value: U256::from(9), ..Default::default() },
-                )]),
-            },
-        )]));
-        state.merge_transitions(BundleRetention::Reverts);
-
-        let bundle = state.take_bundle();
-
-        ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new())
-            .write_to_storage(&provider, None, OriginalValuesKnown::Yes)
-            .expect("Could not write bundle state to DB");
-
-        let mut storage_changeset_cursor = provider
-            .tx_ref()
-            .cursor_dup_read::<tables::StorageChangeSets>()
-            .expect("Could not open storage changeset cursor");
-        let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap();
-
-        // Iterate through all storage changes
-
-        // Block <number>
-        // <slot>: <value before change>
-        // ...
-
-        // Block #0
-        // 0x00: 0
-        // 0x01: 0
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((0, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((0, address1)),
-                StorageEntry { key: B256::with_last_byte(1), value: U256::ZERO }
-            )))
-        );
-
-        // Block #1
-        // 0x00: 1
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((1, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) }
-            )))
-        );
-
-        // Block #2 (destroyed)
-        // 0x00: 2
-        // 0x01: 2
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((2, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((2, address1)),
-                StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) }
-            )))
-        );
-
-        // Block #3
-        // no storage changes
-
-        // Block #4
-        // 0x00: 0
-        // 0x02: 0
-        // 0x06: 0
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((4, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((4, address1)),
-                StorageEntry { key: B256::with_last_byte(2), value: U256::ZERO }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((4, address1)),
-                StorageEntry { key: B256::with_last_byte(6), value: U256::ZERO }
-            )))
-        );
-
-        // Block #5 (destroyed)
-        // 0x00: 2
-        // 0x02: 4
-        // 0x06: 6
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((5, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((5, address1)),
-                StorageEntry { key: B256::with_last_byte(2), value: U256::from(4) }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((5, address1)),
-                StorageEntry { key: B256::with_last_byte(6), value: U256::from(6) }
-            )))
-        );
-
-        // Block #6
-        // no storage changes (only intra-block changes)
-
-        // Block #7
-        // 0x00: 0
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((7, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO }
-            )))
-        );
-        assert_eq!(storage_changes.next(), None);
-    }
-
-    #[test]
-    fn storage_change_after_selfdestruct_within_block() {
-        let factory = create_test_provider_factory();
-        let provider = factory.provider_rw().unwrap();
-
-        let address1 = Address::random();
-        let account1 = RevmAccountInfo { nonce: 1, ..Default::default() };
-
-        // Block #0: initial state.
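One remark before the next test's block #0 setup below: every entry the walk above consumes comes from `StorageChangeSets`, which is keyed by `BlockNumberAddress((block, address))` with the slot as the dup-sort subkey, and stores each slot's value *before* the block touched it; that is why the destroyed blocks (#2 and #5) record the full pre-wipe storage. A hedged sketch of reading one block's reverts (the helper and its error plumbing are illustrative, not part of this patch):

    use reth_db::{tables, Database};
    use reth_db_api::{cursor::DbCursorRO, models::BlockNumberAddress};
    use reth_provider::DatabaseProviderRW;
    use reth_storage_errors::db::DatabaseError;

    /// Print every storage revert recorded for `block` (hypothetical helper).
    fn dump_block_storage_reverts<DB: Database>(
        provider: &DatabaseProviderRW<DB>,
        block: u64,
    ) -> Result<(), DatabaseError> {
        let mut cursor = provider.tx_ref().cursor_dup_read::<tables::StorageChangeSets>()?;
        for entry in cursor.walk_range(BlockNumberAddress::range(block..=block))? {
            let (BlockNumberAddress((number, address)), revert) = entry?;
            // `revert.value` is the slot's value *before* block `number` changed it.
            println!("block {number}, {address}: slot {} was {}", revert.key, revert.value);
        }
        Ok(())
    }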
- let mut init_state = State::builder().with_bundle_update().build(); - init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( - address1, - RevmAccount { - info: account1.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - )])); - init_state.merge_transitions(BundleRetention::Reverts); - ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()) - .write_to_storage(&provider, None, OriginalValuesKnown::Yes) - .expect("Could not write init bundle state to DB"); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account_with_storage( - address1, - account1.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), - ); - - // Block #1: Destroy, re-create, change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account1.clone(), - storage: HashMap::default(), - }, - )])); - - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1.clone(), - storage: HashMap::default(), - }, - )])); - - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account1, - // 0x01 => 0 => 5 - storage: HashMap::from([( - U256::from(1), - EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, - )]), - }, - )])); - - // Commit block #1 changes to the database. 
-        state.merge_transitions(BundleRetention::Reverts);
-        ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new())
-            .write_to_storage(&provider, None, OriginalValuesKnown::Yes)
-            .expect("Could not write bundle state to DB");
-
-        let mut storage_changeset_cursor = provider
-            .tx_ref()
-            .cursor_dup_read::<tables::StorageChangeSets>()
-            .expect("Could not open storage changeset cursor");
-        let range = BlockNumberAddress::range(1..=1);
-        let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap();
-
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((1, address1)),
-                StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) }
-            )))
-        );
-        assert_eq!(
-            storage_changes.next(),
-            Some(Ok((
-                BlockNumberAddress((1, address1)),
-                StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) }
-            )))
-        );
-        assert_eq!(storage_changes.next(), None);
-    }
-
-    #[test]
-    fn revert_to_indices() {
-        let base = ExecutionOutcome {
-            bundle: BundleState::default(),
-            receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(),
-            first_block: 10,
-            requests: Vec::new(),
-        };
-
-        let mut this = base.clone();
-        assert!(this.revert_to(10));
-        assert_eq!(this.receipts.len(), 1);
-
-        let mut this = base.clone();
-        assert!(!this.revert_to(9));
-        assert_eq!(this.receipts.len(), 7);
-
-        let mut this = base.clone();
-        assert!(this.revert_to(15));
-        assert_eq!(this.receipts.len(), 6);
-
-        let mut this = base.clone();
-        assert!(this.revert_to(16));
-        assert_eq!(this.receipts.len(), 7);
-
-        let mut this = base;
-        assert!(!this.revert_to(17));
-        assert_eq!(this.receipts.len(), 7);
-    }
-
-    #[test]
-    fn bundle_state_state_root() {
-        type PreState = BTreeMap<Address, (Account, BTreeMap<B256, U256>)>;
-        let mut prestate: PreState = (0..10)
-            .map(|key| {
-                let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None };
-                let storage =
-                    (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect();
-                (Address::with_last_byte(key), (account, storage))
-            })
-            .collect();
-
-        let db = create_test_rw_db();
-
-        // insert initial state to the database
-        db.update(|tx| {
-            for (address, (account, storage)) in &prestate {
-                let hashed_address = keccak256(address);
-                tx.put::<tables::HashedAccounts>(hashed_address, *account).unwrap();
-                for (slot, value) in storage {
-                    tx.put::<tables::HashedStorages>(
-                        hashed_address,
-                        StorageEntry { key: keccak256(slot), value: *value },
-                    )
-                    .unwrap();
-                }
-            }
-
-            let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap();
-            updates.write_to_database(tx).unwrap();
-        })
-        .unwrap();
-
-        let tx = db.tx().unwrap();
-        let mut state = State::builder().with_bundle_update().build();
-
-        let assert_state_root = |state: &State<EmptyDB>, expected: &PreState, msg| {
-            assert_eq!(
-                ExecutionOutcome::new(
-                    state.bundle_state.clone(),
-                    Receipts::default(),
-                    0,
-                    Vec::new()
-                )
-                .hash_state_slow()
-                .state_root(&tx)
-                .unwrap(),
-                state_root(expected.clone().into_iter().map(|(address, (account, storage))| (
-                    address,
-                    (account, storage.into_iter())
-                ))),
-                "{msg}"
-            );
-        };
-
-        // database only state root is correct
-        assert_state_root(&state, &prestate, "empty");
-
-        // destroy account 1
-        let address1 = Address::with_last_byte(1);
-        let account1_old = prestate.remove(&address1).unwrap();
-        state.insert_account(address1, account1_old.0.into());
-        state.commit(HashMap::from([(
-            address1,
-            RevmAccount {
-                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
-                info: RevmAccountInfo::default(),
-                storage: HashMap::default(),
-            },
-        )]));
-        state.merge_transitions(BundleRetention::PlainState);
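(The state-root assertions continue below.) As a worked complement to the `revert_to_indices` test above: with `first_block = 10` and seven receipt vectors, the outcome covers blocks 10..=16, so `revert_to(15)` trims to six blocks while `revert_to(9)` and `revert_to(17)` are out of range and leave everything untouched. The same arithmetic as a standalone snippet (mirroring the test's field usage; import paths assumed):

    use reth_execution_types::ExecutionOutcome;
    use reth_primitives::Receipt;

    fn main() {
        // Blocks 10..=16, two receipts per block, as in the test above.
        let outcome = ExecutionOutcome {
            bundle: Default::default(),
            receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(),
            first_block: 10,
            requests: Vec::new(),
        };

        let mut trimmed = outcome.clone();
        // `revert_to(n)` drops all state after block `n` and reports success.
        assert!(trimmed.revert_to(15));
        assert_eq!(trimmed.receipts.len(), 6); // blocks 10..=15 remain
    }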
assert_state_root(&state, &prestate, "destroyed account"); - - // change slot 2 in account 2 - let address2 = Address::with_last_byte(2); - let slot2 = U256::from(2); - let slot2_key = B256::from(slot2); - let account2 = prestate.get_mut(&address2).unwrap(); - let account2_slot2_old_value = *account2.1.get(&slot2_key).unwrap(); - state.insert_account_with_storage( - address2, - account2.0.into(), - HashMap::from([(slot2, account2_slot2_old_value)]), - ); - - let account2_slot2_new_value = U256::from(100); - account2.1.insert(slot2_key, account2_slot2_new_value); - state.commit(HashMap::from([( - address2, - RevmAccount { - status: AccountStatus::Touched, - info: account2.0.into(), - storage: HashMap::from_iter([( - slot2, - EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), - )]), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed storage"); - - // change balance of account 3 - let address3 = Address::with_last_byte(3); - let account3 = prestate.get_mut(&address3).unwrap(); - state.insert_account(address3, account3.0.into()); - - account3.0.balance = U256::from(24); - state.commit(HashMap::from([( - address3, - RevmAccount { - status: AccountStatus::Touched, - info: account3.0.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed balance"); - - // change nonce of account 4 - let address4 = Address::with_last_byte(4); - let account4 = prestate.get_mut(&address4).unwrap(); - state.insert_account(address4, account4.0.into()); - - account4.0.nonce = 128; - state.commit(HashMap::from([( - address4, - RevmAccount { - status: AccountStatus::Touched, - info: account4.0.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed nonce"); - - // recreate account 1 - let account1_new = - Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(B256::random()) }; - prestate.insert(address1, (account1_new, BTreeMap::default())); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1_new.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "recreated"); - - // update storage for account 1 - let slot20 = U256::from(20); - let slot20_key = B256::from(slot20); - let account1_slot20_value = U256::from(12345); - prestate.get_mut(&address1).unwrap().1.insert(slot20_key, account1_slot20_value); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1_new.into(), - storage: HashMap::from_iter([( - slot20, - EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), - )]), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "recreated changed storage"); - } - - #[test] - fn prepend_state() { - let address1 = Address::random(); - let address2 = Address::random(); - - let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; - let account1_changed = RevmAccountInfo { nonce: 1, ..Default::default() }; - let account2 = RevmAccountInfo { nonce: 1, ..Default::default() }; - - let present_state = BundleState::builder(2..=2) - .state_present_account_info(address1, account1_changed.clone()) - .build(); - 
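(The assertions on `present_state` continue below.) `prepend_state` layers an older `BundleState` underneath the newer one: values already present in the newer bundle win, accounts that exist only in the older bundle are filled in, and the newer outcome's receipts and reverts are left as they are. A self-contained sketch of that merge (import paths assumed):

    use reth_execution_types::ExecutionOutcome;
    use reth_primitives::Address;
    use revm::db::BundleState;
    use revm::primitives::AccountInfo;

    fn main() {
        let addr_a = Address::repeat_byte(1);
        let addr_b = Address::repeat_byte(2);

        let newer = BundleState::builder(2..=2)
            .state_present_account_info(addr_a, AccountInfo { nonce: 2, ..Default::default() })
            .build();
        let older = BundleState::builder(1..=1)
            .state_present_account_info(addr_a, AccountInfo { nonce: 1, ..Default::default() })
            .state_present_account_info(addr_b, AccountInfo { nonce: 1, ..Default::default() })
            .build();

        let mut outcome = ExecutionOutcome {
            bundle: newer,
            receipts: Default::default(),
            first_block: 2,
            requests: Vec::new(),
        };
        outcome.prepend_state(older);

        let merged = outcome.state();
        assert_eq!(merged.state.len(), 2); // addr_b was filled in
        // The newer value for addr_a was not overwritten by the older bundle.
        assert_eq!(merged.state.get(&addr_a).unwrap().info.as_ref().unwrap().nonce, 2);
    }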
assert_eq!(present_state.reverts.len(), 1); - let previous_state = BundleState::builder(1..=1) - .state_present_account_info(address1, account1) - .state_present_account_info(address2, account2.clone()) - .build(); - assert_eq!(previous_state.reverts.len(), 1); - - let mut test = ExecutionOutcome { - bundle: present_state, - receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), - first_block: 2, - requests: Vec::new(), - }; - - test.prepend_state(previous_state); - - assert_eq!(test.receipts.len(), 1); - let end_state = test.state(); - assert_eq!(end_state.state.len(), 2); - // reverts num should stay the same. - assert_eq!(end_state.reverts.len(), 1); - // account1 is not overwritten. - assert_eq!(end_state.state.get(&address1).unwrap().info, Some(account1_changed)); - // account2 got inserted - assert_eq!(end_state.state.get(&address2).unwrap().info, Some(account2)); - } -} diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 3dad9389f67d..58b76f1eacf7 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -1,10 +1,5 @@ //! Bundle state module. //! This module contains all the logic related to bundle state. -mod execution_outcome; -mod state_changes; mod state_reverts; - -pub use execution_outcome::{AccountRevertInit, BundleStateInit, OriginalValuesKnown, RevertsInit}; -pub use state_changes::StateChanges; -pub use state_reverts::{StateReverts, StorageRevertsIter}; +pub use state_reverts::StorageRevertsIter; diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs deleted file mode 100644 index ba9acfcccfa6..000000000000 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::DatabaseProviderRW; -use rayon::slice::ParallelSliceMut; -use reth_db::{tables, Database}; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, - transaction::DbTxMut, -}; -use reth_primitives::{Bytecode, StorageEntry, U256}; -use reth_storage_errors::db::DatabaseError; -use revm::db::states::{PlainStorageChangeset, StateChangeset}; - -/// A change to the state of the world. -#[derive(Debug, Default)] -pub struct StateChanges(pub StateChangeset); - -impl From for StateChanges { - fn from(revm: StateChangeset) -> Self { - Self(revm) - } -} - -impl StateChanges { - /// Write the bundle state to the database. - pub fn write_to_db(mut self, provider: &DatabaseProviderRW) -> Result<(), DatabaseError> - where - DB: Database, - { - // sort all entries so they can be written to database in more performant way. - // and take smaller memory footprint. - self.0.accounts.par_sort_by_key(|a| a.0); - self.0.storage.par_sort_by_key(|a| a.address); - self.0.contracts.par_sort_by_key(|a| a.0); - - // Write new account state - tracing::trace!(target: "provider::bundle_state", len = self.0.accounts.len(), "Writing new account state"); - let mut accounts_cursor = provider.tx_ref().cursor_write::()?; - // write account to database. 
- for (address, account) in self.0.accounts { - if let Some(account) = account { - tracing::trace!(target: "provider::bundle_state", ?address, "Updating plain state account"); - accounts_cursor.upsert(address, account.into())?; - } else if accounts_cursor.seek_exact(address)?.is_some() { - tracing::trace!(target: "provider::bundle_state", ?address, "Deleting plain state account"); - accounts_cursor.delete_current()?; - } - } - - // Write bytecode - tracing::trace!(target: "provider::bundle_state", len = self.0.contracts.len(), "Writing bytecodes"); - let mut bytecodes_cursor = provider.tx_ref().cursor_write::()?; - for (hash, bytecode) in self.0.contracts { - bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; - } - - // Write new storage state and wipe storage if needed. - tracing::trace!(target: "provider::bundle_state", len = self.0.storage.len(), "Writing new storage state"); - let mut storages_cursor = - provider.tx_ref().cursor_dup_write::()?; - for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage { - // Wiping of storage. - if wipe_storage && storages_cursor.seek_exact(address)?.is_some() { - storages_cursor.delete_current_duplicates()?; - } - // cast storages to B256. - let mut storage = storage - .into_iter() - .map(|(k, value)| StorageEntry { key: k.into(), value }) - .collect::>(); - // sort storage slots by key. - storage.par_sort_unstable_by_key(|a| a.key); - - for entry in storage { - tracing::trace!(target: "provider::bundle_state", ?address, ?entry.key, "Updating plain state storage"); - if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? { - if db_entry.key == entry.key { - storages_cursor.delete_current()?; - } - } - - if entry.value != U256::ZERO { - storages_cursor.upsert(address, entry)?; - } - } - } - - Ok(()) - } -} diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index b5bb77bc13a0..37d44cde51de 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,103 +1,7 @@ -use crate::DatabaseProviderRW; -use rayon::slice::ParallelSliceMut; -use reth_db::{tables, Database}; -use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, - models::{AccountBeforeTx, BlockNumberAddress}, - transaction::DbTxMut, -}; -use reth_primitives::{BlockNumber, StorageEntry, B256, U256}; -use reth_storage_errors::db::DatabaseError; -use revm::db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot}; +use reth_primitives::{B256, U256}; +use revm::db::states::RevertToSlot; use std::iter::Peekable; -/// Revert of the state. -#[derive(Debug, Default)] -pub struct StateReverts(pub PlainStateReverts); - -impl From for StateReverts { - fn from(revm: PlainStateReverts) -> Self { - Self(revm) - } -} - -impl StateReverts { - /// Write reverts to database. - /// - /// `Note::` Reverts will delete all wiped storage from plain state. 
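The deleted body below is the mechanism this note describes: for a wiped account, the pre-wipe plain storage is read back from the database and interleaved with the recorded reverts through `StorageRevertsIter` (which this refactor keeps and still exports from `bundle_state`). A hedged sketch of the iterator's merge semantics (import paths are assumptions):

    use reth_primitives::{B256, U256};
    use reth_provider::bundle_state::StorageRevertsIter;
    use revm::db::states::RevertToSlot;

    fn main() {
        // Reverts recorded for this block; slot 0x02 exists only in the
        // pre-wipe storage read back from the plain state table.
        let reverts = vec![
            (B256::with_last_byte(1), RevertToSlot::Some(U256::from(7))),
            (B256::with_last_byte(3), RevertToSlot::Destroyed),
        ];
        let wiped = vec![
            (B256::with_last_byte(1), U256::from(9)),
            (B256::with_last_byte(2), U256::from(5)),
        ];

        // Keys come out sorted; on a key collision the recorded revert wins,
        // and `Destroyed` slots fall back to the wiped (database) value.
        let merged: Vec<(B256, U256)> = StorageRevertsIter::new(reverts, wiped).collect();
        assert_eq!(merged[0], (B256::with_last_byte(1), U256::from(7)));
        assert_eq!(merged[1], (B256::with_last_byte(2), U256::from(5)));
    }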
- pub fn write_to_db( - self, - provider: &DatabaseProviderRW, - first_block: BlockNumber, - ) -> Result<(), DatabaseError> - where - DB: Database, - { - // Write storage changes - tracing::trace!(target: "provider::reverts", "Writing storage changes"); - let mut storages_cursor = - provider.tx_ref().cursor_dup_write::()?; - let mut storage_changeset_cursor = - provider.tx_ref().cursor_dup_write::()?; - for (block_index, mut storage_changes) in self.0.storage.into_iter().enumerate() { - let block_number = first_block + block_index as BlockNumber; - - tracing::trace!(target: "provider::reverts", block_number, "Writing block change"); - // sort changes by address. - storage_changes.par_sort_unstable_by_key(|a| a.address); - for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { - let storage_id = BlockNumberAddress((block_number, address)); - - let mut storage = storage_revert - .into_iter() - .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) - .collect::>(); - // sort storage slots by key. - storage.par_sort_unstable_by_key(|a| a.0); - - // If we are writing the primary storage wipe transition, the pre-existing plain - // storage state has to be taken from the database and written to storage history. - // See [StorageWipe::Primary] for more details. - let mut wiped_storage = Vec::new(); - if wiped { - tracing::trace!(target: "provider::reverts", ?address, "Wiping storage"); - if let Some((_, entry)) = storages_cursor.seek_exact(address)? { - wiped_storage.push((entry.key, entry.value)); - while let Some(entry) = storages_cursor.next_dup_val()? { - wiped_storage.push((entry.key, entry.value)) - } - } - } - - tracing::trace!(target: "provider::reverts", ?address, ?storage, "Writing storage reverts"); - for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { - storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; - } - } - } - - // Write account changes - tracing::trace!(target: "provider::reverts", "Writing account changes"); - let mut account_changeset_cursor = - provider.tx_ref().cursor_dup_write::()?; - - for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { - let block_number = first_block + block_index as BlockNumber; - // Sort accounts by address. - account_block_reverts.par_sort_by_key(|a| a.0); - - for (address, info) in account_block_reverts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(Into::into) }, - )?; - } - } - - Ok(()) - } -} - /// Iterator over storage reverts. /// See [`StorageRevertsIter::next`] for more details. #[allow(missing_debug_implementations)] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index a578fa09d562..894a41620c52 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -34,11 +34,18 @@ pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; pub use reth_execution_types::*; pub mod bundle_state; -pub use bundle_state::{OriginalValuesKnown, StateChanges, StateReverts}; + +/// Re-export `OriginalValuesKnown` +pub use revm::db::states::OriginalValuesKnown; /// Writer standalone type. 
pub mod writer; +pub use reth_chain_state::{ + CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, + CanonStateNotifications, CanonStateSubscriptions, +}; + pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs new file mode 100644 index 000000000000..952cdff4bc54 --- /dev/null +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -0,0 +1,826 @@ +use crate::{ + providers::{BundleStateProvider, StaticFileProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, + DatabaseProviderRO, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, ProviderError, + ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, +}; +use alloy_rpc_types_engine::ForkchoiceState; +use reth_chain_state::CanonicalInMemoryState; +use reth_chainspec::{ChainInfo, ChainSpec}; +use reth_db_api::{ + database::Database, + models::{AccountBeforeTx, StoredBlockBodyIndices}, +}; +use reth_evm::ConfigureEvmEnv; +use reth_primitives::{ + Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, + BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, Withdrawals, B256, U256, +}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_errors::provider::ProviderResult; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use std::{ + ops::{RangeBounds, RangeInclusive}, + sync::Arc, + time::Instant, +}; +use tracing::trace; + +/// The main type for interacting with the blockchain. +/// +/// This type serves as the main entry point for interacting with the blockchain and provides data +/// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper +/// type that holds an instance of the database and the blockchain tree. +#[allow(missing_debug_implementations)] +pub struct BlockchainProvider2 { + /// Provider type used to access the database. + database: ProviderFactory, + /// Tracks the chain info wrt forkchoice updates and in memory canonical + /// state. + canonical_in_memory_state: CanonicalInMemoryState, +} + +impl Clone for BlockchainProvider2 { + fn clone(&self) -> Self { + Self { + database: self.database.clone(), + canonical_in_memory_state: self.canonical_in_memory_state.clone(), + } + } +} + +impl BlockchainProvider2 { + /// Create new provider instance that wraps the database and the blockchain tree, using the + /// provided latest header to initialize the chain info tracker. 
+ pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> Self { + Self { database, canonical_in_memory_state: CanonicalInMemoryState::with_head(latest) } + } +} + +impl BlockchainProvider2 +where + DB: Database, +{ + /// Create a new provider using only the database, fetching the latest header from + /// the database to initialize the provider. + pub fn new(database: ProviderFactory) -> ProviderResult { + let provider = database.provider()?; + let best: ChainInfo = provider.chain_info()?; + match provider.header_by_number(best.best_number)? { + Some(header) => { + drop(provider); + Ok(Self::with_latest(database, header.seal(best.best_hash))) + } + None => Err(ProviderError::HeaderNotFound(best.best_number.into())), + } + } + + /// Gets a clone of `canonical_in_memory_state`. + pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { + self.canonical_in_memory_state.clone() + } +} + +impl BlockchainProvider2 +where + DB: Database, +{ + /// Ensures that the given block number is canonical (synced) + /// + /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are + /// out of range and would lead to invalid results, mainly during initial sync. + /// + /// Verifying the `block_number` would be expensive since we need to lookup sync table + /// Instead, we ensure that the `block_number` is within the range of the + /// [`Self::best_block_number`] which is updated when a block is synced. + #[inline] + fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { + let latest = self.best_block_number()?; + if block_number > latest { + Err(ProviderError::HeaderNotFound(block_number.into())) + } else { + Ok(()) + } + } +} + +impl DatabaseProviderFactory for BlockchainProvider2 +where + DB: Database, +{ + fn database_provider_ro(&self) -> ProviderResult> { + self.database.provider() + } +} + +impl StaticFileProviderFactory for BlockchainProvider2 { + fn static_file_provider(&self) -> StaticFileProvider { + self.database.static_file_provider() + } +} + +impl HeaderProvider for BlockchainProvider2 +where + DB: Database, +{ + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.database.header(block_hash) + } + + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.database.header_by_number(num) + } + + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + self.database.header_td(hash) + } + + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + self.database.header_td_by_number(number) + } + + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + self.database.headers_range(range) + } + + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + self.database.sealed_header(number) + } + + fn sealed_headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.database.sealed_headers_range(range) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult> { + self.database.sealed_headers_while(range, predicate) + } +} + +impl BlockHashReader for BlockchainProvider2 +where + DB: Database, +{ + fn block_hash(&self, number: u64) -> ProviderResult> { + self.database.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + self.database.canonical_hashes_range(start, end) + } +} + +impl BlockNumReader for BlockchainProvider2 +where + DB: Database, +{ + fn 
chain_info(&self) -> ProviderResult { + Ok(self.canonical_in_memory_state.chain_info()) + } + + fn best_block_number(&self) -> ProviderResult { + Ok(self.canonical_in_memory_state.get_canonical_block_number()) + } + + fn last_block_number(&self) -> ProviderResult { + self.database.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + self.database.block_number(hash) + } +} + +impl BlockIdReader for BlockchainProvider2 +where + DB: Database, +{ + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_num_hash()) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_safe_num_hash()) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_finalized_num_hash()) + } +} + +impl BlockReader for BlockchainProvider2 +where + DB: Database, +{ + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + let block = match source { + BlockSource::Any | BlockSource::Canonical => { + // check in memory first + // Note: it's fine to return the unsealed block because the caller already has + // the hash + let mut block = self + .canonical_in_memory_state + .state_by_hash(hash) + .map(|block_state| block_state.block().block().clone().unseal()); + + if block.is_none() { + block = self.database.block_by_hash(hash)?; + } + block + } + BlockSource::Pending => { + self.canonical_in_memory_state.pending_block().map(|block| block.unseal()) + } + }; + + Ok(block) + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + match id { + BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), + BlockHashOrNumber::Number(num) => self.database.block_by_number(num), + } + } + + fn pending_block(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block()) + } + + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_with_senders()) + } + + fn pending_block_and_receipts(&self) -> ProviderResult)>> { + Ok(self.canonical_in_memory_state.pending_block_and_receipts()) + } + + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + self.database.ommers(id) + } + + fn block_body_indices( + &self, + number: BlockNumber, + ) -> ProviderResult> { + self.database.block_body_indices(number) + } + + /// Returns the block with senders with matching number or hash from database. + /// + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid + /// hashes, since they would need to be calculated on the spot, and we want fast querying.** + /// + /// Returns `None` if block is not found. 
+ fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.database.block_with_senders(id, transaction_kind) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.database.sealed_block_with_senders(id, transaction_kind) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.database.block_range(range) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.database.block_with_senders_range(range) + } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.database.sealed_block_with_senders_range(range) + } +} + +impl TransactionsProvider for BlockchainProvider2 +where + DB: Database, +{ + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.database.transaction_id(tx_hash) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.database.transaction_by_id(id) + } + + fn transaction_by_id_no_hash( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.database.transaction_by_id_no_hash(id) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.database.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + tx_hash: TxHash, + ) -> ProviderResult> { + self.database.transaction_by_hash_with_meta(tx_hash) + } + + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { + self.database.transaction_block(id) + } + + fn transactions_by_block( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.database.transactions_by_block(id) + } + + fn transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.database.transactions_by_block_range(range) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.database.transactions_by_tx_range(range) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.database.senders_by_tx_range(range) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.database.transaction_sender(id) + } +} + +impl ReceiptProvider for BlockchainProvider2 +where + DB: Database, +{ + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.database.receipt(id) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.database.receipt_by_hash(hash) + } + + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + self.database.receipts_by_block(block) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.database.receipts_by_tx_range(range) + } +} + +impl ReceiptProviderIdExt for BlockchainProvider2 +where + DB: Database, +{ + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + match block { + BlockId::Hash(rpc_block_hash) => { + let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; + if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { + let block_state = self + .canonical_in_memory_state + .state_by_hash(rpc_block_hash.block_hash) + .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; + receipts = Some(block_state.executed_block_receipts()); + } + Ok(receipts) + } + BlockId::Number(num_tag) => match num_tag { + BlockNumberOrTag::Pending => Ok(self + .canonical_in_memory_state 
+ .pending_state() + .map(|block_state| block_state.executed_block_receipts())), + _ => { + if let Some(num) = self.convert_block_number(num_tag)? { + self.receipts_by_block(num.into()) + } else { + Ok(None) + } + } + }, + } + } +} + +impl WithdrawalsProvider for BlockchainProvider2 +where + DB: Database, +{ + fn withdrawals_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + self.database.withdrawals_by_block(id, timestamp) + } + + fn latest_withdrawal(&self) -> ProviderResult> { + self.database.latest_withdrawal() + } +} + +impl RequestsProvider for BlockchainProvider2 +where + DB: Database, +{ + fn requests_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + self.database.requests_by_block(id, timestamp) + } +} + +impl StageCheckpointReader for BlockchainProvider2 +where + DB: Database, +{ + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { + self.database.provider()?.get_stage_checkpoint(id) + } + + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + self.database.provider()?.get_stage_checkpoint_progress(id) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + self.database.provider()?.get_all_checkpoints() + } +} + +impl EvmEnvProvider for BlockchainProvider2 +where + DB: Database, +{ + fn fill_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv, + { + self.database.provider()?.fill_env_at(cfg, block_env, at, evm_config) + } + + fn fill_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv, + { + self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) + } + + fn fill_cfg_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv, + { + self.database.provider()?.fill_cfg_env_at(cfg, at, evm_config) + } + + fn fill_cfg_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv, + { + self.database.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) + } +} + +impl PruneCheckpointReader for BlockchainProvider2 +where + DB: Database, +{ + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { + self.database.provider()?.get_prune_checkpoint(segment) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + self.database.provider()?.get_prune_checkpoints() + } +} + +impl ChainSpecProvider for BlockchainProvider2 +where + DB: Send + Sync, +{ + fn chain_spec(&self) -> Arc { + self.database.chain_spec() + } +} + +impl StateProviderFactory for BlockchainProvider2 +where + DB: Database, +{ + /// Storage provider for latest block + fn latest(&self) -> ProviderResult { + trace!(target: "providers::blockchain", "Getting latest block state provider"); + self.database.latest() + } + + fn history_by_block_number( + &self, + block_number: BlockNumber, + ) -> ProviderResult { + trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); + self.ensure_canonical_block(block_number)?; + self.database.history_by_block_number(block_number) + } + + fn history_by_block_hash(&self, block_hash: BlockHash) 
-> ProviderResult { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + self.database.history_by_block_hash(block_hash) + } + + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { + trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); + let mut state = self.history_by_block_hash(block); + + // we failed to get the state by hash, from disk, hash block be the pending block + if state.is_err() { + if let Ok(Some(pending)) = self.pending_state_by_hash(block) { + // we found pending block by hash + state = Ok(pending) + } + } + + state + } + + /// Returns the state provider for pending state. + /// + /// If there's no pending block available then the latest state provider is returned: + /// [`Self::latest`] + fn pending(&self) -> ProviderResult { + trace!(target: "providers::blockchain", "Getting provider for pending state"); + + if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { + let historical = self.database.history_by_block_hash(block.hash)?; + let pending_provider = + self.canonical_in_memory_state.state_provider(block.hash, historical); + + return Ok(Box::new(pending_provider)); + } + + // fallback to latest state if the pending block is not available + self.latest() + } + + fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { + let historical = self.database.history_by_block_hash(block_hash)?; + if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { + if block.hash == block_hash { + let pending_provider = + self.canonical_in_memory_state.state_provider(block_hash, historical); + + return Ok(Some(Box::new(pending_provider))) + } + } + Ok(None) + } + + fn pending_with_provider( + &self, + bundle_state_data: Box, + ) -> ProviderResult { + let state_provider = self.pending()?; + + let bundle_state_provider = BundleStateProvider::new(state_provider, bundle_state_data); + Ok(Box::new(bundle_state_provider)) + } +} + +impl CanonChainTracker for BlockchainProvider2 +where + DB: Send + Sync, + Self: BlockReader, +{ + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { + // update timestamp + self.canonical_in_memory_state.on_forkchoice_update_received(); + } + + fn last_received_update_timestamp(&self) -> Option { + self.canonical_in_memory_state.last_received_update_timestamp() + } + + fn on_transition_configuration_exchanged(&self) { + self.canonical_in_memory_state.on_transition_configuration_exchanged(); + } + + fn last_exchanged_transition_configuration_timestamp(&self) -> Option { + self.canonical_in_memory_state.last_exchanged_transition_configuration_timestamp() + } + + fn set_canonical_head(&self, header: SealedHeader) { + self.canonical_in_memory_state.set_canonical_head(header); + } + + fn set_safe(&self, header: SealedHeader) { + self.canonical_in_memory_state.set_safe(header); + } + + fn set_finalized(&self, header: SealedHeader) { + self.canonical_in_memory_state.set_finalized(header); + } +} + +impl BlockReaderIdExt for BlockchainProvider2 +where + Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, +{ + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Number(num) => self.block_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: should we only apply this for the RPCs that are listed in EIP-1898? + // so not at the provider level? 
+ // if we decide to do this at a higher level, then we can make this an automatic + // trait impl + if Some(true) == hash.require_canonical { + // check the database, canonical blocks are only stored in the database + self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) + } else { + self.block_by_hash(hash.block_hash) + } + } + } + } + + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + Ok(match id { + BlockNumberOrTag::Latest => { + Some(self.canonical_in_memory_state.get_canonical_head().unseal()) + } + BlockNumberOrTag::Finalized => { + self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Safe => { + self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), + + BlockNumberOrTag::Number(num) => self.header_by_number(num)?, + }) + } + + fn sealed_header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { + match id { + BlockNumberOrTag::Latest => { + Ok(Some(self.canonical_in_memory_state.get_canonical_head())) + } + BlockNumberOrTag::Finalized => { + Ok(self.canonical_in_memory_state.get_finalized_header()) + } + BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), + BlockNumberOrTag::Earliest => { + self.header_by_number(0)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) + } + BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), + BlockNumberOrTag::Number(num) => { + self.header_by_number(num)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) + } + } + } + + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| h.seal_slow()), + }) + } + + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?, + }) + } + + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + match id { + BlockId::Number(num) => self.ommers_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: EIP-1898 question, see above + // here it is not handled + self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) + } + } + } +} + +impl BlockchainTreePendingStateProvider for BlockchainProvider2 +where + DB: Send + Sync, +{ + fn find_pending_state_provider( + &self, + _block_hash: BlockHash, + ) -> Option> { + // TODO: check in memory overlay https://github.com/paradigmxyz/reth/issues/9614 + None + } +} + +impl CanonStateSubscriptions for BlockchainProvider2 +where + DB: Send + Sync, +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + self.canonical_in_memory_state.subscribe_canon_state() + } +} + +impl ChangeSetReader for BlockchainProvider2 +where + DB: Database, +{ + fn account_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + self.database.provider()?.account_block_changeset(block_number) + } +} + +impl AccountReader for BlockchainProvider2 +where + DB: Database + Sync + Send, +{ + /// Get basic account information. 
+ fn basic_account(&self, address: Address) -> ProviderResult> { + self.database.provider()?.basic_account(address) + } +} diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index bfd0a39bcbb3..ed4c1498fafc 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -512,6 +512,9 @@ impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { self.provider()?.get_stage_checkpoint_progress(id) } + fn get_all_checkpoints(&self) -> ProviderResult> { + self.provider()?.get_all_checkpoints() + } } impl EvmEnvProvider for ProviderFactory { @@ -771,7 +774,7 @@ mod tests { // Checkpoint and no gap let mut static_file_writer = provider.static_file_provider().latest_writer(StaticFileSegment::Headers).unwrap(); - static_file_writer.append_header(head.header().clone(), U256::ZERO, head.hash()).unwrap(); + static_file_writer.append_header(head.header(), U256::ZERO, &head.hash()).unwrap(); static_file_writer.commit().unwrap(); drop(static_file_writer); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 80246013924e..a688f125899e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,5 +1,5 @@ use crate::{ - bundle_state::{BundleStateInit, RevertsInit}, + bundle_state::StorageRevertsIter, providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, to_range, traits::{ @@ -7,16 +7,20 @@ use crate::{ }, writer::StorageWriter, AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, - HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, - HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, - PruneCheckpointWriter, RequestsProvider, StageCheckpointReader, StateProviderBox, StateWriter, - StatsReader, StorageReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, - WithdrawalsProvider, + BlockReader, BlockWriter, BundleStateInit, EvmEnvProvider, FinalizedBlockReader, + FinalizedBlockWriter, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, + ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, + StageCheckpointReader, StateChangeWriter, StateProviderBox, StateWriter, StatsReader, + StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; use itertools::{izip, Itertools}; +use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpec, EthereumHardforks}; -use reth_db::{tables, BlockNumberList, PlainAccountState, PlainStorageState}; +use reth_db::{ + cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, +}; use reth_db_api::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, RangeWalker}, @@ -34,20 +38,25 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, 
BlockNumber, - BlockWithSenders, GotExpected, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, - Withdrawals, B256, U256, + BlockWithSenders, Bytecode, GotExpected, Header, Receipt, Requests, SealedBlock, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, - updates::TrieUpdates, - HashedPostStateSorted, Nibbles, StateRoot, + trie_cursor::DatabaseStorageTrieCursor, + updates::{StorageTrieUpdates, TrieUpdates}, + HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, +}; +use reth_trie_db::DatabaseStateRoot; +use revm::{ + db::states::{PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset}, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, }; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -201,11 +210,11 @@ impl DatabaseProviderRW { for block_number in 0..block.number { let mut prev = block.header.clone().unseal(); prev.number = block_number; - writer.append_header(prev, U256::ZERO, B256::ZERO)?; + writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } } - writer.append_header(block.header.as_ref().clone(), ttd, block.hash())?; + writer.append_header(block.header.as_ref(), ttd, &block.hash())?; self.insert_block(block) } @@ -1001,7 +1010,7 @@ impl DatabaseProvider { } // insert value if needed - if *old_storage_value != U256::ZERO { + if !old_storage_value.is_zero() { plain_storage_cursor.upsert(*address, storage_entry)?; } } @@ -1099,7 +1108,7 @@ impl DatabaseProvider { } // insert value if needed - if *old_storage_value != U256::ZERO { + if !old_storage_value.is_zero() { plain_storage_cursor.upsert(*address, storage_entry)?; } } @@ -1948,7 +1957,7 @@ impl BlockNumReader for DatabaseProvider { impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - if source.is_database() { + if source.is_canonical() { self.block(hash.into()) } else { Ok(None) @@ -2508,6 +2517,14 @@ impl StageCheckpointReader for DatabaseProvider { Ok(self.tx.get::(id.to_string())?) } + fn get_all_checkpoints(&self) -> ProviderResult> { + self.tx + .cursor_read::()? + .walk(None)? + .collect::, _>>() + .map_err(ProviderError::Database) + } + /// Get stage checkpoint progress. fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { Ok(self.tx.get::(id.to_string())?) 
@@ -2618,6 +2635,224 @@ impl StorageReader for DatabaseProvider { } } +impl StateChangeWriter for DatabaseProvider { + fn write_state_reverts( + &self, + reverts: PlainStateReverts, + first_block: BlockNumber, + ) -> ProviderResult<()> { + // Write storage changes + tracing::trace!("Writing storage changes"); + let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; + let mut storage_changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { + let block_number = first_block + block_index as BlockNumber; + + tracing::trace!(block_number, "Writing block change"); + // sort changes by address. + storage_changes.par_sort_unstable_by_key(|a| a.address); + for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { + let storage_id = BlockNumberAddress((block_number, address)); + + let mut storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + // sort storage slots by key. + storage.par_sort_unstable_by_key(|a| a.0); + + // If we are writing the primary storage wipe transition, the pre-existing plain + // storage state has to be taken from the database and written to storage history. + // See [StorageWipe::Primary] for more details. + let mut wiped_storage = Vec::new(); + if wiped { + tracing::trace!(?address, "Wiping storage"); + if let Some((_, entry)) = storages_cursor.seek_exact(address)? { + wiped_storage.push((entry.key, entry.value)); + while let Some(entry) = storages_cursor.next_dup_val()? { + wiped_storage.push((entry.key, entry.value)) + } + } + } + + tracing::trace!(?address, ?storage, "Writing storage reverts"); + for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { + storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; + } + } + } + + // Write account changes + tracing::trace!("Writing account changes"); + let mut account_changeset_cursor = + self.tx_ref().cursor_dup_write::()?; + + for (block_index, mut account_block_reverts) in reverts.accounts.into_iter().enumerate() { + let block_number = first_block + block_index as BlockNumber; + // Sort accounts by address. + account_block_reverts.par_sort_by_key(|a| a.0); + + for (address, info) in account_block_reverts { + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(Into::into) }, + )?; + } + } + + Ok(()) + } + + fn write_state_changes(&self, mut changes: StateChangeset) -> ProviderResult<()> { + // sort all entries so they can be written to database in more performant way. + // and take smaller memory footprint. + changes.accounts.par_sort_by_key(|a| a.0); + changes.storage.par_sort_by_key(|a| a.address); + changes.contracts.par_sort_by_key(|a| a.0); + + // Write new account state + tracing::trace!(len = changes.accounts.len(), "Writing new account state"); + let mut accounts_cursor = self.tx_ref().cursor_write::()?; + // write account to database. 
+ for (address, account) in changes.accounts { + if let Some(account) = account { + tracing::trace!(?address, "Updating plain state account"); + accounts_cursor.upsert(address, account.into())?; + } else if accounts_cursor.seek_exact(address)?.is_some() { + tracing::trace!(?address, "Deleting plain state account"); + accounts_cursor.delete_current()?; + } + } + + // Write bytecode + tracing::trace!(len = changes.contracts.len(), "Writing bytecodes"); + let mut bytecodes_cursor = self.tx_ref().cursor_write::()?; + for (hash, bytecode) in changes.contracts { + bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; + } + + // Write new storage state and wipe storage if needed. + tracing::trace!(len = changes.storage.len(), "Writing new storage state"); + let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; + for PlainStorageChangeset { address, wipe_storage, storage } in changes.storage { + // Wiping of storage. + if wipe_storage && storages_cursor.seek_exact(address)?.is_some() { + storages_cursor.delete_current_duplicates()?; + } + // cast storages to B256. + let mut storage = storage + .into_iter() + .map(|(k, value)| StorageEntry { key: k.into(), value }) + .collect::>(); + // sort storage slots by key. + storage.par_sort_unstable_by_key(|a| a.key); + + for entry in storage { + tracing::trace!(?address, ?entry.key, "Updating plain state storage"); + if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? { + if db_entry.key == entry.key { + storages_cursor.delete_current()?; + } + } + + if !entry.value.is_zero() { + storages_cursor.upsert(address, entry)?; + } + } + } + + Ok(()) + } +} + +impl TrieWriter for DatabaseProvider { + /// Writes trie updates. Returns the number of entries modified. + fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { + if trie_updates.is_empty() { + return Ok(0) + } + + // Track the number of inserted entries. + let mut num_entries = 0; + + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut account_updates = trie_updates + .removed_nodes_ref() + .iter() + .filter_map(|n| { + (!trie_updates.account_nodes_ref().contains_key(n)).then_some((n, None)) + }) + .collect::>(); + account_updates.extend( + trie_updates.account_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), + ); + // Sort trie node updates. + account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + let tx = self.tx_ref(); + let mut account_trie_cursor = tx.cursor_write::()?; + for (key, updated_node) in account_updates { + let nibbles = StoredNibbles(key.clone()); + match updated_node { + Some(node) => { + if !nibbles.0.is_empty() { + num_entries += 1; + account_trie_cursor.upsert(nibbles, node.clone())?; + } + } + None => { + num_entries += 1; + if account_trie_cursor.seek_exact(nibbles)?.is_some() { + account_trie_cursor.delete_current()?; + } + } + } + } + + num_entries += self.write_storage_trie_updates(trie_updates.storage_tries_ref())?; + + Ok(num_entries) + } +} + +impl StorageTrieWriter for DatabaseProvider { + /// Writes storage trie updates from the given storage trie map. First sorts the storage trie + /// updates by the hashed address, writing in sorted order. 
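// Sketch of the intended call site for `write_trie_updates` above: persist the
// node updates produced by an incremental state-root computation. This mirrors
// the `insert_genesis` test helper later in this patch; `DatabaseStateRoot`
// from `reth_trie_db` is assumed to be in scope.
fn persist_state_root<TX: DbTxMut + DbTx>(
    provider: &DatabaseProvider<TX>,
) -> ProviderResult<B256> {
    let (root, updates) = StateRoot::from_tx(provider.tx_ref())
        .root_with_updates()
        .map_err(Into::<reth_db::DatabaseError>::into)?;
    // returns the number of account- and storage-trie entries touched
    let _modified = provider.write_trie_updates(&updates)?;
    Ok(root)
}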
+ fn write_storage_trie_updates( + &self, + storage_tries: &HashMap, + ) -> ProviderResult { + let mut num_entries = 0; + let mut storage_tries = Vec::from_iter(storage_tries); + storage_tries.sort_unstable_by(|a, b| a.0.cmp(b.0)); + let mut cursor = self.tx_ref().cursor_dup_write::()?; + for (hashed_address, storage_trie_updates) in storage_tries { + let mut db_storage_trie_cursor = + DatabaseStorageTrieCursor::new(cursor, *hashed_address); + num_entries += + db_storage_trie_cursor.write_storage_trie_updates(storage_trie_updates)?; + cursor = db_storage_trie_cursor.cursor; + } + + Ok(num_entries) + } + + fn write_individual_storage_trie_updates( + &self, + hashed_address: B256, + updates: &StorageTrieUpdates, + ) -> ProviderResult { + if updates.is_empty() { + return Ok(0) + } + + let cursor = self.tx_ref().cursor_dup_write::()?; + let mut trie_db_cursor = DatabaseStorageTrieCursor::new(cursor, hashed_address); + Ok(trie_db_cursor.write_storage_trie_updates(updates)?) + } +} + impl HashingWriter for DatabaseProvider { fn unwind_account_hashing( &self, @@ -2696,7 +2931,7 @@ impl HashingWriter for DatabaseProvider { hashed_storage.delete_current()?; } - if value != U256::ZERO { + if !value.is_zero() { hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; } } @@ -2736,7 +2971,7 @@ impl HashingWriter for DatabaseProvider { hashed_storage_cursor.delete_current()?; } - if value != U256::ZERO { + if !value.is_zero() { hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value })?; } Ok(()) @@ -2813,7 +3048,7 @@ impl HashingWriter for DatabaseProvider { block_hash: end_block_hash, }))) } - trie_updates.write_to_database(&self.tx)?; + self.write_trie_updates(&trie_updates)?; } durations_recorder.record_relative(metrics::Action::InsertMerkleTree); @@ -3022,7 +3257,7 @@ impl BlockExecutionWriter for DatabaseProviderRW { block_hash: parent_hash, }))) } - trie_updates.write_to_database(&self.tx)?; + self.write_trie_updates(&trie_updates)?; // get blocks let blocks = self.take_block_range(range.clone())?; @@ -3110,7 +3345,7 @@ impl BlockExecutionWriter for DatabaseProviderRW { block_hash: parent_hash, }))) } - trie_updates.write_to_database(&self.tx)?; + self.write_trie_updates(&trie_updates)?; // get blocks let blocks = self.take_block_range(range.clone())?; @@ -3133,6 +3368,27 @@ impl BlockExecutionWriter for DatabaseProviderRW { } impl BlockWriter for DatabaseProviderRW { + /// Inserts the block into the database, always modifying the following tables: + /// * [`CanonicalHeaders`](tables::CanonicalHeaders) + /// * [`Headers`](tables::Headers) + /// * [`HeaderNumbers`](tables::HeaderNumbers) + /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) + /// * [`BlockBodyIndices`](tables::BlockBodyIndices) + /// + /// If there are transactions in the block, the following tables will be modified: + /// * [`Transactions`](tables::Transactions) + /// * [`TransactionBlocks`](tables::TransactionBlocks) + /// + /// If ommers are not empty, this will modify [`BlockOmmers`](tables::BlockOmmers). + /// If withdrawals are not empty, this will modify + /// [`BlockWithdrawals`](tables::BlockWithdrawals). + /// If requests are not empty, this will modify [`BlockRequests`](tables::BlockRequests). + /// + /// If the provider has __not__ configured full sender pruning, this will modify + /// [`TransactionSenders`](tables::TransactionSenders). 
+ /// + /// If the provider has __not__ configured full transaction lookup pruning, this will modify + /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, block: SealedBlockWithSenders, @@ -3303,14 +3559,16 @@ impl BlockWriter for DatabaseProviderRW { // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. - execution_outcome.write_to_storage(self, None, OriginalValuesKnown::No)?; + // TODO: should _these_ be moved to storagewriter? seems like storagewriter should be + // _above_ db provider + let mut storage_writer = StorageWriter::new(Some(self), None); + storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes { - let storage_writer = StorageWriter::new(Some(self), None); storage_writer.write_hashed_state(&hashed_state)?; - trie_updates.write_to_database(&self.tx)?; + self.write_trie_updates(&trie_updates)?; } durations_recorder.record_relative(metrics::Action::InsertHashes); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 330c880c7eb7..be6db2dcadb1 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,17 +1,18 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, - EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, ProviderError, - PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, + BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, ChainSpecProvider, + ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; +use reth_chain_state::{CanonStateNotifications, CanonStateSubscriptions, ChainInfoTracker}; use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db_api::{ database::Database, @@ -54,13 +55,13 @@ pub use state::{ mod bundle_state_provider; pub use bundle_state_provider::BundleStateProvider; -mod chain_info; -pub use chain_info::ChainInfoTracker; - mod consistent_view; use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; +mod blockchain_provider; +pub use blockchain_provider::BlockchainProvider2; + /// The main type for interacting with the blockchain. 
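// Hedged sketch of `insert_block` as documented above; `factory` and a
// recovered `block` are assumed to exist, and the returned
// `StoredBlockBodyIndices` are ignored here.
fn write_block<DB: Database>(
    factory: &ProviderFactory<DB>,
    block: SealedBlockWithSenders,
) -> ProviderResult<()> {
    let provider_rw = factory.provider_rw()?;
    let _indices = provider_rw.insert_block(block)?;
    provider_rw.commit()?;
    Ok(())
}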
/// /// This type serves as the main entry point for interacting with the blockchain and provides data @@ -290,7 +291,7 @@ where block } BlockSource::Pending => self.tree.block_by_hash(hash).map(|block| block.unseal()), - BlockSource::Database => self.database.block_by_hash(hash)?, + BlockSource::Canonical => self.database.block_by_hash(hash)?, }; Ok(block) @@ -526,6 +527,10 @@ where fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { self.database.provider()?.get_stage_checkpoint_progress(id) } + + fn get_all_checkpoints(&self) -> ProviderResult> { + self.database.provider()?.get_all_checkpoints() + } } impl EvmEnvProvider for BlockchainProvider @@ -820,7 +825,7 @@ where // trait impl if Some(true) == hash.require_canonical { // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Database) + self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) } else { self.block_by_hash(hash.block_hash) } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 268f5c6d3593..cbef08dcee27 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -15,7 +15,8 @@ use reth_primitives::{ }; use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; +use reth_trie::{proof::Proof, updates::TrieUpdates, AccountProof, HashedPostState, StateRoot}; +use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; use std::fmt::Debug; /// State provider for a given block number which takes a tx reference. @@ -130,7 +131,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedPostState::from_revert_range(self.tx, self.block_number..=tip)?) + Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) 
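// Small sketch of the renamed variant in the hunk above: `BlockSource::Canonical`
// (previously `BlockSource::Database`) restricts the lookup to canonical blocks,
// while `BlockSource::Any` would also consult the pending tree.
fn canonical_block_only<P: BlockReader>(
    provider: &P,
    hash: B256,
) -> ProviderResult<Option<Block>> {
    provider.find_block_by_hash(hash, BlockSource::Canonical)
}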
} fn history_info( @@ -259,7 +260,8 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - revert_state.state_root(self.tx).map_err(|err| ProviderError::Database(err.into())) + StateRoot::overlay_root(self.tx, revert_state) + .map_err(|err| ProviderError::Database(err.into())) } fn hashed_state_root_with_updates( @@ -268,8 +270,7 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - revert_state - .state_root_with_updates(self.tx) + StateRoot::overlay_root_with_updates(self.tx, revert_state) .map_err(|err| ProviderError::Database(err.into())) } } @@ -284,9 +285,8 @@ impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { ) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - revert_state - .account_proof(self.tx, address, slots) - .map_err(|err| ProviderError::Database(err.into())) + Proof::overlay_account_proof(self.tx, revert_state, address, slots) + .map_err(Into::::into) } } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 6bca0d69d467..8c95c8c26174 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -12,7 +12,8 @@ use reth_primitives::{ }; use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; +use reth_trie::{proof::Proof, updates::TrieUpdates, AccountProof, HashedPostState, StateRoot}; +use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; /// State provider over latest state that takes tx reference. #[derive(Debug)] @@ -75,15 +76,15 @@ impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { - hashed_state.state_root(self.tx).map_err(|err| ProviderError::Database(err.into())) + StateRoot::overlay_root(self.tx, hashed_state.clone()) + .map_err(|err| ProviderError::Database(err.into())) } fn hashed_state_root_with_updates( &self, hashed_state: &HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - hashed_state - .state_root_with_updates(self.tx) + StateRoot::overlay_root_with_updates(self.tx, hashed_state.clone()) .map_err(|err| ProviderError::Database(err.into())) } } @@ -95,9 +96,8 @@ impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { address: Address, slots: &[B256], ) -> ProviderResult { - Ok(hashed_state - .account_proof(self.tx, address, slots) - .map_err(Into::::into)?) 
+ Proof::overlay_account_proof(self.tx, hashed_state.clone(), address, slots) + .map_err(Into::::into) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 548b395d74ea..65304b4854f1 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -532,6 +532,18 @@ impl StaticFileProvider { provider: &DatabaseProvider, has_receipt_pruning: bool, ) -> ProviderResult> { + // OVM chain contains duplicate transactions, so is inconsistent by default since reth db + // not designed for duplicate transactions (see ). Undefined behaviour for queries + // to OVM chain is also in op-erigon. + if provider.chain_spec().is_optimism_mainnet() { + info!(target: "reth::cli", + "Skipping storage verification for OP mainnet, expected inconsistency in OVM chain" + ); + return Ok(None); + } + + info!(target: "reth::cli", "Verifying storage consistency."); + let mut unwind_target: Option = None; let mut update_unwind_target = |new_target: BlockNumber| { if let Some(target) = unwind_target.as_mut() { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index c5abdbe00c31..abbc774c7b40 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -107,7 +107,7 @@ mod tests { for header in headers.clone() { td += header.header().difficulty; let hash = header.hash(); - writer.append_header(header.unseal(), td, hash).unwrap(); + writer.append_header(&header.unseal(), td, &hash).unwrap(); } writer.commit().unwrap(); } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index df4417ace2bc..f973afde6e2c 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -13,6 +13,7 @@ use reth_primitives::{ }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ + borrow::Borrow, path::{Path, PathBuf}, sync::{Arc, Weak}, time::Instant, @@ -466,9 +467,9 @@ impl StaticFileProviderRW { /// Returns the current [`BlockNumber`] as seen in the static file. pub fn append_header( &mut self, - header: Header, + header: &Header, total_difficulty: U256, - hash: BlockHash, + hash: &BlockHash, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -501,7 +502,7 @@ impl StaticFileProviderRW { pub fn append_transaction( &mut self, tx_num: TxNumber, - tx: TransactionSignedNoHash, + tx: &TransactionSignedNoHash, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -528,7 +529,7 @@ impl StaticFileProviderRW { pub fn append_receipt( &mut self, tx_num: TxNumber, - receipt: Receipt, + receipt: &Receipt, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -549,9 +550,10 @@ impl StaticFileProviderRW { /// Appends multiple receipts to the static file. /// /// Returns the current [`TxNumber`] as seen in the static file, if any. 
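// Sketch of the overlay entry points used in the hunks above: fold an
// in-memory `HashedPostState` over the on-disk trie without persisting it.
// `DatabaseStateRoot` and `DatabaseProof` from `reth_trie_db` must be in scope.
fn overlay_root_of<TX: DbTx>(tx: &TX, overlay: HashedPostState) -> ProviderResult<B256> {
    StateRoot::overlay_root(tx, overlay).map_err(|err| ProviderError::Database(err.into()))
}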
- pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> + pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> where - I: IntoIterator>, + I: Iterator>, + R: Borrow, { let mut receipts_iter = receipts.into_iter().peekable(); // If receipts are empty, we can simply return None @@ -568,7 +570,8 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; + tx_number = + self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt.borrow())?; count += 1; } diff --git a/crates/storage/provider/src/test_utils/events.rs b/crates/storage/provider/src/test_utils/events.rs deleted file mode 100644 index 39e53772ca5d..000000000000 --- a/crates/storage/provider/src/test_utils/events.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::sync::{Arc, Mutex}; -use tokio::sync::broadcast::{self, Sender}; - -use crate::{CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, Chain}; - -/// A test `ChainEventSubscriptions` -#[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, -} - -impl TestCanonStateSubscriptions { - /// Adds new block commit to the queue that can be consumed with - /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_commit(&self, new: Arc) { - let event = CanonStateNotification::Commit { new }; - self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) - } - - /// Adds reorg to the queue that can be consumed with - /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_reorg(&self, old: Arc, new: Arc) { - let event = CanonStateNotification::Reorg { old, new }; - self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) - } -} - -impl CanonStateSubscriptions for TestCanonStateSubscriptions { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - let (canon_notif_tx, canon_notif_rx) = broadcast::channel(100); - self.canon_notif_tx.lock().as_mut().unwrap().push(canon_notif_tx); - - canon_notif_rx - } -} diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 4d40ad54e990..edbbe4582926 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,19 +1,22 @@ -use crate::{providers::StaticFileProvider, ProviderFactory}; +use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, - DatabaseEnv, + Database, DatabaseEnv, }; +use reth_errors::ProviderResult; +use reth_primitives::{Account, StorageEntry, B256}; +use reth_trie::StateRoot; +use reth_trie_db::DatabaseStateRoot; use std::sync::Arc; pub mod blocks; -mod events; mod mock; mod noop; -pub use events::TestCanonStateSubscriptions; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; +pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Creates test provider factory with mainnet chain spec. 
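// Sketch of the relaxed `append_receipts` bound introduced above: items only
// need to satisfy `Borrow<Receipt>`, so an iterator of references now works
// without cloning. `writer` must be a Receipts-segment static file writer.
fn append_receipts_by_ref(
    writer: &mut StaticFileProviderRW,
    receipts: &[(TxNumber, Receipt)],
) -> ProviderResult<Option<TxNumber>> {
    writer.append_receipts(receipts.iter().map(|(num, receipt)| Ok((*num, receipt))))
}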
pub fn create_test_provider_factory() -> ProviderFactory>> { @@ -32,3 +35,39 @@ pub fn create_test_provider_factory_with_chain_spec( StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), ) } + +/// Inserts the genesis alloc from the provided chain spec into the trie. +pub fn insert_genesis( + provider_factory: &ProviderFactory, + chain_spec: Arc, +) -> ProviderResult { + let provider = provider_factory.provider_rw()?; + + // Hash accounts and insert them into hashing table. + let genesis = chain_spec.genesis(); + let alloc_accounts = genesis + .alloc + .iter() + .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); + provider.insert_account_for_hashing(alloc_accounts).unwrap(); + + let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { + // Only return `Some` if there is storage. + account.storage.map(|storage| { + ( + addr, + storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }), + ) + }) + }); + provider.insert_storage_for_hashing(alloc_storage)?; + + let (root, updates) = StateRoot::from_tx(provider.tx_ref()) + .root_with_updates() + .map_err(Into::::into)?; + provider.write_trie_updates(&updates).unwrap(); + + provider.commit()?; + + Ok(root) +} diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 877c4d7afca2..1f0dfb4d161e 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -3,6 +3,7 @@ use std::{ sync::Arc, }; +use reth_chain_state::{CanonStateNotifications, CanonStateSubscriptions}; use reth_chainspec::{ChainInfo, ChainSpec, MAINNET}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; @@ -24,11 +25,10 @@ use crate::{ providers::StaticFileProvider, traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, HeaderProvider, PruneCheckpointReader, ReceiptProviderIdExt, RequestsProvider, - StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, - StateRootProvider, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, - WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, + ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, + StateProviderFactory, StateRootProvider, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, }; /// Supports various api interfaces for testing purposes. @@ -442,6 +442,10 @@ impl StageCheckpointReader for NoopProvider { fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { Ok(None) } + + fn get_all_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } } impl WithdrawalsProvider for NoopProvider { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index c53150560d3a..f47bd3efd2e3 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -1,10 +1,11 @@ //! Helper provider traits to encapsulate all provider traits for simplicity. 
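// Hedged sketch of the new `insert_genesis` helper in a test; `MAINNET` comes
// from `reth_chainspec`, and the assertion against the genesis header's state
// root is illustrative rather than taken from the patch.
fn genesis_root_matches() -> ProviderResult<()> {
    let factory = create_test_provider_factory();
    let root = insert_genesis(&factory, MAINNET.clone())?;
    assert_eq!(root, MAINNET.genesis_header().state_root);
    Ok(())
}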
use crate::{ - AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, StageCheckpointReader, - StateProviderFactory, StaticFileProviderFactory, TransactionsProvider, + AccountReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, + EvmEnvProvider, HeaderProvider, StageCheckpointReader, StateProviderFactory, + StaticFileProviderFactory, TransactionsProvider, }; +use reth_chain_state::CanonStateSubscriptions; use reth_db_api::database::Database; /// Helper trait to unify all provider traits for simplicity. diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 466a9e2908d0..c89815a9f670 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -16,14 +16,7 @@ mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::StateWriter; - -mod chain; -pub use chain::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, - CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceStream, - ForkChoiceSubscriptions, -}; +pub use state::{StateChangeWriter, StateWriter}; mod spec; pub use spec::ChainSpecProvider; @@ -31,6 +24,9 @@ pub use spec::ChainSpecProvider; mod hashing; pub use hashing::HashingWriter; +mod trie; +pub use trie::{StorageTrieWriter, TrieWriter}; + mod history; pub use history::HistoryWriter; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index b445892a060a..eec2ee11ff7b 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,19 +1,33 @@ -use crate::{providers::StaticFileProviderRWRefMut, DatabaseProviderRW}; -use reth_db::Database; +use reth_execution_types::ExecutionOutcome; +use reth_primitives::BlockNumber; use reth_storage_errors::provider::ProviderResult; -use revm::db::OriginalValuesKnown; +use revm::db::{ + states::{PlainStateReverts, StateChangeset}, + OriginalValuesKnown, +}; -/// A helper trait for [`ExecutionOutcome`](reth_execution_types::ExecutionOutcome) to -/// write state and receipts to storage. +/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - self, - provider_rw: &DatabaseProviderRW, - static_file_producer: Option>, + fn write_to_storage( + &mut self, + execution_outcome: ExecutionOutcome, is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> - where - DB: Database; + ) -> ProviderResult<()>; +} + +/// A trait specifically for writing state changes or reverts +pub trait StateChangeWriter { + /// Write state reverts to the database. + /// + /// NOTE: Reverts will delete all wiped storage from plain state. + fn write_state_reverts( + &self, + reverts: PlainStateReverts, + first_block: BlockNumber, + ) -> ProviderResult<()>; + + /// Write state changes to the database. 
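// Sketch of the reworked `StateWriter` entry point above: the execution
// outcome is now passed in by value instead of consuming `self`, so a single
// writer can flush several outcomes in sequence.
fn flush_outcome<W: StateWriter>(
    writer: &mut W,
    outcome: ExecutionOutcome,
) -> ProviderResult<()> {
    writer.write_to_storage(outcome, OriginalValuesKnown::No)
}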
+ fn write_state_changes(&self, changes: StateChangeset) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs index a8eea44a6928..f75dbae24d22 100644 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -1,5 +1,6 @@ -use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; +use crate::BlockchainTreePendingStateProvider; use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; +use reth_chain_state::CanonStateSubscriptions; /// Helper trait to combine all the traits we need for the `BlockchainProvider` /// diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs new file mode 100644 index 000000000000..960af93c8547 --- /dev/null +++ b/crates/storage/provider/src/traits/trie.rs @@ -0,0 +1,36 @@ +use std::collections::HashMap; + +use auto_impl::auto_impl; +use reth_primitives::B256; +use reth_storage_errors::provider::ProviderResult; +use reth_trie::updates::{StorageTrieUpdates, TrieUpdates}; + +/// Trie Writer +#[auto_impl(&, Arc, Box)] +pub trait TrieWriter: Send + Sync { + /// Writes trie updates to the database. + /// + /// Returns the number of entries modified. + fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; +} + +/// Storage Trie Writer +#[auto_impl(&, Arc, Box)] +pub trait StorageTrieWriter: Send + Sync { + /// Writes storage trie updates from the given storage trie map. + /// + /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates( + &self, + storage_tries: &HashMap, + ) -> ProviderResult; + + /// Writes storage trie updates for the given hashed address. + fn write_individual_storage_trie_updates( + &self, + hashed_address: B256, + updates: &StorageTrieUpdates, + ) -> ProviderResult; +} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index f5a3554d3b6b..eb21e90991d7 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,4 +1,7 @@ -use crate::{providers::StaticFileProviderRWRefMut, DatabaseProviderRW}; +use crate::{ + providers::StaticFileProviderRWRefMut, DatabaseProvider, DatabaseProviderRO, + DatabaseProviderRW, StateChangeWriter, StateWriter, TrieWriter, +}; use itertools::Itertools; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, @@ -7,11 +10,16 @@ use reth_db::{ Database, }; use reth_errors::{ProviderError, ProviderResult}; -use reth_primitives::{BlockNumber, StorageEntry, U256}; +use reth_execution_types::ExecutionOutcome; +use reth_primitives::{ + BlockNumber, Header, StaticFileSegment, StorageEntry, TransactionSignedNoHash, B256, U256, +}; use reth_storage_api::ReceiptWriter; use reth_storage_errors::writer::StorageWriterError; -use reth_trie::HashedPostStateSorted; +use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; +use revm::db::OriginalValuesKnown; use static_file::StaticFileWriter; +use std::borrow::Borrow; mod database; mod static_file; @@ -25,29 +33,24 @@ enum StorageType { /// [`StorageWriter`] is responsible for managing the writing to either database, static file or /// both. 
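// Because the new trie-writer traits above are `#[auto_impl(&, Arc, Box)]`,
// plain references and smart pointers to a provider can be passed wherever the
// trait is expected; a minimal sketch:
fn flush_trie<W: TrieWriter>(writer: W, updates: &TrieUpdates) -> ProviderResult<usize> {
    // returns the number of trie entries modified
    writer.write_trie_updates(updates)
}
// e.g. `flush_trie(&provider, &updates)?` borrows the provider without moving it.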
#[derive(Debug)] -pub struct StorageWriter<'a, 'b, DB: Database> { - database_writer: Option<&'a DatabaseProviderRW>, +pub struct StorageWriter<'a, 'b, TX> { + database_writer: Option<&'a DatabaseProvider>, static_file_writer: Option>, } -impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { +impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> { /// Creates a new instance of [`StorageWriter`]. /// /// # Parameters /// - `database_writer`: An optional reference to a database writer. /// - `static_file_writer`: An optional mutable reference to a static file writer. pub const fn new( - database_writer: Option<&'a DatabaseProviderRW>, + database_writer: Option<&'a DatabaseProvider>, static_file_writer: Option>, ) -> Self { Self { database_writer, static_file_writer } } - /// Creates a new instance of [`StorageWriter`] from a database writer. - pub const fn from_database_writer(database_writer: &'a DatabaseProviderRW) -> Self { - Self::new(Some(database_writer), None) - } - /// Creates a new instance of [`StorageWriter`] from a static file writer. pub const fn from_static_file_writer( static_file_writer: StaticFileProviderRWRefMut<'b>, @@ -55,11 +58,31 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { Self::new(None, Some(static_file_writer)) } + /// Creates a new instance of [`StorageWriter`] from a read-only database provider. + pub const fn from_database_provider_ro( + database: &'a DatabaseProviderRO, + ) -> StorageWriter<'_, '_, ::TX> + where + DB: Database, + { + StorageWriter::new(Some(database), None) + } + + /// Creates a new instance of [`StorageWriter`] from a read-write database provider. + pub fn from_database_provider_rw( + database: &'a DatabaseProviderRW, + ) -> StorageWriter<'_, '_, ::TXMut> + where + DB: Database, + { + StorageWriter::new(Some(database), None) + } + /// Returns a reference to the database writer. /// /// # Panics /// If the database writer is not set. - fn database_writer(&self) -> &DatabaseProviderRW { + fn database_writer(&self) -> &DatabaseProvider { self.database_writer.as_ref().expect("should exist") } @@ -94,7 +117,108 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { } Ok(()) } +} + +impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> +where + TX: DbTx, +{ + /// Appends headers to static files, using the + /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the + /// total difficulty of the parent block during header insertion. + /// + /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for + /// the Headers segment. + pub fn append_headers_from_blocks( + &mut self, + initial_block_number: BlockNumber, + headers: impl Iterator, + ) -> ProviderResult<()> + where + I: Borrow<(H, B256)>, + H: Borrow
, + { + self.ensure_database_writer()?; + self.ensure_static_file_writer()?; + let mut td_cursor = + self.database_writer().tx_ref().cursor_read::()?; + + let first_td = if initial_block_number == 0 { + U256::ZERO + } else { + td_cursor + .seek_exact(initial_block_number - 1)? + .map(|(_, td)| td.0) + .ok_or_else(|| ProviderError::TotalDifficultyNotFound(initial_block_number))? + }; + + for pair in headers { + let (header, hash) = pair.borrow(); + let header = header.borrow(); + let td = first_td + header.difficulty; + self.static_file_writer().append_header(header, td, hash)?; + } + + Ok(()) + } + + /// Appends transactions to static files, using the + /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number + /// when appending to static files. + /// + /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for + /// the Transactions segment. + pub fn append_transactions_from_blocks( + &mut self, + initial_block_number: BlockNumber, + transactions: impl Iterator, + ) -> ProviderResult<()> + where + T: Borrow>, + { + self.ensure_database_writer()?; + self.ensure_static_file_writer()?; + + let mut bodies_cursor = + self.database_writer().tx_ref().cursor_read::()?; + + let mut last_tx_idx = None; + for (idx, transactions) in transactions.enumerate() { + let block_number = initial_block_number + idx as u64; + + let mut first_tx_index = + bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); + + // If there are no indices, that means there have been no transactions + // + // So instead of returning an error, use zero + if block_number == initial_block_number && first_tx_index.is_none() { + first_tx_index = Some(0); + } + let mut tx_index = first_tx_index + .or(last_tx_idx) + .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for tx in transactions.borrow() { + self.static_file_writer().append_transaction(tx_index, tx)?; + tx_index += 1; + } + + self.static_file_writer() + .increment_block(StaticFileSegment::Transactions, block_number)?; + + // update index + last_tx_idx = Some(tx_index); + } + Ok(()) + } +} + +impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> +where + TX: DbTxMut + DbTx, +{ /// Writes the hashed state changes to the database pub fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()> { self.ensure_database_writer()?; @@ -129,7 +253,7 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { } } - if entry.value != U256::ZERO { + if !entry.value.is_zero() { hashed_storage_cursor.upsert(*hashed_address, entry)?; } } @@ -143,12 +267,15 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { /// ATTENTION: If called from [`StorageWriter`] without a static file producer, it will always /// write them to database. Otherwise, it will look into the pruning configuration to decide. /// + /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for + /// the Receipts segment. + /// /// # Parameters /// - `initial_block_number`: The starting block number. /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If /// `receipt` is `None`, it has been pruned. 
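// Sketch of `append_headers_from_blocks` above: the `StorageWriter` needs both
// sides set, a database provider (to read the parent's total difficulty) and a
// Headers-segment static file writer. Items satisfy `I: Borrow<(H, B256)>`
// with `H: Borrow<Header>`, so a slice of `(Header, B256)` pairs works as-is.
fn mirror_headers<TX: DbTx>(
    mut writer: StorageWriter<'_, '_, TX>,
    first_block: BlockNumber,
    pairs: &[(Header, B256)],
) -> ProviderResult<()> {
    writer.append_headers_from_blocks(first_block, pairs.iter())
}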
pub fn append_receipts_from_blocks( - mut self, + &mut self, initial_block_number: BlockNumber, blocks: impl Iterator>>, ) -> ProviderResult<()> { @@ -171,14 +298,27 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { StorageType::StaticFile(self.static_file_writer()) }; + let mut last_tx_idx = None; for (idx, receipts) in blocks.enumerate() { let block_number = initial_block_number + idx as u64; - let first_tx_index = bodies_cursor - .seek_exact(block_number)? - .map(|(_, indices)| indices.first_tx_num()) + let mut first_tx_index = + bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); + + // If there are no indices, that means there have been no transactions + // + // So instead of returning an error, use zero + if block_number == initial_block_number && first_tx_index.is_none() { + first_tx_index = Some(0); + } + + let first_tx_index = first_tx_index + .or(last_tx_idx) .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; + // update for empty blocks + last_tx_idx = Some(first_tx_index); + match &mut storage_type { StorageType::Database(cursor) => { DatabaseWriter(cursor).append_block_receipts( @@ -199,15 +339,70 @@ impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { Ok(()) } + + /// Writes trie updates. Returns the number of entries modified. + pub fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { + self.ensure_database_writer()?; + self.database_writer().write_trie_updates(trie_updates) + } +} + +impl<'a, 'b, TX> StateWriter for StorageWriter<'a, 'b, TX> +where + TX: DbTxMut + DbTx, +{ + /// Write the data and receipts to the database or static files if `static_file_producer` is + /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
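// Hedged sketch of the input shape `append_receipts_from_blocks` above
// consumes: one `Vec` per block, with empty blocks included (now handled via
// the `last_tx_idx` fallback) and `None` marking a receipt pruned away.
fn receipts_input(receipt: Receipt) -> Vec<Vec<Option<Receipt>>> {
    vec![
        vec![Some(receipt), None], // block N: two transactions, second pruned
        vec![],                    // block N + 1: an empty block
    ]
}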
+ fn write_to_storage( + &mut self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + ) -> ProviderResult<()> { + self.ensure_database_writer()?; + let (plain_state, reverts) = + execution_outcome.bundle.into_plain_state_and_reverts(is_value_known); + + self.database_writer().write_state_reverts(reverts, execution_outcome.first_block)?; + + self.append_receipts_from_blocks( + execution_outcome.first_block, + execution_outcome.receipts.into_iter(), + )?; + + self.database_writer().write_state_changes(plain_state)?; + + Ok(()) + } } #[cfg(test)] mod tests { use super::*; - use crate::test_utils::create_test_provider_factory; - use reth_db_api::transaction::DbTx; - use reth_primitives::{keccak256, Account, Address, B256}; - use reth_trie::{HashedPostState, HashedStorage}; + use crate::{test_utils::create_test_provider_factory, AccountReader, TrieWriter}; + use reth_db::tables; + use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRO}, + models::{AccountBeforeTx, BlockNumberAddress}, + transaction::{DbTx, DbTxMut}, + }; + use reth_primitives::{ + keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, + }; + use reth_trie::{test_utils::state_root, HashedPostState, HashedStorage, StateRoot}; + use reth_trie_db::DatabaseStateRoot; + use revm::{ + db::{ + states::{ + bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, + }, + BundleState, EmptyDB, + }, + primitives::{ + Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, EvmStorageSlot, + }, + DatabaseCommit, State, + }; + use std::collections::{BTreeMap, HashMap}; #[test] fn wiped_entries_are_removed() { @@ -260,4 +455,993 @@ mod tests { Ok(None) ); } + + #[test] + fn write_to_db_account_info() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address_a = Address::ZERO; + let address_b = Address::repeat_byte(0xff); + + let account_a = RevmAccountInfo { balance: U256::from(1), nonce: 1, ..Default::default() }; + let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; + let account_b_changed = + RevmAccountInfo { balance: U256::from(3), nonce: 3, ..Default::default() }; + + let mut state = State::builder().with_bundle_update().build(); + state.insert_not_existing(address_a); + state.insert_account(address_b, account_b.clone()); + + // 0x00.. is created + state.commit(HashMap::from([( + address_a, + RevmAccount { + info: account_a.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + storage: HashMap::default(), + }, + )])); + + // 0xff.. is changed (balance + 1, nonce + 1) + state.commit(HashMap::from([( + address_b, + RevmAccount { + info: account_b_changed.clone(), + status: AccountStatus::Touched, + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let mut revm_bundle_state = state.take_bundle(); + + // Write plain state and reverts separately. 
+ let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); + let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + assert!(plain_state.storage.is_empty()); + assert!(plain_state.contracts.is_empty()); + provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); + + assert_eq!(reverts.storage, [[]]); + provider.write_state_reverts(reverts, 1).expect("Could not write reverts to DB"); + + let reth_account_a = account_a.into(); + let reth_account_b = account_b.into(); + let reth_account_b_changed = account_b_changed.clone().into(); + + // Check plain state + assert_eq!( + provider.basic_account(address_a).expect("Could not read account state"), + Some(reth_account_a), + "Account A state is wrong" + ); + assert_eq!( + provider.basic_account(address_b).expect("Could not read account state"), + Some(reth_account_b_changed), + "Account B state is wrong" + ); + + // Check change set + let mut changeset_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open changeset cursor"); + assert_eq!( + changeset_cursor.seek_exact(1).expect("Could not read account change set"), + Some((1, AccountBeforeTx { address: address_a, info: None })), + "Account A changeset is wrong" + ); + assert_eq!( + changeset_cursor.next_dup().expect("Changeset table is malformed"), + Some((1, AccountBeforeTx { address: address_b, info: Some(reth_account_b) })), + "Account B changeset is wrong" + ); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account(address_b, account_b_changed.clone()); + + // 0xff.. is destroyed + state.commit(HashMap::from([( + address_b, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_b_changed, + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let mut revm_bundle_state = state.take_bundle(); + + // Write plain state and reverts separately. + let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); + let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + // Account B selfdestructed so flag for it should be present. 
+ assert_eq!( + plain_state.storage, + [PlainStorageChangeset { address: address_b, wipe_storage: true, storage: vec![] }] + ); + assert!(plain_state.contracts.is_empty()); + provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); + + assert_eq!( + reverts.storage, + [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]] + ); + provider.write_state_reverts(reverts, 2).expect("Could not write reverts to DB"); + + // Check new plain state for account B + assert_eq!( + provider.basic_account(address_b).expect("Could not read account state"), + None, + "Account B should be deleted" + ); + + // Check change set + assert_eq!( + changeset_cursor.seek_exact(2).expect("Could not read account change set"), + Some((2, AccountBeforeTx { address: address_b, info: Some(reth_account_b_changed) })), + "Account B changeset is wrong after deletion" + ); + } + + #[test] + fn write_to_db_storage() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address_a = Address::ZERO; + let address_b = Address::repeat_byte(0xff); + + let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; + + let mut state = State::builder().with_bundle_update().build(); + state.insert_not_existing(address_a); + state.insert_account_with_storage( + address_b, + account_b.clone(), + HashMap::from([(U256::from(1), U256::from(1))]), + ); + + state.commit(HashMap::from([ + ( + address_a, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: RevmAccountInfo::default(), + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::from(0), + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + ), + ( + address_b, + RevmAccount { + status: AccountStatus::Touched, + info: account_b, + // 0x01 => 1 => 2 + storage: HashMap::from([( + U256::from(1), + EvmStorageSlot { + present_value: U256::from(2), + original_value: U256::from(1), + ..Default::default() + }, + )]), + }, + ), + ])); + + state.merge_transitions(BundleRetention::Reverts); + + let outcome = + ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); + let mut writer = StorageWriter::new(Some(&provider), None); + writer + .write_to_storage(outcome, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + // Check plain storage state + let mut storage_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open plain storage state cursor"); + + assert_eq!( + storage_cursor.seek_exact(address_a).unwrap(), + Some((address_a, StorageEntry { key: B256::ZERO, value: U256::from(1) })), + "Slot 0 for account A should be 1" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + Some(( + address_a, + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account A should be 2" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + None, + "Account A should only have 2 storage slots" + ); + + assert_eq!( + storage_cursor.seek_exact(address_b).unwrap(), + Some(( + address_b, + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account B should be 2" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + None, + "Account B should only have 1 storage slot" + ); + + // Check change set + let mut 
changeset_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open storage changeset cursor"); + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), + Some(( + BlockNumberAddress((1, address_a)), + StorageEntry { key: B256::ZERO, value: U256::from(0) } + )), + "Slot 0 for account A should have changed from 0" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + Some(( + BlockNumberAddress((1, address_a)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(0) } + )), + "Slot 1 for account A should have changed from 0" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account A should only be in the changeset 2 times" + ); + + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(), + Some(( + BlockNumberAddress((1, address_b)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(1) } + )), + "Slot 1 for account B should have changed from 1" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account B should only be in the changeset 1 time" + ); + + // Delete account A + let mut state = State::builder().with_bundle_update().build(); + state.insert_account(address_a, RevmAccountInfo::default()); + + state.commit(HashMap::from([( + address_a, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: RevmAccountInfo::default(), + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let outcome = + ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); + let mut writer = StorageWriter::new(Some(&provider), None); + writer + .write_to_storage(outcome, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + assert_eq!( + storage_cursor.seek_exact(address_a).unwrap(), + None, + "Account A should have no storage slots after deletion" + ); + + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(), + Some(( + BlockNumberAddress((2, address_a)), + StorageEntry { key: B256::ZERO, value: U256::from(1) } + )), + "Slot 0 for account A should have changed from 1 on deletion" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + Some(( + BlockNumberAddress((2, address_a)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account A should have changed from 2 on deletion" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account A should only be in the changeset 2 times on deletion" + ); + } + + #[test] + fn write_to_db_multiple_selfdestructs() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address1 = Address::random(); + let account_info = RevmAccountInfo { nonce: 1, ..Default::default() }; + + // Block #0: initial state. 
+ let mut init_state = State::builder().with_bundle_update().build(); + init_state.insert_not_existing(address1); + init_state.commit(HashMap::from([( + address1, + RevmAccount { + info: account_info.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + )])); + init_state.merge_transitions(BundleRetention::Reverts); + + let outcome = + ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); + let mut writer = StorageWriter::new(Some(&provider), None); + writer + .write_to_storage(outcome, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account_with_storage( + address1, + account_info.clone(), + HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + ); + + // Block #1: change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 1 => 2 + storage: HashMap::from([( + U256::ZERO, + EvmStorageSlot { + original_value: U256::from(1), + present_value: U256::from(2), + ..Default::default() + }, + )]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #2: destroy account. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #3: re-create account and change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #4: change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 0 => 2 + // 0x02 => 0 => 4 + // 0x06 => 0 => 6 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ( + U256::from(2), + EvmStorageSlot { present_value: U256::from(4), ..Default::default() }, + ), + ( + U256::from(6), + EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, + ), + ]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #5: Destroy account again. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #6: Create, change, destroy and re-create in the same block. 
+ state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 0 => 2 + storage: HashMap::from([( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + )]), + }, + )])); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #7: Change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info, + // 0x00 => 0 => 9 + storage: HashMap::from([( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(9), ..Default::default() }, + )]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + let bundle = state.take_bundle(); + + let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); + let mut writer = StorageWriter::new(Some(&provider), None); + writer + .write_to_storage(outcome, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + let mut storage_changeset_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open plain storage state cursor"); + let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap(); + + // Iterate through all storage changes + + // Block + // : + // ... 
+ + // Block #0 + // 0x00: 0 + // 0x01: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((0, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((0, address1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::ZERO } + ))) + ); + + // Block #1 + // 0x00: 1 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((1, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) } + ))) + ); + + // Block #2 (destroyed) + // 0x00: 2 + // 0x01: 2 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((2, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((2, address1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) } + ))) + ); + + // Block #3 + // no storage changes + + // Block #4 + // 0x00: 0 + // 0x02: 0 + // 0x06: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(6), value: U256::ZERO } + ))) + ); + + // Block #5 (destroyed) + // 0x00: 2 + // 0x02: 4 + // 0x06: 6 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(4) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(6), value: U256::from(6) } + ))) + ); + + // Block #6 + // no storage changes (only inter block changes) + + // Block #7 + // 0x00: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((7, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!(storage_changes.next(), None); + } + + #[test] + fn storage_change_after_selfdestruct_within_block() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address1 = Address::random(); + let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; + + // Block #0: initial state. 
+ let mut init_state = State::builder().with_bundle_update().build(); + init_state.insert_not_existing(address1); + init_state.commit(HashMap::from([( + address1, + RevmAccount { + info: account1.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + )])); + init_state.merge_transitions(BundleRetention::Reverts); + let outcome = + ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); + let mut writer = StorageWriter::new(Some(&provider), None); + writer + .write_to_storage(outcome, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account_with_storage( + address1, + account1.clone(), + HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + ); + + // Block #1: Destroy, re-create, change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account1.clone(), + storage: HashMap::default(), + }, + )])); + + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1.clone(), + storage: HashMap::default(), + }, + )])); + + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account1, + // 0x01 => 0 => 5 + storage: HashMap::from([( + U256::from(1), + EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, + )]), + }, + )])); + + // Commit block #1 changes to the database. 
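+        // The account was destroyed and re-created within the same block, so the
+        // block #1 changeset is expected to keep the pre-destruction values of both
+        // slots (1 and 2) rather than the intermediate zeroed state; the assertions
+        // below check exactly that.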
+        state.merge_transitions(BundleRetention::Reverts);
+        let outcome =
+            ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new());
+        let mut writer = StorageWriter::new(Some(&provider), None);
+        writer
+            .write_to_storage(outcome, OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        let mut storage_changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSets>()
+            .expect("Could not open storage changeset cursor");
+        let range = BlockNumberAddress::range(1..=1);
+        let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap();
+
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) }
+            )))
+        );
+        assert_eq!(storage_changes.next(), None);
+    }
+
+    #[test]
+    fn revert_to_indices() {
+        let base = ExecutionOutcome {
+            bundle: BundleState::default(),
+            receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(),
+            first_block: 10,
+            requests: Vec::new(),
+        };
+
+        let mut this = base.clone();
+        assert!(this.revert_to(10));
+        assert_eq!(this.receipts.len(), 1);
+
+        let mut this = base.clone();
+        assert!(!this.revert_to(9));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(15));
+        assert_eq!(this.receipts.len(), 6);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(16));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base;
+        assert!(!this.revert_to(17));
+        assert_eq!(this.receipts.len(), 7);
+    }
+
+    #[test]
+    fn bundle_state_state_root() {
+        type PreState = BTreeMap<Address, (Account, BTreeMap<B256, U256>)>;
+        let mut prestate: PreState = (0..10)
+            .map(|key| {
+                let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None };
+                let storage =
+                    (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect();
+                (Address::with_last_byte(key), (account, storage))
+            })
+            .collect();
+
+        let provider_factory = create_test_provider_factory();
+        let provider_rw = provider_factory.provider_rw().unwrap();
+
+        // insert initial state to the database
+        let tx = provider_rw.tx_ref();
+        for (address, (account, storage)) in &prestate {
+            let hashed_address = keccak256(address);
+            tx.put::<tables::HashedAccounts>(hashed_address, *account).unwrap();
+            for (slot, value) in storage {
+                tx.put::<tables::HashedStorages>(
+                    hashed_address,
+                    StorageEntry { key: keccak256(slot), value: *value },
+                )
+                .unwrap();
+            }
+        }
+
+        let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap();
+        provider_rw.write_trie_updates(&updates).unwrap();
+
+        let mut state = State::builder().with_bundle_update().build();
+
+        let assert_state_root = |state: &State<EmptyDB>, expected: &PreState, msg| {
+            assert_eq!(
+                StateRoot::overlay_root(
+                    tx,
+                    ExecutionOutcome::new(
+                        state.bundle_state.clone(),
+                        Receipts::default(),
+                        0,
+                        Vec::new()
+                    )
+                    .hash_state_slow()
+                )
+                .unwrap(),
+                state_root(expected.clone().into_iter().map(|(address, (account, storage))| (
+                    address,
+                    (account, storage.into_iter())
+                ))),
+                "{msg}"
+            );
+        };
+
+        // database only state root is correct
+        assert_state_root(&state, &prestate, "empty");
+
+        // destroy account 1
+        let address1 = Address::with_last_byte(1);
+        let account1_old = prestate.remove(&address1).unwrap();
+        state.insert_account(address1, account1_old.0.into());
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched |
AccountStatus::SelfDestructed, + info: RevmAccountInfo::default(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "destroyed account"); + + // change slot 2 in account 2 + let address2 = Address::with_last_byte(2); + let slot2 = U256::from(2); + let slot2_key = B256::from(slot2); + let account2 = prestate.get_mut(&address2).unwrap(); + let account2_slot2_old_value = *account2.1.get(&slot2_key).unwrap(); + state.insert_account_with_storage( + address2, + account2.0.into(), + HashMap::from([(slot2, account2_slot2_old_value)]), + ); + + let account2_slot2_new_value = U256::from(100); + account2.1.insert(slot2_key, account2_slot2_new_value); + state.commit(HashMap::from([( + address2, + RevmAccount { + status: AccountStatus::Touched, + info: account2.0.into(), + storage: HashMap::from_iter([( + slot2, + EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), + )]), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed storage"); + + // change balance of account 3 + let address3 = Address::with_last_byte(3); + let account3 = prestate.get_mut(&address3).unwrap(); + state.insert_account(address3, account3.0.into()); + + account3.0.balance = U256::from(24); + state.commit(HashMap::from([( + address3, + RevmAccount { + status: AccountStatus::Touched, + info: account3.0.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed balance"); + + // change nonce of account 4 + let address4 = Address::with_last_byte(4); + let account4 = prestate.get_mut(&address4).unwrap(); + state.insert_account(address4, account4.0.into()); + + account4.0.nonce = 128; + state.commit(HashMap::from([( + address4, + RevmAccount { + status: AccountStatus::Touched, + info: account4.0.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed nonce"); + + // recreate account 1 + let account1_new = + Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(B256::random()) }; + prestate.insert(address1, (account1_new, BTreeMap::default())); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1_new.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "recreated"); + + // update storage for account 1 + let slot20 = U256::from(20); + let slot20_key = B256::from(slot20); + let account1_slot20_value = U256::from(12345); + prestate.get_mut(&address1).unwrap().1.insert(slot20_key, account1_slot20_value); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1_new.into(), + storage: HashMap::from_iter([( + slot20, + EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), + )]), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "recreated changed storage"); + } + + #[test] + fn prepend_state() { + let address1 = Address::random(); + let address2 = Address::random(); + + let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; + let account1_changed = RevmAccountInfo { nonce: 1, ..Default::default() }; + let account2 = RevmAccountInfo 
{ nonce: 1, ..Default::default() };
+
+        let present_state = BundleState::builder(2..=2)
+            .state_present_account_info(address1, account1_changed.clone())
+            .build();
+        assert_eq!(present_state.reverts.len(), 1);
+        let previous_state = BundleState::builder(1..=1)
+            .state_present_account_info(address1, account1)
+            .state_present_account_info(address2, account2.clone())
+            .build();
+        assert_eq!(previous_state.reverts.len(), 1);
+
+        let mut test = ExecutionOutcome {
+            bundle: present_state,
+            receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(),
+            first_block: 2,
+            requests: Vec::new(),
+        };
+
+        test.prepend_state(previous_state);
+
+        assert_eq!(test.receipts.len(), 1);
+        let end_state = test.state();
+        assert_eq!(end_state.state.len(), 2);
+        // reverts num should stay the same.
+        assert_eq!(end_state.reverts.len(), 1);
+        // account1 is not overwritten.
+        assert_eq!(end_state.state.get(&address1).unwrap().info, Some(account1_changed));
+        // account2 got inserted
+        assert_eq!(end_state.state.get(&address2).unwrap().info, Some(account2));
+    }
 }
diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs
index b31b7dabd311..54d9bf5b98a5 100644
--- a/crates/storage/provider/src/writer/static_file.rs
+++ b/crates/storage/provider/src/writer/static_file.rs
@@ -14,10 +14,12 @@ impl<'a> ReceiptWriter for StaticFileWriter<'a, StaticFileProviderRWRefMut<'_>>
     ) -> ProviderResult<()> {
         // Increment block on static file header.
         self.0.increment_block(StaticFileSegment::Receipts, block_number)?;
-        let receipts = receipts.into_iter().enumerate().map(|(tx_idx, receipt)| {
+        let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| {
             Ok((
                 first_tx_index + tx_idx as u64,
-                receipt.expect("receipt should not be filtered when saving to static files."),
+                receipt
+                    .as_ref()
+                    .expect("receipt should not be filtered when saving to static files."),
             ))
         });
         self.0.append_receipts(receipts)?;
diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs
index 3dc22de8ae4f..fe97fb3713de 100644
--- a/crates/storage/storage-api/src/block.rs
+++ b/crates/storage/storage-api/src/block.rs
@@ -23,10 +23,10 @@ pub enum BlockSource {
     #[default]
     Any,
     /// The block was fetched from the pending block source, the blockchain tree that buffers
-    /// blocks that are not yet finalized.
+    /// blocks that are not yet part of the canonical chain.
     Pending,
-    /// The block was fetched from the database.
-    Database,
+    /// The block must be part of the canonical chain.
+    Canonical,
 }
 
 impl BlockSource {
@@ -35,9 +35,9 @@ impl BlockSource {
         matches!(self, Self::Pending | Self::Any)
     }
 
-    /// Returns `true` if the block source is `Database` or `Any`.
-    pub const fn is_database(&self) -> bool {
-        matches!(self, Self::Database | Self::Any)
+    /// Returns `true` if the block source is `Canonical` or `Any`.
+    pub const fn is_canonical(&self) -> bool {
+        matches!(self, Self::Canonical | Self::Any)
     }
 }
 
diff --git a/crates/storage/storage-api/src/stage_checkpoint.rs b/crates/storage/storage-api/src/stage_checkpoint.rs
index 3815239be2c2..d59f3dfb2554 100644
--- a/crates/storage/storage-api/src/stage_checkpoint.rs
+++ b/crates/storage/storage-api/src/stage_checkpoint.rs
@@ -10,6 +10,10 @@ pub trait StageCheckpointReader: Send + Sync {
 
     /// Get stage checkpoint progress.
     fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult<Option<Vec<u8>>>;
+
+    /// Reads all stage checkpoints and returns a list with the name of the stage and the
+    /// checkpoint data.
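+    ///
+    /// A usage sketch (`provider` stands for any implementer of this trait):
+    /// ```ignore
+    /// for (stage_name, checkpoint) in provider.get_all_checkpoints()? {
+    ///     println!("{stage_name}: block {}", checkpoint.block_number);
+    /// }
+    /// ```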
+    fn get_all_checkpoints(&self) -> ProviderResult<Vec<(String, StageCheckpoint)>>;
 }
 
 /// The trait for updating stage checkpoint related data.
diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml
index 77edd6f3e541..459784b61017 100644
--- a/crates/transaction-pool/Cargo.toml
+++ b/crates/transaction-pool/Cargo.toml
@@ -15,7 +15,7 @@ workspace = true
 # reth
 reth-chainspec.workspace = true
 reth-eth-wire-types.workspace = true
-reth-primitives.workspace = true
+reth-primitives = { workspace = true, features = ["c-kzg"] }
 reth-execution-types.workspace = true
 reth-fs-util.workspace = true
 reth-provider.workspace = true
diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml
index da5d5a828cbf..3812016fffc8 100644
--- a/crates/trie/common/Cargo.toml
+++ b/crates/trie/common/Cargo.toml
@@ -38,7 +38,6 @@ arbitrary = { workspace = true, features = ["derive"] }
 assert_matches.workspace = true
 proptest.workspace = true
 proptest-arbitrary-interop.workspace = true
-proptest-derive.workspace = true
 serde_json.workspace = true
 test-fuzz.workspace = true
 toml.workspace = true
diff --git a/crates/trie/common/src/account.rs b/crates/trie/common/src/account.rs
index 64860ab78b31..269202601182 100644
--- a/crates/trie/common/src/account.rs
+++ b/crates/trie/common/src/account.rs
@@ -35,7 +35,7 @@ impl From<GenesisAccount> for TrieAccount {
             storage_root_unhashed(
                 storage
                     .into_iter()
-                    .filter(|(_, value)| *value != B256::ZERO)
+                    .filter(|(_, value)| !value.is_zero())
                    .map(|(slot, value)| (slot, U256::from_be_bytes(*value))),
             )
         })
diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs
index ee19b7ed91a2..bdec36028b94 100644
--- a/crates/trie/common/src/lib.rs
+++ b/crates/trie/common/src/lib.rs
@@ -26,7 +26,7 @@ pub use subnode::StoredSubNode;
 mod proofs;
 #[cfg(any(test, feature = "test-utils"))]
 pub use proofs::triehash;
-pub use proofs::{AccountProof, StorageProof};
+pub use proofs::*;
 
 pub mod root;
diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs
index 11953a48decf..8fa72e2395ae 100644
--- a/crates/trie/common/src/proofs.rs
+++ b/crates/trie/common/src/proofs.rs
@@ -2,12 +2,121 @@
 
 use crate::{Nibbles, TrieAccount};
 use alloy_primitives::{keccak256, Address, Bytes, B256, U256};
-use alloy_rlp::encode_fixed_size;
+use alloy_rlp::{encode_fixed_size, Decodable};
 use alloy_trie::{
+    nodes::TrieNode,
     proof::{verify_proof, ProofVerificationError},
     EMPTY_ROOT_HASH,
 };
-use reth_primitives_traits::Account;
+use reth_primitives_traits::{constants::KECCAK_EMPTY, Account};
+use std::collections::{BTreeMap, HashMap};
+
+/// The state multiproof of target accounts and multiproofs of their storage tries.
+#[derive(Clone, Default, Debug)]
+pub struct MultiProof {
+    /// State trie multiproof for requested accounts.
+    pub account_subtree: BTreeMap<Nibbles, Bytes>,
+    /// Storage trie multiproofs.
+    pub storage_multiproofs: HashMap<B256, StorageMultiProof>,
+}
+
+impl MultiProof {
+    /// Construct the account proof from the multiproof.
+    pub fn account_proof(
+        &self,
+        address: Address,
+        slots: &[B256],
+    ) -> Result<AccountProof, alloy_rlp::Error> {
+        let hashed_address = keccak256(address);
+        let nibbles = Nibbles::unpack(hashed_address);
+
+        // Retrieve the account proof.
+        let proof = self
+            .account_subtree
+            .iter()
+            .filter(|(path, _)| nibbles.starts_with(path))
+            .map(|(_, node)| node.clone())
+            .collect::<Vec<_>>();
+
+        // Inspect the last node in the proof. If it's a leaf node with matching suffix,
+        // then the node contains the encoded trie account.
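+        // (A leaf stores the remaining key suffix, so if the target path ends with
+        // the leaf key, this leaf is the target entry; otherwise the last node only
+        // proves the account's absence.)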
+        let info = 'info: {
+            if let Some(last) = proof.last() {
+                if let TrieNode::Leaf(leaf) = TrieNode::decode(&mut &last[..])? {
+                    if nibbles.ends_with(&leaf.key) {
+                        let account = TrieAccount::decode(&mut &leaf.value[..])?;
+                        break 'info Some(Account {
+                            balance: account.balance,
+                            nonce: account.nonce,
+                            bytecode_hash: (account.code_hash != KECCAK_EMPTY)
+                                .then_some(account.code_hash),
+                        })
+                    }
+                }
+            }
+            None
+        };
+
+        // Retrieve proofs for requested storage slots.
+        let storage_multiproof = self.storage_multiproofs.get(&hashed_address);
+        let storage_root = storage_multiproof.map(|m| m.root).unwrap_or(EMPTY_ROOT_HASH);
+        let mut storage_proofs = Vec::with_capacity(slots.len());
+        for slot in slots {
+            let proof = if let Some(multiproof) = &storage_multiproof {
+                multiproof.storage_proof(*slot)?
+            } else {
+                StorageProof::new(*slot)
+            };
+            storage_proofs.push(proof);
+        }
+        Ok(AccountProof { address, info, proof, storage_root, storage_proofs })
+    }
+}
+
+/// The merkle multiproof of storage trie.
+#[derive(Clone, Debug)]
+pub struct StorageMultiProof {
+    /// Storage trie root.
+    pub root: B256,
+    /// Storage multiproof for requested slots.
+    pub subtree: BTreeMap<Nibbles, Bytes>,
+}
+
+impl Default for StorageMultiProof {
+    fn default() -> Self {
+        Self { root: EMPTY_ROOT_HASH, subtree: BTreeMap::default() }
+    }
+}
+
+impl StorageMultiProof {
+    /// Return storage proofs for the target storage slot (unhashed).
+    pub fn storage_proof(&self, slot: B256) -> Result<StorageProof, alloy_rlp::Error> {
+        let nibbles = Nibbles::unpack(keccak256(slot));
+
+        // Retrieve the storage proof.
+        let proof = self
+            .subtree
+            .iter()
+            .filter(|(path, _)| nibbles.starts_with(path))
+            .map(|(_, node)| node.clone())
+            .collect::<Vec<_>>();
+
+        // Inspect the last node in the proof. If it's a leaf node with matching suffix,
+        // then the node contains the encoded slot value.
+        let value = 'value: {
+            if let Some(last) = proof.last() {
+                if let TrieNode::Leaf(leaf) = TrieNode::decode(&mut &last[..])? {
+                    if nibbles.ends_with(&leaf.key) {
+                        break 'value U256::decode(&mut &leaf.value[..])?
+                    }
+                }
+            }
+            U256::ZERO
+        };
+
+        Ok(StorageProof { key: slot, nibbles, value, proof })
+    }
+}
 
 /// The merkle proof with the relevant account info.
 #[derive(PartialEq, Eq, Debug)]
@@ -37,23 +146,6 @@ impl AccountProof {
         }
     }
 
-    /// Set account info, storage root and requested storage proofs.
-    pub fn set_account(
-        &mut self,
-        info: Account,
-        storage_root: B256,
-        storage_proofs: Vec<StorageProof>,
-    ) {
-        self.info = Some(info);
-        self.storage_root = storage_root;
-        self.storage_proofs = storage_proofs;
-    }
-
-    /// Set proof path.
-    pub fn set_proof(&mut self, proof: Vec<Bytes>) {
-        self.proof = proof;
-    }
-
     /// Verify the storage proofs and account proof against the provided state root.
     pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> {
         // Verify storage proofs.
@@ -106,16 +198,6 @@ impl StorageProof {
         Self { key, nibbles, ..Default::default() }
     }
 
-    /// Set storage value.
-    pub fn set_value(&mut self, value: U256) {
-        self.value = value;
-    }
-
-    /// Set proof path.
-    pub fn set_proof(&mut self, proof: Vec<Bytes>) {
-        self.proof = proof;
-    }
-
     /// Verify the proof against the provided storage root.
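+    ///
+    /// A small sketch, mirroring the empty-storage case exercised by the test suite:
+    /// an empty proof for a vacant slot verifies against the empty trie root.
+    /// ```ignore
+    /// let proof = StorageProof::new(slot);
+    /// assert_eq!(proof.verify(EMPTY_ROOT_HASH), Ok(()));
+    /// ```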
pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { let expected = diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml new file mode 100644 index 000000000000..3c479072b658 --- /dev/null +++ b/crates/trie/db/Cargo.toml @@ -0,0 +1,78 @@ +[package] +name = "reth-trie-db" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Database integration with merkle trie implementation" + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-execution-errors.workspace = true +reth-db.workspace = true +reth-db-api.workspace = true +reth-stages-types.workspace = true +reth-trie-common.workspace = true +reth-trie.workspace = true + +revm.workspace = true + +# alloy +alloy-rlp.workspace = true + +# tracing +tracing.workspace = true + +# misc +rayon.workspace = true +derive_more.workspace = true +auto_impl.workspace = true +itertools.workspace = true + +# `metrics` feature +reth-metrics = { workspace = true, optional = true } +metrics = { workspace = true, optional = true } + +# `test-utils` feature +triehash = { version = "0.8", optional = true } + +# `serde` feature +serde = { workspace = true, optional = true } + +[dev-dependencies] +# reth +reth-chainspec.workspace = true +reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-db = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-storage-errors.workspace = true +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-trie = { workspace = true, features = ["test-utils"] } + +# trie +triehash = "0.8" + +# misc +proptest.workspace = true +proptest-arbitrary-interop.workspace = true +tokio = { workspace = true, default-features = false, features = [ + "sync", + "rt", + "macros", +] } +tokio-stream.workspace = true +once_cell.workspace = true +serde_json.workspace = true +similar-asserts.workspace = true +criterion.workspace = true + +[features] +metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] +serde = ["dep:serde"] +test-utils = ["triehash", "reth-trie-common/test-utils"] diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs new file mode 100644 index 000000000000..21deac82d27c --- /dev/null +++ b/crates/trie/db/src/lib.rs @@ -0,0 +1,9 @@ +//! An integration of [`reth-trie`] with [`reth-db`]. + +mod proof; +mod state; +mod storage; + +pub use proof::DatabaseProof; +pub use state::DatabaseStateRoot; +pub use storage::DatabaseStorageRoot; diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs new file mode 100644 index 000000000000..09f8098fe2c9 --- /dev/null +++ b/crates/trie/db/src/proof.rs @@ -0,0 +1,46 @@ +use reth_db_api::transaction::DbTx; +use reth_execution_errors::StateProofError; +use reth_primitives::{Address, B256}; +use reth_trie::{ + hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, + proof::Proof, + HashedPostState, +}; +use reth_trie_common::AccountProof; + +/// Extends [`Proof`] with operations specific for working with a database transaction. +pub trait DatabaseProof<'a, TX> { + /// Create a new [Proof] from database transaction. + fn from_tx(tx: &'a TX) -> Self; + + /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. 
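+    ///
+    /// A usage sketch (assuming an open read-only transaction `tx`, an overlay
+    /// `post_state`, and illustrative `address`/`slots`/`state_root` values):
+    /// ```ignore
+    /// let proof = Proof::overlay_account_proof(&tx, post_state, address, &slots)?;
+    /// assert_eq!(proof.verify(state_root), Ok(()));
+    /// ```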
+    fn overlay_account_proof(
+        tx: &'a TX,
+        post_state: HashedPostState,
+        address: Address,
+        slots: &[B256],
+    ) -> Result<AccountProof, StateProofError>;
+}
+
+impl<'a, TX: DbTx> DatabaseProof<'a, TX> for Proof<&'a TX, DatabaseHashedCursorFactory<'a, TX>> {
+    /// Create a new [Proof] instance from database transaction.
+    fn from_tx(tx: &'a TX) -> Self {
+        Self::new(tx, DatabaseHashedCursorFactory::new(tx))
+    }
+
+    fn overlay_account_proof(
+        tx: &'a TX,
+        post_state: HashedPostState,
+        address: Address,
+        slots: &[B256],
+    ) -> Result<AccountProof, StateProofError> {
+        let prefix_sets = post_state.construct_prefix_sets();
+        let sorted = post_state.into_sorted();
+        let hashed_cursor_factory =
+            HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted);
+        Proof::from_tx(tx)
+            .with_hashed_cursor_factory(hashed_cursor_factory)
+            .with_prefix_sets_mut(prefix_sets)
+            .account_proof(address, slots)
+    }
+}
diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs
new file mode 100644
index 000000000000..8c72825e1d31
--- /dev/null
+++ b/crates/trie/db/src/state.rs
@@ -0,0 +1,208 @@
+use reth_db_api::transaction::DbTx;
+use reth_execution_errors::StateRootError;
+use reth_primitives::{BlockNumber, B256};
+use reth_trie::{
+    hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory},
+    prefix_set::PrefixSetLoader,
+    updates::TrieUpdates,
+    HashedPostState, StateRoot, StateRootProgress,
+};
+use std::ops::RangeInclusive;
+use tracing::debug;
+
+/// Extends [`StateRoot`] with operations specific for working with a database transaction.
+pub trait DatabaseStateRoot<'a, TX>: Sized {
+    /// Create a new [`StateRoot`] instance.
+    fn from_tx(tx: &'a TX) -> Self;
+
+    /// Given a block number range, identifies all the accounts and storage keys that
+    /// have changed.
+    ///
+    /// # Returns
+    ///
+    /// An instance of state root calculator with account and storage prefixes loaded.
+    fn incremental_root_calculator(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<Self, StateRootError>;
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes.
+    ///
+    /// # Returns
+    ///
+    /// The updated state root.
+    fn incremental_root(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<B256, StateRootError>;
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes collecting updates in the process.
+    ///
+    /// Ignores the threshold.
+    ///
+    /// # Returns
+    ///
+    /// The updated state root and the trie updates.
+    fn incremental_root_with_updates(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<(B256, TrieUpdates), StateRootError>;
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes collecting updates in the process.
+    ///
+    /// # Returns
+    ///
+    /// The intermediate progress of state root computation.
+    fn incremental_root_with_progress(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<StateRootProgress, StateRootError>;
+
+    /// Calculate the state root for this [`HashedPostState`].
+    /// Internally, this method retrieves prefix sets and uses them
+    /// to calculate the incremental state root.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use reth_db::test_utils::create_test_rw_db;
+    /// use reth_db_api::database::Database;
+    /// use reth_primitives::{Account, U256};
+    /// use reth_trie::{HashedPostState, StateRoot};
+    /// use reth_trie_db::DatabaseStateRoot;
+    ///
+    /// // Initialize the database
+    /// let db = create_test_rw_db();
+    ///
+    /// // Initialize hashed post state
+    /// let mut hashed_state = HashedPostState::default();
+    /// hashed_state.accounts.insert(
+    ///     [0x11; 32].into(),
+    ///     Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }),
+    /// );
+    ///
+    /// // Calculate the state root
+    /// let tx = db.tx().expect("failed to create transaction");
+    /// let state_root = StateRoot::overlay_root(&tx, hashed_state);
+    /// ```
+    ///
+    /// # Returns
+    ///
+    /// The state root for this [`HashedPostState`].
+    fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result<B256, StateRootError>;
+
+    /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie
+    /// updates. See [`Self::overlay_root`] for more info.
+    fn overlay_root_with_updates(
+        tx: &'a TX,
+        post_state: HashedPostState,
+    ) -> Result<(B256, TrieUpdates), StateRootError>;
+}
+
+impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
+    for StateRoot<&'a TX, DatabaseHashedCursorFactory<'a, TX>>
+{
+    fn from_tx(tx: &'a TX) -> Self {
+        Self::new(tx, DatabaseHashedCursorFactory::new(tx))
+    }
+
+    fn incremental_root_calculator(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<Self, StateRootError> {
+        let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?;
+        Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets))
+    }
+
+    fn incremental_root(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<B256, StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root");
+        Self::incremental_root_calculator(tx, range)?.root()
+    }
+
+    fn incremental_root_with_updates(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<(B256, TrieUpdates), StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root");
+        Self::incremental_root_calculator(tx, range)?.root_with_updates()
+    }
+
+    fn incremental_root_with_progress(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<StateRootProgress, StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root with progress");
+        Self::incremental_root_calculator(tx, range)?.root_with_progress()
+    }
+
+    fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result<B256, StateRootError> {
+        let prefix_sets = post_state.construct_prefix_sets().freeze();
+        let sorted = post_state.into_sorted();
+        StateRoot::new(
+            tx,
+            HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted),
+        )
+        .with_prefix_sets(prefix_sets)
+        .root()
+    }
+
+    fn overlay_root_with_updates(
+        tx: &'a TX,
+        post_state: HashedPostState,
+    ) -> Result<(B256, TrieUpdates), StateRootError> {
+        let prefix_sets = post_state.construct_prefix_sets().freeze();
+        let sorted = post_state.into_sorted();
+        StateRoot::new(
+            tx,
+            HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted),
+        )
+        .with_prefix_sets(prefix_sets)
+        .root_with_updates()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_db::test_utils::create_test_rw_db;
+    use reth_db_api::database::Database;
+    use reth_primitives::{hex, revm_primitives::AccountInfo, Address, U256};
+    use revm::db::BundleState;
+    use std::collections::HashMap;
+
+    #[test]
+    fn from_bundle_state_with_rayon() {
+        let address1 = Address::with_last_byte(1);
+        let address2 = Address::with_last_byte(2);
+        let slot1 = U256::from(1015);
+        let
slot2 = U256::from(2015); + + let account1 = AccountInfo { nonce: 1, ..Default::default() }; + let account2 = AccountInfo { nonce: 2, ..Default::default() }; + + let bundle_state = BundleState::builder(2..=2) + .state_present_account_info(address1, account1) + .state_present_account_info(address2, account2) + .state_storage(address1, HashMap::from([(slot1, (U256::ZERO, U256::from(10)))])) + .state_storage(address2, HashMap::from([(slot2, (U256::ZERO, U256::from(20)))])) + .build(); + assert_eq!(bundle_state.reverts.len(), 1); + + let post_state = HashedPostState::from_bundle_state(&bundle_state.state); + assert_eq!(post_state.accounts.len(), 2); + assert_eq!(post_state.storages.len(), 2); + + let db = create_test_rw_db(); + let tx = db.tx().expect("failed to create transaction"); + assert_eq!( + StateRoot::overlay_root(&tx, post_state).unwrap(), + hex!("b464525710cafcf5d4044ac85b72c08b1e76231b8d91f288fe438cc41d8eaafd") + ); + } +} diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs new file mode 100644 index 000000000000..b4c31dbe343b --- /dev/null +++ b/crates/trie/db/src/storage.rs @@ -0,0 +1,39 @@ +use reth_db_api::transaction::DbTx; +use reth_primitives::{Address, B256}; +use reth_trie::{hashed_cursor::DatabaseHashedCursorFactory, StorageRoot}; + +#[cfg(feature = "metrics")] +use reth_trie::metrics::{TrieRootMetrics, TrieType}; + +/// Extends [`StorageRoot`] with operations specific for working with a database transaction. +pub trait DatabaseStorageRoot<'a, TX> { + /// Create a new storage root calculator from database transaction and raw address. + fn from_tx(tx: &'a TX, address: Address) -> Self; + + /// Create a new storage root calculator from database transaction and hashed address. + fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self; +} + +impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> + for StorageRoot<&'a TX, DatabaseHashedCursorFactory<'a, TX>> +{ + fn from_tx(tx: &'a TX, address: Address) -> Self { + Self::new( + tx, + DatabaseHashedCursorFactory::new(tx), + address, + #[cfg(feature = "metrics")] + TrieRootMetrics::new(TrieType::Storage), + ) + } + + fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self { + Self::new_hashed( + tx, + DatabaseHashedCursorFactory::new(tx), + hashed_address, + #[cfg(feature = "metrics")] + TrieRootMetrics::new(TrieType::Storage), + ) + } +} diff --git a/crates/trie/db/tests/fuzz_in_memory_nodes.rs b/crates/trie/db/tests/fuzz_in_memory_nodes.rs new file mode 100644 index 000000000000..5c213924b421 --- /dev/null +++ b/crates/trie/db/tests/fuzz_in_memory_nodes.rs @@ -0,0 +1,59 @@ +use proptest::prelude::*; +use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; +use reth_primitives::{Account, B256, U256}; +use reth_provider::test_utils::create_test_provider_factory; +use reth_trie::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + test_utils::state_root_prehashed, + trie_cursor::InMemoryTrieCursorFactory, + StateRoot, +}; +use reth_trie_common::Nibbles; +use reth_trie_db::DatabaseStateRoot; +use std::collections::BTreeMap; + +proptest! 
{
+    #![proptest_config(ProptestConfig {
+        cases: 128, ..ProptestConfig::default()
+    })]
+
+    #[test]
+    fn fuzz_in_memory_nodes(mut init_state: BTreeMap<B256, U256>, mut updated_state: BTreeMap<B256, U256>) {
+        let factory = create_test_provider_factory();
+        let provider = factory.provider_rw().unwrap();
+        let mut hashed_account_cursor = provider.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+
+        // Insert init state into database
+        for (hashed_address, balance) in init_state.clone() {
+            hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap();
+        }
+
+        // Compute initial root and updates
+        let (_, trie_updates) = StateRoot::from_tx(provider.tx_ref())
+            .root_with_updates()
+            .unwrap();
+
+        // Insert state updates into database
+        let mut changes = PrefixSetMut::default();
+        for (hashed_address, balance) in updated_state.clone() {
+            hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap();
+            changes.insert(Nibbles::unpack(hashed_address));
+        }
+
+        // Compute root with in-memory trie nodes overlay
+        let (state_root, _) = StateRoot::from_tx(provider.tx_ref())
+            .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() })
+            .with_trie_cursor_factory(InMemoryTrieCursorFactory::new(provider.tx_ref(), &trie_updates.into_sorted()))
+            .root_with_updates()
+            .unwrap();
+
+        // Verify the result
+        let mut state = BTreeMap::default();
+        state.append(&mut init_state);
+        state.append(&mut updated_state);
+        let expected_root = state_root_prehashed(
+            state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty())))
+        );
+        assert_eq!(expected_root, state_root);
+    }
+}
diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs
new file mode 100644
index 000000000000..fbc40254bab7
--- /dev/null
+++ b/crates/trie/db/tests/proof.rs
@@ -0,0 +1,288 @@
+use once_cell::sync::Lazy;
+use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET};
+use reth_db_api::database::Database;
+use reth_primitives::{
+    constants::EMPTY_ROOT_HASH, keccak256, Account, Address, Bytes, StorageEntry, B256, U256,
+};
+use reth_provider::{
+    test_utils::create_test_provider_factory, HashingWriter, ProviderFactory, TrieWriter,
+};
+use reth_storage_errors::provider::ProviderResult;
+use reth_trie::{proof::Proof, Nibbles, StateRoot};
+use reth_trie_common::{AccountProof, StorageProof};
+use reth_trie_db::{DatabaseProof, DatabaseStateRoot};
+use std::{str::FromStr, sync::Arc};
+
+/*
+    World State (sampled from )
+    | address                                    | prefix    | hash                                                               | balance
+    |--------------------------------------------|-----------|--------------------------------------------------------------------|--------
+    | 0x2031f89b3ea8014eb51a78c316e42af3e0d7695f | 0xa711355 | 0xa711355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40 | 45 eth
+    | 0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2 | 0xa77d337 | 0xa77d337781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37a | 1 wei
+    | 0x62b0dd4aab2b1a0a04e279e2b828791a10755528 | 0xa7f9365 | 0xa7f936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446e | 1.1 eth
+    | 0x1ed9b1dd266b607ee278726d324b855a093394a6 | 0xa77d397 | 0xa77d397a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3 | .12 eth
+
+    All expected testspec results were obtained by querying the proof RPC of a geth instance
+    running `geth init crates/trie/testdata/proof-genesis.json && geth --http`.
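+
+    (The `prefix` column is the leading nibbles of each account's `hash`, i.e. the
+    path at which the account leaf is reached in the hashed state trie.)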
+*/
+static TEST_SPEC: Lazy<Arc<ChainSpec>> = Lazy::new(|| {
+    ChainSpec {
+        chain: Chain::from_id(12345),
+        genesis: serde_json::from_str(include_str!("../../trie/testdata/proof-genesis.json"))
+            .expect("Can't deserialize test genesis json"),
+        ..Default::default()
+    }
+    .into()
+});
+
+fn convert_to_proof<'a>(path: impl IntoIterator<Item = &'a str>) -> Vec<Bytes> {
+    path.into_iter().map(Bytes::from_str).collect::<Result<Vec<_>, _>>().unwrap()
+}
+
+fn insert_genesis<DB: Database>(
+    provider_factory: &ProviderFactory<DB>,
+    chain_spec: Arc<ChainSpec>,
+) -> ProviderResult<B256> {
+    let provider = provider_factory.provider_rw()?;
+
+    // Hash accounts and insert them into hashing table.
+    let genesis = chain_spec.genesis();
+    let alloc_accounts = genesis
+        .alloc
+        .iter()
+        .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account))));
+    provider.insert_account_for_hashing(alloc_accounts).unwrap();
+
+    let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| {
+        // Only return `Some` if there is storage.
+        account.storage.map(|storage| {
+            (
+                addr,
+                storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }),
+            )
+        })
+    });
+    provider.insert_storage_for_hashing(alloc_storage)?;
+
+    let (root, updates) = StateRoot::from_tx(provider.tx_ref())
+        .root_with_updates()
+        .map_err(Into::<reth_db::DatabaseError>::into)?;
+    provider.write_trie_updates(&updates).unwrap();
+
+    provider.commit()?;
+
+    Ok(root)
+}
+
+#[test]
+fn testspec_proofs() {
+    // Create test database and insert genesis accounts.
+    let factory = create_test_provider_factory();
+    let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap();
+
+    let data = Vec::from([
+        (
+            "0x2031f89b3ea8014eb51a78c316e42af3e0d7695f",
+            convert_to_proof([
+                "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
+                "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
+                "0xf8719f31355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40b84ff84d80890270801d946c940000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            ])
+        ),
+        (
+            "0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2",
+            convert_to_proof([
+                "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
+                "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
+                "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54",
+                "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080",
+                "0xf8679e207781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37ab846f8448001a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            ])
+        ),
+        (
+            "0x62b0dd4aab2b1a0a04e279e2b828791a10755528",
+            convert_to_proof([
+                "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
+                "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
"0xf8709f3936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446eb84ef84c80880f43fc2c04ee0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ( + "0x1ed9b1dd266b607ee278726d324b855a093394a6", + convert_to_proof([ + "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", + "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", + "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", + "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", + "0xf86f9e207a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3b84ef84c808801aa535d3d0c0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ]); + + let provider = factory.provider().unwrap(); + for (target, expected_proof) in data { + let target = Address::from_str(target).unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!( + account_proof.proof, + expected_proof, + "proof for {target:?} does not match" + ); + assert_eq!(account_proof.verify(root), Ok(())); + } +} + +#[test] +fn testspec_empty_storage_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); + + let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); + let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); + + assert_eq!(slots.len(), account_proof.storage_proofs.len()); + for (idx, slot) in slots.into_iter().enumerate() { + let proof = account_proof.storage_proofs.get(idx).unwrap(); + assert_eq!(proof, &StorageProof::new(slot)); + assert_eq!(proof.verify(account_proof.storage_root), Ok(())); + } + assert_eq!(account_proof.verify(root), Ok(())); +} + +#[test] +fn mainnet_genesis_account_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); + + // Address from mainnet genesis allocation. 
+ // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743280").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743280 --block 0` + let expected_account_proof = convert_to_proof([ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0dae48f5b47930c28bb116fbd55e52cd47242c71bf55373b55eb2805ee2e4a929a00f1f37f337ec800e2e5974e2e7355f10f1a4832b39b846d916c3597a460e0676a0da8f627bb8fbeead17b318e0a8e4f528db310f591bb6ab2deda4a9f7ca902ab5a0971c662648d58295d0d0aa4b8055588da0037619951217c22052802549d94a2fa0ccc701efe4b3413fd6a61a6c9f40e955af774649a8d9fd212d046a5a39ddbb67a0d607cdb32e2bd635ee7f2f9e07bc94ddbd09b10ec0901b66628e15667aec570ba05b89203dc940e6fa70ec19ad4e01d01849d3a5baa0a8f9c0525256ed490b159fa0b84227d48df68aecc772939a59afa9e1a4ab578f7b698bdb1289e29b6044668ea0fd1c992070b94ace57e48cbf6511a16aa770c645f9f5efba87bbe59d0a042913a0e16a7ccea6748ae90de92f8aef3b3dc248a557b9ac4e296934313f24f7fced5fa042373cf4a00630d94de90d0a23b8f38ced6b0f7cb818b8925fee8f0c2a28a25aa05f89d2161c1741ff428864f7889866484cef622de5023a46e795dfdec336319fa07597a017664526c8c795ce1da27b8b72455c49657113e0455552dbc068c5ba31a0d5be9089012fda2c585a1b961e988ea5efcd3a06988e150a8682091f694b37c5a0f7b0352e38c315b2d9a14d51baea4ddee1770974c806e209355233c3c89dce6ea049bf6e8df0acafd0eff86defeeb305568e44d52d2235cf340ae15c6034e2b24180", + "0xf901f1a0cf67e0f5d5f8d70e53a6278056a14ddca46846f5ef69c7bde6810d058d4a9eda80a06732ada65afd192197fe7ce57792a7f25d26978e64e954b7b84a1f7857ac279da05439f8d011683a6fc07efb90afca198fd7270c795c835c7c85d91402cda992eaa0449b93033b6152d289045fdb0bf3f44926f831566faa0e616b7be1abaad2cb2da031be6c3752bcd7afb99b1bb102baf200f8567c394d464315323a363697646616a0a40e3ed11d906749aa501279392ffde868bd35102db41364d9c601fd651f974aa0044bfa4fe8dd1a58e6c7144da79326e94d1331c0b00373f6ae7f3662f45534b7a098005e3e48db68cb1dc9b9f034ff74d2392028ddf718b0f2084133017da2c2e7a02a62bc40414ee95b02e202a9e89babbabd24bef0abc3fc6dcd3e9144ceb0b725a0239facd895bbf092830390a8676f34b35b29792ae561f196f86614e0448a5792a0a4080f88925daff6b4ce26d188428841bd65655d8e93509f2106020e76d41eefa04918987904be42a6894256ca60203283d1b89139cf21f09f5719c44b8cdbb8f7a06201fc3ef0827e594d953b5e3165520af4fceb719e11cc95fd8d3481519bfd8ca05d0e353d596bd725b09de49c01ede0f29023f0153d7b6d401556aeb525b2959ba0cd367d0679950e9c5f2aa4298fd4b081ade2ea429d71ff390c50f8520e16e30880", + 
"0xf87180808080808080a0dbee8b33c73b86df839f309f7ac92eee19836e08b39302ffa33921b3c6a09f66a06068b283d51aeeee682b8fb5458354315d0b91737441ede5e137c18b4775174a8080808080a0fe7779c7d58c2fda43eba0a6644043c86ebb9ceb4836f89e30831f23eb059ece8080", + "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); +} + +#[test] +fn mainnet_genesis_account_proof_nonexistent() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); + + // Address that does not exist in mainnet genesis allocation. + // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743281").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743281 --block 0` + let expected_account_proof = convert_to_proof([ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0586b1ddec8db4824154209d355a1989b6c43aa69aba36e9d70c9faa53e7452baa0f86db47d628c73764d74b9ccaed73b8486d97a7731d57008fc9efaf417411860a0d9faed7b9ea107b5d98524246c977e782377f976e34f70717e8b1207f2f9b981a00218f59ccedf797c95e27c56405b9bf16845050fb43e773b66b26bc6992744f5a0dbf396f480c4e024156644adea7c331688d03742369e9d87ab8913bc439ff975a0aced524f39b22c62a5be512ddbca89f0b89b47c311065ccf423dee7013c7ea83a0c06b05f80b237b403adc019c0bc95b5de935021b14a75cbc18509eec60dfd83aa085339d45c4a52b7d523c301701f1ab339964e9c907440cff0a871c98dcf8811ea03ae9f6b8e227ec9be9461f0947b01696f78524c4519a6dee9fba14d209952cf9a0af17f551f9fa1ba4be41d0b342b160e2e8468d7e98a65a2dbf9d5fe5d6928024a0b850ac3bc03e9a309cc59ce5f1ab8db264870a7a22786081753d1db91897b8e6a09e796a4904bd78cb2655b5f346c94350e2d5f0dbf2bc00ac00871cd7ba46b241a0f6f0377427b900529caf32abf32ba1eb93f5f70153aa50b90bf55319a434c252a0725eaf27c8ee07e9b2511a6d6a0d71c649d855e8a9ed26e667903e2e94ae47cba0e4139fb48aa1a524d47f6e0df80314b88b52202d7e853da33c276aa8572283a8a05e9003d54a45935fdebae3513dc7cd16626dc05e1d903ae7f47f1a35aa6e234580", + 
"0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" + ]); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); +} + +#[test] +fn holesky_deposit_contract_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, HOLESKY.clone()).unwrap(); + + let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); + // existent + let slot_22 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000022") + .unwrap(); + let slot_23 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000023") + .unwrap(); + let slot_24 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000024") + .unwrap(); + // non-existent + let slot_100 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000100") + .unwrap(); + let slots = Vec::from([slot_22, slot_23, slot_24, slot_100]); + + // `cast proof 0x4242424242424242424242424242424242424242 0x22 0x23 0x24 0x100 --block 0` + let expected = AccountProof { + address: target, + info: Some(Account { + balance: U256::ZERO, + nonce: 0, + bytecode_hash: Some(B256::from_str("0x2034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc").unwrap()) + }), + storage_root: B256::from_str("0x556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599").unwrap(), + proof: convert_to_proof([ + 
"0xf90211a0ea92fb71507739d5afe328d607b2c5e98322b7aa7cdfeccf817543058b54af70a0bd0c2525b5bee47abf7120c9e01ec3249699d687f80ebb96ed9ad9de913dbab0a0ab4b14b89416eb23c6b64204fa45cfcb39d4220016a9cd0815ebb751fe45eb71a0986ae29c2148b9e61f9a7543f44a1f8d029f1c5095b359652e9ec94e64b5d393a0555d54aa23ed990b0488153418637df7b2c878b604eb761aa2673b609937b0eba0140afb6a3909cc6047b3d44af13fc83f161a7e4c4ddba430a2841862912eb222a031b1185c1f455022d9e42ce04a71f174eb9441b1ada67449510500f4d85b3b22a051ecd01e18113b23cc65e62f67d69b33ee15d20bf81a6b524f7df90ded00ca15a0703769d6a7befad000bc2b4faae3e41b809b1b1241fe2964262554e7e3603488a0e5de7f600e4e6c3c3e5630e0c66f50506a17c9715642fccb63667e81397bbf93a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606a04225fda3b89f0c59bf40129d1d5e5c3bf67a2129f0c55e53ffdd2cebf185d644a078e0f7fd3ae5a9bc90f66169614211b48fe235eb64818b3935d3e69c53523b9aa0a870e00e53ebaa1e9ec16e5f36606fd7d21d3a3c96894c0a2a23550949d4fdf7a0809226b69cee1f4f22ced1974e7805230da1909036a49a7652428999431afac2a0f11593b2407e86e11997325d8df2d22d937bbe0aef8302ba40c6be0601b04fc380", + "0xf901f1a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5fa05f5f09855df46330aa310e8d6be5fb82d1a4b975782d9b29acf06ac8d3e72b1ca0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06ba04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83a004f8c7cc4f1335182a1709fb28fc67d52e59878480210abcba864d5d1fd4a066a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d80a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86fa02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fffa0f3b039a4f32349e85c782d1164c1890e5bf16badc9ee4cf827db6afd2229dde6a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b280", + "0xf869a0202a47fc6863b89a6b51890ef3c1550d560886c027141d2058ba1e2d4c66d99ab846f8448080a0556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599a02034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc" + ]), + storage_proofs: Vec::from([ + StorageProof { + key: slot_22, + nibbles: Nibbles::unpack(keccak256(slot_22)), + value: U256::from_str("0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf85180a0776aa456ba9c5008e03b82b841a9cf2fc1e8578cfacd5c9015804eae315f17fb80808080808080808080808080a072e3e284d47badbb0a5ca1421e1179d3ea90cc10785b26b74fb8a81f0f9e841880", + 
"0xf843a020035b26e3e9eee00e0d72fd1ee8ddca6894550dca6916ea2ac6baa90d11e510a1a0f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b" + ]) + }, + StorageProof { + key: slot_23, + nibbles: Nibbles::unpack(keccak256(slot_23)), + value: U256::from_str("0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf8518080808080a0d546c4ca227a267d29796643032422374624ed109b3d94848c5dc06baceaee76808080808080a027c48e210ccc6e01686be2d4a199d35f0e1e8df624a8d3a17c163be8861acd6680808080", + "0xf843a0207b2b5166478fd4318d2acc6cc2c704584312bdd8781b32d5d06abda57f4230a1a0db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71" + ]) + }, + StorageProof { + key: slot_24, + nibbles: Nibbles::unpack(keccak256(slot_24)), + value: U256::from_str("0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf85180808080a030263404acfee103d0b1019053ff3240fce433c69b709831673285fa5887ce4c80808080808080a0f8f1fbb1f7b482d9860480feebb83ff54a8b6ec1ead61cc7d2f25d7c01659f9c80808080", + "0xf843a020d332d19b93bcabe3cce7ca0c18a052f57e5fd03b4758a09f30f5ddc4b22ec4a1a0c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c" + ]) + }, + StorageProof { + key: slot_100, + nibbles: Nibbles::unpack(keccak256(slot_100)), + value: U256::ZERO, + proof: convert_to_proof([ + 
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf891a090bacef44b189ddffdc5f22edc70fe298c58e5e523e6e1dfdf7dbc6d657f7d1b80a026eed68746028bc369eb456b7d3ee475aa16f34e5eaa0c98fdedb9c59ebc53b0808080a09ce86197173e14e0633db84ce8eea32c5454eebe954779255644b45b717e8841808080a0328c7afb2c58ef3f8c4117a8ebd336f1a61d24591067ed9c5aae94796cac987d808080808080" + ]) + }, + ]) + }; + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + similar_asserts::assert_eq!(account_proof, expected); + assert_eq!(account_proof.verify(root), Ok(())); +} diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs new file mode 100644 index 000000000000..8a9dbee9b2ac --- /dev/null +++ b/crates/trie/db/tests/trie.rs @@ -0,0 +1,773 @@ +use proptest::{prelude::ProptestConfig, proptest}; +use proptest_arbitrary_interop::arb; +use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; +use reth_db_api::{ + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, + transaction::DbTxMut, +}; +use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256}; +use reth_provider::{ + test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, +}; +use reth_trie::{ + prefix_set::PrefixSetMut, + test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, + BranchNodeCompact, StateRoot, StorageRoot, TrieMask, +}; +use reth_trie_common::triehash::KeccakHasher; +use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; +use std::{ + collections::{BTreeMap, HashMap}, + ops::Mul, + str::FromStr, + sync::Arc, +}; + +use alloy_rlp::Encodable; +use reth_db_api::transaction::DbTx; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; +use reth_trie::{ + prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, + IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, +}; + +fn insert_account( + tx: &impl DbTxMut, + address: Address, + account: Account, + storage: &BTreeMap, +) { + let hashed_address = keccak256(address); + tx.put::(hashed_address, account).unwrap(); + insert_storage(tx, hashed_address, storage); +} + +fn insert_storage(tx: &impl DbTxMut, hashed_address: B256, storage: &BTreeMap) { + for (k, v) in storage { + tx.put::( + hashed_address, + StorageEntry { key: keccak256(k), value: *v }, + ) + .unwrap(); + } +} + +fn incremental_vs_full_root(inputs: &[&str], modified: &str) { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + let hashed_address = B256::with_last_byte(1); + + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::().unwrap(); + let data = inputs.iter().map(|x| B256::from_str(x).unwrap()); + let value = 
+ for key in data {
+ hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap();
+ }
+
+ // Generate the intermediate nodes on the receiving end of the channel
+ let (_, _, trie_updates) =
+ StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap();
+
+ // 1. Some state transition happens, update the hashed storage to the new value
+ let modified_key = B256::from_str(modified).unwrap();
+ let value = U256::from(1);
+ if hashed_storage_cursor.seek_by_key_subkey(hashed_address, modified_key).unwrap().is_some() {
+ hashed_storage_cursor.delete_current().unwrap();
+ }
+ hashed_storage_cursor
+ .upsert(hashed_address, StorageEntry { key: modified_key, value })
+ .unwrap();
+
+ // 2. Calculate full merkle root
+ let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address);
+ let modified_root = loader.root().unwrap();
+
+ // Update the intermediate roots table so that we can run the incremental verification
+ tx.write_individual_storage_trie_updates(hashed_address, &trie_updates).unwrap();
+
+ // 3. Calculate the incremental root
+ let mut storage_changes = PrefixSetMut::default();
+ storage_changes.insert(Nibbles::unpack(modified_key));
+ let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address)
+ .with_prefix_set(storage_changes.freeze());
+ let incremental_root = loader.root().unwrap();
+
+ assert_eq!(modified_root, incremental_root);
+}
+
+#[test]
+fn branch_node_child_changes() {
+ incremental_vs_full_root(
+ &[
+ "1000000000000000000000000000000000000000000000000000000000000000",
+ "1100000000000000000000000000000000000000000000000000000000000000",
+ "1110000000000000000000000000000000000000000000000000000000000000",
+ "1200000000000000000000000000000000000000000000000000000000000000",
+ "1220000000000000000000000000000000000000000000000000000000000000",
+ "1320000000000000000000000000000000000000000000000000000000000000",
+ ],
+ "1200000000000000000000000000000000000000000000000000000000000000",
+ );
+}
+
+#[test]
+fn arbitrary_storage_root() {
+ proptest!(ProptestConfig::with_cases(10), |(item in arb::<(Address, std::collections::BTreeMap<B256, U256>)>())| {
+ let (address, storage) = item;
+
+ let hashed_address = keccak256(address);
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+ for (key, value) in &storage {
+ tx.tx_ref().put::<tables::HashedStorages>(
+ hashed_address,
+ StorageEntry { key: keccak256(key), value: *value },
+ )
+ .unwrap();
+ }
+ tx.commit().unwrap();
+
+ let tx = factory.provider_rw().unwrap();
+ let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap();
+ let expected = storage_root(storage.into_iter());
+ assert_eq!(expected, got);
+ });
+}
+
+#[test]
+// This ensures we don't add empty accounts to the trie
+fn test_empty_account() {
+ let state: State = BTreeMap::from([
+ (
+ Address::random(),
+ (
+ Account { nonce: 0, balance: U256::from(0), bytecode_hash: None },
+ BTreeMap::from([(B256::with_last_byte(0x4), U256::from(12))]),
+ ),
+ ),
+ (
+ Address::random(),
+ (
+ Account { nonce: 0, balance: U256::from(0), bytecode_hash: None },
+ BTreeMap::default(),
+ ),
+ ),
+ (
+ Address::random(),
+ (
+ Account {
+ nonce: 155,
+ balance: U256::from(414241124u32),
+ bytecode_hash: Some(keccak256("test")),
+ },
+ BTreeMap::from([
+ (B256::ZERO, U256::from(3)),
+ (B256::with_last_byte(2), U256::from(1)),
+ ]),
+ ),
+ ),
+ ]);
+ test_state_root_with_state(state);
+}
+
+#[test]
+// This ensures we return an empty root when there are no storage entries
+fn test_empty_storage_root() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let address = Address::random();
+ let code = "el buen fla";
+ let account = Account {
+ nonce: 155,
+ balance: U256::from(414241124u32),
+ bytecode_hash: Some(keccak256(code)),
+ };
+ insert_account(tx.tx_ref(), address, account, &Default::default());
+ tx.commit().unwrap();
+
+ let tx = factory.provider_rw().unwrap();
+ let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap();
+ assert_eq!(got, EMPTY_ROOT_HASH);
+}
+
+#[test]
+// This ensures that the walker goes over all the storage slots
+fn test_storage_root() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let address = Address::random();
+ let storage =
+ BTreeMap::from([(B256::ZERO, U256::from(3)), (B256::with_last_byte(2), U256::from(1))]);
+
+ let code = "el buen fla";
+ let account = Account {
+ nonce: 155,
+ balance: U256::from(414241124u32),
+ bytecode_hash: Some(keccak256(code)),
+ };
+
+ insert_account(tx.tx_ref(), address, account, &storage);
+ tx.commit().unwrap();
+
+ let tx = factory.provider_rw().unwrap();
+ let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap();
+
+ assert_eq!(storage_root(storage.into_iter()), got);
+}
+
+type State = BTreeMap<Address, (Account, BTreeMap<B256, U256>)>;
+
+#[test]
+fn arbitrary_state_root() {
+ proptest!(
+ ProptestConfig::with_cases(10), | (state in arb::<State>()) | {
+ test_state_root_with_state(state);
+ }
+ );
+}
+
+#[test]
+fn arbitrary_state_root_with_progress() {
+ proptest!(
+ ProptestConfig::with_cases(10), | (state in arb::<State>()) | {
+ let hashed_entries_total = state.len() +
+ state.values().map(|(_, slots)| slots.len()).sum::<usize>();
+
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ for (address, (account, storage)) in &state {
+ insert_account(tx.tx_ref(), *address, *account, storage)
+ }
+ tx.commit().unwrap();
+ let tx = factory.provider_rw().unwrap();
+
+ let expected = state_root(state);
+
+ let threshold = 10;
+ let mut got = None;
+ let mut hashed_entries_walked = 0;
+
+ let mut intermediate_state: Option<Box<IntermediateStateRootState>> = None;
+ while got.is_none() {
+ let calculator = StateRoot::from_tx(tx.tx_ref())
+ .with_threshold(threshold)
+ .with_intermediate_state(intermediate_state.take().map(|state| *state));
+ match calculator.root_with_progress().unwrap() {
+ StateRootProgress::Progress(state, walked, _) => {
+ intermediate_state = Some(state);
+ hashed_entries_walked += walked;
+ },
+ StateRootProgress::Complete(root, walked, _) => {
+ got = Some(root);
+ hashed_entries_walked += walked;
+ },
+ };
+ }
+ assert_eq!(expected, got.unwrap());
+ assert_eq!(hashed_entries_total, hashed_entries_walked)
+ }
+ );
+}
+
+fn test_state_root_with_state(state: State) {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ for (address, (account, storage)) in &state {
+ insert_account(tx.tx_ref(), *address, *account, storage)
+ }
+ tx.commit().unwrap();
+ let expected = state_root(state);
+
+ let tx = factory.provider_rw().unwrap();
+ let got = StateRoot::from_tx(tx.tx_ref()).root().unwrap();
+ assert_eq!(expected, got);
+}
+
+fn encode_account(account: Account, storage_root: Option<B256>) -> Vec<u8> {
+ let account = TrieAccount::from((account, storage_root.unwrap_or(EMPTY_ROOT_HASH)));
+ let mut account_rlp = Vec::with_capacity(account.length());
+ account.encode(&mut account_rlp);
+ account_rlp
+}
+
+#[test]
+fn storage_root_regression() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+ // Some address whose hash starts with 0xB041
+ let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap();
+ let key3 = keccak256(address3);
+ assert_eq!(key3[0], 0xB0);
+ assert_eq!(key3[1], 0x41);
+
+ let storage = BTreeMap::from(
+ [
+ ("1200000000000000000000000000000000000000000000000000000000000000", 0x42),
+ ("1400000000000000000000000000000000000000000000000000000000000000", 0x01),
+ ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89),
+ ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05),
+ ]
+ .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))),
+ );
+
+ let mut hashed_storage_cursor =
+ tx.tx_ref().cursor_dup_write::<tables::HashedStorages>().unwrap();
+ for (hashed_slot, value) in storage.clone() {
+ hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap();
+ }
+ tx.commit().unwrap();
+ let tx = factory.provider_rw().unwrap();
+
+ let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap();
+ let expected_root = storage_root_prehashed(storage);
+ assert_eq!(expected_root, account3_storage_root);
+}
+
+#[test]
+fn account_and_storage_trie() {
+ let ether = U256::from(1e18);
+ let storage = BTreeMap::from(
+ [
+ ("1200000000000000000000000000000000000000000000000000000000000000", 0x42),
+ ("1400000000000000000000000000000000000000000000000000000000000000", 0x01),
+ ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89),
+ ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05),
+ ]
+ .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))),
+ );
+
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let mut hashed_account_cursor = tx.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+ let mut hashed_storage_cursor =
+ tx.tx_ref().cursor_dup_write::<tables::HashedStorages>().unwrap();
+
+ let mut hash_builder = HashBuilder::default();
+
+ // Insert first account
+ let key1 =
+ B256::from_str("b000000000000000000000000000000000000000000000000000000000000000").unwrap();
+ let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None };
+ hashed_account_cursor.upsert(key1, account1).unwrap();
+ hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None));
+
+ // Some address whose hash starts with 0xB040
+ let address2 = Address::from_str("7db3e81b72d2695e19764583f6d219dbee0f35ca").unwrap();
+ let key2 = keccak256(address2);
+ assert_eq!(key2[0], 0xB0);
+ assert_eq!(key2[1], 0x40);
+ let account2 = Account { nonce: 0, balance: ether, ..Default::default() };
+ hashed_account_cursor.upsert(key2, account2).unwrap();
+ hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None));
+
+ // Some address whose hash starts with 0xB041
+ let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap();
+ let key3 = keccak256(address3);
+ assert_eq!(key3[0], 0xB0);
+ assert_eq!(key3[1], 0x41);
+ let code_hash =
+ B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd").unwrap();
+ let account3 =
+ Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) };
+ hashed_account_cursor.upsert(key3, account3).unwrap();
+ for (hashed_slot, value) in storage {
+ if hashed_storage_cursor
+ .seek_by_key_subkey(key3, hashed_slot)
+ .unwrap()
+ .filter(|e| e.key == hashed_slot)
+ .is_some()
+ {
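+ // HashedStorages is a dup-sorted table: upsert alone would add a second value for this slot, so any existing entry is deleted first.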
+ hashed_storage_cursor.delete_current().unwrap();
+ }
+ hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap();
+ }
+ let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap();
+ hash_builder
+ .add_leaf(Nibbles::unpack(key3), &encode_account(account3, Some(account3_storage_root)));
+
+ let key4a =
+ B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000").unwrap();
+ let account4a = Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() };
+ hashed_account_cursor.upsert(key4a, account4a).unwrap();
+ hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None));
+
+ let key5 =
+ B256::from_str("B310000000000000000000000000000000000000000000000000000000000000").unwrap();
+ let account5 = Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() };
+ hashed_account_cursor.upsert(key5, account5).unwrap();
+ hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None));
+
+ let key6 =
+ B256::from_str("B340000000000000000000000000000000000000000000000000000000000000").unwrap();
+ let account6 = Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() };
+ hashed_account_cursor.upsert(key6, account6).unwrap();
+ hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None));
+
+ // Populate account & storage trie DB tables
+ let expected_root =
+ B256::from_str("72861041bc90cd2f93777956f058a545412b56de79af5eb6b8075fe2eabbe015").unwrap();
+ let computed_expected_root: B256 = triehash::trie_root::<KeccakHasher, _, _, _>([
+ (key1, encode_account(account1, None)),
+ (key2, encode_account(account2, None)),
+ (key3, encode_account(account3, Some(account3_storage_root))),
+ (key4a, encode_account(account4a, None)),
+ (key5, encode_account(account5, None)),
+ (key6, encode_account(account6, None)),
+ ]);
+ // Check computed trie root to ensure correctness
+ assert_eq!(computed_expected_root, expected_root);
+
+ // Check hash builder root
+ assert_eq!(hash_builder.root(), computed_expected_root);
+
+ // Check state root calculation from scratch
+ let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap();
+ assert_eq!(root, computed_expected_root);
+
+ // Check account trie
+ let account_updates = trie_updates.clone().into_sorted();
+ let account_updates = account_updates.account_nodes_ref();
+ assert_eq!(account_updates.len(), 2);
+
+ let (nibbles1a, node1a) = account_updates.first().unwrap();
+ assert_eq!(nibbles1a[..], [0xB]);
+ assert_eq!(node1a.state_mask, TrieMask::new(0b1011));
+ assert_eq!(node1a.tree_mask, TrieMask::new(0b0001));
+ assert_eq!(node1a.hash_mask, TrieMask::new(0b1001));
+ assert_eq!(node1a.root_hash, None);
+ assert_eq!(node1a.hashes.len(), 2);
+
+ let (nibbles2a, node2a) = account_updates.last().unwrap();
+ assert_eq!(nibbles2a[..], [0xB, 0x0]);
+ assert_eq!(node2a.state_mask, TrieMask::new(0b10001));
+ assert_eq!(node2a.tree_mask, TrieMask::new(0b00000));
+ assert_eq!(node2a.hash_mask, TrieMask::new(0b10000));
+ assert_eq!(node2a.root_hash, None);
+ assert_eq!(node2a.hashes.len(), 1);
+
+ // Check storage trie
+ let mut updated_storage_trie =
+ trie_updates.storage_tries_ref().iter().filter(|(_, u)| !u.storage_nodes_ref().is_empty());
+ assert_eq!(updated_storage_trie.clone().count(), 1);
+ let (_, storage_trie_updates) = updated_storage_trie.next().unwrap();
+ assert_eq!(storage_trie_updates.storage_nodes_ref().len(), 1);
+
+ let (nibbles3, node3) =
+ storage_trie_updates.storage_nodes_ref().iter().next().unwrap();
+ assert!(nibbles3.is_empty());
+ assert_eq!(node3.state_mask, TrieMask::new(0b1010));
+ assert_eq!(node3.tree_mask, TrieMask::new(0b0000));
+ assert_eq!(node3.hash_mask, TrieMask::new(0b0010));
+
+ assert_eq!(node3.hashes.len(), 1);
+ assert_eq!(node3.root_hash, Some(account3_storage_root));
+
+ // Add an account
+ // Some address whose hash starts with 0xB1
+ let address4b = Address::from_str("4f61f2d5ebd991b85aa1677db97307caf5215c91").unwrap();
+ let key4b = keccak256(address4b);
+ assert_eq!(key4b.0[0], key4a.0[0]);
+ let account4b = Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None };
+ hashed_account_cursor.upsert(key4b, account4b).unwrap();
+
+ let mut prefix_set = PrefixSetMut::default();
+ prefix_set.insert(Nibbles::unpack(key4b));
+
+ let expected_state_root =
+ B256::from_str("8e263cd4eefb0c3cbbb14e5541a66a755cad25bcfab1e10dd9d706263e811b28").unwrap();
+
+ let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref())
+ .with_prefix_sets(TriePrefixSets {
+ account_prefix_set: prefix_set.freeze(),
+ ..Default::default()
+ })
+ .root_with_updates()
+ .unwrap();
+ assert_eq!(root, expected_state_root);
+
+ let account_updates = trie_updates.into_sorted();
+ let account_updates = account_updates.account_nodes_ref();
+ assert_eq!(account_updates.len(), 2);
+
+ let (nibbles1b, node1b) = account_updates.first().unwrap();
+ assert_eq!(nibbles1b[..], [0xB]);
+ assert_eq!(node1b.state_mask, TrieMask::new(0b1011));
+ assert_eq!(node1b.tree_mask, TrieMask::new(0b0001));
+ assert_eq!(node1b.hash_mask, TrieMask::new(0b1011));
+ assert_eq!(node1b.root_hash, None);
+ assert_eq!(node1b.hashes.len(), 3);
+ assert_eq!(node1a.hashes[0], node1b.hashes[0]);
+ assert_eq!(node1a.hashes[1], node1b.hashes[2]);
+
+ let (nibbles2b, node2b) = account_updates.last().unwrap();
+ assert_eq!(nibbles2b[..], [0xB, 0x0]);
+ assert_eq!(node2a, node2b);
+ tx.commit().unwrap();
+
+ {
+ let tx = factory.provider_rw().unwrap();
+ let mut hashed_account_cursor =
+ tx.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+
+ let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap();
+ hashed_account_cursor.delete_current().unwrap();
+
+ let mut account_prefix_set = PrefixSetMut::default();
+ account_prefix_set.insert(Nibbles::unpack(account.0));
+
+ let computed_expected_root: B256 = triehash::trie_root::<KeccakHasher, _, _, _>([
+ (key1, encode_account(account1, None)),
+ // DELETED: (key2, encode_account(account2, None)),
+ (key3, encode_account(account3, Some(account3_storage_root))),
+ (key4a, encode_account(account4a, None)),
+ (key4b, encode_account(account4b, None)),
+ (key5, encode_account(account5, None)),
+ (key6, encode_account(account6, None)),
+ ]);
+
+ let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref())
+ .with_prefix_sets(TriePrefixSets {
+ account_prefix_set: account_prefix_set.freeze(),
+ ..Default::default()
+ })
+ .root_with_updates()
+ .unwrap();
+ assert_eq!(root, computed_expected_root);
+ assert_eq!(
+ trie_updates.account_nodes_ref().len() + trie_updates.removed_nodes_ref().len(),
+ 1
+ );
+
+ assert_eq!(trie_updates.account_nodes_ref().len(), 1);
+
+ let (nibbles1c, node1c) = trie_updates.account_nodes_ref().iter().next().unwrap();
+ assert_eq!(nibbles1c[..], [0xB]);
+
+ assert_eq!(node1c.state_mask, TrieMask::new(0b1011));
+ assert_eq!(node1c.tree_mask, TrieMask::new(0b0000));
+ assert_eq!(node1c.hash_mask, TrieMask::new(0b1011));
+
+ assert_eq!(node1c.root_hash, None);
+
+ assert_eq!(node1c.hashes.len(), 3);
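+ // Deleting key2 (hashed prefix 0xB040) should only invalidate the first retained child hash of the 0xB branch node; the other two carry over, as the next asserts verify.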
+ assert_ne!(node1c.hashes[0], node1b.hashes[0]);
+ assert_eq!(node1c.hashes[1], node1b.hashes[1]);
+ assert_eq!(node1c.hashes[2], node1b.hashes[2]);
+ }
+
+ {
+ let tx = factory.provider_rw().unwrap();
+ let mut hashed_account_cursor =
+ tx.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+
+ let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap();
+ hashed_account_cursor.delete_current().unwrap();
+ let account3 = hashed_account_cursor.seek_exact(key3).unwrap().unwrap();
+ hashed_account_cursor.delete_current().unwrap();
+
+ let mut account_prefix_set = PrefixSetMut::default();
+ account_prefix_set.insert(Nibbles::unpack(account2.0));
+ account_prefix_set.insert(Nibbles::unpack(account3.0));
+
+ let computed_expected_root: B256 = triehash::trie_root::<KeccakHasher, _, _, _>([
+ (key1, encode_account(account1, None)),
+ // DELETED: (key2, encode_account(account2, None)),
+ // DELETED: (key3, encode_account(account3, Some(account3_storage_root))),
+ (key4a, encode_account(account4a, None)),
+ (key4b, encode_account(account4b, None)),
+ (key5, encode_account(account5, None)),
+ (key6, encode_account(account6, None)),
+ ]);
+
+ let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref())
+ .with_prefix_sets(TriePrefixSets {
+ account_prefix_set: account_prefix_set.freeze(),
+ ..Default::default()
+ })
+ .root_with_updates()
+ .unwrap();
+ assert_eq!(root, computed_expected_root);
+ assert_eq!(
+ trie_updates.account_nodes_ref().len() + trie_updates.removed_nodes_ref().len(),
+ 1
+ );
+ assert!(!trie_updates
+ .storage_tries_ref()
+ .iter()
+ .any(|(_, u)| !u.storage_nodes_ref().is_empty() || !u.removed_nodes_ref().is_empty())); // no storage root update
+
+ assert_eq!(trie_updates.account_nodes_ref().len(), 1);
+
+ let (nibbles1d, node1d) = trie_updates.account_nodes_ref().iter().next().unwrap();
+ assert_eq!(nibbles1d[..], [0xB]);
+
+ assert_eq!(node1d.state_mask, TrieMask::new(0b1011));
+ assert_eq!(node1d.tree_mask, TrieMask::new(0b0000));
+ assert_eq!(node1d.hash_mask, TrieMask::new(0b1010));
+
+ assert_eq!(node1d.root_hash, None);
+
+ assert_eq!(node1d.hashes.len(), 2);
+ assert_eq!(node1d.hashes[0], node1b.hashes[1]);
+ assert_eq!(node1d.hashes[1], node1b.hashes[2]);
+ }
+}
+
+#[test]
+fn account_trie_around_extension_node() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let expected = extension_node_trie(&tx);
+
+ let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap();
+ assert_eq!(expected, got);
+ assert_trie_updates(updates.account_nodes_ref());
+}
+
+#[test]
+fn account_trie_around_extension_node_with_dbtrie() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let expected = extension_node_trie(&tx);
+
+ let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap();
+ assert_eq!(expected, got);
+ tx.write_trie_updates(&updates).unwrap();
+
+ // read the account updates from the db
+ let mut accounts_trie = tx.tx_ref().cursor_read::<tables::AccountsTrie>().unwrap();
+ let walker = accounts_trie.walk(None).unwrap();
+ let account_updates = walker
+ .into_iter()
+ .map(|item| {
+ let (key, node) = item.unwrap();
+ (key.0, node)
+ })
+ .collect();
+ assert_trie_updates(&account_updates);
+}
+
+proptest! {
+ #![proptest_config(ProptestConfig {
+ cases: 128, ..ProptestConfig::default()
+ })]
+
+ #[test]
+ fn fuzz_state_root_incremental(account_changes: [BTreeMap<B256, U256>; 5]) {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+ let mut hashed_account_cursor = tx.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+
+ let mut state = BTreeMap::default();
+ for accounts in account_changes {
+ let should_generate_changeset = !state.is_empty();
+ let mut changes = PrefixSetMut::default();
+ for (hashed_address, balance) in accounts.clone() {
+ hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap();
+ if should_generate_changeset {
+ changes.insert(Nibbles::unpack(hashed_address));
+ }
+ }
+
+ let (state_root, trie_updates) = StateRoot::from_tx(tx.tx_ref())
+ .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() })
+ .root_with_updates()
+ .unwrap();
+
+ state.append(&mut accounts.clone());
+ let expected_root = state_root_prehashed(
+ state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty())))
+ );
+ assert_eq!(expected_root, state_root);
+ tx.write_trie_updates(&trie_updates).unwrap();
+ }
+ }
+}
+
+#[test]
+fn storage_trie_around_extension_node() {
+ let factory = create_test_provider_factory();
+ let tx = factory.provider_rw().unwrap();
+
+ let hashed_address = B256::random();
+ let (expected_root, expected_updates) = extension_node_storage_trie(&tx, hashed_address);
+
+ let (got, _, updates) =
+ StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap();
+ assert_eq!(expected_root, got);
+ assert_eq!(expected_updates, updates);
+ assert_trie_updates(updates.storage_nodes_ref());
+}
+
+fn extension_node_storage_trie(
+ tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>>,
+ hashed_address: B256,
+) -> (B256, StorageTrieUpdates) {
+ let value = U256::from(1);
+
+ let mut hashed_storage = tx.tx_ref().cursor_write::<tables::HashedStorages>().unwrap();
+
+ let mut hb = HashBuilder::default().with_updates(true);
+
+ for key in [
+ hex!("30af561000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af569000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af650000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af6f0000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af8f0000000000000000000000000000000000000000000000000000000000"),
+ hex!("3100000000000000000000000000000000000000000000000000000000000000"),
+ ] {
+ hashed_storage.upsert(hashed_address, StorageEntry { key: B256::new(key), value }).unwrap();
+ hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value));
+ }
+
+ let root = hb.root();
+ let (_, updates) = hb.split();
+ let trie_updates = StorageTrieUpdates::new(updates);
+ (root, trie_updates)
+}
+
+fn extension_node_trie(tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>>) -> B256 {
+ let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) };
+ let val = encode_account(a, None);
+
+ let mut hashed_accounts = tx.tx_ref().cursor_write::<tables::HashedAccounts>().unwrap();
+ let mut hb = HashBuilder::default();
+
+ for key in [
+ hex!("30af561000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af569000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af650000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af6f0000000000000000000000000000000000000000000000000000000000"),
+ hex!("30af8f0000000000000000000000000000000000000000000000000000000000"),
hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), + hex!("3100000000000000000000000000000000000000000000000000000000000000"), + ] { + hashed_accounts.upsert(B256::new(key), a).unwrap(); + hb.add_leaf(Nibbles::unpack(key), &val); + } + + hb.root() +} + +fn assert_trie_updates(account_updates: &HashMap) { + assert_eq!(account_updates.len(), 2); + + let node = account_updates.get(&[0x3][..]).unwrap(); + let expected = BranchNodeCompact::new(0b0011, 0b0001, 0b0000, vec![], None); + assert_eq!(node, &expected); + + let node = account_updates.get(&[0x3, 0x0, 0xA, 0xF][..]).unwrap(); + assert_eq!(node.state_mask, TrieMask::new(0b101100000)); + assert_eq!(node.tree_mask, TrieMask::new(0b000000000)); + assert_eq!(node.hash_mask, TrieMask::new(0b001000000)); + + assert_eq!(node.root_hash, None); + assert_eq!(node.hashes.len(), 1); +} diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 36b7cbdc4a28..92f939dd0cb3 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index 66d0593da178..bbd2ff228f80 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -6,11 +6,14 @@ use rayon::ThreadPoolBuilder; use reth_primitives::{Account, B256, U256}; use reth_provider::{ providers::ConsistentDbView, test_utils::create_test_provider_factory, writer::StorageWriter, + TrieWriter, }; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, + hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, + HashedPostState, HashedStorage, StateRoot, }; +use reth_trie_db::DatabaseStateRoot; use reth_trie_parallel::{async_root::AsyncStateRoot, parallel_root::ParallelStateRoot}; use std::collections::HashMap; @@ -30,7 +33,7 @@ pub fn calculate_state_root(c: &mut Criterion) { storage_writer.write_hashed_state(&db_state.into_sorted()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - updates.write_to_database(provider_rw.tx_ref()).unwrap(); + provider_rw.write_trie_updates(&updates).unwrap(); provider_rw.commit().unwrap(); } @@ -46,11 +49,12 @@ pub fn calculate_state_root(c: &mut Criterion) { (provider, sorted_state, prefix_sets) }, |(provider, sorted_state, prefix_sets)| async move { + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider.tx_ref()), + &sorted_state, + ); StateRoot::from_tx(provider.tx_ref()) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - provider.tx_ref(), - &sorted_state, - )) + .with_hashed_cursor_factory(hashed_cursor_factory) .with_prefix_sets(prefix_sets) .root() }, diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index db6152b6a2cf..cf3eabcdc32c 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -7,7 +7,9 @@ use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, 
HashedPostStateCursorFactory}, + hashed_cursor::{ + DatabaseHashedCursorFactory, HashedCursorFactory, HashedPostStateCursorFactory, + }, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, @@ -107,9 +109,13 @@ where let handle = self.blocking_pool.spawn_fifo(move || -> Result<_, AsyncStateRootError> { let provider = view.provider_ro()?; + let hashed_state = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider.tx_ref()), + &hashed_state_sorted, + ); Ok(StorageRoot::new_hashed( provider.tx_ref(), - HashedPostStateCursorFactory::new(provider.tx_ref(), &hashed_state_sorted), + hashed_state, hashed_address, #[cfg(feature = "metrics")] metrics, @@ -125,7 +131,10 @@ where let provider_ro = self.view.provider_ro()?; let tx = provider_ro.tx_ref(); - let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_state_sorted); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &hashed_state_sorted, + ); let trie_cursor_factory = tx; let walker = TrieWalker::new( diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index 0983fd47e5a3..b95d38fa422f 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -6,7 +6,9 @@ use reth_execution_errors::StorageRootError; use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_trie::{ - hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + hashed_cursor::{ + DatabaseHashedCursorFactory, HashedCursorFactory, HashedPostStateCursorFactory, + }, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, @@ -91,9 +93,13 @@ where .into_par_iter() .map(|(hashed_address, prefix_set)| { let provider_ro = self.view.provider_ro()?; + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); let storage_root_result = StorageRoot::new_hashed( provider_ro.tx_ref(), - HashedPostStateCursorFactory::new(provider_ro.tx_ref(), &hashed_state_sorted), + hashed_cursor_factory, hashed_address, #[cfg(feature = "metrics")] self.metrics.storage_trie.clone(), @@ -108,8 +114,10 @@ where let mut trie_updates = TrieUpdates::default(); let provider_ro = self.view.provider_ro()?; - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(provider_ro.tx_ref(), &hashed_state_sorted); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); let trie_cursor_factory = provider_ro.tx_ref(); let walker = TrieWalker::new( @@ -202,7 +210,7 @@ impl From for ProviderError { fn from(error: ParallelStateRootError) -> Self { match error { ParallelStateRootError::Provider(error) => error, - ParallelStateRootError::StorageRoot(StorageRootError::DB(error)) => { + ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => { Self::Database(error) } } diff --git a/crates/trie/trie/src/hashed_cursor/default.rs b/crates/trie/trie/src/hashed_cursor/default.rs index 197fd7ecde76..e667f4723173 100644 --- a/crates/trie/trie/src/hashed_cursor/default.rs +++ b/crates/trie/trie/src/hashed_cursor/default.rs @@ -6,13 +6,30 @@ use reth_db_api::{ }; use reth_primitives::{Account, B256, U256}; -impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { - type AccountCursor = ::Cursor; 
+/// A struct wrapping database transaction that implements [`HashedCursorFactory`].
+#[derive(Debug)]
+pub struct DatabaseHashedCursorFactory<'a, TX>(&'a TX);
+
+impl<'a, TX> Clone for DatabaseHashedCursorFactory<'a, TX> {
+ fn clone(&self) -> Self {
+ Self(self.0)
+ }
+}
+
+impl<'a, TX> DatabaseHashedCursorFactory<'a, TX> {
+ /// Create new database hashed cursor factory.
+ pub const fn new(tx: &'a TX) -> Self {
+ Self(tx)
+ }
+}
+
+impl<'a, TX: DbTx> HashedCursorFactory for DatabaseHashedCursorFactory<'a, TX> {
+ type AccountCursor = DatabaseHashedAccountCursor<<TX as DbTx>::Cursor<tables::HashedAccounts>>;
 type StorageCursor = DatabaseHashedStorageCursor<<TX as DbTx>::DupCursor<tables::HashedStorages>>;
 
 fn hashed_account_cursor(&self) -> Result<Self::AccountCursor, reth_db::DatabaseError> {
- self.cursor_read::<tables::HashedAccounts>()
+ Ok(DatabaseHashedAccountCursor(self.0.cursor_read::<tables::HashedAccounts>()?))
 }
 
 fn hashed_storage_cursor(
@@ -20,24 +37,36 @@ impl<'a, TX: DbTx> HashedCursorFactory for &'a TX {
 hashed_address: B256,
 ) -> Result<Self::StorageCursor, reth_db::DatabaseError> {
 Ok(DatabaseHashedStorageCursor::new(
- self.cursor_dup_read::<tables::HashedStorages>()?,
+ self.0.cursor_dup_read::<tables::HashedStorages>()?,
 hashed_address,
 ))
 }
 }
 
-impl<C> HashedCursor for C
+/// A struct wrapping database cursor over hashed accounts implementing [`HashedCursor`] for
+/// iterating over accounts.
+#[derive(Debug)]
+pub struct DatabaseHashedAccountCursor<C>(C);
+
+impl<C> DatabaseHashedAccountCursor<C> {
+ /// Create new database hashed account cursor.
+ pub const fn new(cursor: C) -> Self {
+ Self(cursor)
+ }
+}
+
+impl<C> HashedCursor for DatabaseHashedAccountCursor<C>
 where
 C: DbCursorRO<tables::HashedAccounts>,
 {
 type Value = Account;
 
 fn seek(&mut self, key: B256) -> Result<Option<(B256, Account)>, reth_db::DatabaseError> {
- self.seek(key)
+ self.0.seek(key)
 }
 
 fn next(&mut self) -> Result<Option<(B256, Account)>, reth_db::DatabaseError> {
- self.next()
+ self.0.next()
 }
 }
diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs
index 05de76721d59..053836e826d4 100644
--- a/crates/trie/trie/src/hashed_cursor/mod.rs
+++ b/crates/trie/trie/src/hashed_cursor/mod.rs
@@ -2,7 +2,7 @@ use reth_primitives::{Account, B256, U256};
 /// Default implementation of the hashed state cursor traits.
 mod default;
-pub use default::DatabaseHashedStorageCursor;
+pub use default::*;
 
 /// Implementation of hashed state cursor traits for the post state.
 mod post_state;
diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs
index ac262f3d44fc..fffd66a73f6b 100644
--- a/crates/trie/trie/src/hashed_cursor/post_state.rs
+++ b/crates/trie/trie/src/hashed_cursor/post_state.rs
@@ -8,7 +8,7 @@ use reth_primitives::{Account, B256, U256};
 use std::collections::HashSet;
 
 /// The hashed cursor factory for the post state.
-#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct HashedPostStateCursorFactory<'a, CF> { cursor_factory: CF, post_state: &'a HashedPostStateSorted, @@ -328,7 +328,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{HashedPostState, HashedStorage}; + use crate::{hashed_cursor::DatabaseHashedCursorFactory, HashedPostState, HashedStorage}; use proptest::prelude::*; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::create_test_rw_db}; @@ -387,7 +387,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -406,7 +407,10 @@ mod tests { let sorted_post_state = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted_post_state); + let factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(&tx), + &sorted_post_state, + ); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -431,7 +435,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -461,7 +466,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let expected = accounts.into_iter().filter(|x| !removed_keys.contains(&x.0)); assert_account_cursor_order(&factory, expected); } @@ -488,7 +494,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -520,7 +527,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); assert_account_cursor_order(&factory, expected.into_iter()); } ); @@ -535,7 +542,8 @@ mod tests { { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -558,7 +566,8 @@ mod tests { { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(!cursor.is_storage_empty().unwrap()); } @@ -573,7 +582,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, 
&sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -589,7 +599,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -605,7 +616,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(!cursor.is_storage_empty().unwrap()); } @@ -643,7 +655,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let expected = std::iter::once((address, db_storage.into_iter().chain(post_state_storage).collect())); assert_storage_cursor_order(&factory, expected); @@ -679,7 +692,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let expected = std::iter::once(( address, post_state_storage.into_iter().filter(|(_, value)| *value > U256::ZERO).collect(), @@ -716,7 +730,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let expected = std::iter::once((address, post_state_storage)); assert_storage_cursor_order(&factory, expected); } @@ -751,7 +766,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); let expected = std::iter::once((address, storage)); assert_storage_cursor_order(&factory, expected); } @@ -798,7 +814,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(&tx, &sorted); + let factory = HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); assert_storage_cursor_order(&factory, expected.into_iter()); }); } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index eb492f81f4f6..85a254f70b8b 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -2,16 +2,17 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, prefix_set::TriePrefixSetsMut, - trie_cursor::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, + trie_cursor::TrieCursorFactory, walker::TrieWalker, HashBuilder, Nibbles, }; use alloy_rlp::{BufMut, Encodable}; -use reth_db::tables; -use reth_db_api::transaction::DbTx; -use reth_execution_errors::{StateRootError, 
StorageRootError};
-use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256};
-use reth_trie_common::{proof::ProofRetainer, AccountProof, StorageProof, TrieAccount};
+use reth_execution_errors::trie::StateProofError;
+use reth_primitives::{keccak256, Address, B256};
+use reth_trie_common::{
+ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount,
+};
+use std::collections::HashMap;
 
 /// A struct for generating merkle proofs.
 ///
@@ -19,24 +20,36 @@ use reth_trie_common::{proof::ProofRetainer, AccountProof, StorageProof, TrieAcc
 /// on the hash builder and follows the same algorithm as the state root calculator.
 /// See `StateRoot::root` for more info.
 #[derive(Debug)]
-pub struct Proof<'a, TX, H> {
- /// A reference to the database transaction.
- tx: &'a TX,
+pub struct Proof<T, H> {
 /// The factory for hashed cursors.
 hashed_cursor_factory: H,
+ /// Creates cursor for traversing trie entities.
+ trie_cursor_factory: T,
 /// A set of prefix sets that have changes.
 prefix_sets: TriePrefixSetsMut,
+ /// Proof targets.
+ targets: HashMap<B256, Vec<B256>>,
 }
 
-impl<'a, TX, H> Proof<'a, TX, H> {
- /// Creates a new proof generator.
- pub fn new(tx: &'a TX, hashed_cursor_factory: H) -> Self {
- Self { tx, hashed_cursor_factory, prefix_sets: TriePrefixSetsMut::default() }
+impl<T, H> Proof<T, H> {
+ /// Create a new [Proof] instance.
+ pub fn new(t: T, h: H) -> Self {
+ Self {
+ trie_cursor_factory: t,
+ hashed_cursor_factory: h,
+ prefix_sets: TriePrefixSetsMut::default(),
+ targets: HashMap::default(),
+ }
 }
 
 /// Set the hashed cursor factory.
- pub fn with_hashed_cursor_factory<HF>(self, hashed_cursor_factory: HF) -> Proof<'a, TX, HF> {
- Proof { tx: self.tx, hashed_cursor_factory, prefix_sets: self.prefix_sets }
+ pub fn with_hashed_cursor_factory<HF>(self, hashed_cursor_factory: HF) -> Proof<T, HF> {
+ Proof {
+ trie_cursor_factory: self.trie_cursor_factory,
+ hashed_cursor_factory,
+ prefix_sets: self.prefix_sets,
+ targets: self.targets,
+ }
 }
 
 /// Set the prefix sets. They have to be mutable in order to allow extension with proof target.
@@ -44,43 +57,49 @@ impl<'a, TX, H> Proof<'a, TX, H> {
 self.prefix_sets = prefix_sets;
 self
 }
-}
 
-impl<'a, TX> Proof<'a, TX, &'a TX> {
- /// Create a new [Proof] instance from database transaction.
- pub fn from_tx(tx: &'a TX) -> Self {
- Self::new(tx, tx)
+ /// Set the target accounts and slots.
+ pub fn with_targets(mut self, targets: HashMap<B256, Vec<B256>>) -> Self {
+ self.targets = targets;
+ self
 }
 }
 
-impl<'a, TX, H> Proof<'a, TX, H>
+impl<T, H> Proof<T, H>
 where
- TX: DbTx,
+ T: TrieCursorFactory,
 H: HashedCursorFactory + Clone,
 {
 /// Generate an account proof from intermediate nodes.
 pub fn account_proof(
- &self,
+ self,
 address: Address,
 slots: &[B256],
- ) -> Result<AccountProof, StateRootError> {
- let target_hashed_address = keccak256(address);
- let target_nibbles = Nibbles::unpack(target_hashed_address);
- let mut account_proof = AccountProof::new(address);
+ ) -> Result<AccountProof, StateProofError> {
+ Ok(self
+ .with_targets(HashMap::from([(
+ keccak256(address),
+ slots.iter().map(keccak256).collect(),
+ )]))
+ .multi_proof()?
+ .account_proof(address, slots)?)
+ }
 
+ /// Generate a state multiproof according to specified targets.
+ pub fn multi_proof(&self) -> Result<MultiProof, StateProofError> {
 let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
- let trie_cursor =
- DatabaseAccountTrieCursor::new(self.tx.cursor_read::<tables::AccountsTrie>()?);
+ let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
 
 // Create the walker.
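+ // All proof targets are merged into the account prefix set so the walker descends into every target path, even where the trie is otherwise unchanged.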
 let mut prefix_set = self.prefix_sets.account_prefix_set.clone();
- prefix_set.insert(target_nibbles.clone());
+ prefix_set.extend(self.targets.keys().map(Nibbles::unpack));
 let walker = TrieWalker::new(trie_cursor, prefix_set.freeze());
 
 // Create a hash builder to rebuild the root node since it is not available in the database.
- let retainer = ProofRetainer::from_iter([target_nibbles]);
+ let retainer = ProofRetainer::from_iter(self.targets.keys().map(Nibbles::unpack));
 let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer);
 
+ let mut storage_multiproofs = HashMap::default();
 let mut account_rlp = Vec::with_capacity(128);
 let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor);
 while let Some(account_node) = account_node_iter.try_next()? {
@@ -89,62 +108,44 @@ where
 hash_builder.add_branch(node.key, node.value, node.children_are_in_trie);
 }
 TrieElement::Leaf(hashed_address, account) => {
- let storage_root = if hashed_address == target_hashed_address {
- let (storage_root, storage_proofs) =
- self.storage_root_with_proofs(hashed_address, slots)?;
- account_proof.set_account(account, storage_root, storage_proofs);
- storage_root
- } else {
- self.storage_root(hashed_address)?
- };
+ let storage_multiproof = self.storage_multiproof(hashed_address)?;
 
+ // Encode account
 account_rlp.clear();
- let account = TrieAccount::from((account, storage_root));
+ let account = TrieAccount::from((account, storage_multiproof.root));
 account.encode(&mut account_rlp as &mut dyn BufMut);
 
 hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp);
+ storage_multiproofs.insert(hashed_address, storage_multiproof);
 }
 }
 }
 
- let _ = hash_builder.root();
-
- let proofs = hash_builder.take_proofs();
- account_proof.set_proof(proofs.values().cloned().collect());
-
- Ok(account_proof)
- }
-
- /// Compute storage root.
- pub fn storage_root(&self, hashed_address: B256) -> Result<B256, StorageRootError> {
- let (storage_root, _) = self.storage_root_with_proofs(hashed_address, &[])?;
- Ok(storage_root)
+ Ok(MultiProof { account_subtree: hash_builder.take_proofs(), storage_multiproofs })
 }
 
- /// Compute the storage root and retain proofs for requested slots.
- pub fn storage_root_with_proofs(
+ /// Generate a storage multiproof according to specified targets.
+ pub fn storage_multiproof(
 &self,
 hashed_address: B256,
- slots: &[B256],
- ) -> Result<(B256, Vec<StorageProof>), StorageRootError> {
+ ) -> Result<StorageMultiProof, StateProofError> {
 let mut hashed_storage_cursor =
 self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?;
 
- let mut proofs = slots.iter().copied().map(StorageProof::new).collect::<Vec<_>>();
-
 // short circuit on empty storage
 if hashed_storage_cursor.is_storage_empty()? {
- return Ok((EMPTY_ROOT_HASH, proofs))
+ return Ok(StorageMultiProof::default())
 }
 
- let target_nibbles = proofs.iter().map(|p| p.nibbles.clone()).collect::<Vec<_>>();
+ let target_nibbles = self
+ .targets
+ .get(&hashed_address)
+ .map_or(Vec::new(), |slots| slots.iter().map(Nibbles::unpack).collect());
+
 let mut prefix_set =
 self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default();
 prefix_set.extend(target_nibbles.clone());
 
- let trie_cursor = DatabaseStorageTrieCursor::new(
- self.tx.cursor_dup_read::<tables::StoragesTrie>()?,
- hashed_address,
- );
+ let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(hashed_address)?;
 let walker = TrieWalker::new(trie_cursor, prefix_set.freeze());
 
 let retainer = ProofRetainer::from_iter(target_nibbles);
@@ -156,320 +157,15 @@ where
 hash_builder.add_branch(node.key, node.value, node.children_are_in_trie);
 }
 TrieElement::Leaf(hashed_slot, value) => {
- let nibbles = Nibbles::unpack(hashed_slot);
- if let Some(proof) = proofs.iter_mut().find(|proof| proof.nibbles == nibbles) {
- proof.set_value(value);
- }
- hash_builder.add_leaf(nibbles, alloy_rlp::encode_fixed_size(&value).as_ref());
+ hash_builder.add_leaf(
+ Nibbles::unpack(hashed_slot),
+ alloy_rlp::encode_fixed_size(&value).as_ref(),
+ );
 }
 }
 }
 
 let root = hash_builder.root();
-
- let all_proof_nodes = hash_builder.take_proofs();
- for proof in &mut proofs {
- // Iterate over all proof nodes and find the matching ones.
- // The filtered results are guaranteed to be in order.
- let matching_proof_nodes = all_proof_nodes
- .iter()
- .filter(|(path, _)| proof.nibbles.starts_with(path))
- .map(|(_, node)| node.clone());
- proof.set_proof(matching_proof_nodes.collect());
- }
-
- Ok((root, proofs))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::StateRoot;
- use once_cell::sync::Lazy;
- use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET};
- use reth_db_api::database::Database;
- use reth_primitives::{Account, Bytes, StorageEntry, U256};
- use reth_provider::{test_utils::create_test_provider_factory, HashingWriter, ProviderFactory};
- use reth_storage_errors::provider::ProviderResult;
- use std::{str::FromStr, sync::Arc};
-
- /*
- World State (sampled from )
- | address | prefix | hash | balance
- |--------------------------------------------|-----------|--------------------------------------------------------------------|--------
- | 0x2031f89b3ea8014eb51a78c316e42af3e0d7695f | 0xa711355 | 0xa711355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40 | 45 eth
- | 0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2 | 0xa77d337 | 0xa77d337781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37a | 1 wei
- | 0x62b0dd4aab2b1a0a04e279e2b828791a10755528 | 0xa7f9365 | 0xa7f936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446e | 1.1 eth
- | 0x1ed9b1dd266b607ee278726d324b855a093394a6 | 0xa77d397 | 0xa77d397a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3 | .12 eth
-
- All expected testspec results were obtained from querying proof RPC on the running geth instance `geth init crates/trie/testdata/proof-genesis.json && geth --http`.
- */ - static TEST_SPEC: Lazy> = Lazy::new(|| { - ChainSpec { - chain: Chain::from_id(12345), - genesis: serde_json::from_str(include_str!("../testdata/proof-genesis.json")) - .expect("Can't deserialize test genesis json"), - ..Default::default() - } - .into() - }); - - fn convert_to_proof<'a>(path: impl IntoIterator) -> Vec { - path.into_iter().map(Bytes::from_str).collect::, _>>().unwrap() - } - - fn insert_genesis( - provider_factory: &ProviderFactory, - chain_spec: Arc, - ) -> ProviderResult { - let mut provider = provider_factory.provider_rw()?; - - // Hash accounts and insert them into hashing table. - let genesis = chain_spec.genesis(); - let alloc_accounts = genesis - .alloc - .iter() - .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); - provider.insert_account_for_hashing(alloc_accounts).unwrap(); - - let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { - // Only return `Some` if there is storage. - account.storage.map(|storage| { - ( - addr, - storage - .into_iter() - .map(|(key, value)| StorageEntry { key, value: value.into() }), - ) - }) - }); - provider.insert_storage_for_hashing(alloc_storage)?; - - let (root, updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .map_err(Into::::into)?; - updates.write_to_database(provider.tx_mut())?; - - provider.commit()?; - - Ok(root) - } - - #[test] - fn testspec_proofs() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); - - let data = Vec::from([ - ( - "0x2031f89b3ea8014eb51a78c316e42af3e0d7695f", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xf8719f31355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40b84ff84d80890270801d946c940000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", - "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", - "0xf8679e207781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37ab846f8448001a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x62b0dd4aab2b1a0a04e279e2b828791a10755528", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - 
"0xf8709f3936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446eb84ef84c80880f43fc2c04ee0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x1ed9b1dd266b607ee278726d324b855a093394a6", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", - "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", - "0xf86f9e207a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3b84ef84c808801aa535d3d0c0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ]); - - let provider = factory.provider().unwrap(); - for (target, expected_proof) in data { - let target = Address::from_str(target).unwrap(); - let account_proof = - Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!( - account_proof.proof, - expected_proof, - "proof for {target:?} does not match" - ); - assert_eq!(account_proof.verify(root), Ok(())); - } - } - - #[test] - fn testspec_empty_storage_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); - - let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); - let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); - - let provider = factory.provider().unwrap(); - let account_proof = - Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); - assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); - - assert_eq!(slots.len(), account_proof.storage_proofs.len()); - for (idx, slot) in slots.into_iter().enumerate() { - let proof = account_proof.storage_proofs.get(idx).unwrap(); - assert_eq!(proof, &StorageProof::new(slot)); - assert_eq!(proof.verify(account_proof.storage_root), Ok(())); - } - assert_eq!(account_proof.verify(root), Ok(())); - } - - #[test] - fn mainnet_genesis_account_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); - - // Address from mainnet genesis allocation. 
- // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` - let target = Address::from_str("0x000d836201318ec6899a67540690382780743280").unwrap(); - - // `cast proof 0x000d836201318ec6899a67540690382780743280 --block 0` - let expected_account_proof = convert_to_proof([ - "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", - "0xf90211a0dae48f5b47930c28bb116fbd55e52cd47242c71bf55373b55eb2805ee2e4a929a00f1f37f337ec800e2e5974e2e7355f10f1a4832b39b846d916c3597a460e0676a0da8f627bb8fbeead17b318e0a8e4f528db310f591bb6ab2deda4a9f7ca902ab5a0971c662648d58295d0d0aa4b8055588da0037619951217c22052802549d94a2fa0ccc701efe4b3413fd6a61a6c9f40e955af774649a8d9fd212d046a5a39ddbb67a0d607cdb32e2bd635ee7f2f9e07bc94ddbd09b10ec0901b66628e15667aec570ba05b89203dc940e6fa70ec19ad4e01d01849d3a5baa0a8f9c0525256ed490b159fa0b84227d48df68aecc772939a59afa9e1a4ab578f7b698bdb1289e29b6044668ea0fd1c992070b94ace57e48cbf6511a16aa770c645f9f5efba87bbe59d0a042913a0e16a7ccea6748ae90de92f8aef3b3dc248a557b9ac4e296934313f24f7fced5fa042373cf4a00630d94de90d0a23b8f38ced6b0f7cb818b8925fee8f0c2a28a25aa05f89d2161c1741ff428864f7889866484cef622de5023a46e795dfdec336319fa07597a017664526c8c795ce1da27b8b72455c49657113e0455552dbc068c5ba31a0d5be9089012fda2c585a1b961e988ea5efcd3a06988e150a8682091f694b37c5a0f7b0352e38c315b2d9a14d51baea4ddee1770974c806e209355233c3c89dce6ea049bf6e8df0acafd0eff86defeeb305568e44d52d2235cf340ae15c6034e2b24180", - "0xf901f1a0cf67e0f5d5f8d70e53a6278056a14ddca46846f5ef69c7bde6810d058d4a9eda80a06732ada65afd192197fe7ce57792a7f25d26978e64e954b7b84a1f7857ac279da05439f8d011683a6fc07efb90afca198fd7270c795c835c7c85d91402cda992eaa0449b93033b6152d289045fdb0bf3f44926f831566faa0e616b7be1abaad2cb2da031be6c3752bcd7afb99b1bb102baf200f8567c394d464315323a363697646616a0a40e3ed11d906749aa501279392ffde868bd35102db41364d9c601fd651f974aa0044bfa4fe8dd1a58e6c7144da79326e94d1331c0b00373f6ae7f3662f45534b7a098005e3e48db68cb1dc9b9f034ff74d2392028ddf718b0f2084133017da2c2e7a02a62bc40414ee95b02e202a9e89babbabd24bef0abc3fc6dcd3e9144ceb0b725a0239facd895bbf092830390a8676f34b35b29792ae561f196f86614e0448a5792a0a4080f88925daff6b4ce26d188428841bd65655d8e93509f2106020e76d41eefa04918987904be42a6894256ca60203283d1b89139cf21f09f5719c44b8cdbb8f7a06201fc3ef0827e594d953b5e3165520af4fceb719e11cc95fd8d3481519bfd8ca05d0e353d596bd725b09de49c01ede0f29023f0153d7b6d401556aeb525b2959ba0cd367d0679950e9c5f2aa4298fd4b081ade2ea429d71ff390c50f8520e16e30880", - 
"0xf87180808080808080a0dbee8b33c73b86df839f309f7ac92eee19836e08b39302ffa33921b3c6a09f66a06068b283d51aeeee682b8fb5458354315d0b91737441ede5e137c18b4775174a8080808080a0fe7779c7d58c2fda43eba0a6644043c86ebb9ceb4836f89e30831f23eb059ece8080", - "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]); - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); - assert_eq!(account_proof.verify(root), Ok(())); - } - - #[test] - fn mainnet_genesis_account_proof_nonexistent() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); - - // Address that does not exist in mainnet genesis allocation. - // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` - let target = Address::from_str("0x000d836201318ec6899a67540690382780743281").unwrap(); - - // `cast proof 0x000d836201318ec6899a67540690382780743281 --block 0` - let expected_account_proof = convert_to_proof([ - "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", - "0xf90211a0586b1ddec8db4824154209d355a1989b6c43aa69aba36e9d70c9faa53e7452baa0f86db47d628c73764d74b9ccaed73b8486d97a7731d57008fc9efaf417411860a0d9faed7b9ea107b5d98524246c977e782377f976e34f70717e8b1207f2f9b981a00218f59ccedf797c95e27c56405b9bf16845050fb43e773b66b26bc6992744f5a0dbf396f480c4e024156644adea7c331688d03742369e9d87ab8913bc439ff975a0aced524f39b22c62a5be512ddbca89f0b89b47c311065ccf423dee7013c7ea83a0c06b05f80b237b403adc019c0bc95b5de935021b14a75cbc18509eec60dfd83aa085339d45c4a52b7d523c301701f1ab339964e9c907440cff0a871c98dcf8811ea03ae9f6b8e227ec9be9461f0947b01696f78524c4519a6dee9fba14d209952cf9a0af17f551f9fa1ba4be41d0b342b160e2e8468d7e98a65a2dbf9d5fe5d6928024a0b850ac3bc03e9a309cc59ce5f1ab8db264870a7a22786081753d1db91897b8e6a09e796a4904bd78cb2655b5f346c94350e2d5f0dbf2bc00ac00871cd7ba46b241a0f6f0377427b900529caf32abf32ba1eb93f5f70153aa50b90bf55319a434c252a0725eaf27c8ee07e9b2511a6d6a0d71c649d855e8a9ed26e667903e2e94ae47cba0e4139fb48aa1a524d47f6e0df80314b88b52202d7e853da33c276aa8572283a8a05e9003d54a45935fdebae3513dc7cd16626dc05e1d903ae7f47f1a35aa6e234580", - 
"0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" - ]); - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); - assert_eq!(account_proof.verify(root), Ok(())); - } - - #[test] - fn holesky_deposit_contract_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, HOLESKY.clone()).unwrap(); - - let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); - // existent - let slot_22 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000022") - .unwrap(); - let slot_23 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000023") - .unwrap(); - let slot_24 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000024") - .unwrap(); - // non-existent - let slot_100 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000100") - .unwrap(); - let slots = Vec::from([slot_22, slot_23, slot_24, slot_100]); - - // `cast proof 0x4242424242424242424242424242424242424242 0x22 0x23 0x24 0x100 --block 0` - let expected = AccountProof { - address: target, - info: Some(Account { - balance: U256::ZERO, - nonce: 0, - bytecode_hash: Some(B256::from_str("0x2034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc").unwrap()) - }), - storage_root: B256::from_str("0x556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599").unwrap(), - proof: convert_to_proof([ - 
"0xf90211a0ea92fb71507739d5afe328d607b2c5e98322b7aa7cdfeccf817543058b54af70a0bd0c2525b5bee47abf7120c9e01ec3249699d687f80ebb96ed9ad9de913dbab0a0ab4b14b89416eb23c6b64204fa45cfcb39d4220016a9cd0815ebb751fe45eb71a0986ae29c2148b9e61f9a7543f44a1f8d029f1c5095b359652e9ec94e64b5d393a0555d54aa23ed990b0488153418637df7b2c878b604eb761aa2673b609937b0eba0140afb6a3909cc6047b3d44af13fc83f161a7e4c4ddba430a2841862912eb222a031b1185c1f455022d9e42ce04a71f174eb9441b1ada67449510500f4d85b3b22a051ecd01e18113b23cc65e62f67d69b33ee15d20bf81a6b524f7df90ded00ca15a0703769d6a7befad000bc2b4faae3e41b809b1b1241fe2964262554e7e3603488a0e5de7f600e4e6c3c3e5630e0c66f50506a17c9715642fccb63667e81397bbf93a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606a04225fda3b89f0c59bf40129d1d5e5c3bf67a2129f0c55e53ffdd2cebf185d644a078e0f7fd3ae5a9bc90f66169614211b48fe235eb64818b3935d3e69c53523b9aa0a870e00e53ebaa1e9ec16e5f36606fd7d21d3a3c96894c0a2a23550949d4fdf7a0809226b69cee1f4f22ced1974e7805230da1909036a49a7652428999431afac2a0f11593b2407e86e11997325d8df2d22d937bbe0aef8302ba40c6be0601b04fc380", - "0xf901f1a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5fa05f5f09855df46330aa310e8d6be5fb82d1a4b975782d9b29acf06ac8d3e72b1ca0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06ba04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83a004f8c7cc4f1335182a1709fb28fc67d52e59878480210abcba864d5d1fd4a066a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d80a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86fa02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fffa0f3b039a4f32349e85c782d1164c1890e5bf16badc9ee4cf827db6afd2229dde6a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b280", - "0xf869a0202a47fc6863b89a6b51890ef3c1550d560886c027141d2058ba1e2d4c66d99ab846f8448080a0556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599a02034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc" - ]), - storage_proofs: Vec::from([ - StorageProof { - key: slot_22, - nibbles: Nibbles::unpack(keccak256(slot_22)), - value: U256::from_str("0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf85180a0776aa456ba9c5008e03b82b841a9cf2fc1e8578cfacd5c9015804eae315f17fb80808080808080808080808080a072e3e284d47badbb0a5ca1421e1179d3ea90cc10785b26b74fb8a81f0f9e841880", - 
"0xf843a020035b26e3e9eee00e0d72fd1ee8ddca6894550dca6916ea2ac6baa90d11e510a1a0f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b" - ]) - }, - StorageProof { - key: slot_23, - nibbles: Nibbles::unpack(keccak256(slot_23)), - value: U256::from_str("0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf8518080808080a0d546c4ca227a267d29796643032422374624ed109b3d94848c5dc06baceaee76808080808080a027c48e210ccc6e01686be2d4a199d35f0e1e8df624a8d3a17c163be8861acd6680808080", - "0xf843a0207b2b5166478fd4318d2acc6cc2c704584312bdd8781b32d5d06abda57f4230a1a0db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71" - ]) - }, - StorageProof { - key: slot_24, - nibbles: Nibbles::unpack(keccak256(slot_24)), - value: U256::from_str("0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf85180808080a030263404acfee103d0b1019053ff3240fce433c69b709831673285fa5887ce4c80808080808080a0f8f1fbb1f7b482d9860480feebb83ff54a8b6ec1ead61cc7d2f25d7c01659f9c80808080", - "0xf843a020d332d19b93bcabe3cce7ca0c18a052f57e5fd03b4758a09f30f5ddc4b22ec4a1a0c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c" - ]) - }, - StorageProof { - key: slot_100, - nibbles: Nibbles::unpack(keccak256(slot_100)), - value: U256::ZERO, - proof: convert_to_proof([ - 
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf891a090bacef44b189ddffdc5f22edc70fe298c58e5e523e6e1dfdf7dbc6d657f7d1b80a026eed68746028bc369eb456b7d3ee475aa16f34e5eaa0c98fdedb9c59ebc53b0808080a09ce86197173e14e0633db84ce8eea32c5454eebe954779255644b45b717e8841808080a0328c7afb2c58ef3f8c4117a8ebd336f1a61d24591067ed9c5aae94796cac987d808080808080" - ]) - }, - ]) - }; - - let provider = factory.provider().unwrap(); - let account_proof = - Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); - similar_asserts::assert_eq!(account_proof, expected); - assert_eq!(account_proof.verify(root), Ok(())); + Ok(StorageMultiProof { root, subtree: hash_builder.take_proofs() }) } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 84bfb8fd6f5f..65222fdc0a8e 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -1,9 +1,6 @@ use crate::{ - hashed_cursor::HashedPostStateCursorFactory, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, - proof::Proof, - updates::TrieUpdates, - Nibbles, StateRoot, + Nibbles, }; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; @@ -13,14 +10,9 @@ use reth_db_api::{ models::{AccountBeforeTx, BlockNumberAddress}, transaction::DbTx, }; -use reth_execution_errors::StateRootError; use reth_primitives::{keccak256, Account, Address, BlockNumber, B256, U256}; -use reth_trie_common::AccountProof; use revm::db::BundleAccount; -use std::{ - collections::{hash_map, HashMap, HashSet}, - ops::RangeInclusive, -}; +use std::collections::{hash_map, HashMap, HashSet}; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -62,20 +54,13 @@ impl HashedPostState { Self { accounts, storages } } - /// Initialize [`HashedPostState`] from revert range. - /// Iterate over state reverts in the specified block range and - /// apply them to hashed state in reverse. - /// - /// NOTE: In order to have the resulting [`HashedPostState`] be a correct - /// overlay of the plain state, the end of the range must be the current tip. - pub fn from_revert_range( - tx: &TX, - range: RangeInclusive, - ) -> Result { + /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts from the specified + /// block up to the current tip and aggregates them into hashed state in reverse. + pub fn from_reverts(tx: &TX, from: BlockNumber) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::>::default(); let mut account_changesets_cursor = tx.cursor_read::()?; - for entry in account_changesets_cursor.walk_range(range.clone())? { + for entry in account_changesets_cursor.walk_range(from..)? 
{ let (_, AccountBeforeTx { address, info }) = entry?; if let hash_map::Entry::Vacant(entry) = accounts.entry(address) { entry.insert(info); @@ -85,7 +70,9 @@ impl HashedPostState { // Iterate over storage changesets and record value before first occurring storage change. let mut storages = HashMap::>::default(); let mut storage_changesets_cursor = tx.cursor_read::()?; - for entry in storage_changesets_cursor.walk_range(BlockNumberAddress::range(range))? { + for entry in + storage_changesets_cursor.walk_range(BlockNumberAddress((from, Address::ZERO))..)? + { let (BlockNumberAddress((_, address)), storage) = entry?; let account_storage = storages.entry(address).or_default(); if let hash_map::Entry::Vacant(entry) = account_storage.entry(storage.key) { @@ -202,74 +189,6 @@ impl HashedPostState { TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } } - - /// Calculate the state root for this [`HashedPostState`]. - /// Internally, this method retrieves prefixsets and uses them - /// to calculate incremental state root. - /// - /// # Example - /// - /// ``` - /// use reth_db::test_utils::create_test_rw_db; - /// use reth_db_api::database::Database; - /// use reth_primitives::{Account, U256}; - /// use reth_trie::HashedPostState; - /// - /// // Initialize the database - /// let db = create_test_rw_db(); - /// - /// // Initialize hashed post state - /// let mut hashed_state = HashedPostState::default(); - /// hashed_state.accounts.insert( - /// [0x11; 32].into(), - /// Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }), - /// ); - /// - /// // Calculate the state root - /// let tx = db.tx().expect("failed to create transaction"); - /// let state_root = hashed_state.state_root(&tx); - /// ``` - /// - /// # Returns - /// - /// The state root for this [`HashedPostState`]. - pub fn state_root(&self, tx: &TX) -> Result { - let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets().freeze(); - StateRoot::from_tx(tx) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) - .with_prefix_sets(prefix_sets) - .root() - } - - /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie - /// updates. See [`Self::state_root`] for more info. - pub fn state_root_with_updates( - &self, - tx: &TX, - ) -> Result<(B256, TrieUpdates), StateRootError> { - let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets().freeze(); - StateRoot::from_tx(tx) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) - .with_prefix_sets(prefix_sets) - .root_with_updates() - } - - /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. - pub fn account_proof( - &self, - tx: &TX, - address: Address, - slots: &[B256], - ) -> Result { - let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets(); - Proof::from_tx(tx) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) - .with_prefix_sets_mut(prefix_sets) - .account_proof(address, slots) - } } /// Representation of in-memory hashed storage. 
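Two things fall out of this hunk: `from_reverts` now walks changesets with an open-ended range from the given block to the tip, recording only the first (oldest) pre-state value per key via the vacant-entry check, and the `state_root`/`state_root_with_updates`/`account_proof` conveniences are gone from `HashedPostState`, so callers wire the overlay up themselves. A sketch of the equivalent call sequence against the constructor-based API introduced further down in this patch (assuming, as the removed code did, a transaction type that doubles as the trie cursor factory):

```rust
// What the removed `HashedPostState::state_root` convenience did, expressed
// against the new `StateRoot::new` constructor. `tx` serves as the trie
// cursor factory; hashed cursors overlay the post state onto the database.
let sorted = post_state.clone().into_sorted();
let prefix_sets = post_state.construct_prefix_sets().freeze();
let root = StateRoot::new(tx, HashedPostStateCursorFactory::new(tx, &sorted))
    .with_prefix_sets(prefix_sets)
    .root()?;
```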
@@ -309,7 +228,7 @@ impl HashedStorage { let mut non_zero_valued_slots = Vec::new(); let mut zero_valued_slots = HashSet::default(); for (hashed_slot, value) in self.storage { - if value == U256::ZERO { + if value.is_zero() { zero_valued_slots.insert(hashed_slot); } else { non_zero_valued_slots.push((hashed_slot, value)); @@ -392,13 +311,6 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { use super::*; - use reth_db::test_utils::create_test_rw_db; - use reth_db_api::database::Database; - use reth_primitives::hex; - use revm::{ - db::states::BundleState, - primitives::{AccountInfo, HashMap}, - }; #[test] fn hashed_state_wiped_extension() { @@ -473,34 +385,4 @@ mod tests { ); assert_eq!(account_storage.map(|st| st.wiped), Some(true)); } - - #[test] - fn from_bundle_state_with_rayon() { - let address1 = Address::with_last_byte(1); - let address2 = Address::with_last_byte(2); - let slot1 = U256::from(1015); - let slot2 = U256::from(2015); - - let account1 = AccountInfo { nonce: 1, ..Default::default() }; - let account2 = AccountInfo { nonce: 2, ..Default::default() }; - - let bundle_state = BundleState::builder(2..=2) - .state_present_account_info(address1, account1) - .state_present_account_info(address2, account2) - .state_storage(address1, HashMap::from([(slot1, (U256::ZERO, U256::from(10)))])) - .state_storage(address2, HashMap::from([(slot2, (U256::ZERO, U256::from(20)))])) - .build(); - assert_eq!(bundle_state.reverts.len(), 1); - - let post_state = HashedPostState::from_bundle_state(&bundle_state.state); - assert_eq!(post_state.accounts.len(), 2); - assert_eq!(post_state.storages.len(), 2); - - let db = create_test_rw_db(); - let tx = db.tx().expect("failed to create transaction"); - assert_eq!( - post_state.state_root(&tx).unwrap(), - hex!("b464525710cafcf5d4044ac85b72c08b1e76231b8d91f288fe438cc41d8eaafd") - ); - } } diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index c444a305638d..2b5c6d0b63c7 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, PrefixSetLoader, TriePrefixSets}, + prefix_set::{PrefixSet, TriePrefixSets}, progress::{IntermediateStateRootState, StateRootProgress}, stats::TrieTracker, trie_cursor::TrieCursorFactory, @@ -10,14 +10,12 @@ use crate::{ HashBuilder, Nibbles, TrieAccount, }; use alloy_rlp::{BufMut, Encodable}; -use reth_db_api::transaction::DbTx; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, BlockNumber, B256}; -use std::ops::RangeInclusive; -use tracing::{debug, trace}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; +use tracing::trace; #[cfg(feature = "metrics")] -use crate::metrics::{StateRootMetrics, TrieRootMetrics, TrieType}; +use crate::metrics::{StateRootMetrics, TrieRootMetrics}; /// `StateRoot` is used to compute the root node of a state trie. #[derive(Debug)] @@ -38,6 +36,23 @@ pub struct StateRoot { } impl StateRoot { + /// Creates [`StateRoot`] with `trie_cursor_factory` and `hashed_cursor_factory`. All other + /// parameters are set to reasonable defaults. + /// + /// The cursors created by given factories are then used to walk through the accounts and + /// calculate the state root value with. 
+ pub fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { + trie_cursor_factory, + hashed_cursor_factory, + prefix_sets: TriePrefixSets::default(), + previous_state: None, + threshold: 100_000, + #[cfg(feature = "metrics")] + metrics: StateRootMetrics::default(), + } + } + /// Set the prefix sets. pub fn with_prefix_sets(mut self, prefix_sets: TriePrefixSets) -> Self { self.prefix_sets = prefix_sets; @@ -89,79 +104,6 @@ impl StateRoot { } } -impl<'a, TX: DbTx> StateRoot<&'a TX, &'a TX> { - /// Create a new [`StateRoot`] instance. - pub fn from_tx(tx: &'a TX) -> Self { - Self { - trie_cursor_factory: tx, - hashed_cursor_factory: tx, - prefix_sets: TriePrefixSets::default(), - previous_state: None, - threshold: 100_000, - #[cfg(feature = "metrics")] - metrics: StateRootMetrics::default(), - } - } - - /// Given a block number range, identifies all the accounts and storage keys that - /// have changed. - /// - /// # Returns - /// - /// An instance of state root calculator with account and storage prefixes loaded. - pub fn incremental_root_calculator( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?; - Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets)) - } - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes. - /// - /// # Returns - /// - /// The updated state root. - pub fn incremental_root( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - debug!(target: "trie::loader", ?range, "incremental state root"); - Self::incremental_root_calculator(tx, range)?.root() - } - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes collecting updates in the process. - /// - /// Ignores the threshold. - /// - /// # Returns - /// - /// The updated state root and the trie updates. - pub fn incremental_root_with_updates( - tx: &'a TX, - range: RangeInclusive, - ) -> Result<(B256, TrieUpdates), StateRootError> { - debug!(target: "trie::loader", ?range, "incremental state root"); - Self::incremental_root_calculator(tx, range)?.root_with_updates() - } - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes collecting updates in the process. - /// - /// # Returns - /// - /// The intermediate progress of state root computation. - pub fn incremental_root_with_progress( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - debug!(target: "trie::loader", ?range, "incremental state root with progress"); - Self::incremental_root_calculator(tx, range)?.root_with_progress() - } -} - impl StateRoot where T: TrieCursorFactory + Clone, @@ -420,30 +362,6 @@ impl StorageRoot { } } -impl<'a, TX: DbTx> StorageRoot<&'a TX, &'a TX> { - /// Create a new storage root calculator from database transaction and raw address. - pub fn from_tx(tx: &'a TX, address: Address) -> Self { - Self::new( - tx, - tx, - address, - #[cfg(feature = "metrics")] - TrieRootMetrics::new(TrieType::Storage), - ) - } - - /// Create a new storage root calculator from database transaction and hashed address. 
- pub fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self { - Self::new_hashed( - tx, - tx, - hashed_address, - #[cfg(feature = "metrics")] - TrieRootMetrics::new(TrieType::Storage), - ) - } -} - impl StorageRoot where T: TrieCursorFactory, @@ -536,780 +454,3 @@ where Ok((root, storage_slots_walked, trie_updates)) } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - prefix_set::PrefixSetMut, - test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, - BranchNodeCompact, TrieMask, - }; - use proptest::{prelude::ProptestConfig, proptest}; - use proptest_arbitrary_interop::arb; - use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; - use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - transaction::DbTxMut, - }; - use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256}; - use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderRW}; - use reth_trie_common::triehash::KeccakHasher; - use std::{ - collections::{BTreeMap, HashMap}, - ops::Mul, - str::FromStr, - sync::Arc, - }; - - fn insert_account( - tx: &impl DbTxMut, - address: Address, - account: Account, - storage: &BTreeMap, - ) { - let hashed_address = keccak256(address); - tx.put::(hashed_address, account).unwrap(); - insert_storage(tx, hashed_address, storage); - } - - fn insert_storage(tx: &impl DbTxMut, hashed_address: B256, storage: &BTreeMap) { - for (k, v) in storage { - tx.put::( - hashed_address, - StorageEntry { key: keccak256(k), value: *v }, - ) - .unwrap(); - } - } - - fn incremental_vs_full_root(inputs: &[&str], modified: &str) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - let hashed_address = B256::with_last_byte(1); - - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - let data = inputs.iter().map(|x| B256::from_str(x).unwrap()); - let value = U256::from(0); - for key in data { - hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap(); - } - - // Generate the intermediate nodes on the receiving end of the channel - let (_, _, trie_updates) = - StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); - - // 1. Some state transition happens, update the hashed storage to the new value - let modified_key = B256::from_str(modified).unwrap(); - let value = U256::from(1); - if hashed_storage_cursor.seek_by_key_subkey(hashed_address, modified_key).unwrap().is_some() - { - hashed_storage_cursor.delete_current().unwrap(); - } - hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: modified_key, value }) - .unwrap(); - - // 2. Calculate full merkle root - let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address); - let modified_root = loader.root().unwrap(); - - // Update the intermediate roots table so that we can run the incremental verification - trie_updates.write_to_database(tx.tx_ref(), hashed_address).unwrap(); - - // 3. 
Calculate the incremental root - let mut storage_changes = PrefixSetMut::default(); - storage_changes.insert(Nibbles::unpack(modified_key)); - let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address) - .with_prefix_set(storage_changes.freeze()); - let incremental_root = loader.root().unwrap(); - - assert_eq!(modified_root, incremental_root); - } - - #[test] - fn branch_node_child_changes() { - incremental_vs_full_root( - &[ - "1000000000000000000000000000000000000000000000000000000000000000", - "1100000000000000000000000000000000000000000000000000000000000000", - "1110000000000000000000000000000000000000000000000000000000000000", - "1200000000000000000000000000000000000000000000000000000000000000", - "1220000000000000000000000000000000000000000000000000000000000000", - "1320000000000000000000000000000000000000000000000000000000000000", - ], - "1200000000000000000000000000000000000000000000000000000000000000", - ); - } - - #[test] - fn arbitrary_storage_root() { - proptest!(ProptestConfig::with_cases(10), |(item in arb::<(Address, std::collections::BTreeMap)>())| { - let (address, storage) = item; - - let hashed_address = keccak256(address); - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - for (key, value) in &storage { - tx.tx_ref().put::( - hashed_address, - StorageEntry { key: keccak256(key), value: *value }, - ) - .unwrap(); - } - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - let expected = storage_root(storage.into_iter()); - assert_eq!(expected, got); - }); - } - - #[test] - // This ensures we dont add empty accounts to the trie - fn test_empty_account() { - let state: State = BTreeMap::from([ - ( - Address::random(), - ( - Account { nonce: 0, balance: U256::from(0), bytecode_hash: None }, - BTreeMap::from([(B256::with_last_byte(0x4), U256::from(12))]), - ), - ), - ( - Address::random(), - ( - Account { nonce: 0, balance: U256::from(0), bytecode_hash: None }, - BTreeMap::default(), - ), - ), - ( - Address::random(), - ( - Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256("test")), - }, - BTreeMap::from([ - (B256::ZERO, U256::from(3)), - (B256::with_last_byte(2), U256::from(1)), - ]), - ), - ), - ]); - test_state_root_with_state(state); - } - - #[test] - // This ensures we return an empty root when there are no storage entries - fn test_empty_storage_root() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let address = Address::random(); - let code = "el buen fla"; - let account = Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256(code)), - }; - insert_account(tx.tx_ref(), address, account, &Default::default()); - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - assert_eq!(got, EMPTY_ROOT_HASH); - } - - #[test] - // This ensures that the walker goes over all the storage slots - fn test_storage_root() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let address = Address::random(); - let storage = - BTreeMap::from([(B256::ZERO, U256::from(3)), (B256::with_last_byte(2), U256::from(1))]); - - let code = "el buen fla"; - let account = Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256(code)), - }; - - insert_account(tx.tx_ref(), 
address, account, &storage); - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - - assert_eq!(storage_root(storage.into_iter()), got); - } - - type State = BTreeMap)>; - - #[test] - fn arbitrary_state_root() { - proptest!( - ProptestConfig::with_cases(10), | (state in arb::()) | { - test_state_root_with_state(state); - } - ); - } - - #[test] - fn arbitrary_state_root_with_progress() { - proptest!( - ProptestConfig::with_cases(10), | (state in arb::()) | { - let hashed_entries_total = state.len() + - state.values().map(|(_, slots)| slots.len()).sum::(); - - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - for (address, (account, storage)) in &state { - insert_account(tx.tx_ref(), *address, *account, storage) - } - tx.commit().unwrap(); - let tx = factory.provider_rw().unwrap(); - - let expected = state_root(state); - - let threshold = 10; - let mut got = None; - let mut hashed_entries_walked = 0; - - let mut intermediate_state: Option> = None; - while got.is_none() { - let calculator = StateRoot::from_tx(tx.tx_ref()) - .with_threshold(threshold) - .with_intermediate_state(intermediate_state.take().map(|state| *state)); - match calculator.root_with_progress().unwrap() { - StateRootProgress::Progress(state, walked, _) => { - intermediate_state = Some(state); - hashed_entries_walked += walked; - }, - StateRootProgress::Complete(root, walked, _) => { - got = Some(root); - hashed_entries_walked += walked; - }, - }; - } - assert_eq!(expected, got.unwrap()); - assert_eq!(hashed_entries_total, hashed_entries_walked) - } - ); - } - - fn test_state_root_with_state(state: State) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - for (address, (account, storage)) in &state { - insert_account(tx.tx_ref(), *address, *account, storage) - } - tx.commit().unwrap(); - let expected = state_root(state); - - let tx = factory.provider_rw().unwrap(); - let got = StateRoot::from_tx(tx.tx_ref()).root().unwrap(); - assert_eq!(expected, got); - } - - fn encode_account(account: Account, storage_root: Option) -> Vec { - let account = TrieAccount::from((account, storage_root.unwrap_or(EMPTY_ROOT_HASH))); - let mut account_rlp = Vec::with_capacity(account.length()); - account.encode(&mut account_rlp); - account_rlp - } - - #[test] - fn storage_root_regression() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - // Some address whose hash starts with 0xB041 - let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); - let key3 = keccak256(address3); - assert_eq!(key3[0], 0xB0); - assert_eq!(key3[1], 0x41); - - let storage = BTreeMap::from( - [ - ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), - ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), - ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), - ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), - ] - .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), - ); - - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - for (hashed_slot, value) in storage.clone() { - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); - } - tx.commit().unwrap(); - let tx = factory.provider_rw().unwrap(); - - let account3_storage_root = 
StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); - let expected_root = storage_root_prehashed(storage); - assert_eq!(expected_root, account3_storage_root); - } - - #[test] - fn account_and_storage_trie() { - let ether = U256::from(1e18); - let storage = BTreeMap::from( - [ - ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), - ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), - ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), - ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), - ] - .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), - ); - - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - - let mut hash_builder = HashBuilder::default(); - - // Insert first account - let key1 = - B256::from_str("b000000000000000000000000000000000000000000000000000000000000000") - .unwrap(); - let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key1, account1).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None)); - - // Some address whose hash starts with 0xB040 - let address2 = Address::from_str("7db3e81b72d2695e19764583f6d219dbee0f35ca").unwrap(); - let key2 = keccak256(address2); - assert_eq!(key2[0], 0xB0); - assert_eq!(key2[1], 0x40); - let account2 = Account { nonce: 0, balance: ether, ..Default::default() }; - hashed_account_cursor.upsert(key2, account2).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None)); - - // Some address whose hash starts with 0xB041 - let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); - let key3 = keccak256(address3); - assert_eq!(key3[0], 0xB0); - assert_eq!(key3[1], 0x41); - let code_hash = - B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd") - .unwrap(); - let account3 = - Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) }; - hashed_account_cursor.upsert(key3, account3).unwrap(); - for (hashed_slot, value) in storage { - if hashed_storage_cursor - .seek_by_key_subkey(key3, hashed_slot) - .unwrap() - .filter(|e| e.key == hashed_slot) - .is_some() - { - hashed_storage_cursor.delete_current().unwrap(); - } - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); - } - let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); - hash_builder.add_leaf( - Nibbles::unpack(key3), - &encode_account(account3, Some(account3_storage_root)), - ); - - let key4a = - B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000") - .unwrap(); - let account4a = - Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key4a, account4a).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None)); - - let key5 = - B256::from_str("B310000000000000000000000000000000000000000000000000000000000000") - .unwrap(); - let account5 = - Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key5, account5).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None)); - - let key6 
= - B256::from_str("B340000000000000000000000000000000000000000000000000000000000000") - .unwrap(); - let account6 = - Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key6, account6).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None)); - - // Populate account & storage trie DB tables - let expected_root = - B256::from_str("72861041bc90cd2f93777956f058a545412b56de79af5eb6b8075fe2eabbe015") - .unwrap(); - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - (key2, encode_account(account2, None)), - (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - // Check computed trie root to ensure correctness - assert_eq!(computed_expected_root, expected_root); - - // Check hash builder root - assert_eq!(hash_builder.root(), computed_expected_root); - - // Check state root calculation from scratch - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(root, computed_expected_root); - - // Check account trie - let account_updates = trie_updates.clone().into_sorted().account_nodes; - assert_eq!(account_updates.len(), 2); - - let (nibbles1a, node1a) = account_updates.first().unwrap(); - assert_eq!(nibbles1a[..], [0xB]); - assert_eq!(node1a.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1a.tree_mask, TrieMask::new(0b0001)); - assert_eq!(node1a.hash_mask, TrieMask::new(0b1001)); - assert_eq!(node1a.root_hash, None); - assert_eq!(node1a.hashes.len(), 2); - - let (nibbles2a, node2a) = account_updates.last().unwrap(); - assert_eq!(nibbles2a[..], [0xB, 0x0]); - assert_eq!(node2a.state_mask, TrieMask::new(0b10001)); - assert_eq!(node2a.tree_mask, TrieMask::new(0b00000)); - assert_eq!(node2a.hash_mask, TrieMask::new(0b10000)); - assert_eq!(node2a.root_hash, None); - assert_eq!(node2a.hashes.len(), 1); - - // Check storage trie - let mut updated_storage_trie = - trie_updates.storage_tries.iter().filter(|(_, u)| !u.storage_nodes.is_empty()); - assert_eq!(updated_storage_trie.clone().count(), 1); - let (_, storage_trie_updates) = updated_storage_trie.next().unwrap(); - assert_eq!(storage_trie_updates.storage_nodes.len(), 1); - - let (nibbles3, node3) = storage_trie_updates.storage_nodes.iter().next().unwrap(); - assert!(nibbles3.is_empty()); - assert_eq!(node3.state_mask, TrieMask::new(0b1010)); - assert_eq!(node3.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node3.hash_mask, TrieMask::new(0b0010)); - - assert_eq!(node3.hashes.len(), 1); - assert_eq!(node3.root_hash, Some(account3_storage_root)); - - // Add an account - // Some address whose hash starts with 0xB1 - let address4b = Address::from_str("4f61f2d5ebd991b85aa1677db97307caf5215c91").unwrap(); - let key4b = keccak256(address4b); - assert_eq!(key4b.0[0], key4a.0[0]); - let account4b = - Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key4b, account4b).unwrap(); - - let mut prefix_set = PrefixSetMut::default(); - prefix_set.insert(Nibbles::unpack(key4b)); - - let expected_state_root = - B256::from_str("8e263cd4eefb0c3cbbb14e5541a66a755cad25bcfab1e10dd9d706263e811b28") - .unwrap(); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - account_prefix_set: prefix_set.freeze(), - ..Default::default() - }) - 
.root_with_updates() - .unwrap(); - assert_eq!(root, expected_state_root); - - let account_updates = trie_updates.into_sorted().account_nodes; - assert_eq!(account_updates.len(), 2); - - let (nibbles1b, node1b) = account_updates.first().unwrap(); - assert_eq!(nibbles1b[..], [0xB]); - assert_eq!(node1b.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1b.tree_mask, TrieMask::new(0b0001)); - assert_eq!(node1b.hash_mask, TrieMask::new(0b1011)); - assert_eq!(node1b.root_hash, None); - assert_eq!(node1b.hashes.len(), 3); - assert_eq!(node1a.hashes[0], node1b.hashes[0]); - assert_eq!(node1a.hashes[1], node1b.hashes[2]); - - let (nibbles2b, node2b) = account_updates.last().unwrap(); - assert_eq!(nibbles2b[..], [0xB, 0x0]); - assert_eq!(node2a, node2b); - tx.commit().unwrap(); - - { - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); - - let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - - let mut account_prefix_set = PrefixSetMut::default(); - account_prefix_set.insert(Nibbles::unpack(account.0)); - - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - // DELETED: (key2, encode_account(account2, None)), - (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key4b, encode_account(account4b, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - ..Default::default() - }) - .root_with_updates() - .unwrap(); - assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); - - assert_eq!(trie_updates.account_nodes.len(), 1); - - let (nibbles1c, node1c) = trie_updates.account_nodes.iter().next().unwrap(); - assert_eq!(nibbles1c[..], [0xB]); - - assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1c.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node1c.hash_mask, TrieMask::new(0b1011)); - - assert_eq!(node1c.root_hash, None); - - assert_eq!(node1c.hashes.len(), 3); - assert_ne!(node1c.hashes[0], node1b.hashes[0]); - assert_eq!(node1c.hashes[1], node1b.hashes[1]); - assert_eq!(node1c.hashes[2], node1b.hashes[2]); - } - - { - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); - - let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - let account3 = hashed_account_cursor.seek_exact(key3).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - - let mut account_prefix_set = PrefixSetMut::default(); - account_prefix_set.insert(Nibbles::unpack(account2.0)); - account_prefix_set.insert(Nibbles::unpack(account3.0)); - - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - // DELETED: (key2, encode_account(account2, None)), - // DELETED: (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key4b, encode_account(account4b, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - 
account_prefix_set: account_prefix_set.freeze(), - ..Default::default() - }) - .root_with_updates() - .unwrap(); - assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); - assert!(!trie_updates - .storage_tries - .iter() - .any(|(_, u)| !u.storage_nodes.is_empty() || !u.removed_nodes.is_empty())); // no storage root update - - assert_eq!(trie_updates.account_nodes.len(), 1); - - let (nibbles1d, node1d) = trie_updates.account_nodes.iter().next().unwrap(); - assert_eq!(nibbles1d[..], [0xB]); - - assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1d.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node1d.hash_mask, TrieMask::new(0b1010)); - - assert_eq!(node1d.root_hash, None); - - assert_eq!(node1d.hashes.len(), 2); - assert_eq!(node1d.hashes[0], node1b.hashes[1]); - assert_eq!(node1d.hashes[1], node1b.hashes[2]); - } - } - - #[test] - fn account_trie_around_extension_node() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let expected = extension_node_trie(&tx); - - let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(expected, got); - assert_trie_updates(&updates.account_nodes); - } - - #[test] - fn account_trie_around_extension_node_with_dbtrie() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let expected = extension_node_trie(&tx); - - let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(expected, got); - updates.write_to_database(tx.tx_ref()).unwrap(); - - // read the account updates from the db - let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); - let walker = accounts_trie.walk(None).unwrap(); - let account_updates = walker - .into_iter() - .map(|item| { - let (key, node) = item.unwrap(); - (key.0, node) - }) - .collect(); - assert_trie_updates(&account_updates); - } - - proptest! 
{ - #![proptest_config(ProptestConfig { - cases: 128, ..ProptestConfig::default() - })] - - #[test] - fn fuzz_state_root_incremental(account_changes: [BTreeMap; 5]) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); - - let mut state = BTreeMap::default(); - for accounts in account_changes { - let should_generate_changeset = !state.is_empty(); - let mut changes = PrefixSetMut::default(); - for (hashed_address, balance) in accounts.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - if should_generate_changeset { - changes.insert(Nibbles::unpack(hashed_address)); - } - } - - let (state_root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) - .root_with_updates() - .unwrap(); - - state.append(&mut accounts.clone()); - let expected_root = state_root_prehashed( - state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) - ); - assert_eq!(expected_root, state_root); - trie_updates.write_to_database(tx.tx_ref()).unwrap(); - } - } - } - - #[test] - fn storage_trie_around_extension_node() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let hashed_address = B256::random(); - let (expected_root, expected_updates) = extension_node_storage_trie(&tx, hashed_address); - - let (got, _, updates) = - StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); - assert_eq!(expected_root, got); - assert_eq!(expected_updates, updates); - assert_trie_updates(&updates.storage_nodes); - } - - fn extension_node_storage_trie( - tx: &DatabaseProviderRW>>, - hashed_address: B256, - ) -> (B256, StorageTrieUpdates) { - let value = U256::from(1); - - let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); - - let mut hb = HashBuilder::default().with_updates(true); - - for key in [ - hex!("30af561000000000000000000000000000000000000000000000000000000000"), - hex!("30af569000000000000000000000000000000000000000000000000000000000"), - hex!("30af650000000000000000000000000000000000000000000000000000000000"), - hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), - hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), - hex!("3100000000000000000000000000000000000000000000000000000000000000"), - ] { - hashed_storage - .upsert(hashed_address, StorageEntry { key: B256::new(key), value }) - .unwrap(); - hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value)); - } - - let root = hb.root(); - let (_, updates) = hb.split(); - let trie_updates = StorageTrieUpdates { storage_nodes: updates, ..Default::default() }; - (root, trie_updates) - } - - fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { - let a = - Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; - let val = encode_account(a, None); - - let mut hashed_accounts = tx.tx_ref().cursor_write::().unwrap(); - let mut hb = HashBuilder::default(); - - for key in [ - hex!("30af561000000000000000000000000000000000000000000000000000000000"), - hex!("30af569000000000000000000000000000000000000000000000000000000000"), - hex!("30af650000000000000000000000000000000000000000000000000000000000"), - hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), - 
hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), - hex!("3100000000000000000000000000000000000000000000000000000000000000"), - ] { - hashed_accounts.upsert(B256::new(key), a).unwrap(); - hb.add_leaf(Nibbles::unpack(key), &val); - } - - hb.root() - } - - fn assert_trie_updates(account_updates: &HashMap) { - assert_eq!(account_updates.len(), 2); - - let node = account_updates.get(&[0x3][..]).unwrap(); - let expected = BranchNodeCompact::new(0b0011, 0b0001, 0b0000, vec![], None); - assert_eq!(node, &expected); - - let node = account_updates.get(&[0x3, 0x0, 0xA, 0xF][..]).unwrap(); - assert_eq!(node.state_mask, TrieMask::new(0b101100000)); - assert_eq!(node.tree_mask, TrieMask::new(0b000000000)); - assert_eq!(node.hash_mask, TrieMask::new(0b001000000)); - - assert_eq!(node.root_hash, None); - assert_eq!(node.hashes.len(), 1); - } -} diff --git a/crates/trie/trie/src/trie_cursor/database_cursors.rs b/crates/trie/trie/src/trie_cursor/database_cursors.rs index 4c9e5e6a73b2..7149c53c0e5a 100644 --- a/crates/trie/trie/src/trie_cursor/database_cursors.rs +++ b/crates/trie/trie/src/trie_cursor/database_cursors.rs @@ -1,11 +1,17 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; -use reth_db::{tables, DatabaseError}; +use crate::{ + updates::StorageTrieUpdates, BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey, +}; +use reth_db::{ + cursor::{DbCursorRW, DbDupCursorRW}, + tables, DatabaseError, +}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, transaction::DbTx, }; use reth_primitives::B256; +use reth_trie_common::StorageTrieEntry; /// Implementation of the trie cursor factory for a database transaction. impl<'a, TX: DbTx> TrieCursorFactory for &'a TX { @@ -86,6 +92,62 @@ impl DatabaseStorageTrieCursor { } } +impl DatabaseStorageTrieCursor +where + C: DbCursorRO + + DbCursorRW + + DbDupCursorRO + + DbDupCursorRW, +{ + /// Writes storage updates + pub fn write_storage_trie_updates( + &mut self, + updates: &StorageTrieUpdates, + ) -> Result { + // The storage trie for this account has to be deleted. + if updates.is_deleted && self.cursor.seek_exact(self.hashed_address)?.is_some() { + self.cursor.delete_current_duplicates()?; + } + + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut storage_updates = updates + .removed_nodes + .iter() + .filter_map(|n| (!updates.storage_nodes.contains_key(n)).then_some((n, None))) + .collect::>(); + storage_updates + .extend(updates.storage_nodes.iter().map(|(nibbles, node)| (nibbles, Some(node)))); + + // Sort trie node updates. + storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + + let mut num_entries = 0; + for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { + num_entries += 1; + let nibbles = StoredNibblesSubKey(nibbles.clone()); + // Delete the old entry if it exists. + if self + .cursor + .seek_by_key_subkey(self.hashed_address, nibbles.clone())? + .filter(|e| e.nibbles == nibbles) + .is_some() + { + self.cursor.delete_current()?; + } + + // There is an updated version of this node, insert new entry. 
+ if let Some(node) = maybe_updated { + self.cursor.upsert( + self.hashed_address, + StorageTrieEntry { nibbles, node: node.clone() }, + )?; + } + } + + Ok(num_entries) + } +} + impl TrieCursor for DatabaseStorageTrieCursor where C: DbCursorRO + DbDupCursorRO + Send + Sync, diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index c74ee0eaf3b3..d02940e20181 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -296,65 +296,3 @@ fn compare_trie_node_entries( db_item.or(in_memory_item) } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - prefix_set::{PrefixSetMut, TriePrefixSets}, - test_utils::state_root_prehashed, - StateRoot, - }; - use proptest::prelude::*; - use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; - use reth_primitives::{Account, U256}; - use reth_provider::test_utils::create_test_provider_factory; - use std::collections::BTreeMap; - - proptest! { - #![proptest_config(ProptestConfig { - cases: 128, ..ProptestConfig::default() - })] - - #[test] - fn fuzz_in_memory_nodes(mut init_state: BTreeMap, mut updated_state: BTreeMap) { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = provider.tx_ref().cursor_write::().unwrap(); - - // Insert init state into database - for (hashed_address, balance) in init_state.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - } - - // Compute initial root and updates - let (_, trie_updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .unwrap(); - - // Insert state updates into database - let mut changes = PrefixSetMut::default(); - for (hashed_address, balance) in updated_state.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - changes.insert(Nibbles::unpack(hashed_address)); - } - - // Compute root with in-memory trie nodes overlay - let (state_root, _) = StateRoot::from_tx(provider.tx_ref()) - .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new(provider.tx_ref(), &trie_updates.into_sorted())) - .root_with_updates() - .unwrap(); - - // Verify the result - let mut state = BTreeMap::default(); - state.append(&mut init_state); - state.append(&mut updated_state); - let expected_root = state_root_prehashed( - state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) - ); - assert_eq!(expected_root, state_root); - - } - } -} diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index a1f7767e6806..2d35dbf4809f 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -1,12 +1,4 @@ -use crate::{ - walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles, StorageTrieEntry, StoredNibbles, - StoredNibblesSubKey, -}; -use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, - transaction::{DbTx, DbTxMut}, -}; +use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; use reth_primitives::B256; use std::collections::{HashMap, HashSet}; @@ -84,64 +76,6 @@ impl TrieUpdates { .collect(); TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } - - /// Flush updates all aggregated updates to the database. 
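The `write_storage_trie_updates` method added above folds `removed_nodes` and `storage_nodes` into a single sorted write set, with an update always shadowing a removal of the same key. A minimal sketch of that merge step, using `String` and `u32` as stand-ins for the crate's `Nibbles` and `BranchNodeCompact` types:

```rust
use std::collections::{HashMap, HashSet};

/// Merge removals and updates into one sorted op list.
/// `None` marks a delete, `Some(node)` an upsert, and an update
/// for a key always wins over a removal of the same key.
fn merge_trie_updates(
    removed: &HashSet<String>,
    updated: &HashMap<String, u32>,
) -> Vec<(String, Option<u32>)> {
    // Keep only removals that are not shadowed by an update.
    let mut ops: Vec<(String, Option<u32>)> = removed
        .iter()
        .filter(|key| !updated.contains_key(*key))
        .map(|key| (key.clone(), None))
        .collect();
    // Every updated node becomes an upsert.
    ops.extend(updated.iter().map(|(k, v)| (k.clone(), Some(*v))));
    // Sort by key so the database cursor is walked in order.
    ops.sort_unstable_by(|a, b| a.0.cmp(&b.0));
    ops
}

fn main() {
    let removed = HashSet::from(["a".to_string(), "b".to_string()]);
    let updated = HashMap::from([("b".to_string(), 7u32)]);
    // "a" is deleted; "b" is upserted because the update shadows the removal.
    let ops = merge_trie_updates(&removed, &updated);
    assert_eq!(ops, vec![("a".to_string(), None), ("b".to_string(), Some(7))]);
}
```

The real method then walks this sorted list with a dup-sorted cursor, deleting the old subkey entry before upserting the new `StorageTrieEntry`, which is why the in-order sort matters.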
- /// - /// # Returns - /// - /// The number of storage trie entries updated in the database. - pub fn write_to_database(self, tx: &TX) -> Result - where - TX: DbTx + DbTxMut, - { - if self.is_empty() { - return Ok(0) - } - - // Track the number of inserted entries. - let mut num_entries = 0; - - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut account_updates = self - .removed_nodes - .into_iter() - .filter_map(|n| (!self.account_nodes.contains_key(&n)).then_some((n, None))) - .collect::>(); - account_updates - .extend(self.account_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); - // Sort trie node updates. - account_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - - let mut account_trie_cursor = tx.cursor_write::()?; - for (key, updated_node) in account_updates { - let nibbles = StoredNibbles(key); - match updated_node { - Some(node) => { - if !nibbles.0.is_empty() { - num_entries += 1; - account_trie_cursor.upsert(nibbles, node)?; - } - } - None => { - num_entries += 1; - if account_trie_cursor.seek_exact(nibbles)?.is_some() { - account_trie_cursor.delete_current()?; - } - } - } - } - - let mut storage_tries = Vec::from_iter(self.storage_tries); - storage_tries.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - let mut storage_trie_cursor = tx.cursor_dup_write::()?; - for (hashed_address, storage_trie_updates) in storage_tries { - let updated_storage_entries = - storage_trie_updates.write_with_cursor(&mut storage_trie_cursor, hashed_address)?; - num_entries += updated_storage_entries; - } - - Ok(num_entries) - } } /// Trie updates for storage trie of a single account. @@ -156,6 +90,14 @@ pub struct StorageTrieUpdates { pub(crate) removed_nodes: HashSet, } +#[cfg(feature = "test-utils")] +impl StorageTrieUpdates { + /// Creates a new storage trie updates that are not marked as deleted. + pub fn new(updates: HashMap) -> Self { + Self { storage_nodes: updates, ..Default::default() } + } +} + impl StorageTrieUpdates { /// Returns empty storage trie updates with `deleted` set to `true`. pub fn deleted() -> Self { @@ -217,77 +159,6 @@ impl StorageTrieUpdates { storage_nodes, } } - - /// Initializes a storage trie cursor and writes updates to database. - pub fn write_to_database( - self, - tx: &TX, - hashed_address: B256, - ) -> Result - where - TX: DbTx + DbTxMut, - { - if self.is_empty() { - return Ok(0) - } - - let mut cursor = tx.cursor_dup_write::()?; - self.write_with_cursor(&mut cursor, hashed_address) - } - - /// Writes updates to database. - /// - /// # Returns - /// - /// The number of storage trie entries updated in the database. - fn write_with_cursor( - self, - cursor: &mut C, - hashed_address: B256, - ) -> Result - where - C: DbCursorRO - + DbCursorRW - + DbDupCursorRO - + DbDupCursorRW, - { - // The storage trie for this account has to be deleted. - if self.is_deleted && cursor.seek_exact(hashed_address)?.is_some() { - cursor.delete_current_duplicates()?; - } - - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut storage_updates = self - .removed_nodes - .into_iter() - .filter_map(|n| (!self.storage_nodes.contains_key(&n)).then_some((n, None))) - .collect::>(); - storage_updates - .extend(self.storage_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); - // Sort trie node updates. 
- storage_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - - let mut num_entries = 0; - for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { - num_entries += 1; - let nibbles = StoredNibblesSubKey(nibbles); - // Delete the old entry if it exists. - if cursor - .seek_by_key_subkey(hashed_address, nibbles.clone())? - .filter(|e| e.nibbles == nibbles) - .is_some() - { - cursor.delete_current()?; - } - - // There is an updated version of this node, insert new entry. - if let Some(node) = maybe_updated { - cursor.upsert(hashed_address, StorageTrieEntry { nibbles, node })?; - } - } - - Ok(num_entries) - } } /// Sorted trie updates used for lookups and insertions. diff --git a/docs/crates/network.md b/docs/crates/network.md index 9e381877f606..a6ac24305658 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -649,7 +649,7 @@ fn on_bodies_request( ## Transactions Task The transactions task listens for, requests, and propagates transactions both from the node's peers, and those that are added locally (e.g., submitted via RPC). Note that this task focuses solely on the network communication involved with Ethereum transactions, we will talk more about the structure of the transaction pool itself -in the [transaction-pool](../../../ethereum/transaction-pool/README.md) chapter. +in the [transaction-pool](https://reth.rs/docs/reth_transaction_pool/index.html) chapter. Again, like the network management and ETH requests tasks, the transactions task is implemented as an endless future that runs as a background task on a standalone `tokio::task`. It's represented by the `TransactionsManager` struct: diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index f3c7a843a396..c31153be135c 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -28,7 +28,7 @@ pub mod mined_sidecar; fn main() { Cli::::parse() - .run(|builder, args| async move { + .run(|builder, beacon_config| async move { // launch the node let NodeHandle { node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; @@ -38,27 +38,30 @@ fn main() { let pool = node.pool.clone(); - let mut sidecar_stream = MinedSidecarStream { - events: notifications, - pool, - beacon_config: args, - client: reqwest::Client::new(), - pending_requests: FuturesUnordered::new(), - queued_actions: VecDeque::new(), - }; + node.task_executor.spawn(async move { + let mut sidecar_stream = MinedSidecarStream { + events: notifications, + pool, + beacon_config, + client: reqwest::Client::new(), + pending_requests: FuturesUnordered::new(), + queued_actions: VecDeque::new(), + }; - while let Some(result) = sidecar_stream.next().await { - match result { - Ok(blob_transaction) => { - // Handle successful transaction - println!("Processed BlobTransaction: {:?}", blob_transaction); - } - Err(e) => { - // Handle errors specifically - eprintln!("Failed to process transaction: {:?}", e); + while let Some(result) = sidecar_stream.next().await { + match result { + Ok(blob_transaction) => { + // Handle successful transaction + println!("Processed BlobTransaction: {:?}", blob_transaction); + } + Err(e) => { + // Handle errors specifically + eprintln!("Failed to process transaction: {:?}", e); + } } } - } + }); + node_exit_future.await }) .unwrap(); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 6e58acdca169..4c2f3b712a14 100644 --- 
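The beacon-api-sidecar-fetcher change above moves the stream loop onto the node's task executor so the launch closure can immediately await `node_exit_future`. A minimal sketch of that pattern, with plain `tokio::spawn` and a dummy stream standing in for `node.task_executor.spawn` and `MinedSidecarStream`:

```rust
use futures::{stream, StreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for the mined-sidecar stream: any stream of results.
    let sidecar_stream = stream::iter(vec![
        Ok::<_, String>("blob tx 1"),
        Err("bad tx".to_string()),
    ]);

    // Stand-in for `node_exit_future`.
    let (exit_tx, node_exit_future) = tokio::sync::oneshot::channel::<()>();

    // Drive the stream on a background task so the caller stays free to
    // await node exit, mirroring the example's `task_executor.spawn`.
    tokio::spawn(async move {
        let mut sidecar_stream = sidecar_stream;
        while let Some(result) = sidecar_stream.next().await {
            match result {
                Ok(tx) => println!("Processed BlobTransaction: {tx}"),
                Err(e) => eprintln!("Failed to process transaction: {e}"),
            }
        }
        let _ = exit_tx.send(());
    });

    let _ = node_exit_future.await;
}
```

This is also the shape the network docs hunk above alludes to: an endless future driven on a standalone `tokio::task` while the main future stays responsive.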
a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -109,7 +109,7 @@ impl ConfigureEvmEnv for MyEvmConfig { impl ConfigureEvm for MyEvmConfig { type DefaultExternalContext<'a> = (); - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { + fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { EvmBuilder::default() .with_db(db) // add additional precompiles @@ -117,9 +117,9 @@ impl ConfigureEvm for MyEvmConfig { .build() } - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> where - DB: Database + 'a, + DB: Database, I: GetInspector, { EvmBuilder::default() diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 27047fd3f8ec..4c36c3348184 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,5 +1,4 @@ use reth_chainspec::ChainSpecBuilder; -use reth_db::open_db_read_only; use reth_primitives::{Address, B256}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, @@ -16,20 +15,18 @@ use std::path::Path; // Other parts of the code which include caching are parts of the `EthApi` abstraction. fn main() -> eyre::Result<()> { // Opens a RO handle to the database file. - // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of - // doing in 2 steps. let db_path = std::env::var("RETH_DB_PATH")?; let db_path = Path::new(&db_path); - let db = open_db_read_only(db_path.join("db").as_path(), Default::default())?; // Instantiate a provider factory for Ethereum mainnet using the provided DB. // TODO: Should the DB version include the spec so that you do not need to specify it here? let spec = ChainSpecBuilder::mainnet().build(); - let factory = ProviderFactory::new( - db, + let factory = ProviderFactory::new_with_database_path( + db_path, spec.into(), + Default::default(), StaticFileProvider::read_only(db_path.join("static_files"))?, - ); + )?; // This call opens a RO transaction on the database. To write to the DB you'd need to call // the `provider_rw` function and look for the `Writer` variants of the traits. diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index d73e613d66a9..dd598a801212 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -166,7 +166,7 @@ impl ConfigureEvmEnv for MyEvmConfig { impl ConfigureEvm for MyEvmConfig { type DefaultExternalContext<'a> = (); - fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { + fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { let new_cache = self.precompile_cache.clone(); EvmBuilder::default() .with_db(db) @@ -177,9 +177,9 @@ impl ConfigureEvm for MyEvmConfig { .build() } - fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> where - DB: Database + 'a, + DB: Database, I: GetInspector, { let new_cache = self.precompile_cache.clone(); diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 2c580dc54ea8..e7556ec0d47e 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -41,7 +41,7 @@ pub struct BlockchainTest { } /// A block header in an Ethereum blockchain test. 
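The `ConfigureEvm` signature changes above drop the explicit `'a` parameter in favor of the elided `'_`, which ties the returned `Evm` to the `&self` borrow without requiring a `DB: 'a` bound. A self-contained sketch of why the elided form is enough, with hypothetical stand-ins for `Evm` and `Database`:

```rust
/// Stand-in for revm's `Database` trait.
trait Database {}

/// Stand-in for `Evm<'a, EXT, DB>`: owns the db, borrows the config.
struct Evm<'a, DB> {
    db: DB,
    config: &'a Config,
}

struct Config {
    chain_id: u64,
}

impl Config {
    /// Elided form: `Evm<'_, DB>` is tied to `&self`, so no explicit
    /// `'a` parameter or `DB: 'a` bound is needed on the method.
    fn evm<DB: Database>(&self, db: DB) -> Evm<'_, DB> {
        Evm { db, config: self }
    }
}

struct InMemoryDb;
impl Database for InMemoryDb {}

fn main() {
    let config = Config { chain_id: 1 };
    let evm = config.evm(InMemoryDb);
    assert_eq!(evm.config.chain_id, 1);
    let _db = evm.db; // the db is owned; only the config is borrowed
}
```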
-#[derive(Debug, PartialEq, Eq, Clone, Deserialize)] +#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Default)] #[serde(rename_all = "camelCase")] pub struct Header { /// Bloom filter. @@ -120,7 +120,7 @@ impl From
for SealedHeader { } /// A block in an Ethereum blockchain test. -#[derive(Debug, PartialEq, Eq, Deserialize)] +#[derive(Debug, PartialEq, Eq, Deserialize, Default)] #[serde(rename_all = "camelCase")] pub struct Block { /// Block header. @@ -138,7 +138,7 @@ pub struct Block { } /// Transaction sequence in block -#[derive(Debug, PartialEq, Eq, Deserialize)] +#[derive(Debug, PartialEq, Eq, Deserialize, Default)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct TransactionSequence { @@ -148,7 +148,7 @@ pub struct TransactionSequence { } /// Ethereum blockchain test data state. -#[derive(Clone, Debug, Eq, PartialEq, Deserialize)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Default)] pub struct State(BTreeMap); impl State { @@ -194,7 +194,7 @@ impl Deref for State { } /// An account. -#[derive(Debug, PartialEq, Eq, Deserialize, Clone)] +#[derive(Debug, PartialEq, Eq, Deserialize, Clone, Default)] #[serde(deny_unknown_fields)] pub struct Account { /// Balance. diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 4ef65043f602..62923b1acd2b 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -245,7 +245,7 @@ where let mut old_entries: Vec<_> = new_entries .into_iter() .filter_map(|entry| { - let old = if entry.value != U256::ZERO { + let old = if !entry.value.is_zero() { storage.insert(entry.key, entry.value) } else { let old = storage.remove(&entry.key); From 3fa37f7b6279ee2a0b22a140cfa1d9526e9244bf Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 11:51:47 +0200 Subject: [PATCH 20/40] Revert "merge origin/main" This reverts commit 5a5abeb40aa45fc8fdf6e90d402a130ba4c6a7c2. 
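One note on the `Default` derives added to the ef-tests models in the patch above: they let blockchain tests build a fixture by overriding only the fields under test. A minimal sketch with a hypothetical cut-down `Header`:

```rust
/// Cut-down stand-in for the ef-tests `Header` model.
#[derive(Debug, Default, PartialEq, Eq)]
struct Header {
    number: u64,
    gas_limit: u64,
    gas_used: u64,
}

fn main() {
    // `..Default::default()` fills every field the test does not care about.
    let header = Header { number: 42, ..Default::default() };
    assert_eq!(header, Header { number: 42, gas_limit: 0, gas_used: 0 });
}
```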
--- .github/assets/check_no_std.sh | 20 +- .github/workflows/lint.yml | 4 +- Cargo.lock | 479 ++-- Cargo.toml | 16 +- bin/reth-bench/Cargo.toml | 2 +- bin/reth-bench/src/bench/new_payload_fcu.rs | 8 +- bin/reth/Cargo.toml | 7 +- .../src/commands/debug_cmd/build_block.rs | 8 +- .../commands/debug_cmd/in_memory_merkle.rs | 16 +- bin/reth/src/commands/debug_cmd/merkle.rs | 9 +- bin/reth/src/engine2.rs | 39 - bin/reth/src/lib.rs | 4 +- book/developers/exex/hello-world.md | 2 +- crates/blockchain-tree/Cargo.toml | 1 - crates/blockchain-tree/src/block_indices.rs | 5 +- crates/blockchain-tree/src/blockchain_tree.rs | 8 +- crates/blockchain-tree/src/state.rs | 2 +- crates/chain-state/Cargo.toml | 47 - crates/chain-state/src/in_memory.rs | 964 --------- crates/chain-state/src/lib.rs | 29 - crates/chain-state/src/test_utils.rs | 102 - crates/cli/commands/Cargo.toml | 5 +- crates/cli/commands/src/common.rs | 2 + crates/cli/commands/src/node.rs | 3 +- .../cli/commands/src/recover/storage_tries.rs | 1 - crates/cli/commands/src/stage/run.rs | 33 +- crates/cli/util/Cargo.toml | 3 + crates/cli/util/src/parsers.rs | 2 +- crates/consensus/auto-seal/src/task.rs | 2 +- crates/consensus/beacon/src/engine/event.rs | 11 - crates/consensus/beacon/src/engine/handle.rs | 3 - .../beacon/src/engine/invalid_headers.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 4 +- crates/e2e-test-utils/src/engine_api.rs | 2 +- crates/e2e-test-utils/src/payload.rs | 4 +- crates/e2e-test-utils/src/rpc.rs | 11 +- crates/engine/primitives/src/lib.rs | 1 - crates/engine/tree/Cargo.toml | 8 +- crates/engine/tree/src/backfill.rs | 40 +- crates/engine/tree/src/chain.rs | 66 +- crates/engine/tree/src/database.rs | 261 +++ crates/engine/tree/src/engine.rs | 64 +- crates/engine/tree/src/lib.rs | 6 +- crates/engine/tree/src/persistence.rs | 420 +--- crates/engine/tree/src/static_files.rs | 272 +++ crates/engine/tree/src/test_utils.rs | 49 +- crates/engine/tree/src/tree.rs | 1923 ----------------- .../tree/src/tree}/memory_overlay.rs | 41 +- crates/engine/tree/src/tree/mod.rs | 1328 ++++++++++++ crates/engine/tree/test-data/holesky/1.rlp | 1 - crates/engine/tree/test-data/holesky/2.rlp | 1 - crates/ethereum-forks/Cargo.toml | 1 + crates/ethereum-forks/src/hardfork/dev.rs | 2 - crates/ethereum/engine/Cargo.toml | 14 +- crates/ethereum/engine/src/service.rs | 98 +- crates/ethereum/evm/src/execute.rs | 10 +- crates/ethereum/evm/src/lib.rs | 7 +- crates/ethereum/node/Cargo.toml | 1 - crates/ethereum/node/src/launch.rs | 102 +- crates/ethereum/node/tests/it/builder.rs | 10 +- crates/ethereum/payload/src/lib.rs | 37 +- crates/evm/execution-errors/Cargo.toml | 1 - crates/evm/execution-errors/src/lib.rs | 2 +- crates/evm/execution-errors/src/trie.rs | 30 +- crates/evm/execution-types/src/execute.rs | 41 - crates/evm/execution-types/src/lib.rs | 9 +- crates/evm/src/builder.rs | 150 -- crates/evm/src/either.rs | 6 +- crates/evm/src/execute.rs | 47 +- crates/evm/src/lib.rs | 15 +- crates/evm/src/noop.rs | 6 +- crates/evm/src/provider.rs | 2 +- crates/evm/src/system_calls.rs | 13 +- crates/exex/exex/Cargo.toml | 23 +- .../exex/src/{backfill/job.rs => backfill.rs} | 273 ++- crates/exex/exex/src/backfill/factory.rs | 79 - crates/exex/exex/src/backfill/mod.rs | 9 - crates/exex/exex/src/backfill/stream.rs | 161 -- crates/exex/exex/src/backfill/test_utils.rs | 162 -- .../downloaders/src/receipt_file_client.rs | 2 +- crates/net/eth-wire-types/Cargo.toml | 1 + crates/net/eth-wire/Cargo.toml | 1 + crates/net/eth-wire/src/p2pstream.rs | 19 +- 
crates/net/network-api/src/lib.rs | 4 +- crates/net/network/src/lib.rs | 4 +- crates/node/api/src/node.rs | 2 +- crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/builder/mod.rs | 101 +- crates/node/builder/src/launch/common.rs | 132 +- crates/node/builder/src/launch/mod.rs | 25 +- crates/node/core/Cargo.toml | 25 +- crates/node/core/src/args/mod.rs | 2 +- crates/node/core/src/args/pruning.rs | 5 +- crates/node/core/src/lib.rs | 5 + crates/node/core/src/metrics/mod.rs | 4 + .../core/src/metrics/prometheus_exporter.rs | 317 +++ .../src/metrics/version_metrics.rs} | 15 + crates/node/core/src/node_config.rs | 59 +- crates/node/core/src/version.rs | 9 +- crates/node/events/src/node.rs | 54 +- crates/node/metrics/Cargo.toml | 52 - crates/node/metrics/src/hooks.rs | 126 -- crates/node/metrics/src/lib.rs | 18 - crates/node/metrics/src/recorder.rs | 58 - crates/node/metrics/src/server.rs | 270 --- crates/node/metrics/src/version_metrics.rs | 75 - .../cli/src/commands/import_receipts.rs | 13 +- crates/optimism/evm/src/lib.rs | 6 +- crates/optimism/node/Cargo.toml | 1 + crates/optimism/rpc/Cargo.toml | 2 +- crates/optimism/rpc/src/error.rs | 70 +- crates/optimism/rpc/src/eth/block.rs | 19 +- crates/optimism/rpc/src/eth/call.rs | 20 +- crates/optimism/rpc/src/eth/mod.rs | 17 +- crates/optimism/rpc/src/eth/receipt.rs | 17 +- crates/optimism/rpc/src/eth/transaction.rs | 6 +- crates/payload/builder/src/events.rs | 3 +- crates/payload/builder/src/lib.rs | 6 +- crates/primitives-traits/Cargo.toml | 1 + crates/primitives-traits/src/account.rs | 18 +- crates/primitives-traits/src/lib.rs | 2 +- crates/primitives/Cargo.toml | 2 + crates/primitives/src/transaction/eip4844.rs | 2 - crates/primitives/src/transaction/eip7702.rs | 72 +- crates/primitives/src/transaction/tx_type.rs | 3 +- crates/prune/types/Cargo.toml | 1 + crates/revm/src/batch.rs | 11 +- crates/revm/src/state_change.rs | 10 +- crates/revm/src/test_utils.rs | 5 +- crates/rpc/ipc/src/server/mod.rs | 7 +- crates/rpc/rpc-builder/src/auth.rs | 2 +- crates/rpc/rpc-builder/src/cors.rs | 13 +- crates/rpc/rpc-builder/src/lib.rs | 27 +- crates/rpc/rpc-builder/src/metrics.rs | 8 +- crates/rpc/rpc-builder/tests/it/main.rs | 1 - crates/rpc/rpc-builder/tests/it/middleware.rs | 80 - crates/rpc/rpc-engine-api/src/engine_api.rs | 4 +- crates/rpc/rpc-eth-api/Cargo.toml | 2 +- crates/rpc/rpc-eth-api/src/core.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 91 +- .../rpc-eth-api/src/helpers/blocking_task.rs | 14 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 143 +- crates/rpc/rpc-eth-api/src/helpers/error.rs | 88 - crates/rpc/rpc-eth-api/src/helpers/fee.rs | 45 +- crates/rpc/rpc-eth-api/src/helpers/mod.rs | 17 +- .../rpc-eth-api/src/helpers/pending_block.rs | 57 +- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 18 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 83 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 64 +- .../rpc-eth-api/src/helpers/transaction.rs | 126 +- crates/rpc/rpc-eth-api/src/helpers/types.rs | 17 - crates/rpc/rpc-eth-api/src/lib.rs | 4 - crates/rpc/rpc-eth-types/Cargo.toml | 11 + .../rpc-eth-types/src/cache/multi_consumer.rs | 13 +- crates/rpc/rpc-eth-types/src/error.rs | 56 +- crates/rpc/rpc-layer/src/auth_layer.rs | 2 - crates/rpc/rpc-layer/src/lib.rs | 2 - crates/rpc/rpc-server-types/src/constants.rs | 3 - crates/rpc/rpc-types/Cargo.toml | 3 +- crates/rpc/rpc-types/src/lib.rs | 7 +- crates/rpc/rpc-types/src/net.rs | 13 + crates/rpc/rpc-types/src/peer.rs | 4 + crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/debug.rs | 
160 +- crates/rpc/rpc/src/eth/bundle.rs | 38 +- crates/rpc/rpc/src/eth/core.rs | 14 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 4 +- crates/rpc/rpc/src/eth/helpers/state.rs | 6 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 6 +- crates/rpc/rpc/src/otterscan.rs | 12 +- crates/rpc/rpc/src/trace.rs | 189 +- crates/stages/api/src/pipeline/ctrl.rs | 2 +- crates/stages/api/src/pipeline/set.rs | 16 +- crates/stages/stages/Cargo.toml | 1 - crates/stages/stages/benches/setup/mod.rs | 10 +- crates/stages/stages/src/stages/bodies.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 6 +- .../stages/src/stages/hashing_storage.rs | 2 +- crates/stages/stages/src/stages/headers.rs | 2 +- crates/stages/stages/src/stages/merkle.rs | 23 +- crates/stages/stages/src/stages/utils.rs | 2 +- .../stages/stages/src/test_utils/test_db.rs | 8 +- crates/stages/types/Cargo.toml | 1 + .../static-file/src/segments/headers.rs | 2 +- .../static-file/src/segments/transactions.rs | 2 +- crates/storage/codecs/Cargo.toml | 1 + crates/storage/db-api/Cargo.toml | 3 +- crates/storage/db-common/Cargo.toml | 1 - crates/storage/db-common/src/init.rs | 23 +- crates/storage/db/Cargo.toml | 22 +- crates/storage/db/src/lib.rs | 3 - crates/storage/db/src/tables/mod.rs | 1 - crates/storage/errors/Cargo.toml | 1 - crates/storage/errors/src/provider.rs | 3 - crates/storage/provider/Cargo.toml | 9 +- .../src/bundle_state/execution_outcome.rs | 1036 +++++++++ .../storage/provider/src/bundle_state/mod.rs | 7 +- .../src/bundle_state/state_changes.rs | 88 + .../src/bundle_state/state_reverts.rs | 100 +- crates/storage/provider/src/lib.rs | 9 +- .../src/providers/blockchain_provider.rs | 826 ------- .../provider/src/providers}/chain_info.rs | 0 .../provider/src/providers/database/mod.rs | 5 +- .../src/providers/database/provider.rs | 314 +-- crates/storage/provider/src/providers/mod.rs | 27 +- .../src/providers/state/historical.rs | 16 +- .../provider/src/providers/state/latest.rs | 14 +- .../src/providers/static_file/manager.rs | 12 - .../provider/src/providers/static_file/mod.rs | 2 +- .../src/providers/static_file/writer.rs | 17 +- .../storage/provider/src/test_utils/events.rs | 35 + crates/storage/provider/src/test_utils/mod.rs | 47 +- .../storage/provider/src/test_utils/noop.rs | 14 +- .../provider/src/traits/chain.rs} | 4 +- crates/storage/provider/src/traits/full.rs | 7 +- crates/storage/provider/src/traits/mod.rs | 12 +- crates/storage/provider/src/traits/state.rs | 38 +- .../provider/src/traits/tree_viewer.rs | 3 +- crates/storage/provider/src/traits/trie.rs | 36 - crates/storage/provider/src/writer/mod.rs | 1228 +---------- .../provider/src/writer/static_file.rs | 6 +- crates/storage/storage-api/src/block.rs | 12 +- .../storage-api/src/stage_checkpoint.rs | 4 - crates/transaction-pool/Cargo.toml | 2 +- crates/trie/common/Cargo.toml | 1 + crates/trie/common/src/account.rs | 2 +- crates/trie/common/src/lib.rs | 2 +- crates/trie/common/src/proofs.rs | 140 +- crates/trie/db/Cargo.toml | 78 - crates/trie/db/src/lib.rs | 9 - crates/trie/db/src/proof.rs | 46 - crates/trie/db/src/state.rs | 208 -- crates/trie/db/src/storage.rs | 39 - crates/trie/db/tests/fuzz_in_memory_nodes.rs | 59 - crates/trie/db/tests/proof.rs | 288 --- crates/trie/db/tests/trie.rs | 773 ------- crates/trie/parallel/Cargo.toml | 1 - crates/trie/parallel/benches/root.rs | 16 +- crates/trie/parallel/src/async_root.rs | 15 +- crates/trie/parallel/src/parallel_root.rs | 18 +- crates/trie/trie/src/hashed_cursor/default.rs | 43 +- 
crates/trie/trie/src/hashed_cursor/mod.rs | 2 +- .../trie/trie/src/hashed_cursor/post_state.rs | 52 +- crates/trie/trie/src/proof.rs | 444 +++- crates/trie/trie/src/state.rs | 138 +- crates/trie/trie/src/trie.rs | 901 +++++++- .../trie/src/trie_cursor/database_cursors.rs | 66 +- crates/trie/trie/src/trie_cursor/in_memory.rs | 62 + crates/trie/trie/src/updates.rs | 147 +- docs/crates/network.md | 2 +- .../beacon-api-sidecar-fetcher/src/main.rs | 41 +- examples/custom-evm/src/main.rs | 6 +- examples/db-access/src/main.rs | 11 +- examples/stateful-precompile/src/main.rs | 6 +- testing/ef-tests/src/models.rs | 10 +- testing/testing-utils/src/generators.rs | 2 +- 256 files changed, 7060 insertions(+), 11451 deletions(-) delete mode 100644 bin/reth/src/engine2.rs delete mode 100644 crates/chain-state/Cargo.toml delete mode 100644 crates/chain-state/src/in_memory.rs delete mode 100644 crates/chain-state/src/lib.rs delete mode 100644 crates/chain-state/src/test_utils.rs create mode 100644 crates/engine/tree/src/database.rs create mode 100644 crates/engine/tree/src/static_files.rs delete mode 100644 crates/engine/tree/src/tree.rs rename crates/{chain-state/src => engine/tree/src/tree}/memory_overlay.rs (84%) create mode 100644 crates/engine/tree/src/tree/mod.rs delete mode 100644 crates/engine/tree/test-data/holesky/1.rlp delete mode 100644 crates/engine/tree/test-data/holesky/2.rlp delete mode 100644 crates/evm/execution-types/src/execute.rs delete mode 100644 crates/evm/src/builder.rs rename crates/exex/exex/src/{backfill/job.rs => backfill.rs} (57%) delete mode 100644 crates/exex/exex/src/backfill/factory.rs delete mode 100644 crates/exex/exex/src/backfill/mod.rs delete mode 100644 crates/exex/exex/src/backfill/stream.rs delete mode 100644 crates/exex/exex/src/backfill/test_utils.rs create mode 100644 crates/node/core/src/metrics/mod.rs create mode 100644 crates/node/core/src/metrics/prometheus_exporter.rs rename crates/node/{metrics/src/version.rs => core/src/metrics/version_metrics.rs} (71%) delete mode 100644 crates/node/metrics/Cargo.toml delete mode 100644 crates/node/metrics/src/hooks.rs delete mode 100644 crates/node/metrics/src/lib.rs delete mode 100644 crates/node/metrics/src/recorder.rs delete mode 100644 crates/node/metrics/src/server.rs delete mode 100644 crates/node/metrics/src/version_metrics.rs delete mode 100644 crates/rpc/rpc-builder/tests/it/middleware.rs delete mode 100644 crates/rpc/rpc-eth-api/src/helpers/error.rs delete mode 100644 crates/rpc/rpc-eth-api/src/helpers/types.rs create mode 100644 crates/rpc/rpc-types/src/net.rs create mode 100644 crates/rpc/rpc-types/src/peer.rs create mode 100644 crates/storage/provider/src/bundle_state/execution_outcome.rs create mode 100644 crates/storage/provider/src/bundle_state/state_changes.rs delete mode 100644 crates/storage/provider/src/providers/blockchain_provider.rs rename crates/{chain-state/src => storage/provider/src/providers}/chain_info.rs (100%) create mode 100644 crates/storage/provider/src/test_utils/events.rs rename crates/{chain-state/src/notifications.rs => storage/provider/src/traits/chain.rs} (98%) delete mode 100644 crates/storage/provider/src/traits/trie.rs delete mode 100644 crates/trie/db/Cargo.toml delete mode 100644 crates/trie/db/src/lib.rs delete mode 100644 crates/trie/db/src/proof.rs delete mode 100644 crates/trie/db/src/state.rs delete mode 100644 crates/trie/db/src/storage.rs delete mode 100644 crates/trie/db/tests/fuzz_in_memory_nodes.rs delete mode 100644 crates/trie/db/tests/proof.rs delete mode 100644 
crates/trie/db/tests/trie.rs diff --git a/.github/assets/check_no_std.sh b/.github/assets/check_no_std.sh index 441ef5d0d48d..f19e39ddac90 100755 --- a/.github/assets/check_no_std.sh +++ b/.github/assets/check_no_std.sh @@ -3,23 +3,21 @@ set -eo pipefail # TODO no_std_packages=( -# The following were confirmed not working in the past, but could be enabled if issues have been resolved +# reth-codecs +# reth-consensus # reth-db -# reth-primitives -# reth-revm +# reth-errors +# reth-ethereum-forks # reth-evm # reth-evm-ethereum -# reth-consensus -# the following are confirmed working - reth-errors - reth-ethereum-forks - reth-network-peers - reth-primitives-traits - reth-codecs +# reth-network-peers +# reth-primitives +# reth-primitives-traits +# reth-revm ) for package in "${no_std_packages[@]}"; do - cmd="cargo +stable build -p $package --target wasm32-wasip1 --no-default-features" + cmd="cargo +stable build -p $package --target riscv32imac-unknown-none-elf --no-default-features" if [ -n "$CI" ]; then echo "::group::$cmd" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b108ddb96b48..3aefc21c8389 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -52,7 +52,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable with: - target: wasm32-wasip1 + target: riscv32imac-unknown-none-elf - uses: taiki-e/install-action@cargo-hack - uses: Swatinem/rust-cache@v2 with: @@ -168,7 +168,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - name: Ensure no arbitrary or proptest dependency on default build run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 - + lint-success: name: lint success runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 42d5518233c3..1b4fbd82479e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,7 +151,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.14", + "winnow 0.6.13", ] [[package]] @@ -294,7 +294,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap 5.5.3", + "dashmap", "futures", "futures-utils-wasm", "lru", @@ -598,7 +598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" dependencies = [ "serde", - "winnow 0.6.14", + "winnow 0.6.13", ] [[package]] @@ -929,9 +929,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -1324,7 +1324,7 @@ dependencies = [ "boa_string", "bytemuck", "cfg-if", - "dashmap 5.5.3", + "dashmap", "fast-float", "hashbrown 0.14.5", "icu_normalizer", @@ -1598,12 +1598,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.6" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aba8f4e9906c7ce3c73463f62a7f0c65183ada1a2d47e397cc8810827f9694f" +checksum = "47de7e88bbbd467951ae7f5a6f34f70d1b4d9cfce53d5fd70f74ebe118b3db56" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -2267,20 +2268,6 @@ dependencies = [ "parking_lot_core 0.9.10", ] -[[package]] -name = "dashmap" -version = "6.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" 
-dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core 0.9.10", -] - [[package]] name = "data-encoding" version = "2.6.0" @@ -2572,7 +2559,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "rayon", @@ -4005,9 +3992,9 @@ checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] name = "inferno" -version = "0.11.20" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c77a3ae7d4761b9c64d2c030f70746ceb8cfba32dce0325a56792e0a4816c31" +checksum = "321f0f839cd44a4686e9504b0a62b4d69a50b62072144c71c68f5873c167b8d9" dependencies = [ "ahash", "indexmap 2.2.6", @@ -4456,9 +4443,9 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -5246,7 +5233,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.0.3" +version = "1.0.2" dependencies = [ "clap", "reth-cli-util", @@ -5397,7 +5384,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.2", "smallvec", "windows-targets 0.52.6", ] @@ -5615,9 +5602,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" [[package]] name = "powerfmt" @@ -6045,9 +6032,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.1.0" +version = "11.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +checksum = "e29830cbb1290e404f24c73af91c5d8d631ce7e128691e9477556b540cd01ecd" dependencies = [ "bitflags 2.6.0", ] @@ -6089,9 +6076,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ "bitflags 2.6.0", ] @@ -6218,7 +6205,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "aquamarine", @@ -6262,7 +6249,6 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-node-events", - "reth-node-metrics", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", @@ -6285,7 +6271,6 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "reth-trie", - "reth-trie-db", "serde", "serde_json", "similar-asserts", @@ -6298,7 +6283,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -6324,7 +6309,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "futures-core", @@ 
-6346,7 +6331,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "assert_matches", @@ -6397,7 +6382,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6438,7 +6423,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "aquamarine", @@ -6465,7 +6450,6 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", - "reth-trie-db", "reth-trie-parallel", "tokio", "tracing", @@ -6473,7 +6457,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -6482,30 +6466,9 @@ dependencies = [ "thiserror", ] -[[package]] -name = "reth-chain-state" -version = "1.0.3" -dependencies = [ - "auto_impl", - "derive_more", - "parking_lot 0.12.3", - "pin-project", - "rand 0.8.5", - "reth-chainspec", - "reth-errors", - "reth-execution-types", - "reth-primitives", - "reth-storage-api", - "reth-trie", - "revm", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "reth-chainspec" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-chains", "alloy-eips", @@ -6529,7 +6492,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.0.3" +version = "1.0.2" dependencies = [ "clap", "eyre", @@ -6539,7 +6502,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.0.3" +version = "1.0.2" dependencies = [ "ahash", "arbitrary", @@ -6553,6 +6516,7 @@ dependencies = [ "futures", "human_bytes", "itertools 0.13.0", + "metrics-process", "proptest", "proptest-arbitrary-interop", "ratatui", @@ -6575,7 +6539,6 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-node-events", - "reth-node-metrics", "reth-primitives", "reth-provider", "reth-prune", @@ -6583,7 +6546,6 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-trie", - "reth-trie-db", "serde", "serde_json", "tokio", @@ -6593,7 +6555,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-tasks", "tokio", @@ -6602,12 +6564,13 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "alloy-primitives", "eyre", "libc", + "proptest", "rand 0.8.5", "reth-fs-util", "secp256k1", @@ -6616,7 +6579,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6628,6 +6591,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "reth-codecs-derive", "serde", "serde_json", @@ -6636,7 +6600,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.0.3" +version = "1.0.2" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6647,7 +6611,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.0.3" +version = "1.0.2" dependencies = [ "confy", "humantime-serde", @@ -6661,7 +6625,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.0.3" +version = "1.0.2" dependencies = [ "auto_impl", "reth-primitives", @@ -6670,7 +6634,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.0.3" +version = "1.0.2" dependencies = [ "mockall", "rand 0.8.5", @@ -6682,7 +6646,7 @@ dependencies = [ [[package]] 
name = "reth-consensus-debug-client" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6704,7 +6668,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.0.3" +version = "1.0.2" dependencies = [ "arbitrary", "assert_matches", @@ -6743,7 +6707,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "arbitrary", "assert_matches", @@ -6758,6 +6722,7 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-primitives", @@ -6773,7 +6738,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "boyer-moore-magiclen", @@ -6790,7 +6755,6 @@ dependencies = [ "reth-provider", "reth-stages-types", "reth-trie", - "reth-trie-db", "serde", "serde_json", "thiserror", @@ -6799,7 +6763,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6826,7 +6790,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6852,7 +6816,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -6880,7 +6844,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "assert_matches", @@ -6915,7 +6879,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-network", @@ -6947,7 +6911,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.0.3" +version = "1.0.2" dependencies = [ "aes 0.8.4", "alloy-primitives", @@ -6977,7 +6941,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-payload-primitives", @@ -6986,9 +6950,8 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.0.3" +version = "1.0.2" dependencies = [ - "alloy-rlp", "aquamarine", "assert_matches", "futures", @@ -6998,7 +6961,6 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", - "reth-chain-state", "reth-chainspec", "reth-consensus", "reth-db", @@ -7020,7 +6982,6 @@ dependencies = [ "reth-prune-types", "reth-revm", "reth-rpc-types", - "reth-rpc-types-compat", "reth-stages", "reth-stages-api", "reth-stages-types", @@ -7037,7 +6998,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.0.3" +version = "1.0.2" dependencies = [ "eyre", "futures", @@ -7055,7 +7016,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7067,7 +7028,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "arbitrary", @@ -7078,6 +7039,7 @@ dependencies = [ "pin-project", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs", @@ -7100,7 +7062,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-chains", "alloy-genesis", @@ -7110,6 +7072,7 @@ dependencies = [ "derive_more", "proptest", "proptest-arbitrary-interop", + 
"proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", @@ -7120,7 +7083,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "clap", @@ -7133,7 +7096,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-consensus", @@ -7144,27 +7107,16 @@ dependencies = [ [[package]] name = "reth-ethereum-engine" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures", "pin-project", "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chainspec", - "reth-consensus", "reth-db-api", "reth-engine-tree", "reth-ethereum-engine-primitives", - "reth-evm", - "reth-evm-ethereum", - "reth-exex-types", "reth-network-p2p", - "reth-payload-builder", - "reth-payload-validator", - "reth-primitives", - "reth-provider", - "reth-prune", - "reth-prune-types", "reth-stages-api", "reth-tasks", "thiserror", @@ -7174,7 +7126,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "reth-chainspec", @@ -7192,7 +7144,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7211,7 +7163,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-basic-payload-builder", "reth-errors", @@ -7229,7 +7181,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "rayon", @@ -7239,7 +7191,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "auto_impl", @@ -7257,7 +7209,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "alloy-sol-types", @@ -7277,7 +7229,7 @@ dependencies = [ [[package]] name = "reth-evm-optimism" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-consensus-common", @@ -7297,11 +7249,10 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "alloy-primitives", - "alloy-rlp", "reth-consensus", "reth-prune-types", "reth-storage-errors", @@ -7311,7 +7262,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7325,10 +7276,9 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.0.3" +version = "1.0.2" dependencies = [ "eyre", - "futures", "metrics", "reth-blockchain-tree", "reth-chainspec", @@ -7359,7 +7309,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.0.3" +version = "1.0.2" dependencies = [ "eyre", "futures-util", @@ -7389,7 +7339,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "reth-provider", @@ -7398,7 +7348,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.0.3" +version = "1.0.2" dependencies = [ "serde", "serde_json", @@ -7407,7 +7357,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.0.3" +version = "1.0.2" dependencies = [ "async-trait", "bytes", @@ -7429,12 +7379,12 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.0.3" +version = "1.0.2" dependencies = [ 
"bitflags 2.6.0", "byteorder", "criterion", - "dashmap 6.0.1", + "dashmap", "derive_more", "indexmap 2.2.6", "parking_lot 0.12.3", @@ -7449,7 +7399,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.0.3" +version = "1.0.2" dependencies = [ "bindgen", "cc", @@ -7457,7 +7407,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures", "metrics", @@ -7468,7 +7418,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "1.0.3" +version = "1.0.2" dependencies = [ "metrics", "once_cell", @@ -7482,14 +7432,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures-util", "reqwest", @@ -7501,7 +7451,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -7559,7 +7509,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7573,7 +7523,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.0.3" +version = "1.0.2" dependencies = [ "auto_impl", "futures", @@ -7591,7 +7541,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7607,7 +7557,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "humantime-serde", "reth-net-banlist", @@ -7620,7 +7570,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.0.3" +version = "1.0.2" dependencies = [ "anyhow", "bincode", @@ -7641,7 +7591,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-db-api", "reth-engine-primitives", @@ -7656,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "aquamarine", "backon", @@ -7685,7 +7635,6 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-node-events", - "reth-node-metrics", "reth-payload-builder", "reth-primitives", "reth-provider", @@ -7710,7 +7659,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -7720,7 +7669,15 @@ dependencies = [ "dirs-next", "eyre", "futures", + "http 1.1.0", "humantime", + "jsonrpsee", + "metrics", + "metrics-exporter-prometheus", + "metrics-process", + "metrics-util", + "once_cell", + "procfs", "proptest", "rand 0.8.5", "reth-chainspec", @@ -7732,6 +7689,7 @@ dependencies = [ "reth-discv4", "reth-discv5", "reth-fs-util", + "reth-metrics", "reth-net-nat", "reth-network", "reth-network-p2p", @@ -7747,19 +7705,22 @@ dependencies = [ "reth-rpc-types-compat", "reth-stages-types", "reth-storage-errors", + "reth-tasks", "reth-tracing", "reth-transaction-pool", "secp256k1", "serde_json", "shellexpand", + "tikv-jemalloc-ctl", "tokio", + "tower", "tracing", "vergen", ] [[package]] name = "reth-node-ethereum" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7770,7 +7731,6 @@ dependencies = [ "reth-auto-seal-consensus", "reth-basic-payload-builder", "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chainspec", "reth-consensus", 
"reth-db", @@ -7801,7 +7761,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rpc-types-engine", "futures", @@ -7821,37 +7781,9 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-node-metrics" -version = "1.0.3" -dependencies = [ - "eyre", - "http 1.1.0", - "jsonrpsee", - "metrics", - "metrics-exporter-prometheus", - "metrics-process", - "metrics-util", - "once_cell", - "procfs", - "reqwest", - "reth-chainspec", - "reth-db", - "reth-db-api", - "reth-metrics", - "reth-provider", - "reth-tasks", - "socket2 0.4.10", - "tikv-jemalloc-ctl", - "tokio", - "tower", - "tracing", - "vergen", -] - [[package]] name = "reth-node-optimism" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7901,7 +7833,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7943,7 +7875,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-consensus", @@ -7954,7 +7886,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -7978,15 +7910,14 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.0.3" +version = "1.0.2" [[package]] name = "reth-optimism-rpc" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "jsonrpsee", - "jsonrpsee-types", "parking_lot 0.12.3", "reth-chainspec", "reth-errors", @@ -8011,7 +7942,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures-util", "metrics", @@ -8033,7 +7964,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-errors", @@ -8047,7 +7978,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.0.3" +version = "1.0.2" dependencies = [ "reth-chainspec", "reth-primitives", @@ -8057,7 +7988,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "alloy-genesis", @@ -8077,6 +8008,7 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "rayon", "reth-chainspec", @@ -8100,7 +8032,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8115,6 +8047,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "revm-primitives", @@ -8126,20 +8059,21 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", "auto_impl", - "dashmap 6.0.1", + "dashmap", + "derive_more", "itertools 0.13.0", "metrics", "parking_lot 0.12.3", + "pin-project", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", - "reth-chain-state", "reth-chainspec", "reth-codecs", "reth-db", @@ -8158,17 +8092,17 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", - "reth-trie-db", "revm", "strum", "tempfile", "tokio", + "tokio-stream", "tracing", ] [[package]] name = "reth-prune" -version = "1.0.3" +version = "1.0.2" dependencies = [ 
"alloy-primitives", "assert_matches", @@ -8197,7 +8131,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -8207,6 +8141,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "reth-codecs", "serde", "serde_json", @@ -8217,7 +8152,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-eips", "reth-chainspec", @@ -8235,7 +8170,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-dyn-abi", "alloy-genesis", @@ -8292,7 +8227,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "jsonrpsee", "reth-engine-primitives", @@ -8305,7 +8240,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.0.3" +version = "1.0.2" dependencies = [ "futures", "jsonrpsee", @@ -8320,7 +8255,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.0.3" +version = "1.0.2" dependencies = [ "clap", "http 1.1.0", @@ -8366,7 +8301,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "assert_matches", @@ -8399,7 +8334,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-dyn-abi", "async-trait", @@ -8407,7 +8342,6 @@ dependencies = [ "dyn-clone", "futures", "jsonrpsee", - "jsonrpsee-types", "parking_lot 0.12.3", "reth-chainspec", "reth-errors", @@ -8431,7 +8365,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-sol-types", "derive_more", @@ -8468,7 +8402,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rpc-types-engine", "assert_matches", @@ -8485,7 +8419,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "jsonrpsee-core", @@ -8500,7 +8434,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8516,6 +8450,7 @@ dependencies = [ "bytes", "jsonrpsee-types", "proptest", + "proptest-derive 0.5.0", "rand 0.8.5", "serde_json", "similar-asserts", @@ -8523,7 +8458,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "alloy-rpc-types", @@ -8535,7 +8470,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "assert_matches", @@ -8573,7 +8508,6 @@ dependencies = [ "reth-storage-errors", "reth-testing-utils", "reth-trie", - "reth-trie-db", "serde_json", "tempfile", "thiserror", @@ -8583,7 +8517,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "aquamarine", @@ -8612,7 +8546,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -8620,6 +8554,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-trie-common", @@ -8629,7 +8564,7 @@ dependencies = [ [[package]] name = 
"reth-static-file" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "assert_matches", @@ -8652,7 +8587,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-primitives", "clap", @@ -8663,7 +8598,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.0.3" +version = "1.0.2" dependencies = [ "auto_impl", "reth-chainspec", @@ -8679,9 +8614,8 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.0.3" +version = "1.0.2" dependencies = [ - "alloy-rlp", "reth-fs-util", "reth-primitives", "thiserror-no-std", @@ -8689,7 +8623,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.0.3" +version = "1.0.2" dependencies = [ "auto_impl", "dyn-clone", @@ -8706,7 +8640,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-genesis", "rand 0.8.5", @@ -8716,7 +8650,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.0.3" +version = "1.0.2" dependencies = [ "tokio", "tokio-stream", @@ -8725,7 +8659,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.0.3" +version = "1.0.2" dependencies = [ "clap", "eyre", @@ -8739,7 +8673,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "aquamarine", @@ -8779,7 +8713,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "auto_impl", @@ -8813,7 +8747,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -8830,6 +8764,7 @@ dependencies = [ "plain_hasher", "proptest", "proptest-arbitrary-interop", + "proptest-derive 0.5.0", "reth-codecs", "reth-primitives-traits", "revm-primitives", @@ -8839,44 +8774,9 @@ dependencies = [ "toml", ] -[[package]] -name = "reth-trie-db" -version = "1.0.3" -dependencies = [ - "alloy-rlp", - "auto_impl", - "criterion", - "derive_more", - "itertools 0.13.0", - "metrics", - "once_cell", - "proptest", - "proptest-arbitrary-interop", - "rayon", - "reth-chainspec", - "reth-db", - "reth-db-api", - "reth-execution-errors", - "reth-metrics", - "reth-primitives", - "reth-provider", - "reth-stages-types", - "reth-storage-errors", - "reth-trie", - "reth-trie-common", - "revm", - "serde", - "serde_json", - "similar-asserts", - "tokio", - "tokio-stream", - "tracing", - "triehash", -] - [[package]] name = "reth-trie-parallel" -version = "1.0.3" +version = "1.0.2" dependencies = [ "alloy-rlp", "criterion", @@ -8895,7 +8795,6 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-trie", - "reth-trie-db", "thiserror", "tokio", "tracing", @@ -8918,9 +8817,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.5.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5296ccad8d7ccbeb6c5a037a57bfe1ff27e81d8c4efbd3ae7df0a554eb1a818a" +checksum = "d485a7ccfbbcaf2d0c08c3d866dae279c6f71d7357862cbea637f23f27b7b695" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8999,9 +8898,9 @@ dependencies = [ [[package]] name = "rgb" -version = "0.8.45" +version = "0.8.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade4539f42266ded9e755c605bdddf546242b2c961b03b06a7375260788a0523" +checksum = "1aee83dc281d5a3200d37b299acd13b81066ea126a7f16f0eae70fc9aed241d9" dependencies = [ 
"bytemuck", ] @@ -9300,9 +9199,9 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.4" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4465c22496331e20eb047ff46e7366455bc01c0c02015c4a376de0b2cd3a1af" +checksum = "af947d0ca10a2f3e00c7ec1b515b7c83e5cb3fa62d4c11a64301d9eec54440e9" dependencies = [ "sdd", ] @@ -9335,9 +9234,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sdd" -version = "1.7.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85f05a494052771fc5bd0619742363b5e24e5ad72ab3111ec2e27925b8edc5f3" +checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" [[package]] name = "sec1" @@ -9375,9 +9274,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -9389,9 +9288,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -9511,9 +9410,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" dependencies = [ "base64 0.22.1", "chrono", @@ -9529,9 +9428,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" dependencies = [ "darling", "proc-macro2", @@ -10088,18 +9987,18 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" dependencies = [ "proc-macro2", "quote", @@ -10256,9 +10155,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.1" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -10340,14 +10239,14 
@@ dependencies = [ [[package]] name = "toml" -version = "0.8.15" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.16", + "toml_edit 0.22.15", ] [[package]] @@ -10372,15 +10271,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.16" +version = "0.22.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" +checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.14", + "winnow 0.6.13", ] [[package]] @@ -11277,9 +11176,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.14" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374ec40a2d767a3c1b4972d9475ecd557356637be906f2cb3f7fe17a6eb5e22f" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 2e519aa35b96..acc7062463bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.0.3" +version = "1.0.2" edition = "2021" rust-version = "1.79" license = "MIT OR Apache-2.0" @@ -14,7 +14,6 @@ members = [ "crates/blockchain-tree/", "crates/blockchain-tree-api/", "crates/chainspec/", - "crates/chain-state/", "crates/cli/cli/", "crates/cli/commands/", "crates/cli/runner/", @@ -66,7 +65,6 @@ members = [ "crates/node/api/", "crates/node/builder/", "crates/node/events/", - "crates/node/metrics", "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", @@ -116,7 +114,6 @@ members = [ "crates/tracing/", "crates/transaction-pool/", "crates/trie/common", - "crates/trie/db", "crates/trie/parallel/", "crates/trie/trie", "examples/beacon-api-sidecar-fetcher/", @@ -275,7 +272,6 @@ reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chainspec = { path = "crates/chainspec" } -reth-chain-state = { path = "crates/chain-state" } reth-cli = { path = "crates/cli/cli" } reth-cli-commands = { path = "crates/cli/commands" } reth-cli-runner = { path = "crates/cli/runner" } @@ -335,7 +331,6 @@ reth-node-builder = { path = "crates/node/builder" } reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } -reth-node-metrics = { path = "crates/node/metrics" } reth-node-optimism = { path = "crates/optimism/node" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } @@ -345,7 +340,7 @@ reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } -reth-primitives = { path = "crates/primitives", default-features = false, features = ["std"] } +reth-primitives = { path = "crates/primitives" } reth-primitives-traits = { path = "crates/primitives-traits", default-features = false } reth-provider = { path = 
"crates/storage/provider" } reth-prune = { path = "crates/prune/prune" } @@ -376,7 +371,6 @@ reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } -reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm @@ -439,7 +433,7 @@ bytes = "1.5" bitflags = "2.4" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } -dashmap = "6.0" +dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" eyre = "0.6" @@ -455,9 +449,9 @@ serde_with = "3.3.0" humantime = "2.1" humantime-serde = "1.1" rand = "0.8.5" -rustc-hash = { version = "2.0", default-features = false } +rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" -strum = { version = "0.26", default-features = false } +strum = "0.26" rayon = "1.7" itertools = "0.13" parking_lot = "0.12" diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 4023c1c17375..00a5124fa2da 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -83,7 +83,7 @@ default = ["jemalloc"] asm-keccak = ["reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator"] +jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 190217f6a8cf..c7ea5683175f 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -57,11 +57,11 @@ impl Command { }; let head_block_hash = block.hash(); - let safe_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(32).into(), false); + let safe_block_hash = + block_provider.get_block_by_number((block.number - 32).into(), false); - let finalized_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(64).into(), false); + let finalized_block_hash = + block_provider.get_block_by_number((block.number - 64).into(), false); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 33c8a24cc312..c8465165cd16 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -54,7 +54,6 @@ reth-basic-payload-builder.workspace = true reth-static-file.workspace = true reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true @@ -62,7 +61,6 @@ reth-db-common.workspace = true reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true -reth-node-metrics.workspace = true reth-consensus.workspace = true reth-engine-util.workspace = true reth-prune.workspace = true @@ -120,7 +118,7 @@ dev = ["reth-cli-commands/dev"] asm-keccak = ["reth-primitives/asm-keccak"] -jemalloc = ["dep:tikv-jemallocator", "reth-node-metrics/jemalloc"] +jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] min-error-logs = ["tracing/release_max_level_error"] @@ -133,6 +131,3 @@ min-trace-logs = ["tracing/release_max_level_trace"] name = "reth" path = "src/main.rs" -[[bin]] -name 
= "engine2" -path = "src/engine2.rs" diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index fd238637e508..e98c22b1dc90 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -37,8 +37,6 @@ use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, TransactionPool, TransactionValidationTaskExecutor, }; -use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; use std::{path::PathBuf, str::FromStr, sync::Arc}; use tracing::*; @@ -268,10 +266,8 @@ impl Command { debug!(target: "reth::cli", ?execution_outcome, "Executed block"); let hashed_post_state = execution_outcome.hash_state_slow(); - let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( - provider_factory.provider()?.tx_ref(), - hashed_post_state.clone(), - )?; + let (state_root, trie_updates) = hashed_post_state + .state_root_with_updates(provider_factory.provider()?.tx_ref())?; if state_root != block_with_senders.state_root { eyre::bail!( diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 5378a30bde56..d5ea36a59ecc 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -19,15 +19,14 @@ use reth_network_api::NetworkInfo; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - writer::StorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StateWriter, StaticFileProviderFactory, StorageReader, + AccountExtReader, ChainSpecProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, + StaticFileProviderFactory, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; use reth_tasks::TaskExecutor; use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; use std::{path::PathBuf, sync::Arc}; use tracing::*; @@ -149,10 +148,8 @@ impl Command { ExecutionOutcome::new(state, receipts.into(), block.number, vec![requests.into()]); // Unpacked `BundleState::state_root_slow` function - let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( - provider.tx_ref(), - execution_outcome.hash_state_slow(), - )?; + let (in_memory_state_root, in_memory_updates) = + execution_outcome.hash_state_slow().state_root_with_updates(provider.tx_ref())?; if in_memory_state_root == block.state_root { info!(target: "reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches"); @@ -168,8 +165,7 @@ impl Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = StorageWriter::new(Some(&provider_rw), None); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + execution_outcome.write_to_storage(&provider_rw, None, OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 1c6e804148c4..1b330ea6a0f4 100644 --- 
a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -17,8 +17,8 @@ use reth_network_p2p::full_block::FullBlockClient; use reth_node_ethereum::EthExecutorProvider; use reth_primitives::BlockHashOrNumber; use reth_provider::{ - writer::StorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, - LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, + BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -151,10 +151,7 @@ impl Command { ), )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; - let execution_outcome = executor.finalize(); - - let mut storage_writer = StorageWriter::new(Some(&provider_rw), None); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + executor.finalize().write_to_storage(&provider_rw, None, OriginalValuesKnown::Yes)?; let checkpoint = Some(StageCheckpoint::new( block_number.checked_sub(1).ok_or(eyre::eyre!("GenesisBlockHasNoParent"))?, diff --git a/bin/reth/src/engine2.rs b/bin/reth/src/engine2.rs deleted file mode 100644 index 3f0470699ca3..000000000000 --- a/bin/reth/src/engine2.rs +++ /dev/null @@ -1,39 +0,0 @@ -#![allow(missing_docs)] -#![allow(rustdoc::missing_crate_level_docs)] - -// We use jemalloc for performance reasons. -#[cfg(all(feature = "jemalloc", unix))] -#[global_allocator] -static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; - -fn main() { - use reth::cli::Cli; - use reth_node_ethereum::{launch::EthNodeLauncher, node::EthereumAddOns, EthereumNode}; - use reth_provider::providers::BlockchainProvider2; - - reth_cli_util::sigsegv_handler::install(); - - // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. - if std::env::var_os("RUST_BACKTRACE").is_none() { - std::env::set_var("RUST_BACKTRACE", "1"); - } - - if let Err(err) = Cli::parse_args().run(|builder, _| async { - let handle = builder - .with_types_and_provider::>() - .with_components(EthereumNode::components()) - .with_add_ons::() - .launch_with_fn(|builder| { - let launcher = EthNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - ); - builder.launch_with(launcher) - }) - .await?; - handle.node_exit_future.await - }) { - eprintln!("Error: {err:?}"); - std::process::exit(1); - } -} diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index f49c909b94bc..2e1760d9888a 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -58,9 +58,9 @@ pub mod core { pub use reth_node_core::*; } -/// Re-exported from `reth_node_metrics`. +/// Re-exported from `reth_node_core`. pub mod prometheus_exporter { - pub use reth_node_metrics::recorder::*; + pub use reth_node_core::prometheus_exporter::*; } /// Re-export of the `reth_node_core` types specifically in the `args` module. diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md index 6ee68807d40c..0f50cacbb9a6 100644 --- a/book/developers/exex/hello-world.md +++ b/book/developers/exex/hello-world.md @@ -159,7 +159,7 @@ and it's safe to prune the associated data. 
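[Editor's note] The two arithmetic idioms touched by the hunks above differ in how they handle underflow: `new_payload_fcu.rs` reverts `block.number.saturating_sub(32)` to a plain `block.number - 32`, which panics in debug builds (and silently wraps in release) once `block.number < 32`, while `merkle.rs` keeps `checked_sub(1)` so the genesis-has-no-parent case surfaces as an error. A minimal standard-library sketch of the three behaviors (hypothetical function name, not reth code):

```rust
fn parent_number(block_number: u64) -> Result<u64, &'static str> {
    // `checked_sub` returns `None` on underflow, which maps cleanly onto an
    // error for the genesis block, mirroring the `merkle.rs` hunk above.
    block_number.checked_sub(1).ok_or("genesis block has no parent")
}

fn main() {
    // `saturating_sub` clamps at zero, so a "safe block" lookup near genesis
    // resolves to block 0 instead of underflowing.
    assert_eq!(10u64.saturating_sub(32), 0);

    // Plain subtraction underflows for the same input: it panics in debug
    // builds and wraps to a huge number in release builds.
    // let _ = 10u64 - 32; // debug: "attempt to subtract with overflow"

    assert_eq!(parent_number(1), Ok(0));
    assert!(parent_number(0).is_err());
}
```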
-What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth-exex-examples/blob/4f3498f0cc00e038d6d8c32cd94fe82788862f49/minimal/src/main.rs) that we provide in the [reth-exex-examples](https://github.com/paradigmxyz/reth-exex-examples) repository. +What we've arrived at is the [minimal ExEx example](https://github.com/paradigmxyz/reth/blob/b8cd7be6c92a71aea5341cdeba685f124c6de540/examples/exex/minimal/src/main.rs) that we provide in the Reth repository. ## What's next? diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 988bb54e8580..b3679677a13c 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -25,7 +25,6 @@ reth-execution-types.workspace = true reth-prune-types.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } reth-trie-parallel = { workspace = true, features = ["parallel"] } reth-network.workspace = true reth-consensus.workspace = true diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 3b4c30eae96f..b080f26bda33 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -63,8 +63,7 @@ impl BlockIndices { } /// Return block to chain id - #[allow(dead_code)] - pub(crate) const fn blocks_to_chain(&self) -> &HashMap { + pub const fn blocks_to_chain(&self) -> &HashMap { &self.blocks_to_chain } @@ -203,7 +202,7 @@ impl BlockIndices { /// Remove chain from indices and return dependent chains that need to be removed. /// Does the cleaning of the tree and removing blocks from the chain. - pub(crate) fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { + pub fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { chain .blocks() .iter() diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 2fb567463686..b41e4dbbffba 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -26,11 +26,7 @@ use reth_provider::{ use reth_prune_types::PruneModes; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; -use reth_trie::{ - hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, - StateRoot, -}; -use reth_trie_db::DatabaseStateRoot; +use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot}; use std::{ collections::{btree_map::Entry, BTreeMap, HashSet}, sync::Arc, @@ -1242,7 +1238,7 @@ where .disable_long_read_transaction_safety(); let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref()) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider.tx_ref()), + provider.tx_ref(), &hashed_state_sorted, )) .with_prefix_sets(prefix_sets) diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index 43e47743b837..e44e1aae552a 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -113,7 +113,7 @@ impl TreeState { /// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree]. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] -pub(crate) struct BlockchainId(u64); +pub struct BlockchainId(u64); impl From for u64 { fn from(value: BlockchainId) -> Self { diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml deleted file mode 100644 index 1615c3f5eb5b..000000000000 --- a/crates/chain-state/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "reth-chain-state" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Reth state related types and functionality." - -[lints] -workspace = true - -[dependencies] -# reth -reth-chainspec.workspace = true -reth-errors.workspace = true -reth-execution-types.workspace = true -reth-primitives.workspace = true -reth-storage-api.workspace = true -reth-trie.workspace = true - -revm = { workspace = true, optional = true} - -# async -tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } -tokio-stream = { workspace = true, features = ["sync"] } - -# tracing -tracing.workspace = true - -# misc -auto_impl.workspace = true -derive_more.workspace = true -parking_lot.workspace = true -pin-project.workspace = true -rand = { workspace = true, optional = true } - -[dev-dependencies] -rand.workspace = true -revm.workspace = true - -[features] -test-utils = [ - "rand", - "revm" -] \ No newline at end of file diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs deleted file mode 100644 index 1ac01b57b153..000000000000 --- a/crates/chain-state/src/in_memory.rs +++ /dev/null @@ -1,964 +0,0 @@ -//! Types for tracking the canonical chain state in memory. - -use crate::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainInfoTracker, MemoryOverlayStateProvider, -}; -use parking_lot::RwLock; -use reth_chainspec::ChainInfo; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{ - Address, BlockNumHash, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, - SealedHeader, B256, -}; -use reth_storage_api::StateProviderBox; -use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{collections::HashMap, ops::Deref, sync::Arc, time::Instant}; -use tokio::sync::broadcast; - -/// Size of the broadcast channel used to notify canonical state events. -const CANON_STATE_NOTIFICATION_CHANNEL_SIZE: usize = 256; - -/// Container type for in memory state data of the canonical chain. -/// -/// This tracks blocks and their state that haven't been persisted to disk yet but are part of the -/// canonical chain that can be traced back to a canonical block on disk. -#[derive(Debug, Default)] -pub(crate) struct InMemoryState { - /// All canonical blocks that are not on disk yet. - blocks: RwLock>>, - /// Mapping of block numbers to block hashes. - numbers: RwLock>, - /// The pending block that has not yet been made canonical. - pending: RwLock>, -} - -impl InMemoryState { - pub(crate) const fn new( - blocks: HashMap>, - numbers: HashMap, - pending: Option, - ) -> Self { - Self { - blocks: RwLock::new(blocks), - numbers: RwLock::new(numbers), - pending: RwLock::new(pending), - } - } - - /// Returns the state for a given block hash. - pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { - self.blocks.read().get(&hash).cloned() - } - - /// Returns the state for a given block number. 
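[Editor's note] The deleted `InMemoryState` above indexes unpersisted canonical blocks two ways: a hash → state map for direct lookups and a number → hash map that routes number queries through the hash index. A simplified, self-contained sketch of that dual-index shape — `BlockState` is a hypothetical stand-in and `std` locks replace `parking_lot`, so this is an analogy, not the reth implementation:

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

/// Hypothetical stand-in for the per-block executed state.
#[allow(dead_code)]
struct BlockState {
    number: u64,
}

/// Dual-index container: blocks keyed by hash, plus a number -> hash routing map.
#[derive(Default)]
struct InMemoryState {
    blocks: RwLock<HashMap<[u8; 32], Arc<BlockState>>>,
    numbers: RwLock<HashMap<u64, [u8; 32]>>,
}

impl InMemoryState {
    fn state_by_hash(&self, hash: [u8; 32]) -> Option<Arc<BlockState>> {
        self.blocks.read().unwrap().get(&hash).cloned()
    }

    /// Number lookups resolve to a hash first, then reuse the hash index.
    fn state_by_number(&self, number: u64) -> Option<Arc<BlockState>> {
        let hash = *self.numbers.read().unwrap().get(&number)?;
        self.state_by_hash(hash)
    }

    /// The head is simply the highest tracked block number.
    fn head_state(&self) -> Option<Arc<BlockState>> {
        let number = self.numbers.read().unwrap().keys().max().copied()?;
        self.state_by_number(number)
    }
}

fn main() {
    let state = InMemoryState::default();
    let hash = [1u8; 32];
    state.blocks.write().unwrap().insert(hash, Arc::new(BlockState { number: 7 }));
    state.numbers.write().unwrap().insert(7, hash);
    assert!(state.state_by_number(7).is_some());
    assert!(state.head_state().is_some());
}
```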
- pub(crate) fn state_by_number(&self, number: u64) -> Option> { - self.numbers.read().get(&number).and_then(|hash| self.blocks.read().get(hash).cloned()) - } - - /// Returns the current chain head state. - pub(crate) fn head_state(&self) -> Option> { - self.numbers - .read() - .iter() - .max_by_key(|(&number, _)| number) - .and_then(|(_, hash)| self.blocks.read().get(hash).cloned()) - } - - /// Returns the pending state corresponding to the current head plus one, - /// from the payload received in newPayload that does not have a FCU yet. - pub(crate) fn pending_state(&self) -> Option> { - self.pending.read().as_ref().map(|state| Arc::new(BlockState::new(state.block.clone()))) - } - - #[cfg(test)] - fn block_count(&self) -> usize { - self.blocks.read().len() - } -} - -/// Inner type to provide in memory state. It includes a chain tracker to be -/// advanced internally by the tree. -#[derive(Debug)] -pub(crate) struct CanonicalInMemoryStateInner { - pub(crate) chain_info_tracker: ChainInfoTracker, - pub(crate) in_memory_state: InMemoryState, - pub(crate) canon_state_notification_sender: CanonStateNotificationSender, -} - -/// This type is responsible for providing the blocks, receipts, and state for -/// all canonical blocks not on disk yet and keeps track of the block range that -/// is in memory. -#[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - pub(crate) inner: Arc, -} - -impl CanonicalInMemoryState { - /// Create a new in memory state with the given blocks, numbers, and pending state. - pub fn new( - blocks: HashMap>, - numbers: HashMap, - pending: Option, - ) -> Self { - let in_memory_state = InMemoryState::new(blocks, numbers, pending); - let head_state = in_memory_state.head_state(); - let header = match head_state { - Some(state) => state.block().block().header.clone(), - None => SealedHeader::default(), - }; - let chain_info_tracker = ChainInfoTracker::new(header); - let (canon_state_notification_sender, _canon_state_notification_receiver) = - broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE); - - let inner = CanonicalInMemoryStateInner { - chain_info_tracker, - in_memory_state, - canon_state_notification_sender, - }; - - Self { inner: Arc::new(inner) } - } - - /// Create a new in memory state with the given local head. - pub fn with_head(head: SealedHeader) -> Self { - let chain_info_tracker = ChainInfoTracker::new(head); - let in_memory_state = InMemoryState::default(); - let (canon_state_notification_sender, _canon_state_notification_receiver) = - broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE); - let inner = CanonicalInMemoryStateInner { - chain_info_tracker, - in_memory_state, - canon_state_notification_sender, - }; - - Self { inner: Arc::new(inner) } - } - - /// Returns in the header corresponding to the given hash. - pub fn header_by_hash(&self, hash: B256) -> Option { - self.state_by_hash(hash).map(|block| block.block().block.header.clone()) - } - - /// Append new blocks to the in memory state. 
- fn update_blocks(&self, new_blocks: I, reorged: I) - where - I: IntoIterator, - { - // acquire all locks - let mut blocks = self.inner.in_memory_state.blocks.write(); - let mut numbers = self.inner.in_memory_state.numbers.write(); - let mut pending = self.inner.in_memory_state.pending.write(); - - // we first remove the blocks from the reorged chain - for block in reorged { - let hash = block.block().hash(); - let number = block.block().number; - blocks.remove(&hash); - numbers.remove(&number); - } - - // insert the new blocks - for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); - let hash = block_state.hash(); - let number = block_state.number(); - - // append new blocks - blocks.insert(hash, Arc::new(block_state)); - numbers.insert(number, hash); - } - - // remove the pending state - pending.take(); - } - - /// Update the in memory state with the given chain update. - pub fn update_chain(&self, new_chain: NewCanonicalChain) { - match new_chain { - NewCanonicalChain::Commit { new } => { - self.update_blocks(new, vec![]); - } - NewCanonicalChain::Reorg { new, old } => { - self.update_blocks(new, old); - } - } - } - - /// Removes blocks from the in memory state that are persisted to the given height. - /// - /// This will update the links between blocks and remove all blocks that are [.. - /// `persisted_height`]. - pub fn remove_persisted_blocks(&self, persisted_height: u64) { - let mut blocks = self.inner.in_memory_state.blocks.write(); - let mut numbers = self.inner.in_memory_state.numbers.write(); - let _pending = self.inner.in_memory_state.pending.write(); - - // clear all numbers - numbers.clear(); - - // drain all blocks and only keep the ones that are not persisted - let mut old_blocks = blocks - .drain() - .map(|(_, b)| b.block.clone()) - .filter(|b| b.block().number > persisted_height) - .collect::>(); - - // sort the blocks by number so we can insert them back in order - old_blocks.sort_unstable_by_key(|block| block.block().number); - - for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); - let hash = block_state.hash(); - let number = block_state.number(); - - // append new blocks - blocks.insert(hash, Arc::new(block_state)); - numbers.insert(number, hash); - } - } - - /// Returns in memory state corresponding the given hash. - pub fn state_by_hash(&self, hash: B256) -> Option> { - self.inner.in_memory_state.state_by_hash(hash) - } - - /// Returns in memory state corresponding the block number. - pub fn state_by_number(&self, number: u64) -> Option> { - self.inner.in_memory_state.state_by_number(number) - } - - /// Returns the in memory head state. - pub fn head_state(&self) -> Option> { - self.inner.in_memory_state.head_state() - } - - /// Returns the in memory pending state. - pub fn pending_state(&self) -> Option> { - self.inner.in_memory_state.pending_state() - } - - /// Returns the in memory pending `BlockNumHash`. - pub fn pending_block_num_hash(&self) -> Option { - self.inner - .in_memory_state - .pending_state() - .map(|state| BlockNumHash { number: state.number(), hash: state.hash() }) - } - - /// Returns the current `ChainInfo`. - pub fn chain_info(&self) -> ChainInfo { - self.inner.chain_info_tracker.chain_info() - } - - /// Returns the latest canonical block number. 
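[Editor's note] `update_blocks` above applies a chain update in two passes: first evict every block of the reorged segment from both indices, then insert the new canonical segment. A reduced sketch of that two-pass bookkeeping over plain maps — `Block` is a hypothetical type, and the real method additionally re-links in-memory parents and clears the pending slot:

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Block {
    hash: u64, // stand-in for a B256 block hash
    number: u64,
}

/// Two-pass canonical-chain update: evict the reorged segment, then insert
/// the new segment into both indices.
fn update_blocks(
    blocks: &mut HashMap<u64, Block>,
    numbers: &mut HashMap<u64, u64>,
    new_blocks: Vec<Block>,
    reorged: Vec<Block>,
) {
    // Pass 1: remove the blocks of the old (reorged) chain from both maps.
    for block in reorged {
        blocks.remove(&block.hash);
        numbers.remove(&block.number);
    }
    // Pass 2: insert the new canonical blocks, updating the number index.
    for block in new_blocks {
        numbers.insert(block.number, block.hash);
        blocks.insert(block.hash, block);
    }
}

fn main() {
    let mut blocks = HashMap::new();
    let mut numbers = HashMap::new();
    let old = Block { hash: 1, number: 5 };
    blocks.insert(old.hash, old.clone());
    numbers.insert(old.number, old.hash);

    // A reorg at height 5 replaces the old block with a competing one.
    let new = Block { hash: 2, number: 5 };
    update_blocks(&mut blocks, &mut numbers, vec![new], vec![old]);
    assert_eq!(numbers[&5], 2);
}
```

A plain commit is the same call with an empty `reorged` vector, which is how `update_chain` dispatches the `Commit` variant above.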
- pub fn get_canonical_block_number(&self) -> u64 { - self.inner.chain_info_tracker.get_canonical_block_number() - } - - /// Returns the `BlockNumHash` of the safe head. - pub fn get_safe_num_hash(&self) -> Option { - self.inner.chain_info_tracker.get_safe_num_hash() - } - - /// Returns the `BlockNumHash` of the finalized head. - pub fn get_finalized_num_hash(&self) -> Option { - self.inner.chain_info_tracker.get_finalized_num_hash() - } - - /// Hook for new fork choice update. - pub fn on_forkchoice_update_received(&self) { - self.inner.chain_info_tracker.on_forkchoice_update_received(); - } - - /// Returns the timestamp of the last received update. - pub fn last_received_update_timestamp(&self) -> Option { - self.inner.chain_info_tracker.last_forkchoice_update_received_at() - } - - /// Hook for transition configuration exchanged. - pub fn on_transition_configuration_exchanged(&self) { - self.inner.chain_info_tracker.on_transition_configuration_exchanged(); - } - - /// Returns the timepstamp of the last transition configuration exchanged, - pub fn last_exchanged_transition_configuration_timestamp(&self) -> Option { - self.inner.chain_info_tracker.last_transition_configuration_exchanged_at() - } - - /// Canonical head setter. - pub fn set_canonical_head(&self, header: SealedHeader) { - self.inner.chain_info_tracker.set_canonical_head(header); - } - - /// Safe head setter. - pub fn set_safe(&self, header: SealedHeader) { - self.inner.chain_info_tracker.set_safe(header); - } - - /// Finalized head setter. - pub fn set_finalized(&self, header: SealedHeader) { - self.inner.chain_info_tracker.set_finalized(header); - } - - /// Canonical head getter. - pub fn get_canonical_head(&self) -> SealedHeader { - self.inner.chain_info_tracker.get_canonical_head() - } - - /// Finalized header getter. - pub fn get_finalized_header(&self) -> Option { - self.inner.chain_info_tracker.get_finalized_header() - } - - /// Safe header getter. - pub fn get_safe_header(&self) -> Option { - self.inner.chain_info_tracker.get_safe_header() - } - - /// Returns the `SealedHeader` corresponding to the pending state. - pub fn pending_sealed_header(&self) -> Option { - self.pending_state().map(|h| h.block().block().header.clone()) - } - - /// Returns the `Header` corresponding to the pending state. - pub fn pending_header(&self) -> Option
{ - self.pending_sealed_header().map(|sealed_header| sealed_header.unseal()) - } - - /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option { - self.pending_state().map(|block_state| block_state.block().block().clone()) - } - - /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option { - self.pending_state() - .and_then(|block_state| block_state.block().block().clone().seal_with_senders()) - } - - /// Returns a tuple with the `SealedBlock` corresponding to the pending - /// state and a vector of its `Receipt`s. - pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - self.pending_state().map(|block_state| { - (block_state.block().block().clone(), block_state.executed_block_receipts()) - }) - } - - /// Subscribe to new blocks events. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { - self.inner.canon_state_notification_sender.subscribe() - } - - /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles. - pub fn notify_canon_state(&self, event: CanonStateNotification) { - self.inner.canon_state_notification_sender.send(event).ok(); - } - - /// Return state provider with reference to in-memory blocks that overlay database state. - /// - /// This merges the state of all blocks that are part of the chain that the requested block is - /// the head of. This includes all blocks that connect back to the canonical block on disk. - pub fn state_provider( - &self, - hash: B256, - historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { - let in_memory = if let Some(state) = self.state_by_hash(hash) { - state.chain().into_iter().map(|block_state| block_state.block()).collect() - } else { - Vec::new() - }; - - MemoryOverlayStateProvider::new(in_memory, historical) - } -} - -/// State after applying the given block, this block is part of the canonical chain that partially -/// stored in memory and can be traced back to a canonical block on disk. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { - /// The executed block that determines the state after this block has been executed. - block: ExecutedBlock, - /// The block's parent block if it exists. - parent: Option>, -} - -#[allow(dead_code)] -impl BlockState { - /// `BlockState` constructor. - pub const fn new(block: ExecutedBlock) -> Self { - Self { block, parent: None } - } - - /// `BlockState` constructor with parent. - pub fn with_parent(block: ExecutedBlock, parent: Option) -> Self { - Self { block, parent: parent.map(Box::new) } - } - - /// Returns the hash and block of the on disk block this state can be traced back to. - pub fn anchor(&self) -> BlockNumHash { - if let Some(parent) = &self.parent { - parent.anchor() - } else { - self.block.block().parent_num_hash() - } - } - - /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlock { - self.block.clone() - } - - /// Returns the hash of executed block that determines the state. - pub fn hash(&self) -> B256 { - self.block.block().hash() - } - - /// Returns the block number of executed block that determines the state. - pub fn number(&self) -> u64 { - self.block.block().number - } - - /// Returns the state root after applying the executed block that determines - /// the state. - pub fn state_root(&self) -> B256 { - self.block.block().header.state_root - } - - /// Returns the `Receipts` of executed block that determines the state. 
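[Editor's note] `BlockState` above forms a parent-linked list via `Option<Box<BlockState>>`: `anchor()` recurses to the oldest in-memory ancestor to find the on-disk block the chain attaches to, and the traversal methods that follow walk the same links to yield the chain oldest-first. A self-contained sketch of that linked-parent walk, using a hypothetical minimal node type (the real `anchor()` returns the parent's number-and-hash, not `number - 1`):

```rust
/// Hypothetical minimal stand-in for `BlockState`: a block number plus an
/// optional boxed parent that is also still in memory.
struct BlockState {
    number: u64,
    parent: Option<Box<BlockState>>,
}

impl BlockState {
    /// Recurse to the oldest in-memory ancestor; its parent is the block
    /// this chain "anchors" to on disk.
    fn anchor(&self) -> u64 {
        match &self.parent {
            Some(parent) => parent.anchor(),
            None => self.number - 1,
        }
    }

    /// Walk the parent links and return the chain oldest-first,
    /// including `self` as the last element.
    fn chain(&self) -> Vec<&BlockState> {
        let mut chain = Vec::new();
        let mut current = Some(self);
        while let Some(state) = current {
            chain.push(state);
            current = state.parent.as_deref();
        }
        chain.reverse(); // collected newest-first, so flip to oldest-first
        chain
    }
}

fn main() {
    let chain = BlockState {
        number: 3,
        parent: Some(Box::new(BlockState {
            number: 2,
            parent: Some(Box::new(BlockState { number: 1, parent: None })),
        })),
    };
    assert_eq!(chain.anchor(), 0); // anchored to on-disk block 0
    let numbers: Vec<u64> = chain.chain().iter().map(|s| s.number).collect();
    assert_eq!(numbers, vec![1, 2, 3]);
}
```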
- pub fn receipts(&self) -> &Receipts { - &self.block.execution_outcome().receipts - } - - /// Returns a vector of `Receipt` of executed block that determines the state. - /// We assume that the `Receipts` in the executed block `ExecutionOutcome` - /// has only one element corresponding to the executed block associated to - /// the state. - pub fn executed_block_receipts(&self) -> Vec { - let receipts = self.receipts(); - - debug_assert!( - receipts.receipt_vec.len() <= 1, - "Expected at most one block's worth of receipts, found {}", - receipts.receipt_vec.len() - ); - - receipts - .receipt_vec - .first() - .map(|block_receipts| { - block_receipts.iter().filter_map(|opt_receipt| opt_receipt.clone()).collect() - }) - .unwrap_or_default() - } - - /// Returns a vector of parent `BlockStates` starting from the oldest one. - pub fn parent_state_chain(&self) -> Vec<&Self> { - let mut parents = Vec::new(); - let mut current = self.parent.as_deref(); - - while let Some(parent) = current { - parents.insert(0, parent); - current = parent.parent.as_deref(); - } - - parents - } - - /// Returns a vector of `BlockStates` representing the entire in memory chain, - /// including self as the last element. - pub fn chain(&self) -> Vec<&Self> { - let mut chain = self.parent_state_chain(); - chain.push(self); - chain - } -} - -/// Represents an executed block stored in-memory. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ExecutedBlock { - /// Sealed block the rest of fields refer to. - pub block: Arc, - /// Block's senders. - pub senders: Arc>, - /// Block's execution outcome. - pub execution_output: Arc, - /// Block's hashedst state. - pub hashed_state: Arc, - /// Trie updates that result of applying the block. - pub trie: Arc, -} - -impl ExecutedBlock { - /// `ExecutedBlock` constructor. - pub const fn new( - block: Arc, - senders: Arc>, - execution_output: Arc, - hashed_state: Arc, - trie: Arc, - ) -> Self { - Self { block, senders, execution_output, hashed_state, trie } - } - - /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlock { - &self.block - } - - /// Returns a reference to the block's senders - pub fn senders(&self) -> &Vec
{ - &self.senders - } - - /// Returns a [`SealedBlockWithSenders`] - /// - /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { - SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } - } - - /// Returns a reference to the block's execution outcome - pub fn execution_outcome(&self) -> &ExecutionOutcome { - &self.execution_output - } - - /// Returns a reference to the hashed state result of the execution outcome - pub fn hashed_state(&self) -> &HashedPostState { - &self.hashed_state - } - - /// Returns a reference to the trie updates for the block - pub fn trie_updates(&self) -> &TrieUpdates { - &self.trie - } -} - -/// Non-empty chain of blocks. -#[derive(Debug)] -pub enum NewCanonicalChain { - /// A simple append to the current canonical head - Commit { - /// all blocks that lead back to the canonical head - new: Vec, - }, - /// A reorged chain consists of two chains that trace back to a shared ancestor block at which - /// point they diverge. - Reorg { - /// All blocks of the _new_ chain - new: Vec, - /// All blocks of the _old_ chain - old: Vec, - }, -} - -impl NewCanonicalChain { - /// Returns the length of the new chain. - pub fn new_block_count(&self) -> usize { - match self { - Self::Commit { new } | Self::Reorg { new, .. } => new.len(), - } - } - - /// Returns the length of the reorged chain. - pub fn reorged_block_count(&self) -> usize { - match self { - Self::Commit { .. } => 0, - Self::Reorg { old, .. } => old.len(), - } - } - - /// Converts the new chain into a notification that will be emitted to listeners - pub fn to_chain_notification(&self) -> CanonStateNotification { - // TODO: do we need to merge execution outcome for multiblock commit or reorg? - // implement this properly - match self { - Self::Commit { new } => CanonStateNotification::Commit { - new: Arc::new(Chain::new( - new.iter().map(ExecutedBlock::sealed_block_with_senders), - new.last().unwrap().execution_output.deref().clone(), - None, - )), - }, - Self::Reorg { new, old } => CanonStateNotification::Reorg { - new: Arc::new(Chain::new( - new.iter().map(ExecutedBlock::sealed_block_with_senders), - new.last().unwrap().execution_output.deref().clone(), - None, - )), - old: Arc::new(Chain::new( - old.iter().map(ExecutedBlock::sealed_block_with_senders), - old.last().unwrap().execution_output.deref().clone(), - None, - )), - }, - } - } - - /// Returns the new tip of the chain. - /// - /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least - /// 1 new block. - pub fn tip(&self) -> &SealedBlock { - match self { - Self::Commit { new } | Self::Reorg { new, .. 
} => { - new.last().expect("non empty blocks").block() - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{get_executed_block_with_number, get_executed_block_with_receipts}; - use rand::Rng; - use reth_errors::ProviderResult; - use reth_primitives::{Account, BlockNumber, Bytecode, Receipt, StorageKey, StorageValue}; - use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - }; - use reth_trie::AccountProof; - - fn create_mock_state(block_number: u64, parent_hash: B256) -> BlockState { - BlockState::new(get_executed_block_with_number(block_number, parent_hash)) - } - - fn create_mock_state_chain(num_blocks: u64) -> Vec { - let mut chain = Vec::with_capacity(num_blocks as usize); - let mut parent_hash = B256::random(); - let mut parent_state: Option = None; - - for i in 1..=num_blocks { - let mut state = create_mock_state(i, parent_hash); - if let Some(parent) = parent_state { - state.parent = Some(Box::new(parent)); - } - parent_hash = state.hash(); - parent_state = Some(state.clone()); - chain.push(state); - } - - chain - } - - struct MockStateProvider; - - impl StateProvider for MockStateProvider { - fn storage( - &self, - _address: Address, - _storage_key: StorageKey, - ) -> ProviderResult> { - Ok(None) - } - - fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { - Ok(None) - } - } - - impl BlockHashReader for MockStateProvider { - fn block_hash(&self, _number: BlockNumber) -> ProviderResult> { - Ok(None) - } - - fn canonical_hashes_range( - &self, - _start: BlockNumber, - _end: BlockNumber, - ) -> ProviderResult> { - Ok(vec![]) - } - } - - impl AccountReader for MockStateProvider { - fn basic_account(&self, _address: Address) -> ProviderResult> { - Ok(None) - } - } - - impl StateRootProvider for MockStateProvider { - fn hashed_state_root(&self, _hashed_state: &HashedPostState) -> ProviderResult { - Ok(B256::random()) - } - - fn hashed_state_root_with_updates( - &self, - _hashed_state: &HashedPostState, - ) -> ProviderResult<(B256, TrieUpdates)> { - Ok((B256::random(), TrieUpdates::default())) - } - - fn state_root(&self, _bundle_state: &revm::db::BundleState) -> ProviderResult { - Ok(B256::random()) - } - } - - impl StateProofProvider for MockStateProvider { - fn hashed_proof( - &self, - _hashed_state: &HashedPostState, - _address: Address, - _slots: &[B256], - ) -> ProviderResult { - Ok(AccountProof::new(Address::random())) - } - } - - #[test] - fn test_in_memory_state_impl_state_by_hash() { - let mut state_by_hash = HashMap::new(); - let number = rand::thread_rng().gen::(); - let state = Arc::new(create_mock_state(number, B256::random())); - state_by_hash.insert(state.hash(), state.clone()); - - let in_memory_state = InMemoryState::new(state_by_hash, HashMap::new(), None); - - assert_eq!(in_memory_state.state_by_hash(state.hash()), Some(state)); - assert_eq!(in_memory_state.state_by_hash(B256::random()), None); - } - - #[test] - fn test_in_memory_state_impl_state_by_number() { - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - - let number = rand::thread_rng().gen::(); - let state = Arc::new(create_mock_state(number, B256::random())); - let hash = state.hash(); - - state_by_hash.insert(hash, state.clone()); - hash_by_number.insert(number, hash); - - let in_memory_state = InMemoryState::new(state_by_hash, hash_by_number, None); - - assert_eq!(in_memory_state.state_by_number(number), Some(state)); - assert_eq!(in_memory_state.state_by_number(number 
+ 1), None); - } - - #[test] - fn test_in_memory_state_impl_head_state() { - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - let state1 = Arc::new(create_mock_state(1, B256::random())); - let hash1 = state1.hash(); - let state2 = Arc::new(create_mock_state(2, hash1)); - let hash2 = state2.hash(); - hash_by_number.insert(1, hash1); - hash_by_number.insert(2, hash2); - state_by_hash.insert(hash1, state1); - state_by_hash.insert(hash2, state2); - - let in_memory_state = InMemoryState::new(state_by_hash, hash_by_number, None); - let head_state = in_memory_state.head_state().unwrap(); - - assert_eq!(head_state.hash(), hash2); - assert_eq!(head_state.number(), 2); - } - - #[test] - fn test_in_memory_state_impl_pending_state() { - let pending_number = rand::thread_rng().gen::(); - let pending_state = create_mock_state(pending_number, B256::random()); - let pending_hash = pending_state.hash(); - - let in_memory_state = - InMemoryState::new(HashMap::new(), HashMap::new(), Some(pending_state)); - - let result = in_memory_state.pending_state(); - assert!(result.is_some()); - let actual_pending_state = result.unwrap(); - assert_eq!(actual_pending_state.block.block().hash(), pending_hash); - assert_eq!(actual_pending_state.block.block().number, pending_number); - } - - #[test] - fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::new(), HashMap::new(), None); - - assert_eq!(in_memory_state.pending_state(), None); - } - - #[test] - fn test_state_new() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number, B256::random()); - - let state = BlockState::new(block.clone()); - - assert_eq!(state.block(), block); - } - - #[test] - fn test_state_block() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number, B256::random()); - - let state = BlockState::new(block.clone()); - - assert_eq!(state.block(), block); - } - - #[test] - fn test_state_hash() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number, B256::random()); - - let state = BlockState::new(block.clone()); - - assert_eq!(state.hash(), block.block.hash()); - } - - #[test] - fn test_state_number() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number, B256::random()); - - let state = BlockState::new(block); - - assert_eq!(state.number(), number); - } - - #[test] - fn test_state_state_root() { - let number = rand::thread_rng().gen::(); - let block = get_executed_block_with_number(number, B256::random()); - - let state = BlockState::new(block.clone()); - - assert_eq!(state.state_root(), block.block().state_root); - } - - #[test] - fn test_state_receipts() { - let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - - let block = get_executed_block_with_receipts(receipts.clone(), B256::random()); - - let state = BlockState::new(block); - - assert_eq!(state.receipts(), &receipts); - } - - #[test] - fn test_in_memory_state_chain_update() { - let state = CanonicalInMemoryState::new(HashMap::new(), HashMap::new(), None); - let block1 = get_executed_block_with_number(0, B256::random()); - let block2 = get_executed_block_with_number(0, B256::random()); - let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; - state.update_chain(chain); - assert_eq!(state.head_state().unwrap().block().block().hash(), block1.block().hash()); - 
assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block1.block().hash()); - - let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; - state.update_chain(chain); - assert_eq!(state.head_state().unwrap().block().block().hash(), block2.block().hash()); - assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block2.block().hash()); - - assert_eq!(state.inner.in_memory_state.block_count(), 1); - } - - #[test] - fn test_canonical_in_memory_state_state_provider() { - let block1 = get_executed_block_with_number(1, B256::random()); - let block2 = get_executed_block_with_number(2, block1.block().hash()); - let block3 = get_executed_block_with_number(3, block2.block().hash()); - - let state1 = BlockState::new(block1.clone()); - let state2 = BlockState::with_parent(block2.clone(), Some(state1.clone())); - let state3 = BlockState::with_parent(block3.clone(), Some(state2.clone())); - - let mut blocks = HashMap::new(); - blocks.insert(block1.block().hash(), Arc::new(state1)); - blocks.insert(block2.block().hash(), Arc::new(state2)); - blocks.insert(block3.block().hash(), Arc::new(state3)); - - let mut numbers = HashMap::new(); - numbers.insert(1, block1.block().hash()); - numbers.insert(2, block2.block().hash()); - numbers.insert(3, block3.block().hash()); - - let canonical_state = CanonicalInMemoryState::new(blocks, numbers, None); - - let historical: StateProviderBox = Box::new(MockStateProvider); - - let overlay_provider = canonical_state.state_provider(block3.block().hash(), historical); - - assert_eq!(overlay_provider.in_memory.len(), 3); - assert_eq!(overlay_provider.in_memory[0].block().number, 1); - assert_eq!(overlay_provider.in_memory[1].block().number, 2); - assert_eq!(overlay_provider.in_memory[2].block().number, 3); - - assert_eq!( - overlay_provider.in_memory[1].block().parent_hash, - overlay_provider.in_memory[0].block().hash() - ); - assert_eq!( - overlay_provider.in_memory[2].block().parent_hash, - overlay_provider.in_memory[1].block().hash() - ); - - let unknown_hash = B256::random(); - let empty_overlay_provider = - canonical_state.state_provider(unknown_hash, Box::new(MockStateProvider)); - assert_eq!(empty_overlay_provider.in_memory.len(), 0); - } - - #[test] - fn test_block_state_parent_blocks() { - let chain = create_mock_state_chain(4); - - let parents = chain[3].parent_state_chain(); - assert_eq!(parents.len(), 3); - assert_eq!(parents[0].block().block.number, 1); - assert_eq!(parents[1].block().block.number, 2); - assert_eq!(parents[2].block().block.number, 3); - - let parents = chain[2].parent_state_chain(); - assert_eq!(parents.len(), 2); - assert_eq!(parents[0].block().block.number, 1); - assert_eq!(parents[1].block().block.number, 2); - - let parents = chain[0].parent_state_chain(); - assert_eq!(parents.len(), 0); - } - - #[test] - fn test_block_state_single_block_state_chain() { - let single_block_number = 1; - let single_block = create_mock_state(single_block_number, B256::random()); - let single_block_hash = single_block.block().block.hash(); - - let parents = single_block.parent_state_chain(); - assert_eq!(parents.len(), 0); - - let block_state_chain = single_block.chain(); - assert_eq!(block_state_chain.len(), 1); - assert_eq!(block_state_chain[0].block().block.number, single_block_number); - assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); - } - - #[test] - fn test_block_state_chain() { - let chain = create_mock_state_chain(3); - - let block_state_chain = chain[2].chain(); - 
assert_eq!(block_state_chain.len(), 3); - assert_eq!(block_state_chain[0].block().block.number, 1); - assert_eq!(block_state_chain[1].block().block.number, 2); - assert_eq!(block_state_chain[2].block().block.number, 3); - - let block_state_chain = chain[1].chain(); - assert_eq!(block_state_chain.len(), 2); - assert_eq!(block_state_chain[0].block().block.number, 1); - assert_eq!(block_state_chain[1].block().block.number, 2); - - let block_state_chain = chain[0].chain(); - assert_eq!(block_state_chain.len(), 1); - assert_eq!(block_state_chain[0].block().block.number, 1); - } -} diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs deleted file mode 100644 index 50a103111071..000000000000 --- a/crates/chain-state/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Reth state related types and functionality. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -mod in_memory; -pub use in_memory::*; - -mod chain_info; -pub use chain_info::ChainInfoTracker; - -mod notifications; -pub use notifications::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, - CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceStream, - ForkChoiceSubscriptions, -}; - -mod memory_overlay; -pub use memory_overlay::MemoryOverlayStateProvider; - -#[cfg(any(test, feature = "test-utils"))] -/// Common test helpers -pub mod test_utils; diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs deleted file mode 100644 index 4cb2d270ab07..000000000000 --- a/crates/chain-state/src/test_utils.rs +++ /dev/null @@ -1,102 +0,0 @@ -use crate::{ - in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, - CanonStateSubscriptions, -}; -use rand::Rng; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{ - Address, Block, BlockNumber, Receipts, Requests, SealedBlockWithSenders, TransactionSigned, - B256, -}; -use reth_trie::{updates::TrieUpdates, HashedPostState}; -use revm::db::BundleState; -use std::{ - ops::Range, - sync::{Arc, Mutex}, -}; -use tokio::sync::broadcast::{self, Sender}; - -fn get_executed_block( - block_number: BlockNumber, - receipts: Receipts, - parent_hash: B256, -) -> ExecutedBlock { - let mut block = Block::default(); - let mut header = block.header.clone(); - header.number = block_number; - header.parent_hash = parent_hash; - header.ommers_hash = B256::random(); - block.header = header; - let tx = TransactionSigned::default(); - block.body.push(tx); - let sealed = block.seal_slow(); - let sender = Address::random(); - let sealed_with_senders = SealedBlockWithSenders::new(sealed.clone(), vec![sender]).unwrap(); - ExecutedBlock::new( - Arc::new(sealed), - Arc::new(sealed_with_senders.senders), - Arc::new(ExecutionOutcome::new( - BundleState::default(), - receipts, - block_number, - vec![Requests::default()], - )), - Arc::new(HashedPostState::default()), - Arc::new(TrieUpdates::default()), - ) -} - -/// Generates an `ExecutedBlock` that includes the given `Receipts`. 
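The deleted tests above all exercise the same two-index layout: block states keyed by hash plus a number-to-hash index, with the head resolved as the highest tracked number and an optional pending state on the side. A minimal sketch of that shape, using hypothetical stand-in types (`Hash`, `MockState`, `InMemoryIndex`) rather than reth's actual `BlockState`/`InMemoryState`:

```rust
use std::collections::HashMap;

// Stand-ins for reth's hash and state types, for illustration only.
type Hash = [u8; 32];

#[derive(Debug, Clone, PartialEq)]
struct MockState {
    number: u64,
    hash: Hash,
}

/// Toy in-memory state: one map keyed by hash, one index keyed by number.
struct InMemoryIndex {
    state_by_hash: HashMap<Hash, MockState>,
    hash_by_number: HashMap<u64, Hash>,
    pending: Option<MockState>,
}

impl InMemoryIndex {
    /// The head is the state with the highest tracked block number.
    fn head_state(&self) -> Option<&MockState> {
        let (_, hash) = self.hash_by_number.iter().max_by_key(|entry| *entry.0)?;
        self.state_by_hash.get(hash)
    }

    /// Number lookups go through the secondary index, then the hash map.
    fn state_by_number(&self, number: u64) -> Option<&MockState> {
        self.state_by_hash.get(self.hash_by_number.get(&number)?)
    }
}

fn main() {
    let s1 = MockState { number: 1, hash: [1; 32] };
    let s2 = MockState { number: 2, hash: [2; 32] };
    let index = InMemoryIndex {
        state_by_hash: HashMap::from([(s1.hash, s1.clone()), (s2.hash, s2.clone())]),
        hash_by_number: HashMap::from([(1, s1.hash), (2, s2.hash)]),
        pending: None,
    };
    assert_eq!(index.head_state(), Some(&s2));
    assert_eq!(index.state_by_number(1), Some(&s1));
    assert!(index.pending.is_none());
}
```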
-pub fn get_executed_block_with_receipts(receipts: Receipts, parent_hash: B256) -> ExecutedBlock { - let number = rand::thread_rng().gen::(); - get_executed_block(number, receipts, parent_hash) -} - -/// Generates an `ExecutedBlock` with the given `BlockNumber`. -pub fn get_executed_block_with_number( - block_number: BlockNumber, - parent_hash: B256, -) -> ExecutedBlock { - get_executed_block(block_number, Receipts { receipt_vec: vec![vec![]] }, parent_hash) -} - -/// Generates a range of executed blocks with ascending block numbers. -pub fn get_executed_blocks(range: Range) -> impl Iterator { - let mut parent_hash = B256::default(); - range.map(move |number| { - let block = get_executed_block_with_number(number, parent_hash); - parent_hash = block.block.hash(); - block - }) -} - -/// A test `ChainEventSubscriptions` -#[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, -} - -impl TestCanonStateSubscriptions { - /// Adds new block commit to the queue that can be consumed with - /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_commit(&self, new: Arc) { - let event = CanonStateNotification::Commit { new }; - self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) - } - - /// Adds reorg to the queue that can be consumed with - /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] - pub fn add_next_reorg(&self, old: Arc, new: Arc) { - let event = CanonStateNotification::Reorg { old, new }; - self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) - } -} - -impl CanonStateSubscriptions for TestCanonStateSubscriptions { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - let (canon_notif_tx, canon_notif_rx) = broadcast::channel(100); - self.canon_notif_tx.lock().as_mut().unwrap().push(canon_notif_tx); - - canon_notif_rx - } -} diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 1d9983ad8d45..35ed25eda66d 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -28,7 +28,6 @@ reth-network-p2p.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true -reth-node-metrics.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -36,7 +35,6 @@ reth-stages.workspace = true reth-static-file-types.workspace = true reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } itertools.workspace = true futures.workspace = true @@ -64,6 +62,9 @@ ratatui = { version = "0.27", default-features = false, features = [ "crossterm", ] } +# metrics +metrics-process.workspace = true + # reth test-vectors proptest = { workspace = true, optional = true } arbitrary = { workspace = true, optional = true } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index a303b8934daa..b382f7312cac 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -114,6 +114,8 @@ impl EnvironmentArgs { let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider) .with_prune_modes(prune_modes.clone()); + info!(target: "reth::cli", "Verifying storage consistency."); + // Check for consistency between database and static files. 
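`TestCanonStateSubscriptions` above keeps one broadcast sender per subscriber and prunes closed channels lazily: a send that fails means every receiver is gone, so `retain` drops that sender. A self-contained sketch of the pattern, with a hypothetical `FanOut` name and plain `String` events:

```rust
use tokio::sync::broadcast;

/// Toy fan-out in the style of `TestCanonStateSubscriptions`: every subscriber
/// gets its own broadcast channel, and senders whose receivers were all dropped
/// are pruned lazily on the next send via `retain`.
#[derive(Default)]
struct FanOut {
    senders: Vec<broadcast::Sender<String>>,
}

impl FanOut {
    fn subscribe(&mut self) -> broadcast::Receiver<String> {
        let (tx, rx) = broadcast::channel(100);
        self.senders.push(tx);
        rx
    }

    fn notify(&mut self, event: &str) {
        // `send` fails once all receivers are gone; drop those senders.
        self.senders.retain(|tx| tx.send(event.to_string()).is_ok());
    }
}

#[tokio::main]
async fn main() {
    let mut fan_out = FanOut::default();
    let mut rx = fan_out.subscribe();
    let dropped = fan_out.subscribe();
    drop(dropped);

    fan_out.notify("commit");
    assert_eq!(rx.recv().await.unwrap(), "commit");
    // The closed subscription was pruned on send.
    assert_eq!(fan_out.senders.len(), 1);
}
```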
if let Some(unwind_target) = factory .static_file_provider() diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 7a8c47d1f696..233a7d5b3a4a 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -15,7 +15,6 @@ use reth_node_core::{ node_config::NodeConfig, version, }; -use reth_node_metrics::recorder::install_prometheus_recorder; use std::{ffi::OsString, fmt, future::Future, net::SocketAddr, path::PathBuf, sync::Arc}; /// Start the node @@ -174,7 +173,7 @@ impl NodeCommand { // Register the prometheus recorder before creating the database, // because database init needs it to register metrics. - let _ = install_prometheus_recorder(); + let _ = node_config.install_prometheus_recorder()?; let data_dir = node_config.datadir(); let db_path = data_dir.db(); diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 5c1ae7bfca57..2b4087144805 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -8,7 +8,6 @@ use reth_db_api::{ }; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; use tracing::*; /// `reth recover storage-tries` command diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 5a02ec417ae9..2a2dd6f8a25e 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -13,15 +13,7 @@ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::{ args::{NetworkArgs, StageEnum}, - version::{ - BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, - VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, - }, -}; -use reth_node_metrics::{ - hooks::Hooks, - server::{MetricServer, MetricServerConfig}, - version::VersionInfo, + prometheus_exporter, }; use reth_provider::{ ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, @@ -107,24 +99,15 @@ impl Command { if let Some(listen_addr) = self.metrics { info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); - let config = MetricServerConfig::new( + prometheus_exporter::serve( listen_addr, - VersionInfo { - version: CARGO_PKG_VERSION, - build_timestamp: VERGEN_BUILD_TIMESTAMP, - cargo_features: VERGEN_CARGO_FEATURES, - git_sha: VERGEN_GIT_SHA, - target_triple: VERGEN_CARGO_TARGET_TRIPLE, - build_profile: BUILD_PROFILE_NAME, - }, + prometheus_exporter::install_recorder()?, + provider_factory.db_ref().clone(), + provider_factory.static_file_provider(), + metrics_process::Collector::default(), ctx.task_executor, - Hooks::new( - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - ), - ); - - MetricServer::new(config).serve().await?; + ) + .await?; } let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1); diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index ba090935599b..bb8b511e6bb0 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -25,5 +25,8 @@ rand.workspace = true thiserror.workspace = true eyre.workspace = true +[dev-dependencies] +proptest.workspace = true + [target.'cfg(unix)'.dependencies] libc = "0.2" diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 173af7d6d1f7..c708a9a29e07 100644 --- a/crates/cli/util/src/parsers.rs +++ 
b/crates/cli/util/src/parsers.rs @@ -68,7 +68,7 @@ pub fn parse_socket_address(value: &str) -> eyre::Result::Transaction: IntoRecoveredTransaction, - Engine: EngineTypes, + Engine: EngineTypes + 'static, Executor: BlockExecutorProvider, { type Output = (); diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 4561f2e5ef74..4b092bd2feb2 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -18,17 +18,6 @@ pub enum BeaconConsensusEngineEvent { ForkBlockAdded(Arc), } -impl BeaconConsensusEngineEvent { - /// Returns the canonical header if the event is a - /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. - pub const fn canonical_header(&self) -> Option<&SealedHeader> { - match self { - Self::CanonicalChainCommitted(header, _) => Some(header), - _ => None, - } - } -} - /// Progress of the consensus engine during live sync. #[derive(Clone, Debug)] pub enum ConsensusEngineLiveSyncProgress { diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index aee554f8241a..0cffc67b3ff1 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -87,9 +87,6 @@ where /// Sends a transition configuration exchange message to the beacon consensus engine. /// /// See also - /// - /// This only notifies about the exchange. The actual exchange is done by the engine API impl - /// itself. pub fn transition_configuration_exchanged(&self) { let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); } diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index b93138901d03..fbe6bf462bb3 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -67,7 +67,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: SealedHeader) { + pub(crate) fn insert(&mut self, invalid_ancestor: SealedHeader) { if self.get(&invalid_ancestor.hash()).is_none() { let hash = invalid_ancestor.hash(); let header = invalid_ancestor.unseal(); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 0e9f91ac6585..9673f6205db2 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -235,7 +235,7 @@ where + ChainSpecProvider + 'static, Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, - EngineT: EngineTypes + Unpin, + EngineT: EngineTypes + Unpin + 'static, { /// Create a new instance of the [`BeaconConsensusEngine`]. 
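The `+ 'static` bounds added throughout these hunks compensate for the `'static` supertrait being dropped from `EngineTypes` (see the `engine/primitives` hunk below): anything moved into a spawned task must own its data. A minimal illustration of why, with a hypothetical `EngineLike` trait and `process` helper:

```rust
use tokio::task::JoinHandle;

// Hypothetical stand-in for an engine type handled by a spawned service.
trait EngineLike: Send {
    fn describe(&self) -> String;
}

struct MyEngine;
impl EngineLike for MyEngine {
    fn describe(&self) -> String {
        "engine".to_string()
    }
}

// Without `T: 'static` this would not compile: `tokio::spawn` requires the
// future, and everything it captures, to be free of borrowed data.
fn process<T: EngineLike + 'static>(engine: T) -> JoinHandle<String> {
    tokio::spawn(async move { engine.describe() })
}

#[tokio::main]
async fn main() {
    let out = process(MyEngine).await.unwrap();
    assert_eq!(out, "engine");
}
```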
#[allow(clippy::too_many_arguments)] @@ -1801,7 +1801,7 @@ where + ChainSpecProvider + Unpin + 'static, - EngineT: EngineTypes + Unpin, + EngineT: EngineTypes + Unpin + 'static, { type Output = Result<(), BeaconConsensusEngineError>; diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index d66bd6135a62..66e8900323be 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -23,7 +23,7 @@ pub struct EngineApiTestContext { pub _marker: PhantomData, } -impl EngineApiTestContext { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index c29eccef923d..828bc5f32c4f 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -4,13 +4,13 @@ use reth_payload_builder::{Events, PayloadBuilderHandle, PayloadId}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -pub struct PayloadTestContext { +pub struct PayloadTestContext { pub payload_event_stream: BroadcastStream>, payload_builder: PayloadBuilderHandle, pub timestamp: u64, } -impl PayloadTestContext { +impl PayloadTestContext { /// Creates a new payload helper pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 77f4b27e21a9..8e499bcca60c 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -2,9 +2,12 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use reth::{ builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - DebugApiServer, + rpc::{ + api::{ + eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, + DebugApiServer, + }, + server_types::eth::EthResult, }, }; use reth_primitives::{Bytes, B256}; @@ -18,7 +21,7 @@ where EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server - pub async fn inject_tx(&mut self, raw_tx: Bytes) -> Result { + pub async fn inject_tx(&mut self, raw_tx: Bytes) -> EthResult { let eth_api = self.inner.eth_api(); eth_api.send_raw_transaction(raw_tx).await } diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 4b0db7c0a14c..b83abc39e6cc 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -27,7 +27,6 @@ pub trait EngineTypes: + TryInto, > + DeserializeOwned + Serialize - + 'static { /// Execution Payload V1 type. 
type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index ad1c0fc18934..475aa1c45a1f 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,7 +16,6 @@ reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true reth-chainspec.workspace = true -reth-chain-state.workspace = true reth-consensus.workspace = true reth-db.workspace = true reth-db-api.workspace = true @@ -34,8 +33,8 @@ reth-prune.workspace = true reth-prune-types.workspace = true reth-revm.workspace = true reth-rpc-types.workspace = true -reth-stages-api.workspace = true reth-stages-types.workspace = true +reth-stages-api.workspace = true reth-static-file.workspace = true reth-tasks.workspace = true reth-tokio-util.workspace = true @@ -72,11 +71,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-prune.workspace = true reth-prune-types.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } -reth-chain-state = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true -reth-rpc-types-compat.workspace = true - -alloy-rlp.workspace = true assert_matches.workspace = true rand.workspace = true @@ -86,7 +81,6 @@ test-utils = [ "reth-db/test-utils", "reth-network-p2p/test-utils", "reth-stages/test-utils", - "reth-chain-state/test-utils", "reth-tracing", "rand" ] diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index aa075b6ad099..f575bff81234 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -15,37 +15,6 @@ use std::task::{ready, Context, Poll}; use tokio::sync::oneshot; use tracing::trace; -/// Represents the state of the backfill synchronization process. -#[derive(Debug, PartialEq, Eq, Default)] -pub enum BackfillSyncState { - /// The node is not performing any backfill synchronization. - /// This is the initial or default state. - #[default] - Idle, - /// A backfill synchronization has been requested or planned, but processing has not started - /// yet. - Pending, - /// The node is actively engaged in backfill synchronization. - Active, -} - -impl BackfillSyncState { - /// Returns true if the state is idle. - pub const fn is_idle(&self) -> bool { - matches!(self, Self::Idle) - } - - /// Returns true if the state is pending. - pub const fn is_pending(&self) -> bool { - matches!(self, Self::Pending) - } - - /// Returns true if the state is active. - pub const fn is_active(&self) -> bool { - matches!(self, Self::Active) - } -} - /// Backfill sync mode functionality. pub trait BackfillSync: Send + Sync { /// Performs a backfill action. @@ -65,6 +34,8 @@ pub enum BackfillAction { /// The events that can be emitted on backfill sync. #[derive(Debug)] pub enum BackfillEvent { + /// Backfill sync idle. + Idle, /// Backfill sync started. Started(PipelineTarget), /// Backfill sync finished. 
@@ -170,10 +141,7 @@ where } }; let ev = match res { - Ok((pipeline, result)) => { - self.pipeline_state = PipelineState::Idle(Some(pipeline)); - BackfillEvent::Finished(result) - } + Ok((_, result)) => BackfillEvent::Finished(result), Err(why) => { // failed to receive the pipeline BackfillEvent::TaskDropped(why.to_string()) @@ -200,7 +168,7 @@ where } // make sure we poll the pipeline if it's active, and return any ready pipeline events - if self.is_pipeline_active() { + if !self.is_pipeline_idle() { // advance the pipeline if let Poll::Ready(event) = self.poll_pipeline(cx) { return Poll::Ready(event) diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index e77139d3e3ec..4ac015d23a5a 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -1,11 +1,10 @@ use crate::backfill::{BackfillAction, BackfillEvent, BackfillSync}; use futures::Stream; -use reth_stages_api::{ControlFlow, PipelineTarget}; +use reth_stages_api::PipelineTarget; use std::{ pin::Pin, task::{Context, Poll}, }; -use tracing::*; /// The type that drives the chain forward. /// @@ -82,6 +81,7 @@ where // try to poll the backfill sync to completion, if active match this.backfill_sync.poll(cx) { Poll::Ready(backfill_sync_event) => match backfill_sync_event { + BackfillEvent::Idle => {} BackfillEvent::Started(_) => { // notify handler that backfill sync started this.handler.on_event(FromOrchestrator::BackfillSyncStarted); @@ -89,10 +89,10 @@ where } BackfillEvent::Finished(res) => { return match res { - Ok(ctrl) => { - tracing::debug!(?ctrl, "backfill sync finished"); + Ok(event) => { + tracing::debug!(?event, "backfill sync finished"); // notify handler that backfill sync finished - this.handler.on_event(FromOrchestrator::BackfillSyncFinished(ctrl)); + this.handler.on_event(FromOrchestrator::BackfillSyncFinished); Poll::Ready(ChainEvent::BackfillSyncFinished) } Err(err) => { @@ -113,19 +113,15 @@ where match this.handler.poll(cx) { Poll::Ready(handler_event) => { match handler_event { - HandlerEvent::BackfillAction(action) => { - // forward action to backfill_sync - this.backfill_sync.on_action(action); + HandlerEvent::BackfillSync(target) => { + // trigger backfill sync and start polling it + this.backfill_sync.on_action(BackfillAction::Start(target)); continue 'outer } HandlerEvent::Event(ev) => { // bubble up the event return Poll::Ready(ChainEvent::Handler(ev)); } - HandlerEvent::FatalError => { - error!(target: "engine::tree", "Fatal error"); - return Poll::Ready(ChainEvent::FatalError) - } } } Poll::Pending => { @@ -151,6 +147,14 @@ where } } +/// Represents the sync mode the chain is operating in. +#[derive(Debug, Default)] +enum SyncMode { + #[default] + Handler, + Backfill, +} + /// Event emitted by the [`ChainOrchestrator`] /// /// These are meant to be used for observability and debugging purposes. @@ -169,14 +173,6 @@ pub enum ChainEvent { /// A trait that advances the chain by handling actions. /// /// This is intended to be implement the chain consensus logic, for example `engine` API. -/// -/// ## Control flow -/// -/// The [`ChainOrchestrator`] is responsible for advancing this handler through -/// [`ChainHandler::poll`] and handling the emitted events, for example -/// [`HandlerEvent::BackfillAction`] to start a backfill sync. Events from the [`ChainOrchestrator`] -/// are passed to the handler via [`ChainHandler::on_event`], e.g. -/// [`FromOrchestrator::BackfillSyncStarted`] once the backfill sync started or finished. 
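As the control-flow notes above describe, the orchestrator polls backfill progress first, then the handler, and a handler request to start backfill loops back so the backfill future is polled next. A schematic sketch under those assumptions, with stand-in event enums and a `u64` in place of `PipelineTarget`:

```rust
use std::task::Poll;

/// Simplified stand-ins for the orchestrator's two event sources.
enum BackfillEvent {
    Idle,
    Finished,
}

enum HandlerEvent {
    BackfillSync(u64), // target block, stand-in for `PipelineTarget`
    Event(&'static str),
}

/// One pass of the orchestrator's priority scheme: check backfill progress
/// first, then let the handler run, looping when the handler requests backfill.
fn drive(
    mut poll_backfill: impl FnMut() -> Poll<BackfillEvent>,
    mut poll_handler: impl FnMut() -> Poll<HandlerEvent>,
    mut start_backfill: impl FnMut(u64),
) -> Option<&'static str> {
    loop {
        if let Poll::Ready(BackfillEvent::Finished) = poll_backfill() {
            return Some("backfill finished");
        }
        match poll_handler() {
            // A backfill request loops back so backfill is polled next.
            Poll::Ready(HandlerEvent::BackfillSync(target)) => start_backfill(target),
            Poll::Ready(HandlerEvent::Event(ev)) => return Some(ev),
            Poll::Pending => return None,
        }
    }
}

fn main() {
    let mut backfill = vec![BackfillEvent::Idle, BackfillEvent::Finished].into_iter();
    let mut handler =
        vec![HandlerEvent::BackfillSync(42), HandlerEvent::Event("new head")].into_iter();
    let mut started = Vec::new();

    let out = drive(
        || backfill.next().map_or(Poll::Pending, Poll::Ready),
        || handler.next().map_or(Poll::Pending, Poll::Ready),
        |target| started.push(target),
    );
    // The handler's backfill request was forwarded before completion surfaced.
    assert_eq!(started, vec![42]);
    assert_eq!(out, Some("backfill finished"));
}
```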
pub trait ChainHandler: Send + Sync { /// Event generated by this handler that orchestrator can bubble up; type Event: Send; @@ -191,19 +187,39 @@ pub trait ChainHandler: Send + Sync { /// Events/Requests that the [`ChainHandler`] can emit to the [`ChainOrchestrator`]. #[derive(Clone, Debug)] pub enum HandlerEvent { - /// Request an action to backfill sync - BackfillAction(BackfillAction), + /// Request to start a backfill sync + BackfillSync(PipelineTarget), /// Other event emitted by the handler Event(T), - // Fatal error - FatalError, } /// Internal events issued by the [`ChainOrchestrator`]. #[derive(Clone, Debug)] pub enum FromOrchestrator { /// Invoked when backfill sync finished - BackfillSyncFinished(ControlFlow), + BackfillSyncFinished, /// Invoked when backfill sync started BackfillSyncStarted, } + +/// Represents the state of the chain. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +pub enum OrchestratorState { + /// Orchestrator has exclusive write access to the database. + BackfillSyncActive, + /// Node is actively processing the chain. + #[default] + Idle, +} + +impl OrchestratorState { + /// Returns `true` if the state is [`OrchestratorState::BackfillSyncActive`]. + pub const fn is_backfill_sync_active(&self) -> bool { + matches!(self, Self::BackfillSyncActive) + } + + /// Returns `true` if the state is [`OrchestratorState::Idle`]. + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } +} diff --git a/crates/engine/tree/src/database.rs b/crates/engine/tree/src/database.rs new file mode 100644 index 000000000000..e9b62111ab8d --- /dev/null +++ b/crates/engine/tree/src/database.rs @@ -0,0 +1,261 @@ +#![allow(dead_code)] + +use crate::{ + static_files::{StaticFileAction, StaticFileServiceHandle}, + tree::ExecutedBlock, +}; +use reth_db::database::Database; +use reth_errors::ProviderResult; +use reth_primitives::B256; +use reth_provider::{ + writer::StorageWriter, BlockExecutionWriter, BlockNumReader, BlockWriter, HistoryWriter, + OriginalValuesKnown, ProviderFactory, StageCheckpointWriter, StateWriter, +}; +use reth_prune::{Pruner, PrunerOutput}; +use reth_stages_types::{StageCheckpoint, StageId}; +use std::sync::mpsc::{Receiver, SendError, Sender}; +use tokio::sync::oneshot; +use tracing::debug; + +/// Writes parts of reth's in memory tree state to the database. +/// +/// This is meant to be a spawned service that listens for various incoming database operations, +/// performing those actions on disk, and returning the result in a channel. +/// +/// There are two types of operations this service can perform: +/// - Writing executed blocks to disk, returning the hash of the latest block that was inserted. +/// - Removing blocks from disk, returning the hash of the lowest block removed. +/// +/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs +/// blocking database operations in an endless loop. +#[derive(Debug)] +pub struct DatabaseService { + /// The db / static file provider to use + provider: ProviderFactory, + /// Incoming requests to persist stuff + incoming: Receiver, + /// Handle for the static file service. 
+ static_file_handle: StaticFileServiceHandle, + /// The pruner + pruner: Pruner>, +} + +impl DatabaseService { + /// Create a new database service + pub const fn new( + provider: ProviderFactory, + incoming: Receiver, + static_file_handle: StaticFileServiceHandle, + pruner: Pruner>, + ) -> Self { + Self { provider, incoming, static_file_handle, pruner } + } + + /// Writes the cloned tree state to the database + fn write(&self, blocks: Vec) -> ProviderResult<()> { + let provider_rw = self.provider.provider_rw()?; + + if blocks.is_empty() { + debug!(target: "tree::persistence::db", "Attempted to write empty block range"); + return Ok(()) + } + + let first_number = blocks.first().unwrap().block().number; + + let last = blocks.last().unwrap().block(); + let last_block_number = last.number; + + // TODO: remove all the clones and do performant / batched writes for each type of object + // instead of a loop over all blocks, + // meaning: + // * blocks + // * state + // * hashed state + // * trie updates (cannot naively extend, need helper) + // * indices (already done basically) + // Insert the blocks + for block in blocks { + let sealed_block = + block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); + provider_rw.insert_block(sealed_block)?; + + // Write state and changesets to the database. + // Must be written after blocks because of the receipt lookup. + let execution_outcome = block.execution_outcome().clone(); + // TODO: use single storage writer in task when sf / db tasks are combined + execution_outcome.write_to_storage(&provider_rw, None, OriginalValuesKnown::No)?; + + // insert hashes and intermediate merkle nodes + { + let trie_updates = block.trie_updates().clone(); + let hashed_state = block.hashed_state(); + // TODO: use single storage writer in task when sf / db tasks are combined + let storage_writer = StorageWriter::new(Some(&provider_rw), None); + storage_writer.write_hashed_state(&hashed_state.clone().into_sorted())?; + trie_updates.write_to_database(provider_rw.tx_ref())?; + } + + // update history indices + provider_rw.update_history_indices(first_number..=last_block_number)?; + + // Update pipeline progress + provider_rw.update_pipeline_stages(last_block_number, false)?; + } + + debug!(target: "tree::persistence::db", range = ?first_number..=last_block_number, "Appended blocks"); + + Ok(()) + } + + /// Removes block data above the given block number from the database. + /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove + /// `block_number`. + /// + /// This will then send a command to the static file service, to remove the actual block data. + fn remove_blocks_above( + &self, + block_number: u64, + sender: oneshot::Sender<()>, + ) -> ProviderResult<()> { + let provider_rw = self.provider.provider_rw()?; + let highest_block = self.provider.last_block_number()?; + provider_rw.remove_block_and_execution_range(block_number..=highest_block)?; + + // send a command to the static file service to also remove blocks + let _ = self + .static_file_handle + .send_action(StaticFileAction::RemoveBlocksAbove((block_number, sender))); + Ok(()) + } + + /// Prunes block data before the given block hash according to the configured prune + /// configuration. + fn prune_before(&mut self, block_num: u64) -> PrunerOutput { + // TODO: doing this properly depends on pruner segment changes + self.pruner.run(block_num).expect("todo: handle errors") + } + + /// Updates checkpoints related to block headers and bodies. 
This should be called by the static + /// file service, after new transactions have been successfully written to disk. + fn update_transaction_meta(&self, block_num: u64) -> ProviderResult<()> { + let provider_rw = self.provider.provider_rw()?; + provider_rw.save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block_num))?; + provider_rw.save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block_num))?; + provider_rw.commit()?; + Ok(()) + } +} + +impl DatabaseService +where + DB: Database, +{ + /// This is the main loop, that will listen to database events and perform the requested + /// database actions + pub fn run(mut self) { + // If the receiver errors then senders have disconnected, so the loop should then end. + while let Ok(action) = self.incoming.recv() { + match action { + DatabaseAction::RemoveBlocksAbove((new_tip_num, sender)) => { + self.remove_blocks_above(new_tip_num, sender).expect("todo: handle errors"); + } + DatabaseAction::SaveBlocks((blocks, sender)) => { + if blocks.is_empty() { + todo!("return error or something"); + } + let last_block_hash = blocks.last().unwrap().block().hash(); + self.write(blocks).unwrap(); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(last_block_hash); + } + DatabaseAction::PruneBefore((block_num, sender)) => { + let res = self.prune_before(block_num); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(res); + } + DatabaseAction::UpdateTransactionMeta((block_num, sender)) => { + self.update_transaction_meta(block_num).expect("todo: handle errors"); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(()); + } + } + } + } +} + +/// A signal to the database service that part of the tree state can be persisted. +#[derive(Debug)] +pub enum DatabaseAction { + /// The section of tree state that should be persisted. These blocks are expected in order of + /// increasing block number. + /// + /// This should just store the execution history-related data. Header, transaction, and + /// receipt-related data should already be written to static files. + SaveBlocks((Vec, oneshot::Sender)), + + /// Updates checkpoints related to block headers and bodies. This should be called by the + /// static file service, after new transactions have been successfully written to disk. + UpdateTransactionMeta((u64, oneshot::Sender<()>)), + + /// Removes block data above the given block number from the database. + /// + /// This will then send a command to the static file service, to remove the actual block data. + RemoveBlocksAbove((u64, oneshot::Sender<()>)), + + /// Prune associated block data before the given block number, according to already-configured + /// prune modes. + PruneBefore((u64, oneshot::Sender)), +} + +/// A handle to the database service +#[derive(Debug, Clone)] +pub struct DatabaseServiceHandle { + /// The channel used to communicate with the database service + sender: Sender, +} + +impl DatabaseServiceHandle { + /// Create a new [`DatabaseServiceHandle`] from a [`Sender`]. + pub const fn new(sender: Sender) -> Self { + Self { sender } + } + + /// Sends a specific [`DatabaseAction`] in the contained channel. The caller is responsible + /// for creating any channels for the given action. + pub fn send_action(&self, action: DatabaseAction) -> Result<(), SendError> { + self.sender.send(action) + } + + /// Tells the database service to save a certain list of finalized blocks. 
The blocks are + /// assumed to be ordered by block number. + /// + /// This returns the latest hash that has been saved, allowing removal of that block and any + /// previous blocks from in-memory data structures. + pub async fn save_blocks(&self, blocks: Vec) -> B256 { + let (tx, rx) = oneshot::channel(); + self.sender.send(DatabaseAction::SaveBlocks((blocks, tx))).expect("should be able to send"); + rx.await.expect("todo: err handling") + } + + /// Tells the database service to remove blocks above a certain block number. + pub async fn remove_blocks_above(&self, block_num: u64) { + let (tx, rx) = oneshot::channel(); + self.sender + .send(DatabaseAction::RemoveBlocksAbove((block_num, tx))) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } + + /// Tells the database service to remove block data before the given hash, according to the + /// configured prune config. + pub async fn prune_before(&self, block_num: u64) -> PrunerOutput { + let (tx, rx) = oneshot::channel(); + self.sender + .send(DatabaseAction::PruneBefore((block_num, tx))) + .expect("should be able to send"); + rx.await.expect("todo: err handling") + } +} diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index bd4c220565a2..9b965e892268 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -1,18 +1,18 @@ //! An engine API handler for the chain. use crate::{ - backfill::BackfillAction, chain::{ChainHandler, FromOrchestrator, HandlerEvent}, download::{BlockDownloader, DownloadAction, DownloadOutcome}, + tree::TreeEvent, }; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; +use reth_beacon_consensus::BeaconEngineMessage; use reth_engine_primitives::EngineTypes; use reth_primitives::{SealedBlockWithSenders, B256}; use std::{ collections::HashSet, sync::mpsc::Sender, - task::{ready, Context, Poll}, + task::{Context, Poll}, }; use tokio::sync::mpsc::UnboundedReceiver; @@ -27,8 +27,6 @@ use tokio::sync::mpsc::UnboundedReceiver; /// received from the CL to the handler. /// /// It is responsible for handling the following: -/// - Delegating incoming requests to the [`EngineRequestHandler`]. -/// - Advancing the [`EngineRequestHandler`] by polling it and emitting events. /// - Downloading blocks on demand from the network if requested by the [`EngineApiRequestHandler`]. /// /// The core logic is part of the [`EngineRequestHandler`], which is responsible for processing the @@ -73,18 +71,18 @@ where // drain the handler first while let Poll::Ready(ev) = self.handler.poll(cx) { match ev { + RequestHandlerEvent::Idle => break, RequestHandlerEvent::HandlerEvent(ev) => { return match ev { - HandlerEvent::BackfillAction(target) => { + HandlerEvent::BackfillSync(target) => { // bubble up backfill sync request request self.downloader.on_action(DownloadAction::Clear); - Poll::Ready(HandlerEvent::BackfillAction(target)) + Poll::Ready(HandlerEvent::BackfillSync(target)) } HandlerEvent::Event(ev) => { // bubble up the event Poll::Ready(HandlerEvent::Event(ev)) } - HandlerEvent::FatalError => Poll::Ready(HandlerEvent::FatalError), } } RequestHandlerEvent::Download(req) => { @@ -114,14 +112,7 @@ where } } -/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API, -/// such as newPayload). 
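The `DatabaseServiceHandle` methods above follow one request/reply pattern throughout: push an action carrying a `oneshot` sender into the service's `mpsc` channel, then await the reply while the service loops on a dedicated blocking thread. A runnable sketch of that pattern with a hypothetical single-variant `Action`:

```rust
use std::sync::mpsc;
use tokio::sync::oneshot;

/// Stand-in for a `DatabaseAction`: a request paired with a reply channel.
enum Action {
    Save(Vec<u64>, oneshot::Sender<u64>),
}

/// The handle side: fire the request, then await the oneshot reply, mirroring
/// the shape of `DatabaseServiceHandle::save_blocks`.
async fn save_blocks(sender: &mpsc::Sender<Action>, blocks: Vec<u64>) -> u64 {
    let (tx, rx) = oneshot::channel();
    sender.send(Action::Save(blocks, tx)).expect("service alive");
    rx.await.expect("service replied")
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel();

    // The service side: a dedicated OS thread looping over blocking work,
    // as the doc comments above prescribe for blocking database I/O.
    let service = std::thread::spawn(move || {
        while let Ok(Action::Save(blocks, reply)) = rx.recv() {
            let last = *blocks.last().unwrap(); // "persist" and report the tip
            let _ = reply.send(last);
        }
    });

    assert_eq!(save_blocks(&tx, vec![1, 2, 3]).await, 3);
    drop(tx); // closing the channel ends the service loop
    service.join().unwrap();
}
```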
-/// -/// ## Control flow -/// -/// Requests and certain updates, such as a change in backfill sync status, are delegated to this -/// type via [`EngineRequestHandler::on_event`]. This type is responsible for processing the -/// incoming requests and advancing the chain and emit events when it is polled. +/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API) pub trait EngineRequestHandler: Send + Sync { /// Even type this handler can emit type Event: Send; @@ -178,7 +169,7 @@ impl EngineRequestHandler for EngineApiRequestHandler where T: EngineTypes, { - type Event = BeaconConsensusEngineEvent; + type Event = EngineApiEvent; type Request = BeaconEngineMessage; fn on_event(&mut self, event: FromEngine) { @@ -187,39 +178,15 @@ where } fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { - let Some(ev) = ready!(self.from_tree.poll_recv(cx)) else { - return Poll::Ready(RequestHandlerEvent::HandlerEvent(HandlerEvent::FatalError)) - }; - - let ev = match ev { - EngineApiEvent::BeaconConsensus(ev) => { - RequestHandlerEvent::HandlerEvent(HandlerEvent::Event(ev)) - } - EngineApiEvent::BackfillAction(action) => { - RequestHandlerEvent::HandlerEvent(HandlerEvent::BackfillAction(action)) - } - EngineApiEvent::Download(action) => RequestHandlerEvent::Download(action), - }; - Poll::Ready(ev) + todo!("poll tree") } } /// Events emitted by the engine API handler. #[derive(Debug)] pub enum EngineApiEvent { - /// Event from the consensus engine. - // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), - /// Backfill action is needed. - BackfillAction(BackfillAction), - /// Block download is needed. - Download(DownloadRequest), -} - -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { - Self::BeaconConsensus(event) - } + /// Bubbled from tree. + FromTree(TreeEvent), } #[derive(Debug)] @@ -241,6 +208,8 @@ impl From for FromEngine { /// Requests produced by a [`EngineRequestHandler`]. #[derive(Debug)] pub enum RequestHandlerEvent { + /// The handler is idle. + Idle, /// An event emitted by the handler. HandlerEvent(HandlerEvent), /// Request to download blocks. @@ -255,10 +224,3 @@ pub enum DownloadRequest { /// Download the given range of blocks. BlockRange(B256, u64), } - -impl DownloadRequest { - /// Returns a [`DownloadRequest`] for a single block. - pub fn single_block(hash: B256) -> Self { - Self::BlockSet(HashSet::from([hash])) - } -} diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs index d238bf879ca6..b4ac74992c21 100644 --- a/crates/engine/tree/src/lib.rs +++ b/crates/engine/tree/src/lib.rs @@ -20,14 +20,18 @@ pub use reth_blockchain_tree_api::*; pub mod backfill; /// The type that drives the chain forward. pub mod chain; +/// The background writer service for batch db writes. +pub mod database; /// Support for downloading blocks on demand for live sync. pub mod download; /// Engine Api chain handler support. pub mod engine; /// Metrics support. pub mod metrics; -/// The background writer service, coordinating write operations on static files and the database. +/// The background writer service, coordinating the static file and database services. pub mod persistence; +/// The background writer service for static file writes. +pub mod static_files; /// Support for interacting with the blockchain tree. 
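The engine handler hunk above drains its inner source before yielding: download requests are serviced in place while any other event is bubbled up immediately. A compact sketch of that drain-first polling shape, with hypothetical `Ev` variants standing in for the real request and event types:

```rust
use std::task::Poll;

enum Ev {
    Download(&'static str),
    Bubble(&'static str),
}

/// Keep polling the inner source while it is ready, servicing download
/// requests inline and bubbling everything else up immediately.
fn poll_drain(
    mut inner: impl FnMut() -> Poll<Ev>,
    downloads: &mut Vec<&'static str>,
) -> Poll<&'static str> {
    while let Poll::Ready(ev) = inner() {
        match ev {
            Ev::Download(req) => downloads.push(req), // handled internally, keep draining
            Ev::Bubble(ev) => return Poll::Ready(ev), // surfaced to the orchestrator
        }
    }
    Poll::Pending
}

fn main() {
    let mut events =
        vec![Ev::Download("block range"), Ev::Bubble("payload ok")].into_iter();
    let mut downloads = Vec::new();
    let out = poll_drain(
        || events.next().map_or(Poll::Pending, Poll::Ready),
        &mut downloads,
    );
    assert_eq!(downloads, vec!["block range"]);
    assert_eq!(out, Poll::Ready("payload ok"));
}
```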
pub mod tree; diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 2a5baf76d3d4..b3e73ffcfdcf 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,317 +1,35 @@ #![allow(dead_code)] -use reth_chain_state::ExecutedBlock; -use reth_db::Database; -use reth_errors::ProviderResult; -use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash, B256}; -use reth_provider::{ - writer::StorageWriter, BlockExecutionWriter, BlockNumReader, BlockWriter, HistoryWriter, - OriginalValuesKnown, ProviderFactory, StageCheckpointWriter, StateWriter, - StaticFileProviderFactory, StaticFileWriter, TransactionsProviderExt, +use crate::{ + database::{DatabaseAction, DatabaseService, DatabaseServiceHandle}, + static_files::{StaticFileAction, StaticFileService, StaticFileServiceHandle}, + tree::ExecutedBlock, }; +use reth_db::Database; +use reth_primitives::{SealedBlock, B256, U256}; +use reth_provider::ProviderFactory; use reth_prune::{Pruner, PrunerOutput}; -use reth_stages_types::{StageCheckpoint, StageId}; use std::sync::{ - mpsc::{Receiver, SendError, Sender}, + mpsc::{SendError, Sender}, Arc, }; use tokio::sync::oneshot; -use tracing::debug; - -/// Writes parts of reth's in memory tree state to the database and static files. -/// -/// This is meant to be a spawned service that listens for various incoming persistence operations, -/// performing those actions on disk, and returning the result in a channel. -/// -/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs -/// blocking I/O operations in an endless loop. -#[derive(Debug)] -pub struct PersistenceService { - /// The provider factory to use - provider: ProviderFactory, - /// Incoming requests - incoming: Receiver, - /// The pruner - pruner: Pruner>, -} - -impl PersistenceService { - /// Create a new persistence service - pub const fn new( - provider: ProviderFactory, - incoming: Receiver, - pruner: Pruner>, - ) -> Self { - Self { provider, incoming, pruner } - } - - /// Writes the cloned tree state to database - fn write(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { - debug!(target: "tree::persistence", "Writing blocks to database"); - let provider_rw = self.provider.provider_rw()?; - - if blocks.is_empty() { - debug!(target: "tree::persistence", "Attempted to write empty block range"); - return Ok(()) - } - - let first_number = blocks.first().unwrap().block().number; - - let last = blocks.last().unwrap().block(); - let last_block_number = last.number; - - // TODO: remove all the clones and do performant / batched writes for each type of object - // instead of a loop over all blocks, - // meaning: - // * blocks - // * state - // * hashed state - // * trie updates (cannot naively extend, need helper) - // * indices (already done basically) - // Insert the blocks - for block in blocks { - let sealed_block = - block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); - provider_rw.insert_block(sealed_block)?; - - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - let execution_outcome = block.execution_outcome().clone(); - // TODO: do we provide a static file producer here? 
- let mut storage_writer = StorageWriter::new(Some(&provider_rw), None); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; - - // insert hashes and intermediate merkle nodes - { - let trie_updates = block.trie_updates().clone(); - let hashed_state = block.hashed_state(); - storage_writer.write_hashed_state(&hashed_state.clone().into_sorted())?; - storage_writer.write_trie_updates(&trie_updates)?; - } - - // update history indices - provider_rw.update_history_indices(first_number..=last_block_number)?; - - // Update pipeline progress - provider_rw.update_pipeline_stages(last_block_number, false)?; - } - - debug!(target: "tree::persistence", range = ?first_number..=last_block_number, "Appended block data"); - - Ok(()) - } - - /// Removes block data above the given block number from the database. - /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove - /// `block_number`. - /// - /// This will then send a command to the static file service, to remove the actual block data. - fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> { - debug!(target: "tree::persistence", ?block_number, "Removing blocks from database above block_number"); - let provider_rw = self.provider.provider_rw()?; - let highest_block = self.provider.last_block_number()?; - provider_rw.remove_block_and_execution_range(block_number..=highest_block)?; - - Ok(()) - } - - /// Prunes block data before the given block hash according to the configured prune - /// configuration. - fn prune_before(&mut self, block_num: u64) -> PrunerOutput { - debug!(target: "tree::persistence", ?block_num, "Running pruner"); - // TODO: doing this properly depends on pruner segment changes - self.pruner.run(block_num).expect("todo: handle errors") - } - - /// Updates checkpoints related to block headers and bodies. This should be called after new - /// transactions have been successfully written to disk. - fn update_transaction_meta(&self, block_num: u64) -> ProviderResult<()> { - debug!(target: "tree::persistence", ?block_num, "Updating transaction metadata after writing"); - let provider_rw = self.provider.provider_rw()?; - provider_rw.save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block_num))?; - provider_rw.save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block_num))?; - provider_rw.commit()?; - Ok(()) - } - - /// Writes the transactions to static files. - /// - /// The [`update_transaction_meta`](Self::update_transaction_meta) method should be called - /// after this, to update the checkpoints for headers and block bodies. 
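`update_transaction_meta` (which moves into the database service above) shows the checkpoint discipline: once transactions are durably written, the Headers and Bodies stage checkpoints advance to the same block inside a single commit. A toy rendering of that all-or-nothing update, with a `HashMap` standing in for the provider transaction:

```rust
use std::collections::HashMap;

/// Toy version of `update_transaction_meta`: both stage checkpoints move to the
/// same block and become visible together, so readers never observe the two
/// stages out of sync.
fn update_transaction_meta(checkpoints: &mut HashMap<&'static str, u64>, block_num: u64) {
    let mut staged = checkpoints.clone(); // stand-in for an uncommitted provider txn
    staged.insert("Headers", block_num);
    staged.insert("Bodies", block_num);
    *checkpoints = staged; // "commit": both updates land atomically
}

fn main() {
    let mut checkpoints = HashMap::from([("Headers", 10), ("Bodies", 10)]);
    update_transaction_meta(&mut checkpoints, 11);
    assert_eq!(checkpoints["Headers"], 11);
    assert_eq!(checkpoints["Bodies"], 11);
}
```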
- fn write_transactions(&self, block: Arc) -> ProviderResult { - debug!(target: "tree::persistence", "Writing transactions"); - let provider = self.provider.static_file_provider(); - - let header_writer = provider.get_writer(block.number, StaticFileSegment::Headers)?; - let provider_ro = self.provider.provider()?; - let mut storage_writer = StorageWriter::new(Some(&provider_ro), Some(header_writer)); - storage_writer.append_headers_from_blocks( - block.header().number, - std::iter::once(&(block.header(), block.hash())), - )?; - - let transactions_writer = - provider.get_writer(block.number, StaticFileSegment::Transactions)?; - let mut storage_writer = StorageWriter::new(Some(&provider_ro), Some(transactions_writer)); - let no_hash_transactions = - block.body.clone().into_iter().map(TransactionSignedNoHash::from).collect(); - storage_writer.append_transactions_from_blocks( - block.header().number, - std::iter::once(&no_hash_transactions), - )?; - - Ok(block.number) - } - - /// Write execution-related block data to static files. - /// - /// This will then send a command to the db service, that it should write new data, and update - /// the checkpoints for execution and beyond. - fn write_execution_data(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { - if blocks.is_empty() { - return Ok(()) - } - let provider_rw = self.provider.provider_rw()?; - let provider = self.provider.static_file_provider(); - - // NOTE: checked non-empty above - let first_block = blocks.first().unwrap().block(); - let last_block = blocks.last().unwrap().block().clone(); - - // use the storage writer - let current_block = first_block.number; - debug!(target: "tree::persistence", len=blocks.len(), ?current_block, "Writing execution data to static files"); - - let receipts_writer = - provider.get_writer(first_block.number, StaticFileSegment::Receipts)?; - - let mut storage_writer = StorageWriter::new(Some(&provider_rw), Some(receipts_writer)); - let receipts_iter = blocks.iter().map(|block| { - let receipts = block.execution_outcome().receipts().receipt_vec.clone(); - debug_assert!(receipts.len() == 1); - receipts.first().unwrap().clone() - }); - storage_writer.append_receipts_from_blocks(current_block, receipts_iter)?; - Ok(()) - } - - /// Removes the blocks above the given block number from static files. Also removes related - /// receipt and header data. - /// - /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove - /// `block_number`. - /// - /// Returns the block hash for the lowest block removed from the database, which should be - /// the hash for `block_number + 1`. - /// - /// This is meant to be called by the db service, as this should only be done after related data - /// is removed from the database, and checkpoints are updated. - /// - /// Returns the hash of the lowest removed block. 
- fn remove_static_file_blocks_above(&self, block_number: u64) -> ProviderResult<()> { - debug!(target: "tree::persistence", ?block_number, "Removing static file blocks above block_number"); - let sf_provider = self.provider.static_file_provider(); - let db_provider_ro = self.provider.provider()?; - - // get highest static file block for the total block range - let highest_static_file_block = sf_provider - .get_highest_static_file_block(StaticFileSegment::Headers) - .expect("todo: error handling, headers should exist"); - - // Get the total txs for the block range, so we have the correct number of columns for - // receipts and transactions - let tx_range = db_provider_ro - .transaction_range_by_block_range(block_number..=highest_static_file_block)?; - let total_txs = tx_range.end().saturating_sub(*tx_range.start()); - - // get the writers - let mut header_writer = sf_provider.get_writer(block_number, StaticFileSegment::Headers)?; - let mut transactions_writer = - sf_provider.get_writer(block_number, StaticFileSegment::Transactions)?; - let mut receipts_writer = - sf_provider.get_writer(block_number, StaticFileSegment::Receipts)?; - - // finally actually truncate, these internally commit - receipts_writer.prune_receipts(total_txs, block_number)?; - transactions_writer.prune_transactions(total_txs, block_number)?; - header_writer.prune_headers(highest_static_file_block.saturating_sub(block_number))?; - - sf_provider.commit()?; - - Ok(()) - } -} - -impl PersistenceService -where - DB: Database, -{ - /// This is the main loop, that will listen to database events and perform the requested - /// database actions - pub fn run(mut self) { - // If the receiver errors then senders have disconnected, so the loop should then end. - while let Ok(action) = self.incoming.recv() { - match action { - PersistenceAction::RemoveBlocksAbove((new_tip_num, sender)) => { - self.remove_blocks_above(new_tip_num).expect("todo: handle errors"); - self.remove_static_file_blocks_above(new_tip_num).expect("todo: handle errors"); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(()); - } - PersistenceAction::SaveBlocks((blocks, sender)) => { - if blocks.is_empty() { - todo!("return error or something"); - } - let last_block_hash = blocks.last().unwrap().block().hash(); - // first write to static files - self.write_execution_data(&blocks).expect("todo: handle errors"); - // then write to db - self.write(&blocks).expect("todo: handle errors"); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(last_block_hash); - } - PersistenceAction::PruneBefore((block_num, sender)) => { - let res = self.prune_before(block_num); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(res); - } - PersistenceAction::WriteTransactions((block, sender)) => { - let block_num = self.write_transactions(block).expect("todo: handle errors"); - self.update_transaction_meta(block_num).expect("todo: handle errors"); - - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(()); - } - } - } - } -} - -/// A signal to the persistence service that part of the tree state can be persisted. +/// A signal to the database and static file services that part of the tree state can be persisted. 
#[derive(Debug)] pub enum PersistenceAction { + /// The given block has been added to the canonical chain, its transactions and headers will be + /// persisted for durability. + LogTransactions((Arc, u64, U256, oneshot::Sender<()>)), + /// The section of tree state that should be persisted. These blocks are expected in order of /// increasing block number. /// - /// First, header, transaction, and receipt-related data should be written to static files. - /// Then the execution history-related data will be written to the database. + /// This should just store the execution history-related data. Header, transaction, and + /// receipt-related data should already be written to static files. SaveBlocks((Vec, oneshot::Sender)), - /// The given block has been added to the canonical chain, its transactions and headers will be - /// persisted for durability. - /// - /// This will first append the header and transactions to static files, then update the - /// checkpoints for headers and block bodies in the database. - WriteTransactions((Arc, oneshot::Sender<()>)), - /// Removes block data above the given block number from the database. - /// - /// This will first update checkpoints from the database, then remove actual block data from - /// static files. RemoveBlocksAbove((u64, oneshot::Sender<()>)), /// Prune associated block data before the given block number, according to already-configured @@ -319,47 +37,111 @@ pub enum PersistenceAction { PruneBefore((u64, oneshot::Sender)), } -/// A handle to the persistence service +/// An error type for when there is a [`SendError`] while sending an action to one of the services. +#[derive(Debug)] +pub enum PersistenceSendError { + /// When there is an error sending to the static file service + StaticFile(SendError), + /// When there is an error sending to the database service + Database(SendError), +} + +impl From> for PersistenceSendError { + fn from(value: SendError) -> Self { + Self::StaticFile(value) + } +} + +impl From> for PersistenceSendError { + fn from(value: SendError) -> Self { + Self::Database(value) + } +} + +/// A handle to the database and static file services. This will send commands to the correct +/// service, depending on the command. +/// +/// Some commands should be sent to the database service, and others should be sent to the static +/// file service, despite having the same name. This is because some actions require work to be done +/// by both the static file _and_ the database service, and require some coordination. +/// +/// This type is what actually coordinates the two services, and should be used by consumers of the +/// persistence related services. #[derive(Debug, Clone)] pub struct PersistenceHandle { - /// The channel used to communicate with the persistence service - sender: Sender, + /// The channel used to communicate with the database service + db_sender: Sender, + /// The channel used to communicate with the static file service + static_file_sender: Sender, } impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a [`Sender`]. - pub const fn new(sender: Sender) -> Self { - Self { sender } + pub const fn new( + db_sender: Sender, + static_file_sender: Sender, + ) -> Self { + Self { db_sender, static_file_sender } } - /// Create a new [`PersistenceHandle`], and spawn the persistence service. - pub fn spawn_service( + /// Create a new [`PersistenceHandle`], and spawn the database and static file services. 
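`spawn_services` (below) wires the two services together: each gets its own channel, plus a handle to the other so follow-up work can be forwarded between them. A self-contained sketch of that cross-handle wiring, with plain string messages in place of the real actions:

```rust
use std::sync::mpsc;

/// Toy wiring in the shape of `spawn_services`: two services on named threads,
/// the first holding a sender into the second so it can forward follow-ups.
fn main() {
    let (db_tx, db_rx) = mpsc::channel::<&'static str>();
    let (sf_tx, sf_rx) = mpsc::channel::<&'static str>();

    // Static-file service: does its part, then forwards work to the db service.
    let db_handle = db_tx.clone();
    let sf = std::thread::Builder::new()
        .name("Static File Service".to_string())
        .spawn(move || {
            while let Ok(msg) = sf_rx.recv() {
                let _ = db_handle.send(msg); // e.g. "update checkpoints"
            }
        })
        .unwrap();

    // Database service: terminal consumer in this toy.
    let db = std::thread::Builder::new()
        .name("Database Service".to_string())
        .spawn(move || {
            let mut seen = Vec::new();
            while let Ok(msg) = db_rx.recv() {
                seen.push(msg);
            }
            seen
        })
        .unwrap();

    sf_tx.send("write transactions").unwrap();
    drop(sf_tx); // closing the channel ends the static-file loop
    sf.join().unwrap();
    drop(db_tx); // all db senders gone: db loop ends
    assert_eq!(db.join().unwrap(), vec!["write transactions"]);
}
```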
+ pub fn spawn_services( provider_factory: ProviderFactory, pruner: Pruner>, ) -> Self { // create the initial channels + let (static_file_service_tx, static_file_service_rx) = std::sync::mpsc::channel(); let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); // construct persistence handle - let persistence_handle = Self::new(db_service_tx); - - // spawn the persistence service - let db_service = PersistenceService::new(provider_factory, db_service_rx, pruner); + let persistence_handle = Self::new(db_service_tx.clone(), static_file_service_tx.clone()); + + // construct handles for the services to talk to each other + let static_file_handle = StaticFileServiceHandle::new(static_file_service_tx); + let database_handle = DatabaseServiceHandle::new(db_service_tx); + + // spawn the db service + let db_service = DatabaseService::new( + provider_factory.clone(), + db_service_rx, + static_file_handle, + pruner, + ); std::thread::Builder::new() - .name("Persistence Service".to_string()) + .name("Database Service".to_string()) .spawn(|| db_service.run()) .unwrap(); + // spawn the static file service + let static_file_service = + StaticFileService::new(provider_factory, static_file_service_rx, database_handle); + std::thread::Builder::new() + .name("Static File Service".to_string()) + .spawn(|| static_file_service.run()) + .unwrap(); + persistence_handle } /// Sends a specific [`PersistenceAction`] in the contained channel. The caller is responsible /// for creating any channels for the given action. - pub fn send_action( - &self, - action: PersistenceAction, - ) -> Result<(), SendError> { - self.sender.send(action) + pub fn send_action(&self, action: PersistenceAction) -> Result<(), PersistenceSendError> { + match action { + PersistenceAction::LogTransactions(input) => self + .static_file_sender + .send(StaticFileAction::LogTransactions(input)) + .map_err(From::from), + PersistenceAction::SaveBlocks(input) => self + .static_file_sender + .send(StaticFileAction::WriteExecutionData(input)) + .map_err(From::from), + PersistenceAction::RemoveBlocksAbove(input) => { + self.db_sender.send(DatabaseAction::RemoveBlocksAbove(input)).map_err(From::from) + } + PersistenceAction::PruneBefore(input) => { + self.db_sender.send(DatabaseAction::PruneBefore(input)).map_err(From::from) + } + } } /// Tells the persistence service to save a certain list of finalized blocks. 
 
     /// Tells the persistence service to save a certain list of finalized blocks. The blocks are
@@ -399,7 +181,7 @@ impl PersistenceHandle {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use reth_chain_state::test_utils::{get_executed_block_with_number, get_executed_blocks};
+    use crate::test_utils::{get_executed_block_with_number, get_executed_blocks};
     use reth_exex_types::FinishedExExHeight;
     use reth_primitives::B256;
     use reth_provider::{test_utils::create_test_provider_factory, ProviderFactory};
@@ -420,12 +202,11 @@ mod tests {
             finished_exex_height_rx,
         );
 
-        PersistenceHandle::spawn_service(provider, pruner)
+        PersistenceHandle::spawn_services(provider, pruner)
     }
 
     #[tokio::test]
     async fn test_save_blocks_empty() {
-        reth_tracing::init_test_tracing();
         let persistence_handle = default_persistence_handle();
 
         let blocks = vec![];
@@ -439,10 +220,9 @@ mod tests {
 
     #[tokio::test]
     async fn test_save_blocks_single_block() {
-        reth_tracing::init_test_tracing();
         let persistence_handle = default_persistence_handle();
         let block_number = 0;
-        let executed = get_executed_block_with_number(block_number, B256::random());
+        let executed = get_executed_block_with_number(block_number);
         let block_hash = executed.block().hash();
 
         let blocks = vec![executed];
@@ -456,7 +236,6 @@ mod tests {
 
     #[tokio::test]
     async fn test_save_blocks_multiple_blocks() {
-        reth_tracing::init_test_tracing();
         let persistence_handle = default_persistence_handle();
 
         let blocks = get_executed_blocks(0..5).collect::<Vec<_>>();
@@ -471,7 +250,6 @@ mod tests {
 
     #[tokio::test]
     async fn test_save_blocks_multiple_calls() {
-        reth_tracing::init_test_tracing();
        let persistence_handle = default_persistence_handle();
 
         let ranges = [0..1, 1..2, 2..4, 4..5];
diff --git a/crates/engine/tree/src/static_files.rs b/crates/engine/tree/src/static_files.rs
new file mode 100644
index 000000000000..fcdf0292bd3d
--- /dev/null
+++ b/crates/engine/tree/src/static_files.rs
@@ -0,0 +1,272 @@
+#![allow(dead_code)]
+
+use reth_db::database::Database;
+use reth_errors::ProviderResult;
+use reth_primitives::{SealedBlock, StaticFileSegment, TransactionSignedNoHash, B256, U256};
+use reth_provider::{
+    ProviderFactory, StaticFileProviderFactory, StaticFileWriter, TransactionsProviderExt,
+};
+use std::sync::{
+    mpsc::{Receiver, SendError, Sender},
+    Arc,
+};
+use tokio::sync::oneshot;
+
+use crate::{
+    database::{DatabaseAction, DatabaseServiceHandle},
+    tree::ExecutedBlock,
+};
+
+/// Writes finalized blocks to reth's static files.
+///
+/// This is meant to be a spawned service that listens for incoming finalization operations,
+/// writing to or producing new static files as needed.
+///
+/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs
+/// blocking file operations in an endless loop.
+#[derive(Debug)]
+pub struct StaticFileService<DB> {
+    /// The db / static file provider to use
+    provider: ProviderFactory<DB>,
+    /// Handle for the database service
+    database_handle: DatabaseServiceHandle,
+    /// Incoming requests to write static files
+    incoming: Receiver<StaticFileAction>,
+}
+
+impl<DB> StaticFileService<DB>
+where
+    DB: Database + 'static,
+{
+    /// Create a new static file service.
+    pub const fn new(
+        provider: ProviderFactory<DB>,
+        incoming: Receiver<StaticFileAction>,
+        database_handle: DatabaseServiceHandle,
+    ) -> Self {
+        Self { provider, database_handle, incoming }
+    }
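Because the service's `run` loop (further below) blocks on `recv`, it is designed for a dedicated OS thread rather than an async task. A condensed sketch of the wiring, mirroring what `spawn_services` does above, with error handling and the reciprocal database-service spawn elided:

    use reth_db::database::Database;
    use std::sync::mpsc::channel;

    fn spawn_static_file_service<DB: Database + 'static>(
        provider: ProviderFactory<DB>,
        database_handle: DatabaseServiceHandle,
    ) -> StaticFileServiceHandle {
        let (tx, rx) = channel();
        let service = StaticFileService::new(provider, rx, database_handle);

        // a named thread makes the blocking recv loop easy to spot in debuggers
        std::thread::Builder::new()
            .name("Static File Service".to_string())
            .spawn(move || service.run())
            .expect("failed to spawn static file service");

        StaticFileServiceHandle::new(tx)
    }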
+
+    // TODO: some things about this are a bit weird, and just to make the underlying static file
+    // writes work - tx number, total difficulty inclusion. They require either additional in
+    // memory data or a db lookup. Maybe we can use a db read here.
+    /// Writes the transactions to static files, to act as a log.
+    ///
+    /// This will then send a command to the db service, that it should update the checkpoints for
+    /// headers and block bodies.
+    fn log_transactions(
+        &self,
+        block: Arc<SealedBlock>,
+        start_tx_number: u64,
+        td: U256,
+        sender: oneshot::Sender<()>,
+    ) -> ProviderResult<()> {
+        let provider = self.provider.static_file_provider();
+        let mut header_writer = provider.get_writer(block.number, StaticFileSegment::Headers)?;
+        let mut transactions_writer =
+            provider.get_writer(block.number, StaticFileSegment::Transactions)?;
+
+        // TODO: does to_compact require ownership?
+        header_writer.append_header(block.header().clone(), td, block.hash())?;
+        let no_hash_transactions =
+            block.body.clone().into_iter().map(TransactionSignedNoHash::from);
+
+        let mut tx_number = start_tx_number;
+        for tx in no_hash_transactions {
+            transactions_writer.append_transaction(tx_number, tx)?;
+            tx_number += 1;
+        }
+
+        // increment block for transactions
+        transactions_writer.increment_block(StaticFileSegment::Transactions, block.number)?;
+
+        // finally commit
+        transactions_writer.commit()?;
+        header_writer.commit()?;
+
+        // TODO: do we care about the mpsc error here?
+        // send a command to the db service to update the checkpoints for headers / bodies
+        let _ = self
+            .database_handle
+            .send_action(DatabaseAction::UpdateTransactionMeta((block.number, sender)));
+
+        Ok(())
+    }
+
+    /// Write execution-related block data to static files.
+    ///
+    /// This will then send a command to the db service, that it should write new data, and update
+    /// the checkpoints for execution and beyond.
+    fn write_execution_data(
+        &self,
+        blocks: Vec<ExecutedBlock>,
+        sender: oneshot::Sender<B256>,
+    ) -> ProviderResult<()> {
+        if blocks.is_empty() {
+            return Ok(())
+        }
+        let provider = self.provider.static_file_provider();
+
+        // NOTE: checked non-empty above
+        let first_block = blocks.first().unwrap().block();
+        let last_block = blocks.last().unwrap().block();
+
+        // get highest receipt, if it returns none, use zero (this is the first static file write)
+        let mut current_receipt = provider
+            .get_highest_static_file_tx(StaticFileSegment::Receipts)
+            .map(|num| num + 1)
+            .unwrap_or_default();
+        let mut current_block = first_block.number;
+
+        let mut receipts_writer =
+            provider.get_writer(first_block.number, StaticFileSegment::Receipts)?;
+        for receipts in blocks.iter().map(|block| block.execution_outcome().receipts.clone()) {
+            debug_assert!(receipts.len() == 1);
+            // TODO: should we also assert that the receipt is not None here, that means the
+            // receipt is pruned
+            for maybe_receipt in receipts.first().unwrap() {
+                if let Some(receipt) = maybe_receipt {
+                    receipts_writer.append_receipt(current_receipt, receipt.clone())?;
+                }
+                current_receipt += 1;
+            }
+
+            // increment the block
+            receipts_writer.increment_block(StaticFileSegment::Receipts, current_block)?;
+            current_block += 1;
+        }
+
+        // finally commit
+        receipts_writer.commit()?;
+
+        // TODO: do we care about the mpsc error here?
+        // send a command to the db service to update the checkpoints for execution etc.
+        let _ = self.database_handle.send_action(DatabaseAction::SaveBlocks((blocks, sender)));
+
+        Ok(())
+    }
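Note that static file writers are keyed by a single monotonically increasing transaction (and receipt) number across all blocks, not a per-block index, which is why `log_transactions` takes a `start_tx_number` and `write_execution_data` threads `current_receipt` across its loop. The continuation rule is just the `map`/`unwrap_or_default` expression above, restated here as a runnable check:

    /// Where the next receipt should be written: one past the highest receipt
    /// already in static files, or 0 if nothing has been written yet.
    fn next_receipt_number(highest_existing: Option<u64>) -> u64 {
        highest_existing.map(|num| num + 1).unwrap_or_default()
    }

    fn main() {
        assert_eq!(next_receipt_number(None), 0); // very first static file write
        assert_eq!(next_receipt_number(Some(41)), 42); // continue after existing data
    }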
+
+    /// Removes the blocks above the given block number from static files. Also removes related
+    /// receipt and header data.
+    ///
+    /// This is exclusive, i.e., it only removes blocks above `block_number`, and does not remove
+    /// `block_number`.
+    ///
+    /// Returns the block hash for the lowest block removed from the database, which should be
+    /// the hash for `block_number + 1`.
+    ///
+    /// This is meant to be called by the db service, as this should only be done after related
+    /// data is removed from the database, and checkpoints are updated.
+    fn remove_blocks_above(
+        &self,
+        block_num: u64,
+        sender: oneshot::Sender<()>,
+    ) -> ProviderResult<()> {
+        let sf_provider = self.provider.static_file_provider();
+        let db_provider_rw = self.provider.provider_rw()?;
+
+        // get highest static file block for the total block range
+        let highest_static_file_block = sf_provider
+            .get_highest_static_file_block(StaticFileSegment::Headers)
+            .expect("todo: error handling, headers should exist");
+
+        // Get the total txs for the block range, so we have the correct number of columns for
+        // receipts and transactions
+        let tx_range = db_provider_rw
+            .transaction_range_by_block_range(block_num..=highest_static_file_block)?;
+        let total_txs = tx_range.end().saturating_sub(*tx_range.start());
+
+        // get the writers
+        let mut header_writer = sf_provider.get_writer(block_num, StaticFileSegment::Headers)?;
+        let mut transactions_writer =
+            sf_provider.get_writer(block_num, StaticFileSegment::Transactions)?;
+        let mut receipts_writer =
+            sf_provider.get_writer(block_num, StaticFileSegment::Receipts)?;
+
+        // finally actually truncate, these internally commit
+        receipts_writer.prune_receipts(total_txs, block_num)?;
+        transactions_writer.prune_transactions(total_txs, block_num)?;
+        header_writer.prune_headers(highest_static_file_block.saturating_sub(block_num))?;
+
+        sf_provider.commit()?;
+
+        Ok(())
+    }
+}
+
+impl<DB> StaticFileService<DB>
+where
+    DB: Database + 'static,
+{
+    /// This is the main loop: it listens for static file actions and writes DB data to static
+    /// files.
+    pub fn run(self) {
+        // If the receiver errors then senders have disconnected, so the loop should then end.
+        while let Ok(action) = self.incoming.recv() {
+            match action {
+                StaticFileAction::LogTransactions((
+                    block,
+                    start_tx_number,
+                    td,
+                    response_sender,
+                )) => {
+                    self.log_transactions(block, start_tx_number, td, response_sender)
+                        .expect("todo: handle errors");
+                }
+                StaticFileAction::RemoveBlocksAbove((block_num, response_sender)) => {
+                    self.remove_blocks_above(block_num, response_sender)
+                        .expect("todo: handle errors");
+                }
+                StaticFileAction::WriteExecutionData((blocks, response_sender)) => {
+                    self.write_execution_data(blocks, response_sender)
+                        .expect("todo: handle errors");
+                }
+            }
+        }
+    }
+}
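The loop above also encodes the shutdown story: `std::sync::mpsc::Receiver::recv` returns `Err` only once every corresponding `Sender` (including the clone held by the persistence handle) has been dropped, so the service thread winds down without an explicit stop signal. A standalone demonstration of that property:

    use std::sync::mpsc::channel;
    use std::thread;

    fn main() {
        let (tx, rx) = channel::<u32>();

        let worker = thread::spawn(move || {
            // mirrors the service loop: runs until all senders are gone
            while let Ok(msg) = rx.recv() {
                println!("received action {msg}");
            }
            println!("all senders dropped, exiting");
        });

        tx.send(1).unwrap();
        drop(tx); // hang up: recv() now returns Err and the loop ends
        worker.join().unwrap();
    }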
+
+/// A signal to the static file service that some data should be copied from the DB to static
+/// files.
+#[derive(Debug)]
+pub enum StaticFileAction {
+    /// The given block has been added to the canonical chain, its transactions and headers will be
+    /// persisted for durability.
+    ///
+    /// This will then send a command to the db service, that it should update the checkpoints for
+    /// headers and block bodies.
+    LogTransactions((Arc<SealedBlock>, u64, U256, oneshot::Sender<()>)),
+
+    /// Write execution-related block data to static files.
+    ///
+    /// This will then send a command to the db service, that it should write new data, and update
+    /// the checkpoints for execution and beyond.
+    WriteExecutionData((Vec<ExecutedBlock>, oneshot::Sender<B256>)),
+
+    /// Removes the blocks above the given block number from static files. Also removes related
+    /// receipt and header data.
+    ///
+    /// This is meant to be called by the db service, as this should only be done after related
+    /// data is removed from the database, and checkpoints are updated.
+    RemoveBlocksAbove((u64, oneshot::Sender<()>)),
+}
+
+/// A handle to the static file service
+#[derive(Debug, Clone)]
+pub struct StaticFileServiceHandle {
+    /// The channel used to communicate with the static file service
+    sender: Sender<StaticFileAction>,
+}
+
+impl StaticFileServiceHandle {
+    /// Create a new [`StaticFileServiceHandle`] from a [`Sender<StaticFileAction>`].
+    pub const fn new(sender: Sender<StaticFileAction>) -> Self {
+        Self { sender }
+    }
+
+    /// Sends a specific [`StaticFileAction`] in the contained channel. The caller is responsible
+    /// for creating any channels for the given action.
+    pub fn send_action(&self, action: StaticFileAction) -> Result<(), SendError<StaticFileAction>> {
+        self.sender.send(action)
+    }
+}
diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs
index 0a5fbd5ad560..f946f2259a07 100644
--- a/crates/engine/tree/src/test_utils.rs
+++ b/crates/engine/tree/src/test_utils.rs
@@ -1,12 +1,19 @@
+use crate::tree::ExecutedBlock;
+use rand::Rng;
 use reth_chainspec::ChainSpec;
 use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase};
 use reth_network_p2p::test_utils::TestFullBlockClient;
-use reth_primitives::{BlockBody, SealedHeader, B256};
+use reth_primitives::{
+    Address, Block, BlockBody, BlockNumber, Receipts, Requests, SealedBlockWithSenders,
+    SealedHeader, TransactionSigned, B256,
+};
 use reth_provider::{test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome};
 use reth_prune_types::PruneModes;
 use reth_stages::{test_utils::TestStages, ExecOutput, StageError};
 use reth_stages_api::Pipeline;
 use reth_static_file::StaticFileProducer;
+use reth_trie::{updates::TrieUpdates, HashedPostState};
+use revm::db::BundleState;
 use std::{collections::VecDeque, ops::Range, sync::Arc};
 use tokio::sync::watch;
 
@@ -75,3 +82,43 @@ pub(crate) fn insert_headers_into_client(
         client.insert(sealed_header.clone(), body.clone());
     }
 }
+
+fn get_executed_block(block_number: BlockNumber, receipts: Receipts) -> ExecutedBlock {
+    let mut block = Block::default();
+    let mut header = block.header.clone();
+    header.number = block_number;
+    block.header = header;
+
+    let sender = Address::random();
+    let tx = TransactionSigned::default();
+    block.body.push(tx);
+    let sealed = block.seal_slow();
+    let sealed_with_senders = SealedBlockWithSenders::new(sealed.clone(), vec![sender]).unwrap();
+
+    ExecutedBlock::new(
+        Arc::new(sealed),
+        Arc::new(sealed_with_senders.senders),
+        Arc::new(ExecutionOutcome::new(
+            BundleState::default(),
+            receipts,
+            block_number,
+            vec![Requests::default()],
+        )),
+        Arc::new(HashedPostState::default()),
+        Arc::new(TrieUpdates::default()),
+    )
+}
+
+pub(crate) fn get_executed_block_with_receipts(receipts: Receipts) -> ExecutedBlock {
+    let number = rand::thread_rng().gen::<u64>();
+
+    get_executed_block(number, receipts)
+}
+
+pub(crate) fn get_executed_block_with_number(block_number: BlockNumber) -> ExecutedBlock {
+    get_executed_block(block_number, Receipts { receipt_vec: vec![vec![]] })
+}
+
+pub(crate) fn get_executed_blocks(range: Range<u64>) -> impl Iterator<Item = ExecutedBlock> {
+    range.map(get_executed_block_with_number)
+}
diff --git a/crates/engine/tree/src/tree.rs b/crates/engine/tree/src/tree.rs
deleted file mode 100644
index 271984d20f88..000000000000
--- a/crates/engine/tree/src/tree.rs
+++ /dev/null
@@ -1,1923 +0,0 @@
-use crate::{
-    backfill::{BackfillAction, BackfillSyncState},
-    chain::FromOrchestrator,
-
engine::{DownloadRequest, EngineApiEvent, FromEngine}, - persistence::PersistenceHandle, -}; -use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, - OnForkChoiceUpdated, MIN_BLOCKS_FOR_PIPELINE_RUN, -}; -use reth_blockchain_tree::{ - error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus, -}; -use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk}; -use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, -}; -use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_errors::{ConsensusError, ProviderResult}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - Block, BlockNumHash, BlockNumber, GotExpected, Header, Receipts, Requests, SealedBlock, - SealedBlockWithSenders, SealedHeader, B256, U256, -}; -use reth_provider::{ - BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory, StateRootProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_rpc_types::{ - engine::{ - CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, - }, - ExecutionPayload, -}; -use reth_stages_api::ControlFlow; -use reth_trie::HashedPostState; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - sync::{mpsc::Receiver, Arc}, -}; -use tokio::sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, -}; -use tracing::*; - -/// Maximum number of blocks to be kept only in memory without triggering persistence. -const PERSISTENCE_THRESHOLD: u64 = 256; -/// Number of pending blocks that cannot be executed due to missing parent and -/// are kept in cache. -const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256; -/// Number of invalid headers to keep in cache. -const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256; - -/// Keeps track of the state of the tree. -/// -/// ## Invariants -/// -/// - This only stores blocks that are connected to the canonical chain. -/// - All executed blocks are valid and have been executed. -#[derive(Debug, Default)] -pub struct TreeState { - /// __All__ executed blocks by block hash. - /// - /// This includes blocks of all forks. - blocks_by_hash: HashMap, - /// Executed blocks grouped by their respective block number. - /// - /// This maps unique block number to all known blocks for that height. - blocks_by_number: BTreeMap>, - /// Currently tracked canonical head of the chain. - current_canonical_head: BlockNumHash, - /// Map of any parent block hash to its children. - parent_to_child: HashMap>, -} - -impl TreeState { - /// Returns a new, empty tree state that points to the given canonical head. - fn new(current_canonical_head: BlockNumHash) -> Self { - Self { - blocks_by_hash: HashMap::new(), - blocks_by_number: BTreeMap::new(), - current_canonical_head, - parent_to_child: HashMap::new(), - } - } - - /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option> { - self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) - } - - fn block_by_number(&self, number: BlockNumber) -> Option> { - self.blocks_by_number - .get(&number) - .and_then(|blocks| blocks.last()) - .map(|executed_block| executed_block.block.clone()) - } - - /// Insert executed block into the state. 
- fn insert_executed(&mut self, executed: ExecutedBlock) { - let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash; - let block_number = executed.block.number; - - if self.blocks_by_hash.contains_key(&hash) { - return; - } - - self.blocks_by_hash.insert(hash, executed.clone()); - - self.blocks_by_number.entry(block_number).or_default().push(executed); - - self.parent_to_child.entry(parent_hash).or_default().insert(hash); - - if let Some(existing_blocks) = self.blocks_by_number.get(&block_number) { - if existing_blocks.len() > 1 { - self.parent_to_child.entry(parent_hash).or_default().insert(hash); - } - } - - for children in self.parent_to_child.values_mut() { - children.retain(|child| self.blocks_by_hash.contains_key(child)); - } - } - - /// Remove blocks before specified block number. - pub(crate) fn remove_before(&mut self, block_number: BlockNumber) { - let mut numbers_to_remove = Vec::new(); - for (&number, _) in self.blocks_by_number.range(..block_number) { - numbers_to_remove.push(number); - } - - for number in numbers_to_remove { - if let Some(blocks) = self.blocks_by_number.remove(&number) { - for block in blocks { - let block_hash = block.block.hash(); - self.blocks_by_hash.remove(&block_hash); - - if let Some(parent_children) = - self.parent_to_child.get_mut(&block.block.parent_hash) - { - parent_children.remove(&block_hash); - if parent_children.is_empty() { - self.parent_to_child.remove(&block.block.parent_hash); - } - } - - self.parent_to_child.remove(&block_hash); - } - } - } - } - - /// Returns the maximum block number stored. - pub(crate) fn max_block_number(&self) -> BlockNumber { - *self.blocks_by_number.last_key_value().unwrap_or((&BlockNumber::default(), &vec![])).0 - } - - /// Returns the block number of the pending block: `head + 1` - const fn pending_block_number(&self) -> BlockNumber { - self.current_canonical_head.number + 1 - } - - /// Updates the canonical head to the given block. - fn set_canonical_head(&mut self, new_head: BlockNumHash) { - self.current_canonical_head = new_head; - } - - /// Returns the tracked canonical head. - const fn canonical_head(&self) -> &BlockNumHash { - &self.current_canonical_head - } - - /// Returns the block hash of the canonical head. - const fn canonical_block_hash(&self) -> B256 { - self.canonical_head().hash - } - - /// Returns the new chain for the given head. - /// - /// This also handles reorgs. 
- fn on_new_head(&self, new_head: B256) -> Option<NewCanonicalChain> { - let mut new_chain = Vec::new(); - let mut current_hash = new_head; - let mut fork_point = None; - - // walk back the chain until we reach the canonical block - while current_hash != self.canonical_block_hash() { - let current_block = self.blocks_by_hash.get(&current_hash)?; - new_chain.push(current_block.clone()); - - // check if this block's parent has multiple children - if let Some(children) = self.parent_to_child.get(&current_block.block.parent_hash) { - if children.len() > 1 || - self.canonical_block_hash() == current_block.block.parent_hash - { - // we've found a fork point - fork_point = Some(current_block.block.parent_hash); - break; - } - } - - current_hash = current_block.block.parent_hash; - } - - new_chain.reverse(); - - // if we found a fork point, collect the reorged blocks - let reorged = if let Some(fork_hash) = fork_point { - let mut reorged = Vec::new(); - let mut current_hash = self.current_canonical_head.hash; - // walk back the chain up to the fork hash - while current_hash != fork_hash { - if let Some(block) = self.blocks_by_hash.get(&current_hash) { - reorged.push(block.clone()); - current_hash = block.block.parent_hash; - } else { - // current hash not found in memory - warn!(target: "consensus::engine", invalid_hash=?current_hash, "Block not found in TreeState while walking back fork"); - return None; - } - } - reorged.reverse(); - reorged - } else { - Vec::new() - }; - - if reorged.is_empty() { - Some(NewCanonicalChain::Commit { new: new_chain }) - } else { - Some(NewCanonicalChain::Reorg { new: new_chain, old: reorged }) - } - } -} - -/// Tracks the state of the engine api internals. - /// -/// This type is not shareable. -#[derive(Debug)] -pub struct EngineApiTreeState { - /// Tracks the state of the blockchain tree. - tree_state: TreeState, - /// Tracks the forkchoice state updates received by the CL. - forkchoice_state_tracker: ForkchoiceStateTracker, - /// Buffer of detached blocks. - buffer: BlockBuffer, - /// Tracks the header of invalid payloads that were rejected by the engine because they're - /// invalid. - invalid_headers: InvalidHeaderCache, -} - -impl EngineApiTreeState { - fn new( - block_buffer_limit: u32, - max_invalid_header_cache_length: u32, - canonical_block: BlockNumHash, - ) -> Self { - Self { - invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length), - buffer: BlockBuffer::new(block_buffer_limit), - tree_state: TreeState::new(canonical_block), - forkchoice_state_tracker: ForkchoiceStateTracker::default(), - } - } -} - -/// The type responsible for processing engine API requests. -pub trait EngineApiTreeHandler { - /// The engine type that this handler is for. - type Engine: EngineTypes; - - /// Invoked when previously requested blocks were downloaded. - fn on_downloaded(&mut self, blocks: Vec<SealedBlockWithSenders>) -> Option<TreeEvent>; - - /// When the Consensus layer receives a new block via the consensus gossip protocol, - /// the transactions in the block are sent to the execution layer in the form of a - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to Consensus layer, that - /// adds the block to the head of its own blockchain and attests to it. The block is then - /// broadcast over the consensus p2p network in the form of a "Beacon block".
- /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). - /// - /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and - /// returns an error if an internal error occurred. - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - cancun_fields: Option, - ) -> ProviderResult>; - - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree - /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid - /// chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - ) -> ProviderResult>; -} - -/// The outcome of a tree operation. -#[derive(Debug)] -pub struct TreeOutcome { - /// The outcome of the operation. - pub outcome: T, - /// An optional event to tell the caller to do something. - pub event: Option, -} - -impl TreeOutcome { - /// Create new tree outcome. - pub const fn new(outcome: T) -> Self { - Self { outcome, event: None } - } - - /// Set event on the outcome. - pub fn with_event(mut self, event: TreeEvent) -> Self { - self.event = Some(event); - self - } -} - -/// Events that can be emitted by the [`EngineApiTreeHandler`]. -#[derive(Debug)] -pub enum TreeEvent { - /// Tree action is needed. - TreeAction(TreeAction), - /// Backfill action is needed. - BackfillAction(BackfillAction), - /// Block download is needed. - Download(DownloadRequest), -} - -impl TreeEvent { - /// Returns true if the event is a backfill action. - const fn is_backfill_action(&self) -> bool { - matches!(self, Self::BackfillAction(_)) - } -} - -/// The actions that can be performed on the tree. -#[derive(Debug)] -pub enum TreeAction { - /// Make target canonical. - MakeCanonical(B256), -} - -/// The engine API tree handler implementation. -/// -/// This type is responsible for processing engine API requests, maintaining the canonical state and -/// emitting events. -#[derive(Debug)] -pub struct EngineApiTreeHandlerImpl { - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - /// Keeps track of internals such as executed and buffered blocks. - state: EngineApiTreeState, - /// Incoming engine API requests. - incoming: Receiver>>, - /// Outgoing events that are emitted to the handler. - outgoing: UnboundedSender, - /// Channels to the persistence layer. - persistence: PersistenceHandle, - /// Tracks the state changes of the persistence task. - persistence_state: PersistenceState, - /// Flag indicating the state of the node's backfill synchronization process. - backfill_sync_state: BackfillSyncState, - /// Keeps track of the state of the canonical chain that isn't persisted yet. - /// This is intended to be accessed from external sources, such as rpc. 
- canonical_in_memory_state: CanonicalInMemoryState, - /// Handle to the payload builder that will receive payload attributes for valid forkchoice - /// updates - payload_builder: PayloadBuilderHandle, -} - -impl EngineApiTreeHandlerImpl -where - P: BlockReader + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider, - T: EngineTypes, -{ - #[allow(clippy::too_many_arguments)] - pub fn new( - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - incoming: Receiver>>, - outgoing: UnboundedSender, - state: EngineApiTreeState, - canonical_in_memory_state: CanonicalInMemoryState, - persistence: PersistenceHandle, - payload_builder: PayloadBuilderHandle, - ) -> Self { - Self { - provider, - executor_provider, - consensus, - payload_validator, - incoming, - outgoing, - persistence, - persistence_state: PersistenceState::default(), - backfill_sync_state: BackfillSyncState::Idle, - state, - canonical_in_memory_state, - payload_builder, - } - } - - /// Creates a new `EngineApiTreeHandlerImpl` instance and spawns it in its - /// own thread. Returns the receiver end of a `EngineApiEvent` unbounded - /// channel to receive events from the engine. - #[allow(clippy::too_many_arguments)] - pub fn spawn_new( - provider: P, - executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - incoming: Receiver>>, - persistence: PersistenceHandle, - payload_builder: PayloadBuilderHandle, - canonical_in_memory_state: CanonicalInMemoryState, - ) -> UnboundedReceiver { - let best_block_number = provider.best_block_number().unwrap_or(0); - let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); - - let (tx, outgoing) = tokio::sync::mpsc::unbounded_channel(); - let state = EngineApiTreeState::new( - DEFAULT_BLOCK_BUFFER_LIMIT, - DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH, - header.num_hash(), - ); - - let task = Self::new( - provider, - executor_provider, - consensus, - payload_validator, - incoming, - tx, - state, - canonical_in_memory_state, - persistence, - payload_builder, - ); - std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); - outgoing - } - - /// Run the engine API handler. - /// - /// This will block the current thread and process incoming messages. - pub fn run(mut self) { - while let Ok(msg) = self.incoming.recv() { - self.run_once(msg); - } - } - - /// Run the engine API handler once. - fn run_once(&mut self, msg: FromEngine>) { - self.on_engine_message(msg); - - if self.should_persist() && !self.persistence_state.in_progress() { - let blocks_to_persist = self.get_blocks_to_persist(); - let (tx, rx) = oneshot::channel(); - self.persistence.save_blocks(blocks_to_persist, tx); - self.persistence_state.start(rx); - } - - if self.persistence_state.in_progress() { - let rx = self - .persistence_state - .rx - .as_mut() - .expect("if a persistence task is in progress Receiver must be Some"); - // Check if persistence has completed - if let Ok(last_persisted_block_hash) = rx.try_recv() { - if let Some(block) = self.state.tree_state.block_by_hash(last_persisted_block_hash) - { - self.persistence_state.finish(last_persisted_block_hash, block.number); - self.on_new_persisted_block(); - } else { - error!("could not find persisted block with hash {last_persisted_block_hash} in memory"); - } - } - } - } - - /// Handles a message from the engine. 
- fn on_engine_message(&mut self, msg: FromEngine>) { - match msg { - FromEngine::Event(event) => match event { - FromOrchestrator::BackfillSyncStarted => { - debug!(target: "consensus::engine", "received backfill sync started event"); - self.backfill_sync_state = BackfillSyncState::Active; - } - FromOrchestrator::BackfillSyncFinished(ctrl) => { - self.on_backfill_sync_finished(ctrl); - } - }, - FromEngine::Request(request) => match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let mut output = self.on_forkchoice_updated(state, payload_attrs); - - if let Ok(res) = &mut output { - // emit an event about the handled FCU - self.emit_event(BeaconConsensusEngineEvent::ForkchoiceUpdated( - state, - res.outcome.forkchoice_status(), - )); - - // handle the event if any - self.on_maybe_tree_event(res.event.take()); - } - - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { - error!("Failed to send event: {err:?}"); - } - } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - let output = self.on_new_payload(payload, cancun_fields); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { - reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new(e)) - })) { - error!("Failed to send event: {err:?}"); - } - } - BeaconEngineMessage::TransitionConfigurationExchanged => { - // triggering this hook will record that we received a request from the CL - self.canonical_in_memory_state.on_transition_configuration_exchanged(); - } - }, - FromEngine::DownloadedBlocks(blocks) => { - if let Some(event) = self.on_downloaded(blocks) { - self.on_tree_event(event); - } - } - } - } - - /// Invoked if the backfill sync has finished to target. - /// - /// Checks the tracked finalized block against the block on disk and restarts backfill if - /// needed. - /// - /// This will also try to connect the buffered blocks. - fn on_backfill_sync_finished(&mut self, ctrl: ControlFlow) { - debug!(target: "consensus::engine", "received backfill sync finished event"); - self.backfill_sync_state = BackfillSyncState::Idle; - - // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. - if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); - // update the `invalid_headers` cache with the new invalid header - self.state.invalid_headers.insert(*bad_block); - return - } - - let Some(sync_target_state) = self.state.forkchoice_state_tracker.sync_target_state() - else { - return - }; - - if sync_target_state.finalized_block_hash.is_zero() { - return - } - - // get the block number of the finalized block, if we have it - let newest_finalized = self - .state - .buffer - .block(&sync_target_state.finalized_block_hash) - .map(|block| block.number); - - // TODO(mattsse): state housekeeping, this needs to update the tracked canonical state and - // attempt to make the current target canonical if we have all the blocks buffered - - // The block number that the backfill finished at - if the progress or newest - // finalized is None then we can't check the distance anyways. 
- // - // If both are Some, we perform another distance check and return the desired - // backfill target - let Some(backfill_target) = - ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { - // Determines whether or not we should run backfill again, in case - // the new gap is still large enough and requires running backfill again - self.backfill_sync_target(progress, finalized_number, None) - }) - else { - return - }; - - // request another backfill run - self.emit_event(EngineApiEvent::BackfillAction(BackfillAction::Start( - backfill_target.into(), - ))); - } - - /// Attempts to make the given target canonical. - /// - /// This will update the tracked canonical in memory state and do the necessary housekeeping. - const fn make_canonical(&self, target: B256) { - // TODO: implement state updates and shift canonical state - } - - /// Convenience function to handle an optional tree event. - fn on_maybe_tree_event(&self, event: Option) { - if let Some(event) = event { - self.on_tree_event(event); - } - } - - /// Handles a tree event. - fn on_tree_event(&self, event: TreeEvent) { - match event { - TreeEvent::TreeAction(action) => match action { - TreeAction::MakeCanonical(target) => { - self.make_canonical(target); - } - }, - TreeEvent::BackfillAction(action) => { - self.emit_event(EngineApiEvent::BackfillAction(action)); - } - TreeEvent::Download(action) => { - self.emit_event(EngineApiEvent::Download(action)); - } - } - } - - /// Emits an outgoing event to the engine. - fn emit_event(&self, event: impl Into) { - let _ = self - .outgoing - .send(event.into()) - .inspect_err(|err| error!("Failed to send internal event: {err:?}")); - } - - /// Returns true if the canonical chain length minus the last persisted - /// block is greater than or equal to the persistence threshold. - fn should_persist(&self) -> bool { - self.state.tree_state.max_block_number() - - self.persistence_state.last_persisted_block_number >= - PERSISTENCE_THRESHOLD - } - - fn get_blocks_to_persist(&self) -> Vec { - let start = self.persistence_state.last_persisted_block_number; - let end = start + PERSISTENCE_THRESHOLD; - - // NOTE: this is an exclusive range, to try to include exactly PERSISTENCE_THRESHOLD blocks - self.state - .tree_state - .blocks_by_number - .range(start..end) - .flat_map(|(_, blocks)| blocks.iter().cloned()) - .collect() - } - - /// This clears the blocks from the in-memory tree state that have been persisted to the - /// database. - /// - /// This also updates the canonical in-memory state to reflect the newest persisted block - /// height. - fn on_new_persisted_block(&mut self) { - self.remove_persisted_blocks_from_tree_state(); - self.canonical_in_memory_state - .remove_persisted_blocks(self.persistence_state.last_persisted_block_number); - } - - /// Clears persisted blocks from the in-memory tree state. - fn remove_persisted_blocks_from_tree_state(&mut self) { - let keys_to_remove: Vec = self - .state - .tree_state - .blocks_by_number - .range(..=self.persistence_state.last_persisted_block_number) - .map(|(&k, _)| k) - .collect(); - - for key in keys_to_remove { - if let Some(blocks) = self.state.tree_state.blocks_by_number.remove(&key) { - // Remove corresponding blocks from blocks_by_hash - for block in blocks { - self.state.tree_state.blocks_by_hash.remove(&block.block().hash()); - } - } - } - } - - /// Return block from database or in-memory state by hash. 
- fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Block>> { - // check database first - let mut block = self.provider.block_by_hash(hash)?; - if block.is_none() { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - block = self - .state - .tree_state - .block_by_hash(hash) - // TODO: clone for compatibility. should we return an Arc here? - .map(|block| block.as_ref().clone().unseal()); - } - Ok(block) - } - - /// Return state provider with reference to in-memory blocks that overlay database state. - /// - /// This merges the state of all blocks that are part of the chain that the requested block is - /// the head of. This includes all blocks that connect back to the canonical block on disk. - // TODO: return error if the chain has gaps - fn state_provider(&self, hash: B256) -> ProviderResult<MemoryOverlayStateProvider> { - let mut in_memory = Vec::new(); - let mut parent_hash = hash; - while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; - in_memory.insert(0, executed.clone()); - } - - let historical = self.provider.state_by_block_hash(parent_hash)?; - Ok(MemoryOverlayStateProvider::new(in_memory, historical)) - } - - /// Return the parent hash of the lowest buffered ancestor for the requested block, if there - /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does - /// not exist in the buffer, this returns the hash that is passed in. - /// - /// Returns the parent hash of the block itself if the block is buffered and has no other - /// buffered ancestors. - fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { - self.state - .buffer - .lowest_ancestor(&hash) - .map(|block| block.parent_hash) - .unwrap_or_else(|| hash) - } - - /// If validation fails, the response MUST contain the latest valid hash: - /// - /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: - /// - It is fully validated and deemed VALID - /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID - /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a `PoW` block. - /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. - fn latest_valid_hash_for_invalid_payload( - &mut self, - parent_hash: B256, - ) -> ProviderResult<Option<B256>> { - // Check if parent exists in side chain or in canonical chain. - if self.block_by_hash(parent_hash)?.is_some() { - return Ok(Some(parent_hash)) - } - - // iterate over ancestors in the invalid cache - // until we encounter the first valid ancestor - let mut current_hash = parent_hash; - let mut current_header = self.state.invalid_headers.get(&current_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.state.invalid_headers.get(&current_hash); - - // If current_header is None, then the current_hash does not have an invalid - // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { - return Ok(Some(current_hash)) - } - } - Ok(None) - } - - /// Prepares the invalid payload response for the given hash, checking the - /// database for the parent hash and populating the payload status with the latest valid hash - /// according to the engine api spec.
- fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { - // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal - // PoW block, which we need to identify by looking at the parent's block difficulty - if let Some(parent) = self.block_by_hash(parent_hash)? { - if !parent.is_zero_difficulty() { - parent_hash = B256::ZERO; - } - } - - let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; - Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), - }) - .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) - } - - /// Returns true if the given hash is the last received sync target block. - /// - /// See [`ForkchoiceStateTracker::sync_target_state`] - fn is_sync_target_head(&self, block_hash: B256) -> bool { - if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { - return target.head_block_hash == block_hash - } - false - } - - /// Checks if the given `check` hash points to an invalid header, inserting the given `head` - /// block into the invalid header cache if the `check` hash has a known invalid ancestor. - /// - /// Returns a payload status response according to the engine API spec if the block is known to - /// be invalid. - fn check_invalid_ancestor_with_head( - &mut self, - check: B256, - head: B256, - ) -> ProviderResult> { - // check if the check hash was previously marked as invalid - let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; - - // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; - - // insert the head block into the invalid header cache - self.state.invalid_headers.insert_with_invalid_ancestor(head, header); - - Ok(Some(status)) - } - - /// Checks if the given `head` points to an invalid header, which requires a specific response - /// to a forkchoice update. - fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { - // check if the head was previously marked as invalid - let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; - // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) - } - - /// Validate if block is correct and satisfies all the consensus rules that concern the header - /// and block body itself. 
- fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { - if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { - error!( - ?block, - "Failed to validate total difficulty for block {}: {e}", - block.header.hash() - ); - return Err(e) - } - - if let Err(e) = self.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.header.hash()); - return Err(e) - } - - if let Err(e) = self.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.header.hash()); - return Err(e) - } - - Ok(()) - } - - fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> { - match block.try_seal_with_senders() { - Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)) - } - self.state.buffer.insert_block(block); - Ok(()) - } - - /// Returns true if the distance from the local tip to the block is greater than the configured - /// threshold. - /// - /// If the `local_tip` is greater than the `block`, then this will return false. - #[inline] - const fn exceeds_backfill_run_threshold(&self, local_tip: u64, block: u64) -> bool { - block > local_tip && block - local_tip > MIN_BLOCKS_FOR_PIPELINE_RUN - } - - /// Returns how far the local tip is from the given block. If the local tip is at the same - /// height or its block number is greater than the given block, this returns None. - #[inline] - const fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option { - if block > local_tip { - Some(block - local_tip) - } else { - None - } - } - - /// Returns the target hash to sync to if the distance from the local tip to the block is - /// greater than the threshold and we're not synced to the finalized block yet (if we've seen - /// that block already). - /// - /// If this is invoked after a new block has been downloaded, the downloaded block could be the - /// (missing) finalized block. 
- fn backfill_sync_target( - &self, - canonical_tip_num: u64, - target_block_number: u64, - downloaded_block: Option, - ) -> Option { - let sync_target_state = self.state.forkchoice_state_tracker.sync_target_state(); - - // check if the distance exceeds the threshold for backfill sync - let mut exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number); - - // check if the downloaded block is the tracked finalized block - if let Some(buffered_finalized) = sync_target_state - .as_ref() - .and_then(|state| self.state.buffer.block(&state.finalized_block_hash)) - { - // if we have buffered the finalized block, we should check how far - // we're off - exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number); - } - - // If this is invoked after we downloaded a block we can check if this block is the - // finalized block - if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) { - if downloaded_block.hash == state.finalized_block_hash { - // we downloaded the finalized block and can now check how far we're off - exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number); - } - } - - // if the number of missing blocks is greater than the max, trigger backfill - if exceeds_backfill_threshold { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should skip backfill - match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) { - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); - } - Ok(None) => { - // ensure the finalized block is known (not the zero hash) - if !state.finalized_block_hash.is_zero() { - // we don't have the block yet and the distance exceeds the allowed - // threshold - return Some(state.finalized_block_hash) - } - - // OPTIMISTIC SYNCING - // - // It can happen when the node is doing an - // optimistic sync, where the CL has no knowledge of the finalized hash, - // but is expecting the EL to sync as high - // as possible before finalizing. - // - // This usually doesn't happen on ETH mainnet since CLs use the more - // secure checkpoint syncing. - // - // However, optimism chains will do this. The risk of a reorg is however - // low. - debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target."); - return Some(state.head_block_hash) - } - Ok(Some(_)) => { - // we're fully synced to the finalized block - } - } - } - } - - None - } - - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. - /// - /// This mainly compares the missing parent of the downloaded block with the current canonical - /// tip, and decides whether or not backfill sync should be triggered. 
- fn on_disconnected_downloaded_block( - &self, - downloaded_block: BlockNumHash, - missing_parent: BlockNumHash, - head: BlockNumHash, - ) -> Option { - // compare the missing parent with the canonical tip - if let Some(target) = - self.backfill_sync_target(head.number, missing_parent.number, Some(downloaded_block)) - { - return Some(TreeEvent::BackfillAction(BackfillAction::Start(target.into()))); - } - - // continue downloading the missing parent - // - // this happens if either: - // * the missing parent block num < canonical tip num - // * this case represents a missing block on a fork that is shorter than the canonical - // chain - // * the missing parent block num >= canonical tip num, but the number of missing blocks is - // less than the backfill threshold - // * this case represents a potentially long range of blocks to download and execute - let request = if let Some(distance) = - self.distance_from_local_tip(head.number, missing_parent.number) - { - DownloadRequest::BlockRange(missing_parent.hash, distance) - } else { - // This happens when the missing parent is on an outdated - // sidechain and we can only download the missing block itself - DownloadRequest::single_block(missing_parent.hash) - }; - - Some(TreeEvent::Download(request)) - } - - /// Invoked with a block downloaded from the network - /// - /// Returns an event with the appropriate action to take, such as: - /// - download more missing blocks - /// - try to canonicalize the target if the `block` is the tracked target (head) block. - fn on_downloaded_block(&mut self, block: SealedBlockWithSenders) -> Option { - let block_num_hash = block.num_hash(); - let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); - if self - .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_num_hash.hash) - .ok()? 
- .is_some() - { - return None - } - - if !self.backfill_sync_state.is_idle() { - return None - } - - // try to append the block - match self.insert_block(block) { - Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(_))) => { - if self.is_sync_target_head(block_num_hash.hash) { - return Some(TreeEvent::TreeAction(TreeAction::MakeCanonical( - block_num_hash.hash, - ))) - } - } - Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head, missing_ancestor })) => { - // block is not connected to the canonical head, we need to download - // its missing branch first - return self.on_disconnected_downloaded_block(block_num_hash, missing_ancestor, head) - } - _ => {} - } - None - } - - fn insert_block_without_senders( - &mut self, - block: SealedBlock, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - fn insert_block( - &mut self, - block: SealedBlockWithSenders, - ) -> Result { - self.insert_block_inner(block.clone()) - .map_err(|kind| InsertBlockError::new(block.block, kind)) - } - - fn insert_block_inner( - &mut self, - block: SealedBlockWithSenders, - ) -> Result { - if self.block_by_hash(block.hash())?.is_some() { - let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment - return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment))) - } - - // validate block consensus rules - self.validate_block(&block)?; - - let state_provider = self.state_provider(block.parent_hash).unwrap(); - let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - - let block_number = block.number; - let block_hash = block.hash(); - let block = block.unseal(); - let output = executor.execute((&block, U256::MAX).into()).unwrap(); - self.consensus.validate_block_post_execution( - &block, - PostExecutionInput::new(&output.receipts, &output.requests), - )?; - - // TODO: change StateRootProvider API to accept hashed post state - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); - - let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?; - if state_root != block.state_root { - return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), - ) - .into()) - } - - let executed = ExecutedBlock { - block: Arc::new(block.block.seal(block_hash)), - senders: Arc::new(block.senders), - execution_output: Arc::new(ExecutionOutcome::new( - output.state, - Receipts::from(output.receipts), - block_number, - vec![Requests::from(output.requests)], - )), - hashed_state: Arc::new(hashed_state), - trie: Arc::new(trie_output), - }; - self.state.tree_state.insert_executed(executed); - - let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment - Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment))) - } - - /// Attempts to find the header for the given block hash if it is canonical. - pub fn find_canonical_header(&self, hash: B256) -> Result, ProviderError> { - let mut canonical = self.canonical_in_memory_state.header_by_hash(hash); - - if canonical.is_none() { - canonical = self.provider.header(&hash)?.map(|header| header.seal(hash)); - } - - Ok(canonical) - } - - /// Updates the tracked finalized block if we have it. 
- fn update_finalized_block( - &self, - finalized_block_hash: B256, - ) -> Result<(), OnForkChoiceUpdated> { - if finalized_block_hash.is_zero() { - return Ok(()) - } - - match self.find_canonical_header(finalized_block_hash) { - Ok(None) => { - debug!(target: "engine", "Finalized block not found in canonical chain"); - // if the finalized block is not known, we can't update the finalized block - return Err(OnForkChoiceUpdated::invalid_state()) - } - Ok(Some(finalized)) => { - self.canonical_in_memory_state.set_finalized(finalized); - } - Err(err) => { - error!(target: "engine", %err, "Failed to fetch finalized block header"); - } - } - - Ok(()) - } - - /// Updates the tracked safe block if we have it - fn update_safe_block(&self, safe_block_hash: B256) -> Result<(), OnForkChoiceUpdated> { - if safe_block_hash.is_zero() { - return Ok(()) - } - - match self.find_canonical_header(safe_block_hash) { - Ok(None) => { - debug!(target: "engine", "Safe block not found in canonical chain"); - // if the safe block is not known, we can't update the safe block - return Err(OnForkChoiceUpdated::invalid_state()) - } - Ok(Some(finalized)) => { - self.canonical_in_memory_state.set_safe(finalized); - } - Err(err) => { - error!(target: "engine", %err, "Failed to fetch safe block header"); - } - } - - Ok(()) - } - - /// Ensures that the given forkchoice state is consistent, assuming the head block has been - /// made canonical. - /// - /// If the forkchoice state is consistent, this will return Ok(()). Otherwise, this will - /// return an instance of [`OnForkChoiceUpdated`] that is INVALID. - /// - /// This also updates the safe and finalized blocks in the [`CanonicalInMemoryState`], if they - /// are consistent with the head block. - fn ensure_consistent_forkchoice_state( - &self, - state: ForkchoiceState, - ) -> Result<(), OnForkChoiceUpdated> { - // Ensure that the finalized block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the finalized block is consistent with the head block, i.e. the - // finalized block is an ancestor of the head block. - self.update_finalized_block(state.finalized_block_hash)?; - - // Also ensure that the safe block, if not zero, is known and in the canonical chain - // after the head block is canonicalized. - // - // This ensures that the safe block is consistent with the head block, i.e. the safe - // block is an ancestor of the head block. - self.update_safe_block(state.safe_block_hash) - } - - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? 
{ - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if !self.backfill_sync_state.is_idle() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - - /// Validates the payload attributes with respect to the header and fork choice state. - /// - /// Note: At this point, the fork choice update is considered to be VALID, however, we can still - /// return an error if the payload attributes are invalid. - fn process_payload_attributes( - &self, - attrs: T::PayloadAttributes, - head: &Header, - state: ForkchoiceState, - ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. - if attrs.timestamp() <= head.timestamp { - return OnForkChoiceUpdated::invalid_payload_attributes() - } - - // 8. Client software MUST begin a payload build process building on top of - // forkchoiceState.headBlockHash and identified via buildProcessId value if - // payloadAttributes is not null and the forkchoice state has been updated successfully. - // The build process is specified in the Payload building section. - match ::try_new( - state.head_block_hash, - attrs, - ) { - Ok(attributes) => { - // send the payload to the builder and return the receiver for the pending payload - // id, initiating payload job is handled asynchronously - let pending_payload_id = self.payload_builder.send_new_payload(attributes); - - // Client software MUST respond to this method call in the following way: - // { - // payloadStatus: { - // status: VALID, - // latestValidHash: forkchoiceState.headBlockHash, - // validationError: null - // }, - // payloadId: buildProcessId - // } - // - // if the payload is deemed VALID and the build process has begun. 
- OnForkChoiceUpdated::updated_with_pending_payload_id( - PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)), - pending_payload_id, - ) - } - Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(), - } - } -} - -impl EngineApiTreeHandler for EngineApiTreeHandlerImpl -where - P: BlockReader + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider, - T: EngineTypes, -{ - type Engine = T; - - fn on_downloaded(&mut self, blocks: Vec) -> Option { - for block in blocks { - if let Some(event) = self.on_downloaded_block(block) { - let needs_backfill = event.is_backfill_action(); - self.on_tree_event(event); - if needs_backfill { - // can exit early if backfill is needed - break - } - } - } - None - } - - #[instrument(level = "trace", skip_all, fields(block_hash = %payload.block_hash(), block_num = %payload.block_number(),), target = "engine")] - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - cancun_fields: Option, - ) -> ProviderResult> { - trace!(target: "engine", "invoked new payload"); - // Ensures that the given payload does not violate any consensus rules that concern the - // block's layout, like: - // - missing or invalid base fee - // - invalid extra data - // - invalid transactions - // - incorrect hash - // - the versioned hashes passed with the payload do not exactly match transaction - // versioned hashes - // - the block does not contain blob transactions if it is pre-cancun - // - // This validates the following engine API rule: - // - // 3. Given the expected array of blob versioned hashes client software **MUST** run its - // validation by taking the following steps: - // - // 1. Obtain the actual array by concatenating blob versioned hashes lists - // (`tx.blob_versioned_hashes`) of each [blob - // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included - // in the payload, respecting the order of inclusion. If the payload has no blob - // transactions the expected array **MUST** be `[]`. - // - // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | - // null}` if the expected and the actual arrays don't match. - // - // This validation **MUST** be instantly run in all cases even during active sync process. - let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { - Ok(block) => block, - Err(error) => { - error!(target: "engine::tree", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash)? - }; - - let status = PayloadStatusEnum::from(error); - return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) - } - }; - - let block_hash = block.hash(); - let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); - if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash; - } - - // now check the block itself - if let Some(status) = - self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)? 
- { - return Ok(TreeOutcome::new(status)) - } - - let status = if !self.backfill_sync_state.is_idle() { - self.buffer_block_without_senders(block).unwrap(); - PayloadStatus::from_status(PayloadStatusEnum::Syncing) - } else { - let mut latest_valid_hash = None; - let status = match self.insert_block_without_senders(block).unwrap() { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) | - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { - latest_valid_hash = Some(block_hash); - PayloadStatusEnum::Valid - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | - InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { - // not known to be invalid, but we don't know anything else - PayloadStatusEnum::Syncing - } - }; - PayloadStatus::new(status, latest_valid_hash) - }; - - let mut outcome = TreeOutcome::new(status); - if outcome.outcome.is_valid() && self.is_sync_target_head(block_hash) { - // if the block is valid and it is the sync target head, make it canonical - outcome = - outcome.with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash))); - } - - Ok(outcome) - } - - #[instrument(level = "trace", skip_all, fields(head = % state.head_block_hash, safe = % state.safe_block_hash,finalized = % state.finalized_block_hash), target = "engine")] - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - ) -> ProviderResult> { - trace!(target: "engine", ?attrs, "invoked forkchoice update"); - self.canonical_in_memory_state.on_forkchoice_update_received(); - - if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { - self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status()); - return Ok(TreeOutcome::new(on_updated)) - } - - let valid_outcome = |head| { - TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(head), - ))) - }; - - // Process the forkchoice update by trying to make the head block canonical - // - // We can only process this forkchoice update if: - // - we have the `head` block - // - the head block is part of a chain that is connected to the canonical chain. This - // includes reorgs. - // - // Performing a FCU involves: - // - marking the FCU's head block as canonical - // - updating in memory state to reflect the new canonical chain - // - updating canonical state trackers - // - emitting a canonicalization event for the new chain (including reorg) - // - if we have payload attributes, delegate them to the payload service - - // 1. ensure we have a new head block - if self.state.tree_state.canonical_block_hash() == state.head_block_hash { - trace!(target: "engine", "fcu head hash is already canonical"); - // the head block is already canonical - return Ok(valid_outcome(state.head_block_hash)) - } - - // 2. 
ensure we can apply a new chain update for the head block - if let Some(chain_update) = self.state.tree_state.on_new_head(state.head_block_hash) { - trace!(target: "engine", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count() ,"applying new chain update"); - // update the tracked canonical head - self.state.tree_state.set_canonical_head(chain_update.tip().num_hash()); - - let tip = chain_update.tip().header.clone(); - let notification = chain_update.to_chain_notification(); - - // update the tracked in-memory state with the new chain - self.canonical_in_memory_state.update_chain(chain_update); - self.canonical_in_memory_state.set_canonical_head(tip.clone()); - - // sends an event to all active listeners about the new canonical chain - self.canonical_in_memory_state.notify_canon_state(notification); - - // update the safe and finalized blocks and ensure their values are valid, but only - // after the head block is made canonical - if let Err(outcome) = self.ensure_consistent_forkchoice_state(state) { - // safe or finalized hashes are invalid - return Ok(TreeOutcome::new(outcome)) - } - - if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state); - return Ok(TreeOutcome::new(updated)) - } - - return Ok(valid_outcome(state.head_block_hash)) - } - - // 3. we don't have the block to perform the update - let target = self.lowest_buffered_ancestor_or(state.head_block_hash); - - Ok(TreeOutcome::new(OnForkChoiceUpdated::valid(PayloadStatus::from_status( - PayloadStatusEnum::Syncing, - ))) - .with_event(TreeEvent::Download(DownloadRequest::single_block(target)))) - } -} - -/// The state of the persistence task. -#[derive(Default, Debug)] -struct PersistenceState { - /// Hash of the last block persisted. - last_persisted_block_hash: B256, - /// Receiver end of channel where the result of the persistence task will be - /// sent when done. A None value means there's no persistence task in progress. - rx: Option>, - /// The last persisted block number. - last_persisted_block_number: u64, -} - -impl PersistenceState { - /// Determines if there is a persistence task in progress by checking if the - /// receiver is set. - const fn in_progress(&self) -> bool { - self.rx.is_some() - } - - /// Sets state for a started persistence task. - fn start(&mut self, rx: oneshot::Receiver) { - self.rx = Some(rx); - } - - /// Sets state for a finished persistence task. 
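The three-way branch above (head already canonical, head known and connectable, head unknown) is the heart of forkchoice handling. A compressed sketch of just that decision, with toy types standing in for the tree state:

```rust
use std::collections::HashMap;

type Hash = u64;

/// Illustrative outcome of a forkchoice update; not the engine's real types.
#[derive(Debug, PartialEq)]
enum FcuOutcome {
    /// Head is already canonical or was made canonical: VALID.
    Valid { head: Hash },
    /// Head unknown: respond SYNCING and request a download of the target.
    SyncingDownload { target: Hash },
}

struct ToyTree {
    canonical_head: Hash,
    /// Executed-but-not-yet-canonical blocks, keyed by hash.
    known: HashMap<Hash, ()>,
}

impl ToyTree {
    fn on_forkchoice_updated(&mut self, head: Hash) -> FcuOutcome {
        // 1. head already canonical: nothing to do.
        if self.canonical_head == head {
            return FcuOutcome::Valid { head };
        }
        // 2. head is known: apply the chain update (commit or reorg) and
        //    make it the new canonical head.
        if self.known.contains_key(&head) {
            self.canonical_head = head;
            return FcuOutcome::Valid { head };
        }
        // 3. head unknown: we cannot apply the update yet.
        FcuOutcome::SyncingDownload { target: head }
    }
}

fn main() {
    let mut tree = ToyTree { canonical_head: 1, known: HashMap::from([(2, ())]) };
    assert_eq!(tree.on_forkchoice_updated(2), FcuOutcome::Valid { head: 2 });
    assert_eq!(tree.on_forkchoice_updated(9), FcuOutcome::SyncingDownload { target: 9 });
}
```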
- fn finish(&mut self, last_persisted_block_hash: B256, last_persisted_block_number: u64) { - self.rx = None; - self.last_persisted_block_number = last_persisted_block_number; - self.last_persisted_block_hash = last_persisted_block_hash; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::persistence::PersistenceAction; - use alloy_rlp::Decodable; - use reth_beacon_consensus::EthBeaconConsensus; - use reth_chain_state::{ - test_utils::{get_executed_block_with_number, get_executed_blocks}, - BlockState, - }; - use reth_chainspec::{ChainSpecBuilder, HOLESKY, MAINNET}; - use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_evm::test_utils::MockExecutorProvider; - use reth_payload_builder::PayloadServiceCommand; - use reth_primitives::Bytes; - use reth_provider::test_utils::MockEthProvider; - use reth_rpc_types_compat::engine::block_to_payload_v1; - use std::{ - str::FromStr, - sync::mpsc::{channel, Sender}, - }; - use tokio::sync::mpsc::unbounded_channel; - - struct TestHarness { - tree: EngineApiTreeHandlerImpl, - to_tree_tx: Sender>>, - blocks: Vec, - action_rx: Receiver, - payload_command_rx: UnboundedReceiver>, - } - - impl TestHarness { - fn holesky() -> Self { - let (action_tx, action_rx) = channel(); - let persistence_handle = PersistenceHandle::new(action_tx); - - let chain_spec = HOLESKY.clone(); - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); - - let provider = MockEthProvider::default(); - let executor_factory = MockExecutorProvider::default(); - - let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); - - let (to_tree_tx, to_tree_rx) = channel(); - let (from_tree_tx, from_tree_rx) = unbounded_channel(); - - let header = chain_spec.genesis_header().seal_slow(); - let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); - let canonical_in_memory_state = CanonicalInMemoryState::with_head(header); - - let (to_payload_service, payload_command_rx) = unbounded_channel(); - let payload_builder = PayloadBuilderHandle::new(to_payload_service); - let tree = EngineApiTreeHandlerImpl::new( - provider, - executor_factory, - consensus, - payload_validator, - to_tree_rx, - from_tree_tx, - engine_api_tree_state, - canonical_in_memory_state, - persistence_handle, - payload_builder, - ); - - Self { tree, to_tree_tx, blocks: vec![], action_rx, payload_command_rx } - } - } - - fn get_default_test_harness(number_of_blocks: u64) -> TestHarness { - let blocks: Vec<_> = get_executed_blocks(0..number_of_blocks).collect(); - - let mut blocks_by_hash = HashMap::new(); - let mut blocks_by_number = BTreeMap::new(); - let mut state_by_hash = HashMap::new(); - let mut hash_by_number = HashMap::new(); - for block in &blocks { - let sealed_block = block.block(); - let hash = sealed_block.hash(); - let number = sealed_block.number; - blocks_by_hash.insert(hash, block.clone()); - blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone()); - state_by_hash.insert(hash, Arc::new(BlockState::new(block.clone()))); - hash_by_number.insert(number, hash); - } - let tree_state = TreeState { blocks_by_hash, blocks_by_number, ..Default::default() }; - - let (action_tx, action_rx) = channel(); - let persistence_handle = PersistenceHandle::new(action_tx); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone())); - - let provider = 
MockEthProvider::default(); - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(vec![ExecutionOutcome::default()]); - - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - - let (to_tree_tx, to_tree_rx) = channel(); - let (from_tree_tx, from_tree_rx) = unbounded_channel(); - - let engine_api_tree_state = EngineApiTreeState { - invalid_headers: InvalidHeaderCache::new(10), - buffer: BlockBuffer::new(10), - tree_state, - forkchoice_state_tracker: ForkchoiceStateTracker::default(), - }; - - let header = blocks.first().unwrap().block().header.clone(); - let canonical_in_memory_state = CanonicalInMemoryState::with_head(header); - - let (to_payload_service, payload_command_rx) = unbounded_channel(); - let payload_builder = PayloadBuilderHandle::new(to_payload_service); - let mut tree = EngineApiTreeHandlerImpl::new( - provider, - executor_factory, - consensus, - payload_validator, - to_tree_rx, - from_tree_tx, - engine_api_tree_state, - canonical_in_memory_state, - persistence_handle, - payload_builder, - ); - let last_executed_block = blocks.last().unwrap().clone(); - let pending = Some(BlockState::new(last_executed_block)); - tree.canonical_in_memory_state = - CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending); - - TestHarness { tree, to_tree_tx, blocks, action_rx, payload_command_rx } - } - - #[tokio::test] - async fn test_tree_persist_blocks() { - // we need more than PERSISTENCE_THRESHOLD blocks to trigger the - // persistence task. - let TestHarness { tree, to_tree_tx, action_rx, mut blocks, payload_command_rx } = - get_default_test_harness(PERSISTENCE_THRESHOLD + 1); - std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| tree.run()).unwrap(); - - // send a message to the tree to enter the main loop. 
- to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); - - let received_action = action_rx.recv().expect("Failed to receive saved blocks"); - if let PersistenceAction::SaveBlocks((saved_blocks, _)) = received_action { - // only PERSISTENCE_THRESHOLD will be persisted - blocks.pop(); - assert_eq!(saved_blocks.len() as u64, PERSISTENCE_THRESHOLD); - assert_eq!(saved_blocks, blocks); - } else { - panic!("unexpected action received {received_action:?}"); - } - } - - #[tokio::test] - async fn test_in_memory_state_trait_impl() { - let TestHarness { tree, to_tree_tx, action_rx, blocks, payload_command_rx } = - get_default_test_harness(10); - - let head_block = blocks.last().unwrap().block(); - let first_block = blocks.first().unwrap().block(); - - for executed_block in blocks { - let sealed_block = executed_block.block(); - - let expected_state = BlockState::new(executed_block.clone()); - - let actual_state_by_hash = - tree.canonical_in_memory_state.state_by_hash(sealed_block.hash()).unwrap(); - assert_eq!(expected_state, *actual_state_by_hash); - - let actual_state_by_number = - tree.canonical_in_memory_state.state_by_number(sealed_block.number).unwrap(); - assert_eq!(expected_state, *actual_state_by_number); - } - } - - #[tokio::test] - async fn test_engine_request_during_backfill() { - let TestHarness { mut tree, to_tree_tx, action_rx, blocks, payload_command_rx } = - get_default_test_harness(PERSISTENCE_THRESHOLD); - - // set backfill active - tree.backfill_sync_state = BackfillSyncState::Active; - - let (tx, rx) = oneshot::channel(); - tree.on_engine_message(FromEngine::Request(BeaconEngineMessage::ForkchoiceUpdated { - state: ForkchoiceState { - head_block_hash: B256::random(), - safe_block_hash: B256::random(), - finalized_block_hash: B256::random(), - }, - payload_attrs: None, - tx, - })); - - let resp = rx.await.unwrap().unwrap().await.unwrap(); - assert!(resp.payload_status.is_syncing()); - } - - #[tokio::test] - async fn test_holesky_payload() { - let s = include_str!("../test-data/holesky/1.rlp"); - let data = Bytes::from_str(s).unwrap(); - let block = Block::decode(&mut data.as_ref()).unwrap(); - let sealed = block.seal_slow(); - let payload = block_to_payload_v1(sealed); - - let TestHarness { mut tree, to_tree_tx, action_rx, blocks, payload_command_rx } = - TestHarness::holesky(); - - // set backfill active - tree.backfill_sync_state = BackfillSyncState::Active; - - let (tx, rx) = oneshot::channel(); - tree.on_engine_message(FromEngine::Request(BeaconEngineMessage::NewPayload { - payload: payload.clone().into(), - cancun_fields: None, - tx, - })); - - let resp = rx.await.unwrap().unwrap(); - assert!(resp.is_syncing()); - } - - #[tokio::test] - async fn test_tree_state_insert_executed() { - let mut tree_state = TreeState::new(BlockNumHash::default()); - let blocks: Vec<_> = get_executed_blocks(1..4).collect(); - - tree_state.insert_executed(blocks[0].clone()); - tree_state.insert_executed(blocks[1].clone()); - - assert_eq!( - tree_state.parent_to_child.get(&blocks[0].block.hash()), - Some(&HashSet::from([blocks[1].block.hash()])) - ); - - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - - tree_state.insert_executed(blocks[2].clone()); - - assert_eq!( - tree_state.parent_to_child.get(&blocks[1].block.hash()), - Some(&HashSet::from([blocks[2].block.hash()])) - ); - assert!(tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - - assert!(!tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); - } - - #[tokio::test] - 
async fn test_tree_state_insert_executed_with_reorg() { - let mut tree_state = TreeState::new(BlockNumHash::default()); - let blocks: Vec<_> = get_executed_blocks(1..6).collect(); - - for block in &blocks { - tree_state.insert_executed(block.clone()); - } - assert_eq!(tree_state.blocks_by_hash.len(), 5); - - let fork_block_3 = get_executed_block_with_number(3, blocks[1].block.hash()); - let fork_block_4 = get_executed_block_with_number(4, fork_block_3.block.hash()); - let fork_block_5 = get_executed_block_with_number(5, fork_block_4.block.hash()); - - tree_state.insert_executed(fork_block_3.clone()); - tree_state.insert_executed(fork_block_4.clone()); - tree_state.insert_executed(fork_block_5.clone()); - - assert_eq!(tree_state.blocks_by_hash.len(), 8); - assert_eq!(tree_state.blocks_by_number[&3].len(), 2); // two blocks at height 3 (original and fork) - assert_eq!(tree_state.parent_to_child[&blocks[1].block.hash()].len(), 2); // block 2 should have two children - - // verify that we can insert the same block again without issues - tree_state.insert_executed(fork_block_4.clone()); - assert_eq!(tree_state.blocks_by_hash.len(), 8); - - assert!(tree_state.parent_to_child[&fork_block_3.block.hash()] - .contains(&fork_block_4.block.hash())); - assert!(tree_state.parent_to_child[&fork_block_4.block.hash()] - .contains(&fork_block_5.block.hash())); - - assert_eq!(tree_state.blocks_by_number[&4].len(), 2); - assert_eq!(tree_state.blocks_by_number[&5].len(), 2); - } - - #[tokio::test] - async fn test_tree_state_remove_before() { - let mut tree_state = TreeState::new(BlockNumHash::default()); - let blocks: Vec<_> = get_executed_blocks(1..6).collect(); - - for block in &blocks { - tree_state.insert_executed(block.clone()); - } - - tree_state.remove_before(3); - - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); - assert!(!tree_state.blocks_by_number.contains_key(&1)); - assert!(!tree_state.blocks_by_number.contains_key(&2)); - - assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); - assert!(tree_state.blocks_by_number.contains_key(&3)); - assert!(tree_state.blocks_by_number.contains_key(&4)); - assert!(tree_state.blocks_by_number.contains_key(&5)); - - assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); - - assert_eq!( - tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from([blocks[3].block.hash()])) - ); - assert_eq!( - tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from([blocks[4].block.hash()])) - ); - } - - #[tokio::test] - async fn test_tree_state_on_new_head() { - let mut tree_state = TreeState::new(BlockNumHash::default()); - let blocks: Vec<_> = get_executed_blocks(1..6).collect(); - - for block in &blocks { - tree_state.insert_executed(block.clone()); - } - - // set block 3 as the current canonical head - tree_state.set_canonical_head(blocks[2].block.num_hash()); - - // create a fork from block 2 - let fork_block_3 = 
get_executed_block_with_number(3, blocks[1].block.hash()); - let fork_block_4 = get_executed_block_with_number(4, fork_block_3.block.hash()); - let fork_block_5 = get_executed_block_with_number(5, fork_block_4.block.hash()); - - tree_state.insert_executed(fork_block_3.clone()); - tree_state.insert_executed(fork_block_4.clone()); - tree_state.insert_executed(fork_block_5.clone()); - - // normal (non-reorg) case - let result = tree_state.on_new_head(blocks[4].block.hash()); - assert!(matches!(result, Some(NewCanonicalChain::Commit { .. }))); - if let Some(NewCanonicalChain::Commit { new }) = result { - assert_eq!(new.len(), 2); - assert_eq!(new[0].block.hash(), blocks[3].block.hash()); - assert_eq!(new[1].block.hash(), blocks[4].block.hash()); - } - - // reorg case - let result = tree_state.on_new_head(fork_block_5.block.hash()); - assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. }))); - if let Some(NewCanonicalChain::Reorg { new, old }) = result { - assert_eq!(new.len(), 3); - assert_eq!(new[0].block.hash(), fork_block_3.block.hash()); - assert_eq!(new[1].block.hash(), fork_block_4.block.hash()); - assert_eq!(new[2].block.hash(), fork_block_5.block.hash()); - - assert_eq!(old.len(), 1); - assert_eq!(old[0].block.hash(), blocks[2].block.hash()); - } - } -} diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/engine/tree/src/tree/memory_overlay.rs similarity index 84% rename from crates/chain-state/src/memory_overlay.rs rename to crates/engine/tree/src/tree/memory_overlay.rs index cba585018a25..11c04f3998aa 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/engine/tree/src/tree/memory_overlay.rs @@ -1,26 +1,26 @@ use super::ExecutedBlock; use reth_errors::ProviderResult; use reth_primitives::{Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256}; -use reth_storage_api::{ +use reth_provider::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, }; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. -#[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProvider { +#[derive(Debug)] +pub struct MemoryOverlayStateProvider { /// The collection of executed parent blocks. - pub(crate) in_memory: Vec, + in_memory: Vec, /// The collection of hashed state from in-memory blocks. - pub(crate) hashed_post_state: HashedPostState, + hashed_post_state: HashedPostState, /// Historical state provider for state lookups that are not found in in-memory blocks. - pub(crate) historical: Box, + historical: H, } -impl MemoryOverlayStateProvider { +impl MemoryOverlayStateProvider { /// Create new memory overlay state provider. 
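The move also changes `MemoryOverlayStateProvider` from holding a boxed trait object to being generic over the historical provider, with per-impl `where` bounds so each trait impl requires only what it actually uses. The pattern in isolation, with made-up traits standing in for the real storage traits:

```rust
/// Stand-in for a storage trait; the real bounds live in reth-provider.
trait AccountReader {
    fn basic_account(&self, addr: u64) -> Option<u64>;
}

/// Generic over the historical provider instead of `Box<dyn StateProvider>`:
/// no vtable, and each impl can demand its own bound.
struct Overlay<H> {
    in_memory: Vec<(u64, u64)>, // (address, balance) pairs, newest last
    historical: H,
}

// Only this impl needs `H: AccountReader`; other impls can require
// `BlockHashReader`, `StateRootProvider`, etc., independently.
impl<H> AccountReader for Overlay<H>
where
    H: AccountReader,
{
    fn basic_account(&self, addr: u64) -> Option<u64> {
        // In-memory blocks shadow the historical state; search newest first.
        for (a, bal) in self.in_memory.iter().rev() {
            if *a == addr {
                return Some(*bal);
            }
        }
        self.historical.basic_account(addr)
    }
}

struct Db;
impl AccountReader for Db {
    fn basic_account(&self, _addr: u64) -> Option<u64> {
        Some(0)
    }
}

fn main() {
    let overlay = Overlay { in_memory: vec![(7, 100)], historical: Db };
    assert_eq!(overlay.basic_account(7), Some(100)); // shadowed by memory
    assert_eq!(overlay.basic_account(8), Some(0)); // falls through to history
}
```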
- pub fn new(in_memory: Vec, historical: Box) -> Self { + pub fn new(in_memory: Vec, historical: H) -> Self { let mut hashed_post_state = HashedPostState::default(); for block in &in_memory { hashed_post_state.extend(block.hashed_state.as_ref().clone()); @@ -29,7 +29,10 @@ impl MemoryOverlayStateProvider { } } -impl BlockHashReader for MemoryOverlayStateProvider { +impl BlockHashReader for MemoryOverlayStateProvider +where + H: BlockHashReader, +{ fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in self.in_memory.iter().rev() { if block.block.number == number { @@ -62,7 +65,10 @@ impl BlockHashReader for MemoryOverlayStateProvider { } } -impl AccountReader for MemoryOverlayStateProvider { +impl AccountReader for MemoryOverlayStateProvider +where + H: AccountReader + Send, +{ fn basic_account(&self, address: Address) -> ProviderResult> { for block in self.in_memory.iter().rev() { if let Some(account) = block.execution_output.account(&address) { @@ -74,7 +80,10 @@ impl AccountReader for MemoryOverlayStateProvider { } } -impl StateRootProvider for MemoryOverlayStateProvider { +impl StateRootProvider for MemoryOverlayStateProvider +where + H: StateRootProvider + Send, +{ // TODO: Currently this does not reuse available in-memory trie nodes. fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { let mut state = self.hashed_post_state.clone(); @@ -93,7 +102,10 @@ impl StateRootProvider for MemoryOverlayStateProvider { } } -impl StateProofProvider for MemoryOverlayStateProvider { +impl StateProofProvider for MemoryOverlayStateProvider +where + H: StateProofProvider + Send, +{ // TODO: Currently this does not reuse available in-memory trie nodes. fn hashed_proof( &self, @@ -107,7 +119,10 @@ impl StateProofProvider for MemoryOverlayStateProvider { } } -impl StateProvider for MemoryOverlayStateProvider { +impl StateProvider for MemoryOverlayStateProvider +where + H: StateProvider + Send, +{ fn storage( &self, address: Address, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs new file mode 100644 index 000000000000..0ab987a33096 --- /dev/null +++ b/crates/engine/tree/src/tree/mod.rs @@ -0,0 +1,1328 @@ +use crate::{ + backfill::BackfillAction, + chain::FromOrchestrator, + engine::{DownloadRequest, EngineApiEvent, FromEngine}, + persistence::PersistenceHandle, +}; +use parking_lot::RwLock; +use reth_beacon_consensus::{ + BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, OnForkChoiceUpdated, +}; +use reth_blockchain_tree::{ + error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus, +}; +use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk}; +use reth_consensus::{Consensus, PostExecutionInput}; +use reth_engine_primitives::EngineTypes; +use reth_errors::{ConsensusError, ProviderResult}; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_payload_primitives::PayloadTypes; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{ + Address, Block, BlockNumber, GotExpected, Receipts, Requests, SealedBlock, + SealedBlockWithSenders, SealedHeader, B256, U256, +}; +use reth_provider::{ + providers::ChainInfoTracker, BlockReader, ExecutionOutcome, StateProvider, + StateProviderFactory, StateRootProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_types::{ + engine::{ + CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + PayloadValidationError, + }, + ExecutionPayload, +}; +use 
reth_trie::{updates::TrieUpdates, HashedPostState}; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::{mpsc::Receiver, Arc}, +}; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tracing::*; + +mod memory_overlay; +pub use memory_overlay::MemoryOverlayStateProvider; + +/// Maximum number of blocks to be kept only in memory without triggering persistence. +const PERSISTENCE_THRESHOLD: u64 = 256; + +/// Represents an executed block stored in-memory. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ExecutedBlock { + block: Arc, + senders: Arc>, + execution_output: Arc, + hashed_state: Arc, + trie: Arc, +} + +impl ExecutedBlock { + pub(crate) const fn new( + block: Arc, + senders: Arc>, + execution_output: Arc, + hashed_state: Arc, + trie: Arc, + ) -> Self { + Self { block, senders, execution_output, hashed_state, trie } + } + + /// Returns a reference to the executed block. + pub(crate) fn block(&self) -> &SealedBlock { + &self.block + } + + /// Returns a reference to the block's senders + pub(crate) fn senders(&self) -> &Vec
{ + &self.senders + } + + /// Returns a reference to the block's execution outcome + pub(crate) fn execution_outcome(&self) -> &ExecutionOutcome { + &self.execution_output + } + + /// Returns a reference to the hashed state result of the execution outcome + pub(crate) fn hashed_state(&self) -> &HashedPostState { + &self.hashed_state + } + + /// Returns a reference to the trie updates for the block + pub(crate) fn trie_updates(&self) -> &TrieUpdates { + &self.trie + } +} + +/// Keeps track of the state of the tree. +#[derive(Debug, Default)] +pub struct TreeState { + /// All executed blocks by hash. + blocks_by_hash: HashMap, + /// Executed blocks grouped by their respective block number. + blocks_by_number: BTreeMap>, + /// Pending state not yet applied + pending: Option>, + /// Block number and hash of the current head. + current_head: Option<(BlockNumber, B256)>, +} + +impl TreeState { + fn block_by_hash(&self, hash: B256) -> Option> { + self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) + } + + fn block_by_number(&self, number: BlockNumber) -> Option> { + self.blocks_by_number + .get(&number) + .and_then(|blocks| blocks.last()) + .map(|executed_block| executed_block.block.clone()) + } + + /// Insert executed block into the state. + fn insert_executed(&mut self, executed: ExecutedBlock) { + self.blocks_by_number.entry(executed.block.number).or_default().push(executed.clone()); + let existing = self.blocks_by_hash.insert(executed.block.hash(), executed); + debug_assert!(existing.is_none(), "inserted duplicate block"); + } + + /// Remove blocks before specified block number. + pub(crate) fn remove_before(&mut self, block_number: BlockNumber) { + while self + .blocks_by_number + .first_key_value() + .map(|entry| entry.0 < &block_number) + .unwrap_or_default() + { + let (_, to_remove) = self.blocks_by_number.pop_first().unwrap(); + for block in to_remove { + let block_hash = block.block.hash(); + let removed = self.blocks_by_hash.remove(&block_hash); + debug_assert!( + removed.is_some(), + "attempted to remove non-existing block {block_hash}" + ); + } + } + } + + /// Returns the maximum block number stored. + pub(crate) fn max_block_number(&self) -> BlockNumber { + *self.blocks_by_number.last_key_value().unwrap_or((&BlockNumber::default(), &vec![])).0 + } +} + +/// Container type for in memory state data. +#[derive(Debug, Default)] +pub struct InMemoryStateImpl { + blocks: RwLock>>, + numbers: RwLock>, + pending: RwLock>, +} + +impl InMemoryStateImpl { + const fn new( + blocks: HashMap>, + numbers: HashMap, + pending: Option, + ) -> Self { + Self { + blocks: RwLock::new(blocks), + numbers: RwLock::new(numbers), + pending: RwLock::new(pending), + } + } +} + +impl InMemoryState for InMemoryStateImpl { + fn state_by_hash(&self, hash: B256) -> Option> { + self.blocks.read().get(&hash).cloned() + } + + fn state_by_number(&self, number: u64) -> Option> { + self.numbers.read().get(&number).and_then(|hash| self.blocks.read().get(hash).cloned()) + } + + fn head_state(&self) -> Option> { + self.numbers + .read() + .iter() + .max_by_key(|(&number, _)| number) + .and_then(|(_, hash)| self.blocks.read().get(hash).cloned()) + } + + fn pending_state(&self) -> Option> { + self.pending.read().as_ref().map(|state| Arc::new(State(state.0.clone()))) + } +} + +/// Inner type to provide in memory state. It includes a chain tracker to be +/// advanced internally by the tree. 
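`TreeState` keeps the same blocks in two indexes, so every mutation has to touch both or lookups diverge. A stripped-down sketch of `insert_executed` and `remove_before` over plain std maps (toy key types, same invariant):

```rust
use std::collections::{BTreeMap, HashMap};

type Hash = u64;

#[derive(Default)]
struct ToyTreeState {
    by_hash: HashMap<Hash, u64>,         // hash -> block number
    by_number: BTreeMap<u64, Vec<Hash>>, // number -> all hashes at that height
}

impl ToyTreeState {
    fn insert_executed(&mut self, number: u64, hash: Hash) {
        // Both indexes must be updated together.
        self.by_number.entry(number).or_default().push(hash);
        self.by_hash.insert(hash, number);
    }

    /// Remove everything strictly below `block_number` (mirrors the diff's
    /// `pop_first` loop over the ordered map).
    fn remove_before(&mut self, block_number: u64) {
        while self
            .by_number
            .first_key_value()
            .map(|(num, _)| *num < block_number)
            .unwrap_or(false)
        {
            let (_, hashes) = self.by_number.pop_first().unwrap();
            for hash in hashes {
                self.by_hash.remove(&hash);
            }
        }
    }
}

fn main() {
    let mut state = ToyTreeState::default();
    for n in 1..=5 {
        state.insert_executed(n, n * 10);
    }
    state.remove_before(3);
    assert!(state.by_hash.contains_key(&30) && !state.by_hash.contains_key(&20));
    assert_eq!(state.by_number.keys().copied().collect::<Vec<_>>(), vec![3, 4, 5]);
}
```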
+#[derive(Debug)] +struct CanonicalInMemoryStateInner { + chain_info_tracker: ChainInfoTracker, + in_memory_state: InMemoryStateImpl, +} + +/// This type is responsible for providing the blocks, receipts, and state for +/// all canonical blocks not on disk yet and keeps track of the block range that +/// is in memory. +#[derive(Debug, Clone)] +pub struct CanonicalInMemoryState { + inner: Arc, +} + +impl CanonicalInMemoryState { + fn new( + blocks: HashMap>, + numbers: HashMap, + pending: Option, + ) -> Self { + let in_memory_state = InMemoryStateImpl::new(blocks, numbers, pending); + let head_state = in_memory_state.head_state(); + let header = match head_state { + Some(state) => state.block().block().header.clone(), + None => SealedHeader::default(), + }; + let chain_info_tracker = ChainInfoTracker::new(header); + let inner = CanonicalInMemoryStateInner { chain_info_tracker, in_memory_state }; + + Self { inner: Arc::new(inner) } + } + + fn with_header(header: SealedHeader) -> Self { + let chain_info_tracker = ChainInfoTracker::new(header); + let in_memory_state = InMemoryStateImpl::default(); + let inner = CanonicalInMemoryStateInner { chain_info_tracker, in_memory_state }; + + Self { inner: Arc::new(inner) } + } +} + +impl InMemoryState for CanonicalInMemoryState { + fn state_by_hash(&self, hash: B256) -> Option> { + self.inner.in_memory_state.state_by_hash(hash) + } + + fn state_by_number(&self, number: u64) -> Option> { + self.inner.in_memory_state.state_by_number(number) + } + + fn head_state(&self) -> Option> { + self.inner.in_memory_state.head_state() + } + + fn pending_state(&self) -> Option> { + self.inner.in_memory_state.pending_state() + } +} + +/// Tracks the state of the engine api internals. +/// +/// This type is shareable. +#[derive(Debug)] +pub struct EngineApiTreeState { + /// Tracks the state of the blockchain tree. + tree_state: TreeState, + /// Tracks the forkchoice state updates received by the CL. + forkchoice_state_tracker: ForkchoiceStateTracker, + /// Buffer of detached blocks. + buffer: BlockBuffer, + /// Tracks the header of invalid payloads that were rejected by the engine because they're + /// invalid. + invalid_headers: InvalidHeaderCache, +} + +impl EngineApiTreeState { + fn new(block_buffer_limit: u32, max_invalid_header_cache_length: u32) -> Self { + Self { + invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length), + buffer: BlockBuffer::new(block_buffer_limit), + tree_state: TreeState::default(), + forkchoice_state_tracker: ForkchoiceStateTracker::default(), + } + } +} + +/// The type responsible for processing engine API requests. +/// +/// TODO: design: should the engine handler functions also accept the response channel or return the +/// result and the caller redirects the response +pub trait EngineApiTreeHandler { + /// The engine type that this handler is for. + type Engine: EngineTypes; + + /// Invoked when previously requested blocks were downloaded. + fn on_downloaded(&mut self, blocks: Vec) -> Option; + + /// When the Consensus layer receives a new block via the consensus gossip protocol, + /// the transactions in the block are sent to the execution layer in the form of a + /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the + /// state in the block header, then passes validation data back to Consensus layer, that + /// adds the block to the head of its own blockchain and attests to it. The block is then + /// broadcast over the consensus p2p network in the form of a "Beacon block". 
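`CanonicalInMemoryState` is a handle type: all state sits behind one `Arc`, so clones are cheap and every clone observes the same data. A minimal sketch of that shape, assuming nothing beyond std:

```rust
use std::sync::{Arc, RwLock};

/// Everything lives behind one Arc so handles are cheap to clone and all
/// clones observe the same state (the shape of `CanonicalInMemoryState`).
#[derive(Debug, Default)]
struct Inner {
    head_number: RwLock<u64>,
}

#[derive(Debug, Clone, Default)]
struct Canonical {
    inner: Arc<Inner>,
}

impl Canonical {
    fn set_head(&self, number: u64) {
        *self.inner.head_number.write().unwrap() = number;
    }
    fn head(&self) -> u64 {
        *self.inner.head_number.read().unwrap()
    }
}

fn main() {
    let state = Canonical::default();
    let handle = state.clone(); // shares the same Arc<Inner>
    state.set_head(42);
    assert_eq!(handle.head(), 42); // visible through every clone
}
```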
+ /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). + /// + /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and + /// returns an error if an internal error occurred. + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult>; + + /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree + /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid + /// chain. + /// + /// These responses should adhere to the [Engine API Spec for + /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). + /// + /// Returns an error if an internal error occurred like a database error. + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult>; +} + +/// The outcome of a tree operation. +#[derive(Debug)] +pub struct TreeOutcome { + /// The outcome of the operation. + pub outcome: T, + /// An optional event to tell the caller to do something. + pub event: Option, +} + +impl TreeOutcome { + /// Create new tree outcome. + pub const fn new(outcome: T) -> Self { + Self { outcome, event: None } + } + + /// Set event on the outcome. + pub fn with_event(mut self, event: TreeEvent) -> Self { + self.event = Some(event); + self + } +} + +/// Events that can be emitted by the [`EngineApiTreeHandler`]. +#[derive(Debug)] +pub enum TreeEvent { + /// Tree action is needed. + TreeAction(TreeAction), + /// Backfill action is needed. + BackfillAction(BackfillAction), + /// Block download is needed. + Download(DownloadRequest), +} + +/// The actions that can be performed on the tree. +#[derive(Debug)] +pub enum TreeAction { + /// Make target canonical. + MakeCanonical(B256), +} + +#[derive(Debug)] +pub struct EngineApiTreeHandlerImpl { + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + state: EngineApiTreeState, + incoming: Receiver>>, + outgoing: UnboundedSender, + persistence: PersistenceHandle, + persistence_state: PersistenceState, + /// (tmp) The flag indicating whether the pipeline is active. 
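`TreeOutcome` pairs a result with an optional follow-up event for the caller. A usage sketch; the types are re-declared locally (mirroring the shapes in the diff) so it compiles on its own:

```rust
/// Local re-declarations mirroring the diff; not imported from reth.
#[derive(Debug)]
enum TreeEvent {
    MakeCanonical(u64),
}

#[derive(Debug)]
struct TreeOutcome<T> {
    outcome: T,
    event: Option<TreeEvent>,
}

impl<T> TreeOutcome<T> {
    const fn new(outcome: T) -> Self {
        Self { outcome, event: None }
    }
    fn with_event(mut self, event: TreeEvent) -> Self {
        self.event = Some(event);
        self
    }
}

fn main() {
    // A VALID payload that is also the sync target gets a follow-up action
    // attached, telling the caller to canonicalize the block.
    let outcome = TreeOutcome::new("valid").with_event(TreeEvent::MakeCanonical(7));
    assert!(outcome.event.is_some());
    assert_eq!(outcome.outcome, "valid");
}
```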
+ is_pipeline_active: bool, + canonical_in_memory_state: CanonicalInMemoryState, + _marker: PhantomData, +} + +impl EngineApiTreeHandlerImpl +where + P: BlockReader + StateProviderFactory + Clone + 'static, + E: BlockExecutorProvider, + T: EngineTypes + 'static, +{ + #[allow(clippy::too_many_arguments)] + fn new( + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + incoming: Receiver>>, + outgoing: UnboundedSender, + state: EngineApiTreeState, + header: SealedHeader, + persistence: PersistenceHandle, + ) -> Self { + Self { + provider, + executor_provider, + consensus, + payload_validator, + incoming, + outgoing, + persistence, + persistence_state: PersistenceState::default(), + is_pipeline_active: false, + state, + canonical_in_memory_state: CanonicalInMemoryState::with_header(header), + _marker: PhantomData, + } + } + + #[allow(clippy::too_many_arguments)] + fn spawn_new( + provider: P, + executor_provider: E, + consensus: Arc, + payload_validator: ExecutionPayloadValidator, + incoming: Receiver>>, + state: EngineApiTreeState, + header: SealedHeader, + persistence: PersistenceHandle, + ) -> UnboundedSender { + let (outgoing, rx) = tokio::sync::mpsc::unbounded_channel(); + let task = Self::new( + provider, + executor_provider, + consensus, + payload_validator, + incoming, + outgoing.clone(), + state, + header, + persistence, + ); + std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); + outgoing + } + + fn run(mut self) { + while let Ok(msg) = self.incoming.recv() { + match msg { + FromEngine::Event(event) => match event { + FromOrchestrator::BackfillSyncFinished => { + todo!() + } + FromOrchestrator::BackfillSyncStarted => { + todo!() + } + }, + FromEngine::Request(request) => match request { + BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + let output = self.on_forkchoice_updated(state, payload_attrs); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { + let output = self.on_new_payload(payload, cancun_fields); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { + reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new(e)) + })) { + error!("Failed to send event: {err:?}"); + } + } + BeaconEngineMessage::TransitionConfigurationExchanged => { + todo!() + } + }, + FromEngine::DownloadedBlocks(blocks) => { + if let Some(event) = self.on_downloaded(blocks) { + if let Err(err) = self.outgoing.send(EngineApiEvent::FromTree(event)) { + error!("Failed to send event: {err:?}"); + } + } + } + } + + if self.should_persist() && !self.persistence_state.in_progress() { + let blocks_to_persist = self.get_blocks_to_persist(); + let (tx, rx) = oneshot::channel(); + self.persistence.save_blocks(blocks_to_persist, tx); + self.persistence_state.start(rx); + } + + if self.persistence_state.in_progress() { + let rx = self + .persistence_state + .rx + .as_mut() + .expect("if a persistence task is in progress Receiver must be Some"); + // Check if persistence has completed + if let Ok(last_persisted_block_hash) = rx.try_recv() { + if let Some(block) = + self.state.tree_state.block_by_hash(last_persisted_block_hash) + { + self.persistence_state.finish(last_persisted_block_hash, block.number); + self.remove_persisted_blocks_from_memory(); + } else { + error!("could not find persisted block with hash {last_persisted_block_hash} in 
memory"); + } + } + } + } + } + + /// Returns true if the canonical chain length minus the last persisted + /// block is greater than or equal to the persistence threshold. + fn should_persist(&self) -> bool { + self.state.tree_state.max_block_number() - + self.persistence_state.last_persisted_block_number >= + PERSISTENCE_THRESHOLD + } + + fn get_blocks_to_persist(&self) -> Vec { + let start = self.persistence_state.last_persisted_block_number; + let end = start + PERSISTENCE_THRESHOLD; + + // NOTE: this is an exclusive range, to try to include exactly PERSISTENCE_THRESHOLD blocks + self.state + .tree_state + .blocks_by_number + .range(start..end) + .flat_map(|(_, blocks)| blocks.iter().cloned()) + .collect() + } + + fn remove_persisted_blocks_from_memory(&mut self) { + let keys_to_remove: Vec = self + .state + .tree_state + .blocks_by_number + .range(..=self.persistence_state.last_persisted_block_number) + .map(|(&k, _)| k) + .collect(); + + for key in keys_to_remove { + if let Some(blocks) = self.state.tree_state.blocks_by_number.remove(&key) { + // Remove corresponding blocks from blocks_by_hash + for block in blocks { + self.state.tree_state.blocks_by_hash.remove(&block.block().hash()); + } + } + } + } + + /// Return block from database or in-memory state by hash. + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + // check database first + let mut block = self.provider.block_by_hash(hash)?; + if block.is_none() { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + block = self + .state + .tree_state + .block_by_hash(hash) + // TODO: clone for compatibility. should we return an Arc here? + .map(|block| block.as_ref().clone().unseal()); + } + Ok(block) + } + + /// Return state provider with reference to in-memory blocks that overlay database state. + fn state_provider( + &self, + hash: B256, + ) -> ProviderResult>> { + let mut in_memory = Vec::new(); + let mut parent_hash = hash; + while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) { + parent_hash = executed.block.parent_hash; + in_memory.insert(0, executed.clone()); + } + + let historical = self.provider.state_by_block_hash(parent_hash)?; + Ok(MemoryOverlayStateProvider::new(in_memory, historical)) + } + + /// Return the parent hash of the lowest buffered ancestor for the requested block, if there + /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does + /// not exist in the buffer, this returns the hash that is passed in. + /// + /// Returns the parent hash of the block itself if the block is buffered and has no other + /// buffered ancestors. + fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { + self.state + .buffer + .lowest_ancestor(&hash) + .map(|block| block.parent_hash) + .unwrap_or_else(|| hash) + } + + /// If validation fails, the response MUST contain the latest valid hash: + /// + /// - The block hash of the ancestor of the invalid payload satisfying the following two + /// conditions: + /// - It is fully validated and deemed VALID + /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID + /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above + /// conditions are satisfied by a `PoW` block. + /// - null if client software cannot determine the ancestor of the invalid payload satisfying + /// the above conditions. 
+ fn latest_valid_hash_for_invalid_payload( + &mut self, + parent_hash: B256, + ) -> ProviderResult> { + // Check if parent exists in side chain or in canonical chain. + if self.block_by_hash(parent_hash)?.is_some() { + return Ok(Some(parent_hash)) + } + + // iterate over ancestors in the invalid cache + // until we encounter the first valid ancestor + let mut current_hash = parent_hash; + let mut current_header = self.state.invalid_headers.get(¤t_hash); + while let Some(header) = current_header { + current_hash = header.parent_hash; + current_header = self.state.invalid_headers.get(¤t_hash); + + // If current_header is None, then the current_hash does not have an invalid + // ancestor in the cache, check its presence in blockchain tree + if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { + return Ok(Some(current_hash)) + } + } + Ok(None) + } + + /// Prepares the invalid payload response for the given hash, checking the + /// database for the parent hash and populating the payload status with the latest valid hash + /// according to the engine api spec. + fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult { + // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal + // PoW block, which we need to identify by looking at the parent's block difficulty + if let Some(parent) = self.block_by_hash(parent_hash)? { + if !parent.is_zero_difficulty() { + parent_hash = B256::ZERO; + } + } + + let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?; + Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { + validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(), + }) + .with_latest_valid_hash(valid_parent_hash.unwrap_or_default())) + } + + /// Checks if the given `check` hash points to an invalid header, inserting the given `head` + /// block into the invalid header cache if the `check` hash has a known invalid ancestor. + /// + /// Returns a payload status response according to the engine API spec if the block is known to + /// be invalid. + fn check_invalid_ancestor_with_head( + &mut self, + check: B256, + head: B256, + ) -> ProviderResult> { + // check if the check hash was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; + + // populate the latest valid hash field + let status = self.prepare_invalid_response(header.parent_hash)?; + + // insert the head block into the invalid header cache + self.state.invalid_headers.insert_with_invalid_ancestor(head, header); + + Ok(Some(status)) + } + + /// Checks if the given `head` points to an invalid header, which requires a specific response + /// to a forkchoice update. + fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { + // check if the head was previously marked as invalid + let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; + // populate the latest valid hash field + Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + } + + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. 
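The ancestor walk described in the doc comment above reduces to following parent links through the invalid-header cache until the chain leaves it. A distilled sketch of `latest_valid_hash_for_invalid_payload`, with toy hash and lookup types:

```rust
use std::collections::{HashMap, HashSet};

type Hash = u64;

/// Walk parent links through the invalid-header cache; the first hash
/// outside the cache that the node actually knows is the latest valid
/// ancestor of the invalid payload.
fn latest_valid_hash(
    parent: Hash,
    invalid: &HashMap<Hash, Hash>, // invalid hash -> its parent hash
    known: &HashSet<Hash>,         // blocks present in the tree or database
) -> Option<Hash> {
    // Parent itself is known (tree or canonical chain): it is the answer.
    if known.contains(&parent) {
        return Some(parent);
    }
    let mut current = parent;
    while let Some(&next_parent) = invalid.get(&current) {
        current = next_parent;
        // Left the invalid cache: check whether this ancestor is known.
        if !invalid.contains_key(&current) && known.contains(&current) {
            return Some(current);
        }
    }
    None
}

fn main() {
    // Chain: 1 (valid, known) <- 2 (invalid) <- 3 (the invalid payload's parent)
    let invalid = HashMap::from([(3, 2), (2, 1)]);
    let known = HashSet::from([1]);
    assert_eq!(latest_valid_hash(3, &invalid, &known), Some(1));
    assert_eq!(latest_valid_hash(9, &invalid, &known), None);
}
```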
+ fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { + error!( + ?block, + "Failed to validate total difficulty for block {}: {e}", + block.header.hash() + ); + return Err(e) + } + + if let Err(e) = self.consensus.validate_header(block) { + error!(?block, "Failed to validate header {}: {e}", block.header.hash()); + return Err(e) + } + + if let Err(e) = self.consensus.validate_block_pre_execution(block) { + error!(?block, "Failed to validate block {}: {e}", block.header.hash()); + return Err(e) + } + + Ok(()) + } + + fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> { + match block.try_seal_with_senders() { + Ok(block) => self.buffer_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { + if let Err(err) = self.validate_block(&block) { + return Err(InsertBlockError::consensus_error(err, block.block)) + } + self.state.buffer.insert_block(block); + Ok(()) + } + + fn insert_block_without_senders( + &mut self, + block: SealedBlock, + ) -> Result { + match block.try_seal_with_senders() { + Ok(block) => self.insert_block(block), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + fn insert_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + self.insert_block_inner(block.clone()) + .map_err(|kind| InsertBlockError::new(block.block, kind)) + } + + fn insert_block_inner( + &mut self, + block: SealedBlockWithSenders, + ) -> Result { + if self.block_by_hash(block.hash())?.is_some() { + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment))) + } + + // validate block consensus rules + self.validate_block(&block)?; + + let state_provider = self.state_provider(block.parent_hash).unwrap(); + let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); + + let block_number = block.number; + let block_hash = block.hash(); + let block = block.unseal(); + let output = executor.execute((&block, U256::MAX).into()).unwrap(); + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + // TODO: change StateRootProvider API to accept hashed post state + let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + + let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?; + if state_root != block.state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + let executed = ExecutedBlock { + block: Arc::new(block.block.seal(block_hash)), + senders: Arc::new(block.senders), + execution_output: Arc::new(ExecutionOutcome::new( + output.state, + Receipts::from(output.receipts), + block_number, + vec![Requests::from(output.requests)], + )), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; + self.state.tree_state.insert_executed(executed); + + let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment))) + } + + /// Pre-validate forkchoice update and check whether it can be processed. 
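Note the final gate in `insert_block_inner`: a block can execute successfully and still be rejected if the computed post-state root disagrees with the root its header committed to. A distilled version of that check, with local stand-ins for reth's `GotExpected` error shape:

```rust
/// Mirrors the GotExpected shape used for the state-root mismatch error;
/// names are local stand-ins, not reth's actual error types.
#[derive(Debug, PartialEq)]
struct GotExpected {
    got: u64,
    expected: u64,
}

fn check_state_root(computed: u64, header_state_root: u64) -> Result<(), GotExpected> {
    // Runs after execution and post-execution consensus checks; only a block
    // whose post-state root matches its header enters the tree.
    if computed != header_state_root {
        return Err(GotExpected { got: computed, expected: header_state_root });
    }
    Ok(())
}

fn main() {
    assert!(check_state_root(0xabc, 0xabc).is_ok());
    assert_eq!(
        check_state_root(0xabc, 0xdef),
        Err(GotExpected { got: 0xabc, expected: 0xdef })
    );
}
```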
+ /// + /// This method returns the update outcome if validation fails or + /// the node is syncing and the update cannot be processed at the moment. + fn pre_validate_forkchoice_update( + &mut self, + state: ForkchoiceState, + ) -> ProviderResult> { + if state.head_block_hash.is_zero() { + return Ok(Some(OnForkChoiceUpdated::invalid_state())) + } + + // check if the new head hash is connected to any ancestor that we previously marked as + // invalid + let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); + if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { + return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) + } + + if self.is_pipeline_active { + // We can only process new forkchoice updates if the pipeline is idle, since it requires + // exclusive access to the database + trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); + return Ok(Some(OnForkChoiceUpdated::syncing())) + } + + Ok(None) + } +} + +impl EngineApiTreeHandler for EngineApiTreeHandlerImpl +where + P: BlockReader + StateProviderFactory + Clone + 'static, + E: BlockExecutorProvider, + T: EngineTypes + 'static, +{ + type Engine = T; + + fn on_downloaded(&mut self, _blocks: Vec) -> Option { + debug!("not implemented"); + None + } + + fn on_new_payload( + &mut self, + payload: ExecutionPayload, + cancun_fields: Option, + ) -> ProviderResult> { + // Ensures that the given payload does not violate any consensus rules that concern the + // block's layout, like: + // - missing or invalid base fee + // - invalid extra data + // - invalid transactions + // - incorrect hash + // - the versioned hashes passed with the payload do not exactly match transaction + // versioned hashes + // - the block does not contain blob transactions if it is pre-cancun + // + // This validates the following engine API rule: + // + // 3. Given the expected array of blob versioned hashes client software **MUST** run its + // validation by taking the following steps: + // + // 1. Obtain the actual array by concatenating blob versioned hashes lists + // (`tx.blob_versioned_hashes`) of each [blob + // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included + // in the payload, respecting the order of inclusion. If the payload has no blob + // transactions the expected array **MUST** be `[]`. + // + // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | + // null}` if the expected and the actual arrays don't match. + // + // This validation **MUST** be instantly run in all cases even during active sync process. + let parent_hash = payload.parent_hash(); + let block = match self + .payload_validator + .ensure_well_formed_payload(payload, cancun_fields.into()) + { + Ok(block) => block, + Err(error) => { + error!(target: "engine::tree", %error, "Invalid payload"); + // we need to convert the error to a payload status (response to the CL) + + let latest_valid_hash = + if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { + // Engine-API rules: + // > `latestValidHash: null` if the blockHash validation has failed () + // > `latestValidHash: null` if the expected and the actual arrays don't match () + None + } else { + self.latest_valid_hash_for_invalid_payload(parent_hash)? 
+ }; + + let status = PayloadStatusEnum::from(error); + return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) + } + }; + + let block_hash = block.hash(); + let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); + if lowest_buffered_ancestor == block_hash { + lowest_buffered_ancestor = block.parent_hash; + } + + // now check the block itself + if let Some(status) = + self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)? + { + return Ok(TreeOutcome::new(status)) + } + + let status = if self.is_pipeline_active { + self.buffer_block_without_senders(block).unwrap(); + PayloadStatus::from_status(PayloadStatusEnum::Syncing) + } else { + let mut latest_valid_hash = None; + let status = match self.insert_block_without_senders(block).unwrap() { + InsertPayloadOk::Inserted(BlockStatus::Valid(_)) | + InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { + latest_valid_hash = Some(block_hash); + PayloadStatusEnum::Valid + } + InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | + InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { + // TODO: isn't this check redundant? + // check if the block's parent is already marked as invalid + // if let Some(status) = self + // .check_invalid_ancestor_with_head(block.parent_hash, block.hash()) + // .map_err(|error| { + // InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)) + // })? + // { + // return Ok(status) + // } + + // not known to be invalid, but we don't know anything else + PayloadStatusEnum::Syncing + } + }; + PayloadStatus::new(status, latest_valid_hash) + }; + + let mut outcome = TreeOutcome::new(status); + if outcome.outcome.is_valid() { + if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { + if target.head_block_hash == block_hash { + outcome = outcome + .with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash))); + } + } + } + Ok(outcome) + } + + fn on_forkchoice_updated( + &mut self, + state: ForkchoiceState, + attrs: Option<::PayloadAttributes>, + ) -> ProviderResult> { + if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? { + self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status()); + return Ok(TreeOutcome::new(on_updated)) + } + + todo!() + } +} + +/// The state of the persistence task. +#[derive(Default, Debug)] +struct PersistenceState { + /// Hash of the last block persisted. + last_persisted_block_hash: B256, + /// Receiver end of channel where the result of the persistence task will be + /// sent when done. A None value means there's no persistence task in progress. + rx: Option>, + /// The last persisted block number. + last_persisted_block_number: u64, +} + +impl PersistenceState { + /// Determines if there is a persistence task in progress by checking if the + /// receiver is set. + const fn in_progress(&self) -> bool { + self.rx.is_some() + } + + /// Sets state for a started persistence task. + fn start(&mut self, rx: oneshot::Receiver) { + self.rx = Some(rx); + } + + /// Sets state for a finished persistence task. + fn finish(&mut self, last_persisted_block_hash: B256, last_persisted_block_number: u64) { + self.rx = None; + self.last_persisted_block_number = last_persisted_block_number; + self.last_persisted_block_hash = last_persisted_block_hash; + } +} + +/// Represents the tree state kept in memory. +trait InMemoryState: Send + Sync { + /// Returns the state for a given block hash. 
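The status mapping in `on_new_payload` above collapses to two cases: an inserted-and-connected block answers VALID with itself as the latest valid hash, while a disconnected block is not known to be invalid and so answers SYNCING with no hash. A sketch of that mapping with local stand-in enums:

```rust
/// Local stand-ins mirroring the match arms in `on_new_payload`.
#[derive(Debug)]
enum BlockStatus {
    Valid,
    Disconnected,
}

#[derive(Debug, PartialEq)]
enum PayloadStatusEnum {
    Valid,
    Syncing,
}

type Hash = u64;

fn map_insert_result(status: BlockStatus, block_hash: Hash) -> (PayloadStatusEnum, Option<Hash>) {
    match status {
        // Fully executed and connected: VALID, and the block itself is the
        // latest valid hash.
        BlockStatus::Valid => (PayloadStatusEnum::Valid, Some(block_hash)),
        // Executable later once ancestors arrive: not invalid, just SYNCING,
        // with no latest valid hash to report.
        BlockStatus::Disconnected => (PayloadStatusEnum::Syncing, None),
    }
}

fn main() {
    assert_eq!(map_insert_result(BlockStatus::Valid, 7), (PayloadStatusEnum::Valid, Some(7)));
    assert_eq!(map_insert_result(BlockStatus::Disconnected, 7), (PayloadStatusEnum::Syncing, None));
}
```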
+    fn state_by_hash(&self, hash: B256) -> Option<Arc<State>>;
+    /// Returns the state for a given block number.
+    fn state_by_number(&self, number: u64) -> Option<Arc<State>>;
+    /// Returns the current chain head state.
+    fn head_state(&self) -> Option<Arc<State>>;
+    /// Returns the pending state corresponding to the current head plus one,
+    /// from the payload received in newPayload that does not have a FCU yet.
+    fn pending_state(&self) -> Option<Arc<State>>;
+}
+
+/// State after applying the given block.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct State(ExecutedBlock);
+
+impl State {
+    const fn new(executed_block: ExecutedBlock) -> Self {
+        Self(executed_block)
+    }
+
+    fn block(&self) -> ExecutedBlock {
+        self.0.clone()
+    }
+
+    fn hash(&self) -> B256 {
+        self.0.block().hash()
+    }
+
+    fn number(&self) -> u64 {
+        self.0.block().number
+    }
+
+    fn state_root(&self) -> B256 {
+        self.0.block().header.state_root
+    }
+
+    fn receipts(&self) -> &Receipts {
+        &self.0.execution_outcome().receipts
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        static_files::StaticFileAction,
+        test_utils::{
+            get_executed_block_with_number, get_executed_block_with_receipts, get_executed_blocks,
+        },
+    };
+    use rand::Rng;
+    use reth_beacon_consensus::EthBeaconConsensus;
+    use reth_chainspec::{ChainSpecBuilder, MAINNET};
+    use reth_ethereum_engine_primitives::EthEngineTypes;
+    use reth_evm::test_utils::MockExecutorProvider;
+    use reth_primitives::Receipt;
+    use reth_provider::test_utils::MockEthProvider;
+    use std::sync::mpsc::{channel, Sender};
+    use tokio::sync::mpsc::unbounded_channel;
+
+    struct TestHarness {
+        tree: EngineApiTreeHandlerImpl<MockEthProvider, MockExecutorProvider, EthEngineTypes>,
+        to_tree_tx: Sender<FromEngine<BeaconEngineMessage<EthEngineTypes>>>,
+        blocks: Vec<ExecutedBlock>,
+        sf_action_rx: Receiver<StaticFileAction>,
+    }
+
+    fn get_default_test_harness(number_of_blocks: u64) -> TestHarness {
+        let blocks: Vec<_> = get_executed_blocks(0..number_of_blocks).collect();
+
+        let mut blocks_by_hash = HashMap::new();
+        let mut blocks_by_number = BTreeMap::new();
+        let mut state_by_hash = HashMap::new();
+        let mut hash_by_number = HashMap::new();
+        for block in &blocks {
+            let sealed_block = block.block();
+            let hash = sealed_block.hash();
+            let number = sealed_block.number;
+            blocks_by_hash.insert(hash, block.clone());
+            blocks_by_number.entry(number).or_insert_with(Vec::new).push(block.clone());
+            state_by_hash.insert(hash, Arc::new(State(block.clone())));
+            hash_by_number.insert(number, hash);
+        }
+        let tree_state = TreeState { blocks_by_hash, blocks_by_number, ..Default::default() };
+
+        let (action_tx, action_rx) = channel();
+        let (sf_action_tx, sf_action_rx) = channel();
+        let persistence_handle = PersistenceHandle::new(action_tx, sf_action_tx);
+
+        let chain_spec = Arc::new(
+            ChainSpecBuilder::default()
+                .chain(MAINNET.chain)
+                .genesis(MAINNET.genesis.clone())
+                .paris_activated()
+                .build(),
+        );
+        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
+
+        let provider = MockEthProvider::default();
+        let executor_factory = MockExecutorProvider::default();
+        executor_factory.extend(vec![ExecutionOutcome::default()]);
+
+        let payload_validator = ExecutionPayloadValidator::new(chain_spec);
+
+        let (to_tree_tx, to_tree_rx) = channel();
+        let (from_tree_tx, from_tree_rx) = unbounded_channel();
+
+        let engine_api_tree_state = EngineApiTreeState {
+            invalid_headers: InvalidHeaderCache::new(10),
+            buffer: BlockBuffer::new(10),
+            tree_state,
+            forkchoice_state_tracker: ForkchoiceStateTracker::default(),
+        };
+
+        let header = blocks.first().unwrap().block().header.clone();
+        let mut tree = EngineApiTreeHandlerImpl::new(
provider, + executor_factory, + consensus, + payload_validator, + to_tree_rx, + from_tree_tx, + engine_api_tree_state, + header, + persistence_handle, + ); + let last_executed_block = blocks.last().unwrap().clone(); + let pending = Some(State::new(last_executed_block)); + tree.canonical_in_memory_state = + CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending); + + TestHarness { tree, to_tree_tx, blocks, sf_action_rx } + } + + fn create_mock_state(block_number: u64) -> State { + State::new(get_executed_block_with_number(block_number)) + } + + #[tokio::test] + async fn test_tree_persist_blocks() { + // we need more than PERSISTENCE_THRESHOLD blocks to trigger the + // persistence task. + let TestHarness { tree, to_tree_tx, sf_action_rx, mut blocks } = + get_default_test_harness(PERSISTENCE_THRESHOLD + 1); + std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| tree.run()).unwrap(); + + // send a message to the tree to enter the main loop. + to_tree_tx.send(FromEngine::DownloadedBlocks(vec![])).unwrap(); + + let received_action = sf_action_rx.recv().expect("Failed to receive saved blocks"); + if let StaticFileAction::WriteExecutionData((saved_blocks, _)) = received_action { + // only PERSISTENCE_THRESHOLD will be persisted + blocks.pop(); + assert_eq!(saved_blocks.len() as u64, PERSISTENCE_THRESHOLD); + assert_eq!(saved_blocks, blocks); + } else { + panic!("unexpected action received {received_action:?}"); + } + } + + #[tokio::test] + async fn test_in_memory_state_trait_impl() { + let TestHarness { tree, to_tree_tx, sf_action_rx, blocks } = get_default_test_harness(10); + + let head_block = blocks.last().unwrap().block(); + let first_block = blocks.first().unwrap().block(); + + for executed_block in blocks { + let sealed_block = executed_block.block(); + + let expected_state = State::new(executed_block.clone()); + + let actual_state_by_hash = tree + .canonical_in_memory_state + .inner + .in_memory_state + .state_by_hash(sealed_block.hash()) + .unwrap(); + assert_eq!(expected_state, *actual_state_by_hash); + + let actual_state_by_number = tree + .canonical_in_memory_state + .inner + .in_memory_state + .state_by_number(sealed_block.number) + .unwrap(); + assert_eq!(expected_state, *actual_state_by_number); + } + } + + #[tokio::test] + async fn test_in_memory_state_impl_state_by_hash() { + let mut state_by_hash = HashMap::new(); + let number = rand::thread_rng().gen::(); + let state = Arc::new(create_mock_state(number)); + state_by_hash.insert(state.hash(), state.clone()); + + let in_memory_state = InMemoryStateImpl::new(state_by_hash, HashMap::new(), None); + + assert_eq!(in_memory_state.state_by_hash(state.hash()), Some(state)); + assert_eq!(in_memory_state.state_by_hash(B256::random()), None); + } + + #[tokio::test] + async fn test_in_memory_state_impl_state_by_number() { + let mut state_by_hash = HashMap::new(); + let mut hash_by_number = HashMap::new(); + + let number = rand::thread_rng().gen::(); + let state = Arc::new(create_mock_state(number)); + let hash = state.hash(); + + state_by_hash.insert(hash, state.clone()); + hash_by_number.insert(number, hash); + + let in_memory_state = InMemoryStateImpl::new(state_by_hash, hash_by_number, None); + + assert_eq!(in_memory_state.state_by_number(number), Some(state)); + assert_eq!(in_memory_state.state_by_number(number + 1), None); + } + + #[tokio::test] + async fn test_in_memory_state_impl_head_state() { + let mut state_by_hash = HashMap::new(); + let mut hash_by_number = HashMap::new(); + let state1 = 
Arc::new(create_mock_state(1)); + let state2 = Arc::new(create_mock_state(2)); + let hash1 = state1.hash(); + let hash2 = state2.hash(); + hash_by_number.insert(1, hash1); + hash_by_number.insert(2, hash2); + state_by_hash.insert(hash1, state1); + state_by_hash.insert(hash2, state2); + + let in_memory_state = InMemoryStateImpl::new(state_by_hash, hash_by_number, None); + let head_state = in_memory_state.head_state().unwrap(); + + assert_eq!(head_state.hash(), hash2); + assert_eq!(head_state.number(), 2); + } + + #[tokio::test] + async fn test_in_memory_state_impl_pending_state() { + let pending_number = rand::thread_rng().gen::(); + let pending_state = create_mock_state(pending_number); + let pending_hash = pending_state.hash(); + + let in_memory_state = + InMemoryStateImpl::new(HashMap::new(), HashMap::new(), Some(pending_state)); + + let result = in_memory_state.pending_state(); + assert!(result.is_some()); + let actual_pending_state = result.unwrap(); + assert_eq!(actual_pending_state.0.block().hash(), pending_hash); + assert_eq!(actual_pending_state.0.block().number, pending_number); + } + + #[tokio::test] + async fn test_in_memory_state_impl_no_pending_state() { + let in_memory_state = InMemoryStateImpl::new(HashMap::new(), HashMap::new(), None); + + assert_eq!(in_memory_state.pending_state(), None); + } + + #[tokio::test] + async fn test_state_new() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number); + + let state = State::new(block.clone()); + + assert_eq!(state.0, block); + } + + #[tokio::test] + async fn test_state_block() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number); + + let state = State::new(block.clone()); + + assert_eq!(state.block(), block); + } + + #[tokio::test] + async fn test_state_hash() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number); + + let state = State::new(block.clone()); + + assert_eq!(state.hash(), block.block().hash()); + } + + #[tokio::test] + async fn test_state_number() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number); + + let state = State::new(block); + + assert_eq!(state.number(), number); + } + + #[tokio::test] + async fn test_state_state_root() { + let number = rand::thread_rng().gen::(); + let block = get_executed_block_with_number(number); + + let state = State::new(block.clone()); + + assert_eq!(state.state_root(), block.block().state_root); + } + + #[tokio::test] + async fn test_state_receipts() { + let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; + + let block = get_executed_block_with_receipts(receipts.clone()); + + let state = State::new(block); + + assert_eq!(state.receipts(), &receipts); + } +} diff --git a/crates/engine/tree/test-data/holesky/1.rlp b/crates/engine/tree/test-data/holesky/1.rlp deleted file mode 100644 index 454e6b04ca2c..000000000000 --- a/crates/engine/tree/test-data/holesky/1.rlp +++ /dev/null @@ -1 +0,0 @@ 
-f90218f90213a0b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a069d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800184017dd79d808465156ad899d883010d02846765746888676f312e32312e31856c696e7578a0b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde488000000000000000084342770c0c0c0 \ No newline at end of file diff --git a/crates/engine/tree/test-data/holesky/2.rlp b/crates/engine/tree/test-data/holesky/2.rlp deleted file mode 100644 index 1b7d04893c0c..000000000000 --- a/crates/engine/tree/test-data/holesky/2.rlp +++ /dev/null @@ -1 +0,0 @@ -f90218f90213a0e9011e6d15a0d0c16f65a38f84375bf1a6b88201b0ad75a2660df0bb8d1ac381a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794c6e2459991bfe27cca6d86722f35da23a1e4cb97a069d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800284017e3711808465156af099d883010d02846765746888676f312e32312e31856c696e7578a0b212724aac0df10d75b1b6d795fd4cd17e4ca4f9ee1bfe97871a16a3af64eed1880000000000000000842da282a8c0c0 \ No newline at end of file diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index c0c0f83fe756..a1d25b5713ec 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -36,6 +36,7 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-derive.workspace = true [features] default = ["std", "serde", "rustc-hash"] diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs index 6ba54a421770..1abc7e75ecb1 100644 --- a/crates/ethereum-forks/src/hardfork/dev.rs +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -31,7 +31,5 @@ pub static DEV_HARDFORKS: Lazy = Lazy::new(|| { (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), #[cfg(feature = "optimism")] (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), - #[cfg(feature = "optimism")] - (crate::OptimismHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), ]) }); diff --git a/crates/ethereum/engine/Cargo.toml b/crates/ethereum/engine/Cargo.toml 
index 732d1f40bd84..492eb16bb54d 100644 --- a/crates/ethereum/engine/Cargo.toml +++ b/crates/ethereum/engine/Cargo.toml @@ -17,30 +17,18 @@ reth-chainspec.workspace = true reth-db-api.workspace = true reth-engine-tree.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-evm-ethereum.workspace = true reth-network-p2p.workspace = true -reth-payload-validator.workspace = true -reth-provider.workspace = true -reth-prune.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-payload-builder.workspace = true # async futures.workspace = true pin-project.workspace = true +tokio = { workspace = true, features = ["sync"] } tokio-stream.workspace = true # misc thiserror.workspace = true [dev-dependencies] -reth-blockchain-tree.workspace = true -reth-consensus.workspace = true reth-engine-tree = { workspace = true, features = ["test-utils"] } -reth-evm.workspace = true -reth-exex-types.workspace = true -reth-primitives.workspace = true -reth-prune-types.workspace = true - -tokio = { workspace = true, features = ["sync"] } \ No newline at end of file diff --git a/crates/ethereum/engine/src/service.rs b/crates/ethereum/engine/src/service.rs index b4f63aa7254a..bb7e8b06bb9a 100644 --- a/crates/ethereum/engine/src/service.rs +++ b/crates/ethereum/engine/src/service.rs @@ -1,33 +1,25 @@ -use futures::{Stream, StreamExt}; +use futures::{ready, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EthBeaconConsensus}; +use reth_beacon_consensus::{BeaconEngineMessage, EthBeaconConsensus}; use reth_chainspec::ChainSpec; use reth_db_api::database::Database; use reth_engine_tree::{ backfill::PipelineSync, + chain::ChainOrchestrator, download::BasicBlockDownloader, - engine::{EngineApiRequestHandler, EngineHandler}, - persistence::PersistenceHandle, - tree::EngineApiTreeHandlerImpl, -}; -pub use reth_engine_tree::{ - chain::{ChainEvent, ChainOrchestrator}, - engine::EngineApiEvent, + engine::{EngineApiEvent, EngineApiRequestHandler, EngineHandler, FromEngine}, }; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm_ethereum::execute::EthExecutorProvider; use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; -use reth_prune::Pruner; use reth_stages_api::Pipeline; use reth_tasks::TaskSpawner; use std::{ + future::Future, pin::Pin, - sync::{mpsc::channel, Arc}, + sync::{mpsc::Sender, Arc}, task::{Context, Poll}, }; +use tokio::sync::mpsc::UnboundedReceiver; use tokio_stream::wrappers::UnboundedReceiverStream; /// Alias for Ethereum chain orchestrator. @@ -57,64 +49,43 @@ where Client: HeadersClient + BodiesClient + Clone + Unpin + 'static, { /// Constructor for `EthService`. 
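+    ///
+    /// The `to_tree` and `from_tree` channel halves are assumed to be connected to an
+    /// engine API tree task spawned elsewhere; the service no longer spawns the tree
+    /// or the persistence service itself.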
-    #[allow(clippy::too_many_arguments)]
     pub fn new(
         chain_spec: Arc<ChainSpec>,
         client: Client,
+        to_tree: Sender<FromEngine<BeaconEngineMessage<EthEngineTypes>>>,
+        from_tree: UnboundedReceiver<EngineApiEvent>,
         incoming_requests: UnboundedReceiverStream<BeaconEngineMessage<EthEngineTypes>>,
         pipeline: Pipeline<DB>,
         pipeline_task_spawner: Box<dyn TaskSpawner>,
-        provider: ProviderFactory<DB>,
-        blockchain_db: BlockchainProvider2<DB>,
-        pruner: Pruner<DB, ProviderFactory<DB>>,
-        payload_builder: PayloadBuilderHandle<EthEngineTypes>,
     ) -> Self {
-        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec.clone()));
-        let downloader = BasicBlockDownloader::new(client, consensus.clone());
-
-        let (to_tree_tx, to_tree_rx) = channel();
-
-        let persistence_handle = PersistenceHandle::spawn_service(provider, pruner);
-        let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone());
-        let executor_factory = EthExecutorProvider::ethereum(chain_spec);
-
-        let canonical_in_memory_state = blockchain_db.canonical_in_memory_state();
-
-        let from_tree = EngineApiTreeHandlerImpl::spawn_new(
-            blockchain_db,
-            executor_factory,
-            consensus,
-            payload_validator,
-            to_tree_rx,
-            persistence_handle,
-            payload_builder,
-            canonical_in_memory_state,
-        );
+        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec));
+        let downloader = BasicBlockDownloader::new(client, consensus);
 
-        let engine_handler = EngineApiRequestHandler::new(to_tree_tx, from_tree);
+        let engine_handler = EngineApiRequestHandler::new(to_tree, from_tree);
         let handler = EngineHandler::new(engine_handler, downloader, incoming_requests);
 
         let backfill_sync = PipelineSync::new(pipeline, pipeline_task_spawner);
 
         Self { orchestrator: ChainOrchestrator::new(handler, backfill_sync) }
     }
-
-    /// Returns a mutable reference to the orchestrator.
-    pub fn orchestrator_mut(&mut self) -> &mut EthServiceType<DB, Client> {
-        &mut self.orchestrator
-    }
 }
 
-impl<DB, Client> Stream for EthService<DB, Client>
+impl<DB, Client> Future for EthService<DB, Client>
 where
     DB: Database + 'static,
     Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
 {
-    type Item = ChainEvent;
+    type Output = Result<(), EthServiceError>;
 
-    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Call poll on the inner orchestrator.
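+        // The service now drives the orchestrator as a `Future` rather than a
+        // `Stream`: events are drained and dropped here, and the future only
+        // resolves once the orchestrator's stream is exhausted.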
let mut orchestrator = self.project().orchestrator; - StreamExt::poll_next_unpin(&mut orchestrator, cx) + loop { + match ready!(StreamExt::poll_next_unpin(&mut orchestrator, cx)) { + Some(_event) => continue, + None => return Poll::Ready(Ok(())), + } + } } } @@ -129,13 +100,10 @@ mod tests { use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_tree::test_utils::TestPipelineBuilder; use reth_ethereum_engine_primitives::EthEngineTypes; - use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::SealedHeader; - use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; use reth_tasks::TokioTaskExecutor; - use std::sync::Arc; - use tokio::sync::{mpsc::unbounded_channel, watch}; + use std::sync::{mpsc::channel, Arc}; + use tokio::sync::mpsc::unbounded_channel; #[test] fn eth_chain_orchestrator_build() { @@ -154,26 +122,18 @@ mod tests { let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); let pipeline_task_spawner = Box::::default(); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - - let blockchain_db = - BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()); - let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); - let pruner = - Pruner::<_, ProviderFactory<_>>::new(provider_factory.clone(), vec![], 0, 0, None, rx); + let (to_tree_tx, _to_tree_rx) = channel(); + let (_from_tree_tx, from_tree_rx) = unbounded_channel(); - let (tx, _rx) = unbounded_channel(); - let _eth_service = EthService::new( + let _eth_chain_orchestrator = EthService::new( chain_spec, client, + to_tree_tx, + from_tree_rx, incoming_requests, pipeline, pipeline_task_spawner, - provider_factory, - blockchain_db, - pruner, - PayloadBuilderHandle::new(tx), ); } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index ee77ee0db4e9..cfee186c6334 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,7 +4,6 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ @@ -34,11 +33,8 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; #[cfg(feature = "std")] -use std::sync::Arc; - +use std::{fmt::Display, sync::Arc, vec, vec::Vec}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] pub struct EthExecutorProvider { @@ -149,7 +145,7 @@ where ) -> Result where DB: Database, - DB::Error: Into + Display, + DB::Error: Into + std::fmt::Display, { // apply pre execution changes apply_beacon_root_contract_call( @@ -367,7 +363,7 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database + Display>, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index fdb121ef8496..cd8398ebe963 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -18,9 +18,6 @@ use reth_primitives::{transaction::FillTxEnv, Address, Header, TransactionSigned use reth_revm::{Database, EvmBuilder}; use revm_primitives::{AnalysisKind, Bytes, 
CfgEnvWithHandlerCfg, Env, TxEnv, TxKind}; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; @@ -110,10 +107,10 @@ impl ConfigureEvmEnv for EthEvmConfig { impl ConfigureEvm for EthEvmConfig { type DefaultExternalContext<'a> = (); - fn evm( + fn evm<'a, DB: Database + 'a>( &self, db: DB, - ) -> reth_revm::Evm<'_, Self::DefaultExternalContext<'_>, DB> { + ) -> reth_revm::Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default().with_db(db).build() } } diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index f22490859a95..2cce8650d153 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -35,7 +35,6 @@ reth-tokio-util.workspace = true reth-node-events.workspace = true reth-node-core.workspace = true reth-exex.workspace = true -reth-blockchain-tree.workspace = true # misc eyre.workspace = true diff --git a/crates/ethereum/node/src/launch.rs b/crates/ethereum/node/src/launch.rs index 898b376025fb..eb699cea2d58 100644 --- a/crates/ethereum/node/src/launch.rs +++ b/crates/ethereum/node/src/launch.rs @@ -2,14 +2,13 @@ use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ - hooks::{EngineHooks, StaticFileHook}, + hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngineHandle, }; -use reth_blockchain_tree::BlockchainTreeConfig; -use reth_ethereum_engine::service::{ChainEvent, EthService}; +use reth_ethereum_engine::service::EthService; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_exex::ExExManagerHandle; -use reth_network::{NetworkEvents, NetworkSyncUpdater, SyncState}; +use reth_network::NetworkEvents; use reth_node_api::{FullNodeTypes, NodeAddOns}; use reth_node_builder::{ hooks::NodeHooks, @@ -20,17 +19,17 @@ use reth_node_builder::{ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, - primitives::Head, rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; -use reth_tracing::tracing::{debug, error, info}; +use reth_tracing::tracing::{debug, info}; +use std::sync::mpsc::channel; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -51,7 +50,7 @@ impl EthNodeLauncher { impl LaunchNode> for EthNodeLauncher where T: FullNodeTypes< - Provider = BlockchainProvider2<::DB>, + Provider = BlockchainProvider<::DB>, Engine = EthEngineTypes, >, CB: NodeComponentsBuilder, @@ -74,15 +73,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - // TODO: move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. 
This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -99,7 +89,7 @@ where .inspect(|_| { info!(target: "reth::cli", "Database opened"); }) - .with_prometheus_server().await? + .with_prometheus().await? .inspect(|this| { debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); }) @@ -107,12 +97,10 @@ where .inspect(|this| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); }) - .with_metrics_task() + .with_metrics() // passing FullNodeTypes as type parameter here so that we can build // later the components. - .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider2::new(provider_factory)?) - }, tree_config, canon_state_notification_sender)? + .with_blockchain_db::()? .with_components(components_builder, on_component_initialized).await?; // spawn exexs @@ -159,7 +147,8 @@ where let pipeline_events = pipeline.events(); - let initial_target = ctx.node_config().debug.tip; + // TODO: support --debug.tip + let _initial_target = ctx.node_config().debug.tip; let mut pruner_builder = ctx.pruner_builder(); if let Some(exex_manager_handle) = &exex_manager_handle { @@ -170,30 +159,34 @@ where let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); + + let (to_tree_tx, _to_tree_rx) = channel(); + let (_from_tree_tx, from_tree_rx) = unbounded_channel(); // Configure the consensus engine - let mut eth_service = EthService::new( + let eth_service = EthService::new( ctx.chain_spec(), network_client.clone(), + // to tree + to_tree_tx, + // from tree + from_tree_rx, UnboundedReceiverStream::new(consensus_engine_rx), pipeline, Box::new(ctx.task_executor().clone()), - ctx.provider_factory().clone(), - ctx.blockchain_db().clone(), - pruner, - ctx.components().payload_builder().clone(), ); let event_sender = EventSender::default(); let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); + BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender); info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( ctx.components().network().event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), + // TODO get engine events pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( @@ -247,52 +240,11 @@ where .await?; // Run consensus engine to completion - let network_handle = ctx.components().network().clone(); - let chainspec = ctx.chain_spec(); - let (exit, rx) = oneshot::channel(); + let (tx, rx) = oneshot::channel(); info!(target: "reth::cli", "Starting consensus engine"); ctx.task_executor().spawn_critical_blocking("consensus engine", async move { - if let Some(initial_target) = initial_target { - debug!(target: "reth::cli", %initial_target, "start backfill sync"); - eth_service.orchestrator_mut().start_backfill_sync(initial_target); - } - - let mut res = Ok(()); - - // advance the chain and handle events - while let Some(event) = eth_service.next().await { - debug!(target: "reth::cli", "Event: {event:?}"); - match event { - 
ChainEvent::BackfillSyncFinished => { - network_handle.update_sync_state(SyncState::Idle); - } - ChainEvent::BackfillSyncStarted => { - network_handle.update_sync_state(SyncState::Syncing); - } - ChainEvent::FatalError => { - error!(target: "reth::cli", "Fatal error in consensus engine"); - res = Err(eyre::eyre!("Fatal error in consensus engine")); - break - } - ChainEvent::Handler(ev) => { - if let Some(head) = ev.canonical_header() { - let head_block = Head { - number: head.number, - hash: head.hash(), - difficulty: head.difficulty, - timestamp: head.timestamp, - total_difficulty: chainspec - .final_paris_total_difficulty(head.number) - .unwrap_or_default(), - }; - network_handle.update_status(head_block); - } - event_sender.notify(ev); - } - } - } - - let _ = exit.send(res); + let res = eth_service.await; + let _ = tx.send(res); }); let full_node = FullNode { @@ -313,7 +265,7 @@ where let handle = NodeHandle { node_exit_future: NodeExitFuture::new( - async { rx.await? }, + async { Ok(rx.await??) }, full_node.config.debug.terminate, ), node: full_node, diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index fe2ff7effe41..32ebf2d22b23 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -1,17 +1,11 @@ //! Node builder setup tests. -use std::sync::Arc; - -use reth_db::{ - test_utils::{create_test_rw_db, TempDatabase}, - DatabaseEnv, -}; +use reth_db::test_utils::create_test_rw_db; use reth_node_builder::{FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::{ launch::EthNodeLauncher, node::{EthereumAddOns, EthereumNode}, }; -use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskManager; #[test] @@ -51,7 +45,7 @@ async fn test_eth_launcher() { let db = create_test_rw_db(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types_and_provider::>>>() + .with_types::() .with_components(EthereumNode::components()) .with_add_ons::() .launch_with_fn(|builder| { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index e853cb1ad12c..c7b491243864 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -196,24 +196,25 @@ where } // Calculate the requests and the requests root. - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { - // We do not calculate the EIP-6110 deposit requests because there are no - // transactions in an empty payload. - let withdrawal_requests = post_block_withdrawal_requests_contract_call( - &self.evm_config, - &mut db, - &initialized_cfg, - &initialized_block_env, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - - let requests = withdrawal_requests; - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) - } else { - (None, None) - }; + let (requests, requests_root) = if chain_spec + .is_prague_active_at_timestamp(attributes.timestamp) + { + // We do not calculate the EIP-6110 deposit requests because there are no + // transactions in an empty payload. 
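+            // Only requests that a post-block system call can produce are collected
+            // here; for an empty payload that is the EIP-7002 withdrawal requests
+            // contract call below.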
+ let withdrawal_requests = post_block_withdrawal_requests_contract_call::( + &self.evm_config, + &mut db, + &initialized_cfg, + &initialized_block_env, + ) + .map_err(|err| PayloadBuilderError::Internal(err.into()))?; + + let requests = withdrawal_requests; + let requests_root = calculate_requests_root(&requests); + (Some(requests.into()), Some(requests_root)) + } else { + (None, None) + }; let header = Header { parent_hash: parent_block.hash(), diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index b60067dfd7d4..8ec3a7024cb5 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -17,7 +17,6 @@ reth-storage-errors.workspace = true reth-prune-types.workspace = true alloy-primitives.workspace = true -alloy-rlp.workspace = true alloy-eips.workspace = true revm-primitives.workspace = true diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 5d8ec12bdbf3..1fdee985606b 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -23,7 +23,7 @@ use revm_primitives::EVMError; use alloc::{boxed::Box, string::String}; pub mod trie; -pub use trie::*; +pub use trie::{StateRootError, StorageRootError}; /// Transaction validation errors #[derive(thiserror_no_std::Error, Debug, Clone, PartialEq, Eq)] diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 5690bc97e3aa..fd3533977ab2 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,34 +1,14 @@ //! Errors when computing the state root. -use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; +use reth_storage_errors::db::DatabaseError; use thiserror_no_std::Error; -/// State root errors. -#[derive(Error, Debug, PartialEq, Eq, Clone)] -pub enum StateProofError { - /// Internal database error. - #[error(transparent)] - Database(#[from] DatabaseError), - /// RLP decoding error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} - -impl From for ProviderError { - fn from(value: StateProofError) -> Self { - match value { - StateProofError::Database(error) => Self::Database(error), - StateProofError::Rlp(error) => Self::Rlp(error), - } - } -} - /// State root errors. #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum StateRootError { /// Internal database error. #[error(transparent)] - Database(#[from] DatabaseError), + DB(#[from] DatabaseError), /// Storage root error. #[error(transparent)] StorageRootError(#[from] StorageRootError), @@ -37,8 +17,8 @@ pub enum StateRootError { impl From for DatabaseError { fn from(err: StateRootError) -> Self { match err { - StateRootError::Database(err) | - StateRootError::StorageRootError(StorageRootError::Database(err)) => err, + StateRootError::DB(err) | + StateRootError::StorageRootError(StorageRootError::DB(err)) => err, } } } @@ -48,5 +28,5 @@ impl From for DatabaseError { pub enum StorageRootError { /// Internal database error. #[error(transparent)] - Database(#[from] DatabaseError), + DB(#[from] DatabaseError), } diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs deleted file mode 100644 index 2c132576415d..000000000000 --- a/crates/evm/execution-types/src/execute.rs +++ /dev/null @@ -1,41 +0,0 @@ -use reth_primitives::{Request, U256}; -use revm::db::BundleState; - -/// A helper type for ethereum block inputs that consists of a block and the total difficulty. 
-#[derive(Debug)] -pub struct BlockExecutionInput<'a, Block> { - /// The block to execute. - pub block: &'a Block, - /// The total difficulty of the block. - pub total_difficulty: U256, -} - -impl<'a, Block> BlockExecutionInput<'a, Block> { - /// Creates a new input. - pub const fn new(block: &'a Block, total_difficulty: U256) -> Self { - Self { block, total_difficulty } - } -} - -impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { - fn from((block, total_difficulty): (&'a Block, U256)) -> Self { - Self::new(block, total_difficulty) - } -} - -/// The output of an ethereum block. -/// -/// Contains the state changes, transaction receipts, and total gas used in the block. -/// -/// TODO(mattsse): combine with `ExecutionOutcome` -#[derive(Debug, PartialEq, Eq)] -pub struct BlockExecutionOutput { - /// The changed state of the block after execution. - pub state: BundleState, - /// All the receipts of the transactions in the block. - pub receipts: Vec, - /// All the EIP-7685 requests of the transactions in the block. - pub requests: Vec, - /// The total gas used by the block. - pub gas_used: u64, -} diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index 881b2a33dad0..0692fa57eb94 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -8,11 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod chain; -pub use chain::*; - -mod execute; -pub use execute::*; - mod execution_outcome; pub use execution_outcome::*; + +mod chain; +pub use chain::*; diff --git a/crates/evm/src/builder.rs b/crates/evm/src/builder.rs deleted file mode 100644 index 019e7d9a6be1..000000000000 --- a/crates/evm/src/builder.rs +++ /dev/null @@ -1,150 +0,0 @@ -//! Builder for creating an EVM with a database and environment. - -use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use revm_primitives::{Env, EnvWithHandlerCfg}; - -/// Builder for creating an EVM with a database and environment. -/// -/// Wrapper around [`EvmBuilder`] that allows for setting the database and environment for the EVM. -/// -/// This is useful for creating an EVM with a custom database and environment without having to -/// necessarily rely on Revm inspector. -#[derive(Debug)] -pub struct RethEvmBuilder { - /// The database to use for the EVM. - db: DB, - /// The environment to use for the EVM. - env: Option>, - /// The external context for the EVM. - external_context: EXT, -} - -impl RethEvmBuilder -where - DB: Database, -{ - /// Create a new EVM builder with the given database. - pub const fn new(db: DB, external_context: EXT) -> Self { - Self { db, env: None, external_context } - } - - /// Set the environment for the EVM. - pub fn with_env(mut self, env: Box) -> Self { - self.env = Some(env); - self - } - - /// Set the external context for the EVM. - pub fn with_external_context(self, external_context: EXT1) -> RethEvmBuilder { - RethEvmBuilder { db: self.db, env: self.env, external_context } - } - - /// Build the EVM with the given database and environment. - pub fn build<'a>(self) -> Evm<'a, EXT, DB> { - let mut builder = - EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); - if let Some(env) = self.env { - builder = builder.with_env(env); - } - - builder.build() - } - - /// Build the EVM with the given database and environment, using the given inspector. 
- pub fn build_with_inspector<'a, I>(self, inspector: I) -> Evm<'a, I, DB> - where - I: GetInspector, - EXT: 'a, - { - let mut builder = - EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); - if let Some(env) = self.env { - builder = builder.with_env(env); - } - builder - .with_external_context(inspector) - .append_handler_register(inspector_handle_register) - .build() - } -} - -/// Trait for configuring an EVM builder. -pub trait ConfigureEvmBuilder { - /// The type of EVM builder that this trait can configure. - type Builder<'a, DB: Database>: EvmFactory; -} - -/// Trait for configuring the EVM for executing full blocks. -pub trait EvmFactory { - /// Associated type for the default external context that should be configured for the EVM. - type DefaultExternalContext<'a>; - - /// Provides the default external context. - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a>; - - /// Returns new EVM with the given database - /// - /// This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] methods. It is - /// up to the caller to call an appropriate method to fill the transaction and block - /// environment before executing any transactions using the provided EVM. - fn evm(self, db: DB) -> Evm<'static, Self::DefaultExternalContext<'static>, DB> - where - Self: Sized, - { - RethEvmBuilder::new(db, self.default_external_context()).build() - } - - /// Returns a new EVM with the given database configured with the given environment settings, - /// including the spec id. - /// - /// This will preserve any handler modifications - fn evm_with_env<'a, DB: Database + 'a>( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { - RethEvmBuilder::new(db, self.default_external_context()).with_env(env.env).build() - } - - /// Returns a new EVM with the given database configured with the given environment settings, - /// including the spec id. - /// - /// This will use the given external inspector as the EVM external context. - /// - /// This will preserve any handler modifications - fn evm_with_env_and_inspector( - &self, - db: DB, - env: EnvWithHandlerCfg, - inspector: I, - ) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - RethEvmBuilder::new(db, self.default_external_context()) - .with_env(env.env) - .build_with_inspector(inspector) - } - - /// Returns a new EVM with the given inspector. - /// - /// Caution: This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] - /// methods. It is up to the caller to call an appropriate method to fill the transaction - /// and block environment before executing any transactions using the provided EVM. 
- fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - RethEvmBuilder::new(db, self.default_external_context()).build_with_inspector(inspector) - } -} - -impl EvmFactory for RethEvmBuilder { - type DefaultExternalContext<'a> = EXT; - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> { - self.external_context.clone() - } -} diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 0a490b813596..f6af36d2eb63 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -2,9 +2,11 @@ use std::fmt::Display; -use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; +use crate::execute::{ + BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, +}; use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 68c398506120..9d3fd0a5e824 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,10 +1,9 @@ //! Traits for execution. -// Re-export execution types -pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; - -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; +use reth_execution_types::ExecutionOutcome; +use reth_primitives::{BlockNumber, BlockWithSenders, Receipt, Request, U256}; use reth_prune_types::PruneModes; +use revm::db::BundleState; use revm_primitives::db::Database; use std::fmt::Display; @@ -97,6 +96,45 @@ pub trait BatchExecutor { fn size_hint(&self) -> Option; } +/// The output of an ethereum block. +/// +/// Contains the state changes, transaction receipts, and total gas used in the block. +/// +/// TODO(mattsse): combine with `ExecutionOutcome` +#[derive(Debug, PartialEq, Eq)] +pub struct BlockExecutionOutput { + /// The changed state of the block after execution. + pub state: BundleState, + /// All the receipts of the transactions in the block. + pub receipts: Vec, + /// All the EIP-7685 requests of the transactions in the block. + pub requests: Vec, + /// The total gas used by the block. + pub gas_used: u64, +} + +/// A helper type for ethereum block inputs that consists of a block and the total difficulty. +#[derive(Debug)] +pub struct BlockExecutionInput<'a, Block> { + /// The block to execute. + pub block: &'a Block, + /// The total difficulty of the block. + pub total_difficulty: U256, +} + +impl<'a, Block> BlockExecutionInput<'a, Block> { + /// Creates a new input. + pub const fn new(block: &'a Block, total_difficulty: U256) -> Self { + Self { block, total_difficulty } + } +} + +impl<'a, Block> From<(&'a Block, U256)> for BlockExecutionInput<'a, Block> { + fn from((block, total_difficulty): (&'a Block, U256)) -> Self { + Self::new(block, total_difficulty) + } +} + /// A type that can create a new executor for block execution. pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a single block given a database. 
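For orientation, the relocated `BlockExecutionInput`/`BlockExecutionOutput` pair is consumed through the `Executor` trait, and the `From<(&Block, U256)>` impl lets callers pass a `(block, total difficulty)` tuple directly. A minimal sketch of that flow, assuming an `EthExecutorProvider` and a state provider factory are at hand (the helper name and wiring are illustrative, not part of this patch):

    use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor};
    use reth_evm_ethereum::execute::EthExecutorProvider;
    use reth_primitives::{BlockWithSenders, Receipt, U256};
    use reth_provider::StateProviderFactory;
    use reth_revm::database::StateProviderDatabase;

    /// Illustrative helper: executes one recovered block on top of the latest state.
    fn execute_one<P: StateProviderFactory>(
        executor_provider: &EthExecutorProvider,
        state: P,
        block: &BlockWithSenders,
    ) -> eyre::Result<BlockExecutionOutput<Receipt>> {
        let db = StateProviderDatabase::new(state.latest()?);
        // The tuple conversion builds the `BlockExecutionInput` here.
        Ok(executor_provider.executor(db).execute((block, U256::ZERO).into())?)
    }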
@@ -146,7 +184,6 @@ mod tests { use super::*; use reth_primitives::Block; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::U256; use std::marker::PhantomData; #[derive(Clone, Default)] diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index c92eb8e923be..27eeb42ec4bd 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -21,7 +21,6 @@ use revm_primitives::{ BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, }; -pub mod builder; pub mod either; pub mod execute; pub mod noop; @@ -43,17 +42,17 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB>; + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB>; /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications - fn evm_with_env( + fn evm_with_env<'a, DB: Database + 'a>( &self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -66,12 +65,12 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This will use the given external inspector as the EVM external context. /// /// This will preserve any handler modifications - fn evm_with_env_and_inspector( + fn evm_with_env_and_inspector<'a, DB, I>( &self, db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> Evm<'_, I, DB> + ) -> Evm<'a, I, DB> where DB: Database, I: GetInspector, @@ -87,9 +86,9 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// Caution: This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It /// is up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. 
- fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> where - DB: Database, + DB: Database + 'a, I: GetInspector, { EvmBuilder::default() diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index beac15be1662..80a2b76de834 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -3,13 +3,15 @@ use std::fmt::Display; use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; -use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; +use crate::execute::{ + BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, +}; const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index b847a0665a35..2e73ff2fa985 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -36,7 +36,7 @@ pub trait EvmEnvProvider: Send + Sync { { let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); - self.fill_env_with_header(&mut cfg, &mut block_env, header, evm_config)?; + self.fill_env_with_header::(&mut cfg, &mut block_env, header, evm_config)?; Ok((cfg, block_env)) } diff --git a/crates/evm/src/system_calls.rs b/crates/evm/src/system_calls.rs index 58759a866556..9d493f51795e 100644 --- a/crates/evm/src/system_calls.rs +++ b/crates/evm/src/system_calls.rs @@ -90,7 +90,7 @@ where // if the block number is zero (genesis block) then the parent beacon block root must // be 0x0 and no system transaction may occur as per EIP-4788 if block_number == 0 { - if !parent_beacon_block_root.is_zero() { + if parent_beacon_block_root != B256::ZERO { return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { parent_beacon_block_root, } @@ -162,7 +162,7 @@ where .build(); // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call(evm_config, &mut evm_post_block) + apply_withdrawal_requests_contract_call::(evm_config, &mut evm_post_block) } /// Applies the post-block call to the EIP-7002 withdrawal requests contract. @@ -256,8 +256,11 @@ where let amount = data.get_u64(); - withdrawal_requests - .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); + withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { + source_address, + validator_pubkey, + amount, + })); } Ok(withdrawal_requests) @@ -292,7 +295,7 @@ where .build(); // initialize a block from the env, because the post block call needs the block itself - apply_consolidation_requests_contract_call(evm_config, &mut evm_post_block) + apply_consolidation_requests_contract_call::(evm_config, &mut evm_post_block) } /// Applies the post-block call to the EIP-7251 consolidation requests contract. 
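The `Request::WithdrawalRequest` construction above decodes records returned by the EIP-7002 system call: each record is 20 bytes of source address, 48 bytes of validator public key, and an 8-byte big-endian amount. As a standalone reference for that layout, a sketch with a hypothetical helper name:

    use alloy_primitives::{Address, FixedBytes};

    /// One EIP-7002 withdrawal request record as returned by the system contract:
    /// 20-byte source address || 48-byte validator pubkey || 8-byte big-endian amount.
    const RECORD_LEN: usize = 20 + 48 + 8;

    /// Hypothetical helper: decodes a single 76-byte record.
    fn decode_withdrawal_record(data: &[u8]) -> Option<(Address, FixedBytes<48>, u64)> {
        if data.len() < RECORD_LEN {
            return None;
        }
        let source_address = Address::from_slice(&data[..20]);
        let validator_pubkey = FixedBytes::<48>::from_slice(&data[20..68]);
        let amount = u64::from_be_bytes(data[68..76].try_into().ok()?);
        Some((source_address, validator_pubkey, amount))
    }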
diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml
index 48e658c408de..58a2695b89e0 100644
--- a/crates/exex/exex/Cargo.toml
+++ b/crates/exex/exex/Cargo.toml
@@ -14,40 +14,39 @@ workspace = true
 [dependencies]
 ## reth
 reth-config.workspace = true
-reth-evm.workspace = true
 reth-exex-types.workspace = true
 reth-metrics.workspace = true
-reth-network.workspace = true
 reth-node-api.workspace = true
 reth-node-core.workspace = true
-reth-payload-builder.workspace = true
-reth-primitives-traits.workspace = true
 reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
 reth-provider.workspace = true
+reth-tasks.workspace = true
+reth-tracing.workspace = true
+reth-network.workspace = true
+reth-payload-builder.workspace = true
+reth-evm.workspace = true
 reth-prune-types.workspace = true
 reth-revm.workspace = true
 reth-stages-api.workspace = true
-reth-tasks.workspace = true
-reth-tracing.workspace = true
 
 ## async
-futures.workspace = true
-tokio-util.workspace = true
 tokio.workspace = true
+tokio-util.workspace = true
 
 ## misc
 eyre.workspace = true
 metrics.workspace = true
 
 [dev-dependencies]
-reth-blockchain-tree.workspace = true
 reth-chainspec.workspace = true
-reth-db-api.workspace = true
-reth-db-common.workspace = true
 reth-evm-ethereum.workspace = true
+reth-testing-utils.workspace = true
+reth-blockchain-tree.workspace = true
+reth-db-common.workspace = true
 reth-node-api.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
-reth-testing-utils.workspace = true
+reth-db-api.workspace = true
 secp256k1.workspace = true
diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill.rs
similarity index 57%
rename from crates/exex/exex/src/backfill/job.rs
rename to crates/exex/exex/src/backfill.rs
index e3c04815bb2f..36f00573437c 100644
--- a/crates/exex/exex/src/backfill/job.rs
+++ b/crates/exex/exex/src/backfill.rs
@@ -1,12 +1,7 @@
-use crate::BackFillJobStream;
-use std::{
-    ops::RangeInclusive,
-    time::{Duration, Instant},
-};
-
 use reth_evm::execute::{
     BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor,
 };
+use reth_node_api::FullNodeComponents;
 use reth_primitives::{Block, BlockNumber, BlockWithSenders, Receipt};
 use reth_primitives_traits::format_gas_throughput;
 use reth_provider::{
@@ -16,6 +11,68 @@ use reth_prune_types::PruneModes;
 use reth_revm::database::StateProviderDatabase;
 use reth_stages_api::ExecutionStageThresholds;
 use reth_tracing::tracing::{debug, trace};
+use std::{
+    ops::RangeInclusive,
+    time::{Duration, Instant},
+};
+
+/// Factory for creating new backfill jobs.
+#[derive(Debug, Clone)]
+pub struct BackfillJobFactory<E, P> {
+    executor: E,
+    provider: P,
+    prune_modes: PruneModes,
+    thresholds: ExecutionStageThresholds,
+}
+
+impl<E, P> BackfillJobFactory<E, P> {
+    /// Creates a new [`BackfillJobFactory`].
+    pub fn new(executor: E, provider: P) -> Self {
+        Self {
+            executor,
+            provider,
+            prune_modes: PruneModes::none(),
+            thresholds: ExecutionStageThresholds::default(),
+        }
+    }
+
+    /// Sets the prune modes
+    pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self {
+        self.prune_modes = prune_modes;
+        self
+    }
+
+    /// Sets the thresholds
+    pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self {
+        self.thresholds = thresholds;
+        self
+    }
+}
+
+impl<E: Clone, P: Clone> BackfillJobFactory<E, P> {
+    /// Creates a new backfill job for the given range.
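+    /// The factory stays reusable: executor, provider, prune modes, and
+    /// thresholds are cloned into each returned job.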
+    pub fn backfill(&self, range: RangeInclusive<BlockNumber>) -> BackfillJob<E, P> {
+        BackfillJob {
+            executor: self.executor.clone(),
+            provider: self.provider.clone(),
+            prune_modes: self.prune_modes.clone(),
+            range,
+            thresholds: self.thresholds.clone(),
+        }
+    }
+}
+
+impl BackfillJobFactory<(), ()> {
+    /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`].
+    pub fn new_from_components<Node: FullNodeComponents>(
+        components: Node,
+    ) -> BackfillJobFactory<Node::Executor, Node::Provider> {
+        BackfillJobFactory::<_, _>::new(
+            components.block_executor().clone(),
+            components.provider().clone(),
+        )
+    }
+}
 
 /// Backfill job started for a specific range.
 ///
@@ -23,12 +80,11 @@ use reth_tracing::tracing::{debug, trace};
 /// and yields [`Chain`]
 #[derive(Debug)]
 pub struct BackfillJob<E, P> {
-    pub(crate) executor: E,
-    pub(crate) provider: P,
-    pub(crate) prune_modes: PruneModes,
-    pub(crate) thresholds: ExecutionStageThresholds,
-    pub(crate) range: RangeInclusive<BlockNumber>,
-    pub(crate) stream_parallelism: usize,
+    executor: E,
+    provider: P,
+    prune_modes: PruneModes,
+    thresholds: ExecutionStageThresholds,
+    range: RangeInclusive<BlockNumber>,
 }
 
 impl<E, P> Iterator for BackfillJob<E, P>
 where
@@ -142,15 +198,11 @@ impl<E, P> BackfillJob<E, P> {
     pub fn into_single_blocks(self) -> SingleBlockBackfillJob<E, P> {
         self.into()
     }
+}
 
-    /// Converts the backfill job into a backfill job stream.
-    pub fn into_stream(self) -> BackFillJobStream<E, P>
-    where
-        E: BlockExecutorProvider + Clone + 'static,
-        P: HeaderProvider + BlockReader + StateProviderFactory + Clone + 'static,
-    {
-        let parallelism = self.stream_parallelism;
-        BackFillJobStream::new(self.into_single_blocks()).with_parallelism(parallelism)
+impl<E, P> From<BackfillJob<E, P>> for SingleBlockBackfillJob<E, P> {
+    fn from(value: BackfillJob<E, P>) -> Self {
+        Self { executor: value.executor, provider: value.provider, range: value.range }
     }
 }
 
@@ -158,11 +210,11 @@ impl<E, P> BackfillJob<E, P> {
 ///
 /// It implements [`Iterator`] which executes a block each time the
 /// iterator is advanced and yields ([`BlockWithSenders`], [`BlockExecutionOutput`])
-#[derive(Debug, Clone)]
+#[derive(Debug)]
 pub struct SingleBlockBackfillJob<E, P> {
     executor: E,
     provider: P,
-    pub(crate) range: RangeInclusive<BlockNumber>,
+    range: RangeInclusive<BlockNumber>,
 }
 
 impl<E, P> Iterator for SingleBlockBackfillJob<E, P>
 where
@@ -182,7 +234,7 @@ where
     E: BlockExecutorProvider,
     P: HeaderProvider + BlockReader + StateProviderFactory,
 {
-    pub(crate) fn execute_block(
+    fn execute_block(
         &self,
         block_number: u64,
     ) -> Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError> {
@@ -210,29 +262,176 @@ where
     }
 }
 
-impl<E, P> From<BackfillJob<E, P>> for SingleBlockBackfillJob<E, P> {
-    fn from(value: BackfillJob<E, P>) -> Self {
-        Self { executor: value.executor, provider: value.provider, range: value.range }
-    }
-}
-
 #[cfg(test)]
 mod tests {
-    use std::sync::Arc;
-
-    use crate::{
-        backfill::test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome},
-        BackfillJobFactory,
-    };
+    use crate::BackfillJobFactory;
+    use eyre::OptionExt;
     use reth_blockchain_tree::noop::NoopBlockchainTree;
+    use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET};
     use reth_db_common::init::init_genesis;
+    use reth_evm::execute::{
+        BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor,
+    };
     use reth_evm_ethereum::execute::EthExecutorProvider;
-    use reth_primitives::public_key_to_address;
+    use reth_primitives::{
+        b256, constants::ETH_TO_WEI, public_key_to_address, Address, Block, BlockWithSenders,
+        Genesis, GenesisAccount, Header, Receipt, Requests, SealedBlockWithSenders, Transaction,
+        TxEip2930, TxKind, U256,
+    };
     use reth_provider::{
         providers::BlockchainProvider,
        test_utils::create_test_provider_factory_with_chain_spec,
+        BlockWriter, ExecutionOutcome, LatestStateProviderRef, ProviderFactory,
     };
-    use reth_testing_utils::generators;
+    use reth_revm::database::StateProviderDatabase;
+    use reth_testing_utils::generators::{self, sign_tx_with_key_pair};
     use secp256k1::Keypair;
+    use std::sync::Arc;
+
+    fn to_execution_outcome(
+        block_number: u64,
+        block_execution_output: &BlockExecutionOutput<Receipt>,
+    ) -> ExecutionOutcome {
+        ExecutionOutcome {
+            bundle: block_execution_output.state.clone(),
+            receipts: block_execution_output.receipts.clone().into(),
+            first_block: block_number,
+            requests: vec![Requests(block_execution_output.requests.clone())],
+        }
+    }
+
+    fn chain_spec(address: Address) -> Arc<ChainSpec> {
+        // Create a chain spec with a genesis state that contains the
+        // provided sender
+        Arc::new(
+            ChainSpecBuilder::default()
+                .chain(MAINNET.chain)
+                .genesis(Genesis {
+                    alloc: [(
+                        address,
+                        GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() },
+                    )]
+                    .into(),
+                    ..MAINNET.genesis.clone()
+                })
+                .paris_activated()
+                .build(),
+        )
+    }
+
+    fn execute_block_and_commit_to_database<DB>(
+        provider_factory: &ProviderFactory<DB>,
+        chain_spec: Arc<ChainSpec>,
+        block: &BlockWithSenders,
+    ) -> eyre::Result<BlockExecutionOutput<Receipt>>
+    where
+        DB: reth_db_api::database::Database,
+    {
+        let provider = provider_factory.provider()?;
+
+        // Execute the block to produce a block execution output
+        let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec)
+            .executor(StateProviderDatabase::new(LatestStateProviderRef::new(
+                provider.tx_ref(),
+                provider.static_file_provider().clone(),
+            )))
+            .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?;
+        block_execution_output.state.reverts.sort();
+
+        // Convert the block execution output to an execution outcome for committing to the
+        // database
+        let execution_outcome = to_execution_outcome(block.number, &block_execution_output);
+
+        // Commit the block's execution outcome to the database
+        let provider_rw = provider_factory.provider_rw()?;
+        let block = block.clone().seal_slow();
+        provider_rw.append_blocks_with_state(
+            vec![block],
+            execution_outcome,
+            Default::default(),
+            Default::default(),
+        )?;
+        provider_rw.commit()?;
+
+        Ok(block_execution_output)
+    }
+
+    fn blocks_and_execution_outputs<DB>(
+        provider_factory: ProviderFactory<DB>,
+        chain_spec: Arc<ChainSpec>,
+        key_pair: Keypair,
+    ) -> eyre::Result<Vec<(SealedBlockWithSenders, BlockExecutionOutput<Receipt>)>>
+    where
+        DB: reth_db_api::database::Database,
+    {
+        // First block has a transaction that transfers some ETH to zero address
+        let block1 = Block {
+            header: Header {
+                parent_hash: chain_spec.genesis_hash(),
+                receipts_root: b256!(
+                    "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
+                ),
+                difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
+                number: 1,
+                gas_limit: 21000,
+                gas_used: 21000,
+                ..Default::default()
+            },
+            body: vec![sign_tx_with_key_pair(
+                key_pair,
+                Transaction::Eip2930(TxEip2930 {
+                    chain_id: chain_spec.chain.id(),
+                    nonce: 0,
+                    gas_limit: 21000,
+                    gas_price: 1_500_000_000,
+                    to: TxKind::Call(Address::ZERO),
+                    value: U256::from(0.1 * ETH_TO_WEI as f64),
+                    ..Default::default()
+                }),
+            )],
+            ..Default::default()
+        }
+        .with_recovered_senders()
+        .ok_or_eyre("failed to recover senders")?;
+
+        // Second block resends the same transaction with increased nonce
+        let block2 = Block {
+            header: Header {
+                parent_hash: block1.header.hash_slow(),
+                receipts_root: b256!(
+                    "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
+                ),
+                difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
+                number: 2,
+                gas_limit: 21000,
+                gas_used: 21000,
+                ..Default::default()
+            },
+            body: vec![sign_tx_with_key_pair(
+                key_pair,
+                Transaction::Eip2930(TxEip2930 {
+                    chain_id: chain_spec.chain.id(),
+                    nonce: 1,
+                    gas_limit: 21000,
+                    gas_price: 1_500_000_000,
+                    to: TxKind::Call(Address::ZERO),
+                    value: U256::from(0.1 * ETH_TO_WEI as f64),
+                    ..Default::default()
+                }),
+            )],
+            ..Default::default()
+        }
+        .with_recovered_senders()
+        .ok_or_eyre("failed to recover senders")?;
+
+        let block_output1 =
+            execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?;
+        let block_output2 =
+            execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?;
+
+        let block1 = block1.seal_slow();
+        let block2 = block2.seal_slow();
+
+        Ok(vec![(block1, block_output1), (block2, block_output2)])
+    }
 
     #[test]
     fn test_backfill() -> eyre::Result<()> {
diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs
deleted file mode 100644
index 6e845e240954..000000000000
--- a/crates/exex/exex/src/backfill/factory.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-use crate::BackfillJob;
-use std::ops::RangeInclusive;
-
-use reth_node_api::FullNodeComponents;
-use reth_primitives::BlockNumber;
-use reth_prune_types::PruneModes;
-use reth_stages_api::ExecutionStageThresholds;
-
-use super::stream::DEFAULT_PARALLELISM;
-
-/// Factory for creating new backfill jobs.
-#[derive(Debug, Clone)]
-pub struct BackfillJobFactory<E, P> {
-    executor: E,
-    provider: P,
-    prune_modes: PruneModes,
-    thresholds: ExecutionStageThresholds,
-    stream_parallelism: usize,
-}
-
-impl<E, P> BackfillJobFactory<E, P> {
-    /// Creates a new [`BackfillJobFactory`].
-    pub fn new(executor: E, provider: P) -> Self {
-        Self {
-            executor,
-            provider,
-            prune_modes: PruneModes::none(),
-            thresholds: ExecutionStageThresholds::default(),
-            stream_parallelism: DEFAULT_PARALLELISM,
-        }
-    }
-
-    /// Sets the prune modes
-    pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self {
-        self.prune_modes = prune_modes;
-        self
-    }
-
-    /// Sets the thresholds
-    pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self {
-        self.thresholds = thresholds;
-        self
-    }
-
-    /// Sets the stream parallelism.
-    ///
-    /// Configures the [`BackFillJobStream`](super::stream::BackFillJobStream) created via
-    /// [`BackfillJob::into_stream`].
-    pub const fn with_stream_parallelism(mut self, stream_parallelism: usize) -> Self {
-        self.stream_parallelism = stream_parallelism;
-        self
-    }
-}
-
-impl<E: Clone, P: Clone> BackfillJobFactory<E, P> {
-    /// Creates a new backfill job for the given range.
-    pub fn backfill(&self, range: RangeInclusive<BlockNumber>) -> BackfillJob<E, P> {
-        BackfillJob {
-            executor: self.executor.clone(),
-            provider: self.provider.clone(),
-            prune_modes: self.prune_modes.clone(),
-            range,
-            thresholds: self.thresholds.clone(),
-            stream_parallelism: self.stream_parallelism,
-        }
-    }
-}
-
-impl BackfillJobFactory<(), ()> {
-    /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`].
- pub fn new_from_components( - components: Node, - ) -> BackfillJobFactory { - BackfillJobFactory::<_, _>::new( - components.block_executor().clone(), - components.provider().clone(), - ) - } -} diff --git a/crates/exex/exex/src/backfill/mod.rs b/crates/exex/exex/src/backfill/mod.rs deleted file mode 100644 index 51f126223fdf..000000000000 --- a/crates/exex/exex/src/backfill/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod factory; -mod job; -mod stream; -#[cfg(test)] -mod test_utils; - -pub use factory::BackfillJobFactory; -pub use job::{BackfillJob, SingleBlockBackfillJob}; -pub use stream::BackFillJobStream; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs deleted file mode 100644 index 5529301bccc6..000000000000 --- a/crates/exex/exex/src/backfill/stream.rs +++ /dev/null @@ -1,161 +0,0 @@ -use crate::SingleBlockBackfillJob; -use std::{ - ops::RangeInclusive, - pin::Pin, - task::{ready, Context, Poll}, -}; - -use futures::{ - stream::{FuturesOrdered, Stream}, - StreamExt, -}; -use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; -use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; -use reth_provider::{BlockReader, HeaderProvider, StateProviderFactory}; -use tokio::task::JoinHandle; - -type BackfillTasks = FuturesOrdered< - JoinHandle), BlockExecutionError>>, ->; - -/// The default parallelism for active tasks in [`BackFillJobStream`]. -pub(crate) const DEFAULT_PARALLELISM: usize = 4; - -/// Stream for processing backfill jobs asynchronously. -/// -/// This struct manages the execution of [`SingleBlockBackfillJob`] tasks, allowing blocks to be -/// processed asynchronously but in order within a specified range. -#[derive(Debug)] -pub struct BackFillJobStream { - job: SingleBlockBackfillJob, - tasks: BackfillTasks, - range: RangeInclusive, - parallelism: usize, -} - -impl BackFillJobStream -where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + 'static, -{ - /// Creates a new [`BackFillJobStream`] with the default parallelism. - /// - /// # Parameters - /// - `job`: The [`SingleBlockBackfillJob`] to be executed asynchronously. - /// - /// # Returns - /// A new instance of [`BackFillJobStream`] with the default parallelism. - pub fn new(job: SingleBlockBackfillJob) -> Self { - let range = job.range.clone(); - Self { job, tasks: FuturesOrdered::new(), range, parallelism: DEFAULT_PARALLELISM } - } - - /// Configures the parallelism of the [`BackFillJobStream`] to handle active tasks. - /// - /// # Parameters - /// - `parallelism`: The parallelism to handle active tasks. - /// - /// # Returns - /// The modified instance of [`BackFillJobStream`] with the specified parallelism. 
- pub const fn with_parallelism(mut self, parallelism: usize) -> Self { - self.parallelism = parallelism; - self - } - - fn spawn_task( - &self, - block_number: BlockNumber, - ) -> JoinHandle), BlockExecutionError>> - { - let job = self.job.clone(); - tokio::task::spawn_blocking(move || job.execute_block(block_number)) - } -} - -impl Stream for BackFillJobStream -where - E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + 'static + Unpin, -{ - type Item = Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - // Spawn new tasks only if we are below the parallelism configured. - while this.tasks.len() < this.parallelism { - if let Some(block_number) = this.range.next() { - let task = this.spawn_task(block_number); - this.tasks.push_back(task); - } else { - break; - } - } - - match ready!(this.tasks.poll_next_unpin(cx)) { - Some(res) => Poll::Ready(Some(res.map_err(|e| BlockExecutionError::Other(e.into()))?)), - None => Poll::Ready(None), - } - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use crate::{ - backfill::test_utils::{blocks_and_execution_outputs, chain_spec}, - BackfillJobFactory, - }; - use futures::StreamExt; - use reth_blockchain_tree::noop::NoopBlockchainTree; - use reth_db_common::init::init_genesis; - use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::public_key_to_address; - use reth_provider::{ - providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, - }; - use reth_testing_utils::generators; - use secp256k1::Keypair; - - #[tokio::test] - async fn test_async_backfill() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - - // Create a key pair for the sender - let key_pair = Keypair::new_global(&mut generators::rng()); - let address = public_key_to_address(key_pair.public_key()); - - let chain_spec = chain_spec(address); - - let executor = EthExecutorProvider::ethereum(chain_spec.clone()); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - init_genesis(provider_factory.clone())?; - let blockchain_db = BlockchainProvider::new( - provider_factory.clone(), - Arc::new(NoopBlockchainTree::default()), - )?; - - // Create first 2 blocks - let blocks_and_execution_outcomes = - blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; - - // Backfill the first block - let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone()); - let mut backfill_stream = factory.backfill(1..=1).into_stream(); - - // execute first block - let (block, mut execution_output) = backfill_stream.next().await.unwrap().unwrap(); - execution_output.state.reverts.sort(); - let sealed_block_with_senders = blocks_and_execution_outcomes[0].0.clone(); - let expected_block = sealed_block_with_senders.unseal(); - let expected_output = &blocks_and_execution_outcomes[0].1; - assert_eq!(block, expected_block); - assert_eq!(&execution_output, expected_output); - - // expect no more blocks - assert!(backfill_stream.next().await.is_none()); - - Ok(()) - } -} diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs deleted file mode 100644 index 05b41cd2b998..000000000000 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::sync::Arc; - -use eyre::OptionExt; -use reth_chainspec::{ChainSpec, 
ChainSpecBuilder, EthereumHardfork, MAINNET}; -use reth_evm::execute::{ - BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, -}; -use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_primitives::{ - b256, constants::ETH_TO_WEI, Address, Block, BlockWithSenders, Genesis, GenesisAccount, Header, - Receipt, Requests, SealedBlockWithSenders, Transaction, TxEip2930, TxKind, U256, -}; -use reth_provider::{BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, ProviderFactory}; -use reth_revm::database::StateProviderDatabase; -use reth_testing_utils::generators::sign_tx_with_key_pair; -use secp256k1::Keypair; - -pub(crate) fn to_execution_outcome( - block_number: u64, - block_execution_output: &BlockExecutionOutput, -) -> ExecutionOutcome { - ExecutionOutcome { - bundle: block_execution_output.state.clone(), - receipts: block_execution_output.receipts.clone().into(), - first_block: block_number, - requests: vec![Requests(block_execution_output.requests.clone())], - } -} - -pub(crate) fn chain_spec(address: Address) -> Arc { - // Create a chain spec with a genesis state that contains the - // provided sender - Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(Genesis { - alloc: [( - address, - GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() }, - )] - .into(), - ..MAINNET.genesis.clone() - }) - .paris_activated() - .build(), - ) -} - -pub(crate) fn execute_block_and_commit_to_database( - provider_factory: &ProviderFactory, - chain_spec: Arc, - block: &BlockWithSenders, -) -> eyre::Result> -where - DB: reth_db_api::database::Database, -{ - let provider = provider_factory.provider()?; - - // Execute the block to produce a block execution output - let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider().clone(), - ))) - .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; - block_execution_output.state.reverts.sort(); - - // Convert the block execution output to an execution outcome for committing to the database - let execution_outcome = to_execution_outcome(block.number, &block_execution_output); - - // Commit the block's execution outcome to the database - let provider_rw = provider_factory.provider_rw()?; - let block = block.clone().seal_slow(); - provider_rw.append_blocks_with_state( - vec![block], - execution_outcome, - Default::default(), - Default::default(), - )?; - provider_rw.commit()?; - - Ok(block_execution_output) -} - -pub(crate) fn blocks_and_execution_outputs( - provider_factory: ProviderFactory, - chain_spec: Arc, - key_pair: Keypair, -) -> eyre::Result)>> -where - DB: reth_db_api::database::Database, -{ - // First block has a transaction that transfers some ETH to zero address - let block1 = Block { - header: Header { - parent_hash: chain_spec.genesis_hash(), - receipts_root: b256!( - "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" - ), - difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), - number: 1, - gas_limit: 21000, - gas_used: 21000, - ..Default::default() - }, - body: vec![sign_tx_with_key_pair( - key_pair, - Transaction::Eip2930(TxEip2930 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: 21000, - gas_price: 1_500_000_000, - to: TxKind::Call(Address::ZERO), - value: U256::from(0.1 * ETH_TO_WEI as f64), - ..Default::default() - }), - )], - ..Default::default() - } - 
.with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - // Second block resends the same transaction with increased nonce - let block2 = Block { - header: Header { - parent_hash: block1.header.hash_slow(), - receipts_root: b256!( - "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e" - ), - difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"), - number: 2, - gas_limit: 21000, - gas_used: 21000, - ..Default::default() - }, - body: vec![sign_tx_with_key_pair( - key_pair, - Transaction::Eip2930(TxEip2930 { - chain_id: chain_spec.chain.id(), - nonce: 1, - gas_limit: 21000, - gas_price: 1_500_000_000, - to: TxKind::Call(Address::ZERO), - value: U256::from(0.1 * ETH_TO_WEI as f64), - ..Default::default() - }), - )], - ..Default::default() - } - .with_recovered_senders() - .ok_or_eyre("failed to recover senders")?; - - let block_output1 = - execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?; - let block_output2 = - execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?; - - let block1 = block1.seal_slow(); - let block2 = block2.seal_slow(); - - Ok(vec![(block1, block_output1), (block2, block_output2)]) -} diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index c32a8903e0a9..2a18fbde866d 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -232,7 +232,7 @@ mod test { use tokio_util::codec::Decoder; #[derive(Debug, PartialEq, Eq, RlpDecodable)] - struct MockReceipt { + pub struct MockReceipt { tx_type: u8, status: u64, cumulative_gas_used: u64, diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index a2df9896541d..671883dae68e 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -38,6 +38,7 @@ alloy-chains = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true rand.workspace = true [features] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 355491783b61..2846c0f7cf02 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -56,6 +56,7 @@ secp256k1 = { workspace = true, features = [ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true async-stream.workspace = true serde.workspace = true diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 466987768ead..aa8770d058c6 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -613,24 +613,19 @@ where /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. 
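+    /// The implementation alternates between flushing the inner sink and starting
+    /// the send of the next queued message, so the outgoing queue drains in order.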
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); - let poll_res = loop { - match this.inner.as_mut().poll_ready(cx) { - Poll::Pending => break Poll::Pending, - Poll::Ready(Err(err)) => break Poll::Ready(Err(err.into())), - Poll::Ready(Ok(())) => { + loop { + match ready!(this.inner.as_mut().poll_flush(cx)) { + Err(err) => return Poll::Ready(Err(err.into())), + Ok(()) => { let Some(message) = this.outgoing_messages.pop_front() else { - break Poll::Ready(Ok(())) + return Poll::Ready(Ok(())) }; if let Err(err) = this.inner.as_mut().start_send(message) { - break Poll::Ready(Err(err.into())) + return Poll::Ready(Err(err.into())) } } } - }; - - ready!(this.inner.as_mut().poll_flush(cx))?; - - poll_res + } } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 1e23390fd1e2..8efaec5f0fb7 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -18,6 +18,7 @@ pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; use reth_network_peers::NodeRecord; +use serde::{Deserialize, Serialize}; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; /// The `PeerId` type. @@ -245,8 +246,7 @@ impl std::fmt::Display for Direction { } /// The status of the network being ran by the local node. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct NetworkStatus { /// The local node client version. pub client_version: String, diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index d37319723e28..a22dbd532fb7 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -149,12 +149,12 @@ pub use session::{ SessionManager, }; +pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; + pub use flattened_response::FlattenedResponse; pub use manager::DiscoveredEvent; pub use metrics::TxTypesCounter; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; -pub use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; pub use reth_network_types::{PeersConfig, SessionsConfig}; pub use session::EthRlpxConnection; pub use swarm::NetworkConnectionState; -pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index f0d9eef1beba..22db838c8a66 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -47,7 +47,7 @@ impl AnyNodeTypes { impl NodeTypes for AnyNodeTypes where P: NodePrimitives + Send + Sync + Unpin + 'static, - E: EngineTypes + Send + Sync + Unpin, + E: EngineTypes + Send + Sync + Unpin + 'static, { type Primitives = P; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index bb77e419c29a..5fc3da56a7d2 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -29,7 +29,6 @@ reth-rpc-builder.workspace = true reth-rpc-layer.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-node-metrics.workspace = true reth-network.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 
9c146be0827a..f3c8889ea348 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -29,7 +29,7 @@ use reth_node_core::{ rpc::eth::{helpers::AddDevSigners, FullEthApiServer}, }; use reth_primitives::revm_primitives::EnvKzgSettings; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; +use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use secp256k1::SecretKey; @@ -40,7 +40,7 @@ use crate::{ components::NodeComponentsBuilder, node::FullNode, rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, - DefaultNodeLauncher, LaunchNode, Node, NodeHandle, + DefaultNodeLauncher, Node, NodeHandle, }; /// The adapter type for a reth node with the builtin provider type @@ -207,17 +207,6 @@ where pub fn with_types(self) -> NodeBuilderWithTypes> where T: NodeTypes, - { - self.with_types_and_provider() - } - - /// Configures the types of the node and the provider type that will be used by the node. - pub fn with_types_and_provider( - self, - ) -> NodeBuilderWithTypes> - where - T: NodeTypes, - P: FullProvider, { NodeBuilderWithTypes::new(self.config, self.database) } @@ -269,20 +258,6 @@ where WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } - /// Configures the types of the node and the provider type that will be used by the node. - pub fn with_types_and_provider( - self, - ) -> WithLaunchContext>> - where - T: NodeTypes, - P: FullProvider, - { - WithLaunchContext { - builder: self.builder.with_types_and_provider(), - task_executor: self.task_executor, - } - } - /// Preconfigures the node with a specific node implementation. /// /// This is a convenience method that sets the node's types and components in one call. @@ -333,14 +308,18 @@ where } } -impl WithLaunchContext> { +impl WithLaunchContext>> +where + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, +{ /// Advances the state of the node builder to the next state where all components are configured pub fn with_components( self, components_builder: CB, - ) -> WithLaunchContext> + ) -> WithLaunchContext, CB, ()>> where - CB: NodeComponentsBuilder, + CB: NodeComponentsBuilder>, { WithLaunchContext { builder: self.builder.with_components(components_builder), @@ -349,16 +328,20 @@ impl WithLaunchContext> { } } -impl WithLaunchContext> +impl WithLaunchContext, CB, ()>> where - T: FullNodeTypes, - CB: NodeComponentsBuilder, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. - pub fn with_add_ons(self) -> WithLaunchContext> + pub fn with_add_ons( + self, + ) -> WithLaunchContext, CB, AO>> where - AO: NodeAddOns>, + CB: NodeComponentsBuilder>, + AO: NodeAddOns, CB::Components>>, { WithLaunchContext { builder: self.builder.with_add_ons::(), @@ -367,22 +350,20 @@ where } } -impl WithLaunchContext> +impl WithLaunchContext, CB, AO>> where - T: FullNodeTypes, - CB: NodeComponentsBuilder, - AO: NodeAddOns>, + DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, + T: NodeTypes, + CB: NodeComponentsBuilder>, + AO: NodeAddOns, CB::Components>>, AO::EthApi: FullEthApiServer + AddDevSigners, { - /// Returns a reference to the node builder's config. 
- pub const fn config(&self) -> &NodeConfig { - &self.builder.config - } - /// Sets the hook that is run once the node's components are initialized. pub fn on_component_initialized(self, hook: F) -> Self where - F: FnOnce(NodeAdapter) -> eyre::Result<()> + Send + 'static, + F: FnOnce(NodeAdapter, CB::Components>) -> eyre::Result<()> + + Send + + 'static, { Self { builder: self.builder.on_component_initialized(hook), @@ -393,7 +374,9 @@ where /// Sets the hook that is run once the node has started. pub fn on_node_started(self, hook: F) -> Self where - F: FnOnce(FullNode, AO>) -> eyre::Result<()> + F: FnOnce( + FullNode, CB::Components>, AO>, + ) -> eyre::Result<()> + Send + 'static, { @@ -404,7 +387,7 @@ where pub fn on_rpc_started(self, hook: F) -> Self where F: FnOnce( - RpcContext<'_, NodeAdapter, AO::EthApi>, + RpcContext<'_, NodeAdapter, CB::Components>, AO::EthApi>, RethRpcServerHandles, ) -> eyre::Result<()> + Send @@ -416,7 +399,9 @@ where /// Sets the hook that is run to configure the rpc modules. pub fn extend_rpc_modules(self, hook: F) -> Self where - F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> + F: FnOnce( + RpcContext<'_, NodeAdapter, CB::Components>, AO::EthApi>, + ) -> eyre::Result<()> + Send + 'static, { @@ -430,7 +415,9 @@ where /// The `ExEx` ID must be unique. pub fn install_exex(self, exex_id: impl Into, exex: F) -> Self where - F: FnOnce(ExExContext>) -> R + Send + 'static, + F: FnOnce(ExExContext, CB::Components>>) -> R + + Send + + 'static, R: Future> + Send, E: Future> + Send, { @@ -440,22 +427,6 @@ where } } - /// Launches the node with the given launcher. - pub async fn launch_with(self, launcher: L) -> eyre::Result - where - L: LaunchNode>, - { - launcher.launch_node(self.builder).await - } - - /// Launches the node with the given closure. - pub fn launch_with_fn(self, launcher: L) -> R - where - L: FnOnce(Self) -> R, - { - launcher(self) - } - /// Check that the builder can be launched /// /// This is useful when writing tests to ensure that the builder is configured correctly. 
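The `WithLaunchContext` impls above pin the launch typestate to the database-backed `BlockchainProvider` rather than an arbitrary `T::Provider`, with hook and `install_exex` signatures spelling out the concrete `NodeAdapter` over the full adapter type. As a rough sketch of the resulting builder flow (hedged: `EthereumNode`, `EthereumAddOns`, and the `testing_node` helper are illustrative stand-ins, not code from this patch):

```rust
// Hypothetical typestate walk-through of the reworked builder; configuration
// and error handling are elided, and the component/add-on types are assumed.
let handle = NodeBuilder::new(node_config)
    .testing_node(task_executor)                 // WithLaunchContext<NodeBuilder<DB>>
    .with_types::<EthereumNode>()                // ...<NodeBuilderWithTypes<...>>
    .with_components(EthereumNode::components()) // ...<NodeBuilderWithComponents<_, CB, ()>>
    .with_add_ons::<EthereumAddOns>()            // ...<NodeBuilderWithComponents<_, CB, AO>>
    .on_component_initialized(|_components| Ok(()))
    .launch()                                    // launch_with/launch_with_fn are gone here
    .await?;

handle.node_exit_future.await
```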
diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index bdad79c92dba..b141768d5f49 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -11,7 +11,8 @@ use rayon::ThreadPoolBuilder; use reth_auto_seal_consensus::MiningMode; use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, + noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, + TreeExternals, }; use reth_chainspec::{Chain, ChainSpec}; use reth_config::{config::EtlConfig, PruneConfig}; @@ -25,21 +26,11 @@ use reth_node_api::FullNodeTypes; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - version::{ - BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, - VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, - }, -}; -use reth_node_metrics::{ - hooks::Hooks, - server::{MetricServer, MetricServerConfig}, - version::VersionInfo, }; use reth_primitives::{BlockNumber, Head, B256}; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, StaticFileProvider}, - CanonStateNotificationSender, FullProvider, ProviderFactory, StaticFileProviderFactory, - TreeViewer, + providers::{BlockchainProvider, StaticFileProvider}, + CanonStateNotificationSender, ProviderFactory, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -54,27 +45,6 @@ use tokio::sync::{ oneshot, watch, }; -/// Allows to set a tree viewer for a configured blockchain provider. -// TODO: remove this helper trait once the engine revamp is done, the new -// blockchain provider won't require a TreeViewer. -// https://github.com/paradigmxyz/reth/issues/8742 -pub trait WithTree { - /// Setter for tree viewer. - fn set_tree(self, tree: Arc) -> Self; -} - -impl WithTree for BlockchainProvider { - fn set_tree(self, tree: Arc) -> Self { - self.with_tree(tree) - } -} - -impl WithTree for BlockchainProvider2 { - fn set_tree(self, _tree: Arc) -> Self { - self - } -} - /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -401,6 +371,8 @@ where let has_receipt_pruning = self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + info!(target: "reth::cli", "Verifying storage consistency."); + // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. if let Some(unwind_target) = factory @@ -482,37 +454,23 @@ where self.right().static_file_provider() } - /// This launches the prometheus endpoint. - /// /// Convenience function to [`Self::start_prometheus_endpoint`] - pub async fn with_prometheus_server(self) -> eyre::Result { + pub async fn with_prometheus(self) -> eyre::Result { self.start_prometheus_endpoint().await?; Ok(self) } /// Starts the prometheus endpoint. 
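+    ///
+    /// This installs the global recorder and delegates serving to
+    /// [`NodeConfig::start_metrics_endpoint`].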
pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { - let listen_addr = self.node_config().metrics; - if let Some(addr) = listen_addr { - info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); - let config = MetricServerConfig::new( - addr, - VersionInfo { - version: CARGO_PKG_VERSION, - build_timestamp: VERGEN_BUILD_TIMESTAMP, - cargo_features: VERGEN_CARGO_FEATURES, - git_sha: VERGEN_GIT_SHA, - target_triple: VERGEN_CARGO_TARGET_TRIPLE, - build_profile: BUILD_PROFILE_NAME, - }, + let prometheus_handle = self.node_config().install_prometheus_recorder()?; + self.node_config() + .start_metrics_endpoint( + prometheus_handle, + self.database().clone(), + self.static_file_provider(), self.task_executor().clone(), - Hooks::new(self.database().clone(), self.static_file_provider()), - ); - - MetricServer::new(config).serve().await?; - } - - Ok(()) + ) + .await } /// Convenience function to [`Self::init_genesis`] @@ -528,12 +486,7 @@ where /// Creates a new `WithMeteredProvider` container and attaches it to the /// launch context. - /// - /// This spawns a metrics task that listens for metrics related events and updates metrics for - /// prometheus. - pub fn with_metrics_task( - self, - ) -> LaunchContextWith>> { + pub fn with_metrics(self) -> LaunchContextWith>> { let (metrics_sender, metrics_receiver) = unbounded_channel(); let with_metrics = @@ -565,18 +518,24 @@ where } /// Creates a `BlockchainProvider` and attaches it to the launch context. - pub fn with_blockchain_db( + pub fn with_blockchain_db( self, - create_blockchain_provider: F, - tree_config: BlockchainTreeConfig, - canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result>>> where - T: FullNodeTypes, - T::Provider: FullProvider, - F: FnOnce(ProviderFactory) -> eyre::Result, + T: FullNodeTypes::DB>>, { - let blockchain_db = create_blockchain_provider(self.provider_factory().clone())?; + let tree_config = BlockchainTreeConfig::default(); + + // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); + + let blockchain_db = BlockchainProvider::new( + self.provider_factory().clone(), + Arc::new(NoopBlockchainTree::with_canon_state_notifications( + canon_state_notification_sender.clone(), + )), + )?; let metered_providers = WithMeteredProviders { db_provider_container: WithMeteredProvider { @@ -602,8 +561,7 @@ where impl LaunchContextWith>> where DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes, - T::Provider: FullProvider + WithTree, + T: FullNodeTypes>, { /// Returns access to the underlying database. pub fn database(&self) -> &DB { @@ -629,8 +587,8 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the blockchain provider. - pub const fn blockchain_db(&self) -> &T::Provider { + /// Returns a reference to the `BlockchainProvider`. 
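+    ///
+    /// At this point the provider is still backed by a `NoopBlockchainTree`; the
+    /// actual tree is attached later via `with_tree`.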
+ pub const fn blockchain_db(&self) -> &BlockchainProvider { &self.right().blockchain_db } @@ -685,7 +643,7 @@ where let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); + let blockchain_db = self.blockchain_db().clone().with_tree(blockchain_tree); debug!(target: "reth::cli", "configured blockchain tree"); @@ -722,8 +680,7 @@ where impl LaunchContextWith>> where DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static, - T: FullNodeTypes, - T::Provider: FullProvider + WithTree, + T: FullNodeTypes>, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -760,8 +717,8 @@ where &self.right().node_adapter } - /// Returns a reference to the blockchain provider. - pub const fn blockchain_db(&self) -> &T::Provider { + /// Returns a reference to the `BlockchainProvider`. + pub const fn blockchain_db(&self) -> &BlockchainProvider { &self.right().blockchain_db } @@ -857,14 +814,9 @@ pub struct WithMeteredProvider { /// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] /// and a metrics sender. #[allow(missing_debug_implementations)] -pub struct WithMeteredProviders -where - DB: Database, - T: FullNodeTypes, - T::Provider: FullProvider, -{ +pub struct WithMeteredProviders { db_provider_container: WithMeteredProvider, - blockchain_db: T::Provider, + blockchain_db: BlockchainProvider, canon_state_notification_sender: CanonStateNotificationSender, tree_config: BlockchainTreeConfig, // this field is used to store a reference to the FullNodeTypes so that we @@ -876,14 +828,12 @@ where #[allow(missing_debug_implementations)] pub struct WithComponents where - DB: Database, - T: FullNodeTypes, - T::Provider: FullProvider, + T: FullNodeTypes>, CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, tree_config: BlockchainTreeConfig, - blockchain_db: T::Provider, + blockchain_db: BlockchainProvider, node_adapter: NodeAdapter, head: Head, consensus: Arc, diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 138403ed1012..fb22e7908404 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -6,12 +6,13 @@ mod exex; pub use common::LaunchContext; pub use exex::ExExLauncher; +use std::{future::Future, sync::Arc}; + use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; @@ -31,7 +32,6 @@ use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; -use std::{future::Future, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -119,19 +119,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; - // TODO: remove tree and move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - - let tree = Arc::new(NoopBlockchainTree::with_canon_state_notifications( - canon_state_notification_sender.clone(), - )); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -148,7 +135,7 @@ where .inspect(|_| { info!(target: "reth::cli", "Database opened"); }) - .with_prometheus_server().await? + .with_prometheus().await? .inspect(|this| { debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); }) @@ -156,12 +143,10 @@ where .inspect(|this| { info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); }) - .with_metrics_task() + .with_metrics() // passing FullNodeTypes as type parameter here so that we can build // later the components. - .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider::new(provider_factory, tree)?) - }, tree_config, canon_state_notification_sender)? + .with_blockchain_db::()? .with_components(components_builder, on_component_initialized).await?; // spawn exexs diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index b4317c6b1a95..42673c13e0e4 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -35,6 +35,7 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true +reth-tasks.workspace = true reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true @@ -43,6 +44,15 @@ reth-stages-types.workspace = true alloy-genesis.workspace = true alloy-rpc-types-engine.workspace = true +# async +tokio.workspace = true + +# metrics +reth-metrics.workspace = true +metrics.workspace = true +metrics-exporter-prometheus.workspace = true +metrics-process.workspace = true +metrics-util.workspace = true # misc eyre.workspace = true @@ -51,13 +61,17 @@ humantime.workspace = true const_format.workspace = true rand.workspace = true derive_more.workspace = true +once_cell.workspace = true # io dirs-next = "2.0.0" shellexpand.workspace = true serde_json.workspace = true - +# http/rpc +http.workspace = true +jsonrpsee.workspace = true +tower.workspace = true # tracing tracing.workspace = true @@ -72,11 +86,15 @@ secp256k1 = { workspace = true, features = [ # async futures.workspace = true +[target.'cfg(unix)'.dependencies] +tikv-jemalloc-ctl = { version = "0.5.0", optional = true } + +[target.'cfg(target_os = "linux")'.dependencies] +procfs = "0.16.0" [dev-dependencies] # test vectors generation proptest.workspace = true -tokio.workspace = true [features] optimism = [ @@ -84,9 +102,10 @@ optimism = [ "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc-eth-api/optimism", + "reth-rpc-eth-types/optimism", ] - +jemalloc = ["dep:tikv-jemalloc-ctl"] [build-dependencies] vergen = { version = "8.0.0", features = ["build", 
"cargo", "git", "gitcl"] } diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 5aa5c58633fc..7d1f61903ffb 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -22,7 +22,7 @@ pub use database::DatabaseArgs; /// LogArgs struct for configuring the logger mod log; -pub use log::{ColorMode, LogArgs, Verbosity}; +pub use log::{ColorMode, LogArgs}; /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 620ae3e8050a..1621f2d8ed8b 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -21,18 +21,15 @@ impl PruningArgs { if !self.full { return None } - Some(PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: Some(PruneMode::Full), transaction_lookup: None, - // prune all receipts if chain doesn't have deposit contract specified in chain spec receipts: chain_spec .deposit_contract .as_ref() - .map(|contract| PruneMode::Before(contract.block)) - .or(Some(PruneMode::Full)), + .map(|contract| PruneMode::Before(contract.block)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), receipts_log_filter: ReceiptsLogPruneConfig( diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 52286bea5091..27a81cc26e7c 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -12,9 +12,14 @@ pub mod args; pub mod cli; pub mod dirs; pub mod exit; +pub mod metrics; pub mod node_config; pub mod utils; pub mod version; + +// Re-export for backwards compatibility. +pub use metrics::prometheus_exporter; + /// Re-exported from `reth_primitives`. pub mod primitives { pub use reth_primitives::*; diff --git a/crates/node/core/src/metrics/mod.rs b/crates/node/core/src/metrics/mod.rs new file mode 100644 index 000000000000..109c59c9f858 --- /dev/null +++ b/crates/node/core/src/metrics/mod.rs @@ -0,0 +1,4 @@ +//! Metrics utilities for the node. + +pub mod prometheus_exporter; +pub mod version_metrics; diff --git a/crates/node/core/src/metrics/prometheus_exporter.rs b/crates/node/core/src/metrics/prometheus_exporter.rs new file mode 100644 index 000000000000..f19a0e15bc85 --- /dev/null +++ b/crates/node/core/src/metrics/prometheus_exporter.rs @@ -0,0 +1,317 @@ +//! Prometheus exporter + +use crate::metrics::version_metrics::VersionInfo; +use eyre::WrapErr; +use http::Response; +use metrics::describe_gauge; +use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; +use metrics_util::layers::{PrefixLayer, Stack}; +use reth_db_api::database_metrics::DatabaseMetrics; +use reth_metrics::metrics::Unit; +use reth_provider::providers::StaticFileProvider; +use reth_tasks::TaskExecutor; +use std::{convert::Infallible, net::SocketAddr, sync::Arc}; + +pub(crate) trait Hook: Fn() + Send + Sync {} +impl Hook for T {} + +/// Installs Prometheus as the metrics recorder. +pub fn install_recorder() -> eyre::Result { + let recorder = PrometheusBuilder::new().build_recorder(); + let handle = recorder.handle(); + + // Build metrics stack + Stack::new(recorder) + .push(PrefixLayer::new("reth")) + .install() + .wrap_err("Couldn't set metrics recorder.")?; + + Ok(handle) +} + +/// Serves Prometheus metrics over HTTP with hooks. 
+/// +/// The hooks are called every time the metrics are requested at the given endpoint, and can be used +/// to record values for pull-style metrics, i.e. metrics that are not automatically updated. +pub(crate) async fn serve_with_hooks( + listen_addr: SocketAddr, + handle: PrometheusHandle, + hooks: impl IntoIterator, + task_executor: TaskExecutor, +) -> eyre::Result<()> { + let hooks: Vec<_> = hooks.into_iter().collect(); + + // Start endpoint + start_endpoint( + listen_addr, + handle, + Arc::new(move || hooks.iter().for_each(|hook| hook())), + task_executor, + ) + .await + .wrap_err("Could not start Prometheus endpoint")?; + + Ok(()) +} + +/// Starts an endpoint at the given address to serve Prometheus metrics. +async fn start_endpoint( + listen_addr: SocketAddr, + handle: PrometheusHandle, + hook: Arc, + task_executor: TaskExecutor, +) -> eyre::Result<()> { + let listener = + tokio::net::TcpListener::bind(listen_addr).await.wrap_err("Could not bind to address")?; + + task_executor.spawn_with_graceful_shutdown_signal(|mut signal| async move { + loop { + let io = tokio::select! { + _ = &mut signal => break, + io = listener.accept() => { + match io { + Ok((stream, _remote_addr)) => stream, + Err(err) => { + tracing::error!(%err, "failed to accept connection"); + continue; + } + } + } + }; + + let handle = handle.clone(); + let hook = hook.clone(); + let service = tower::service_fn(move |_| { + (hook)(); + let metrics = handle.render(); + async move { Ok::<_, Infallible>(Response::new(metrics)) } + }); + + let mut shutdown = signal.clone().ignore_guard(); + tokio::task::spawn(async move { + if let Err(error) = + jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + .await + { + tracing::debug!(%error, "failed to serve request") + } + }); + } + }); + + Ok(()) +} + +/// Serves Prometheus metrics over HTTP with database and process metrics. +pub async fn serve( + listen_addr: SocketAddr, + handle: PrometheusHandle, + db: Metrics, + static_file_provider: StaticFileProvider, + process: metrics_process::Collector, + task_executor: TaskExecutor, +) -> eyre::Result<()> +where + Metrics: DatabaseMetrics + 'static + Send + Sync, +{ + let db_metrics_hook = move || db.report_metrics(); + let static_file_metrics_hook = move || { + let _ = static_file_provider.report_metrics().map_err( + |error| tracing::error!(%error, "Failed to report static file provider metrics"), + ); + }; + + // Clone `process` to move it into the hook and use the original `process` for describe below. 
+ let cloned_process = process.clone(); + let hooks: Vec>> = vec![ + Box::new(db_metrics_hook), + Box::new(static_file_metrics_hook), + Box::new(move || cloned_process.collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ]; + serve_with_hooks(listen_addr, handle, hooks, task_executor).await?; + + // We describe the metrics after the recorder is installed, otherwise this information is not + // registered + describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); + describe_gauge!("db.table_pages", "The number of database pages for a table"); + describe_gauge!("db.table_entries", "The number of entries for a table"); + describe_gauge!("db.freelist", "The number of pages on the freelist"); + describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)"); + describe_gauge!( + "db.timed_out_not_aborted_transactions", + "Number of timed out transactions that were not aborted by the user yet" + ); + + describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment"); + describe_gauge!("static_files.segment_files", "The number of files for a static file segment"); + describe_gauge!( + "static_files.segment_entries", + "The number of entries for a static file segment" + ); + + process.describe(); + describe_memory_stats(); + describe_io_stats(); + VersionInfo::default().register_version_metrics(); + + Ok(()) +} + +#[cfg(all(feature = "jemalloc", unix))] +fn collect_memory_stats() { + use metrics::gauge; + use tikv_jemalloc_ctl::{epoch, stats}; + use tracing::error; + + if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err() + { + return + } + + if let Ok(value) = stats::active::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.active")) + { + gauge!("jemalloc.active").set(value as f64); + } + + if let Ok(value) = stats::allocated::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated")) + { + gauge!("jemalloc.allocated").set(value as f64); + } + + if let Ok(value) = stats::mapped::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped")) + { + gauge!("jemalloc.mapped").set(value as f64); + } + + if let Ok(value) = stats::metadata::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata")) + { + gauge!("jemalloc.metadata").set(value as f64); + } + + if let Ok(value) = stats::resident::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident")) + { + gauge!("jemalloc.resident").set(value as f64); + } + + if let Ok(value) = stats::retained::read() + .map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained")) + { + gauge!("jemalloc.retained").set(value as f64); + } +} + +#[cfg(all(feature = "jemalloc", unix))] +fn describe_memory_stats() { + describe_gauge!( + "jemalloc.active", + Unit::Bytes, + "Total number of bytes in active pages allocated by the application" + ); + describe_gauge!( + "jemalloc.allocated", + Unit::Bytes, + "Total number of bytes allocated by the application" + ); + describe_gauge!( + "jemalloc.mapped", + Unit::Bytes, + "Total number of bytes in active extents mapped by the allocator" + ); + describe_gauge!( + "jemalloc.metadata", + Unit::Bytes, + "Total number of bytes dedicated to jemalloc metadata" + ); + describe_gauge!( + "jemalloc.resident", + Unit::Bytes, + "Total number of bytes in physically resident data pages mapped by the allocator" + ); + describe_gauge!( + "jemalloc.retained", + 
Unit::Bytes, + "Total number of bytes in virtual memory mappings that were retained rather than \ + being returned to the operating system via e.g. munmap(2)" + ); +} + +#[cfg(not(all(feature = "jemalloc", unix)))] +fn collect_memory_stats() {} + +#[cfg(not(all(feature = "jemalloc", unix)))] +fn describe_memory_stats() {} + +#[cfg(target_os = "linux")] +fn collect_io_stats() { + use metrics::counter; + use tracing::error; + + let Ok(process) = procfs::process::Process::myself() + .map_err(|error| error!(%error, "Failed to get currently running process")) + else { + return + }; + + let Ok(io) = process.io().map_err( + |error| error!(%error, "Failed to get IO stats for the currently running process"), + ) else { + return + }; + + counter!("io.rchar").absolute(io.rchar); + counter!("io.wchar").absolute(io.wchar); + counter!("io.syscr").absolute(io.syscr); + counter!("io.syscw").absolute(io.syscw); + counter!("io.read_bytes").absolute(io.read_bytes); + counter!("io.write_bytes").absolute(io.write_bytes); + counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes); +} + +#[cfg(target_os = "linux")] +fn describe_io_stats() { + use metrics::describe_counter; + + describe_counter!("io.rchar", "Characters read"); + describe_counter!("io.wchar", "Characters written"); + describe_counter!("io.syscr", "Read syscalls"); + describe_counter!("io.syscw", "Write syscalls"); + describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read"); + describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written"); + describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes"); +} + +#[cfg(not(target_os = "linux"))] +const fn collect_io_stats() {} + +#[cfg(not(target_os = "linux"))] +const fn describe_io_stats() {} + +#[cfg(test)] +mod tests { + use crate::node_config::PROMETHEUS_RECORDER_HANDLE; + + // Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22) + // may not be able to communicate with each other through the global recorder. + // + // This test ensures that `metrics-process` dependency plays well with the current + // `metrics-exporter-prometheus` dependency version. + #[test] + fn process_metrics() { + // initialize the lazy handle + let _ = &*PROMETHEUS_RECORDER_HANDLE; + + let process = metrics_process::Collector::default(); + process.describe(); + process.collect(); + + let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); + } +} diff --git a/crates/node/metrics/src/version.rs b/crates/node/core/src/metrics/version_metrics.rs similarity index 71% rename from crates/node/metrics/src/version.rs rename to crates/node/core/src/metrics/version_metrics.rs index 6cd8df4320c0..03769d990f35 100644 --- a/crates/node/metrics/src/version.rs +++ b/crates/node/core/src/metrics/version_metrics.rs @@ -1,4 +1,6 @@ //! This exposes reth's version information over prometheus. + +use crate::version::{BUILD_PROFILE_NAME, VERGEN_GIT_SHA}; use metrics::gauge; /// Contains version information for the application. 
@@ -18,6 +20,19 @@ pub struct VersionInfo { pub build_profile: &'static str, } +impl Default for VersionInfo { + fn default() -> Self { + Self { + version: env!("CARGO_PKG_VERSION"), + build_timestamp: env!("VERGEN_BUILD_TIMESTAMP"), + cargo_features: env!("VERGEN_CARGO_FEATURES"), + git_sha: VERGEN_GIT_SHA, + target_triple: env!("VERGEN_CARGO_TARGET_TRIPLE"), + build_profile: BUILD_PROFILE_NAME, + } + } +} + impl VersionInfo { /// This exposes reth's version information over prometheus. pub fn register_version_metrics(&self) { diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 82ed8b660623..1f5bea21beb8 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -6,27 +6,39 @@ use crate::{ PruningArgs, RpcServerArgs, TxPoolArgs, }, dirs::{ChainPath, DataDirPath}, + metrics::prometheus_exporter, utils::get_single_header, }; +use metrics_exporter_prometheus::PrometheusHandle; +use once_cell::sync::Lazy; use reth_chainspec::{ChainSpec, MAINNET}; use reth_config::config::PruneConfig; -use reth_db_api::database::Database; +use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_network_p2p::headers::client::HeadersClient; - use reth_primitives::{ revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256, }; -use reth_provider::{BlockHashReader, HeaderProvider, ProviderFactory, StageCheckpointReader}; +use reth_provider::{ + providers::StaticFileProvider, BlockHashReader, HeaderProvider, ProviderFactory, + StageCheckpointReader, +}; use reth_stages_types::StageId; use reth_storage_errors::provider::ProviderResult; +use reth_tasks::TaskExecutor; use std::{net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; +/// The default prometheus recorder handle. We use a global static to ensure that it is only +/// installed once. +pub static PROMETHEUS_RECORDER_HANDLE: Lazy = + Lazy::new(|| prometheus_exporter::install_recorder().unwrap()); + /// This includes all necessary configuration to launch the node. /// The individual configuration options can be overwritten before launching the node. /// /// # Example /// ```rust +/// # use reth_tasks::{TaskManager, TaskSpawner}; /// # use reth_node_core::{ /// # node_config::NodeConfig, /// # args::RpcServerArgs, @@ -35,6 +47,10 @@ use tracing::*; /// # use tokio::runtime::Handle; /// /// async fn t() { +/// let handle = Handle::current(); +/// let manager = TaskManager::new(handle); +/// let executor = manager.executor(); +/// /// // create the builder /// let builder = NodeConfig::default(); /// @@ -50,6 +66,7 @@ use tracing::*; /// /// # Example /// ```rust +/// # use reth_tasks::{TaskManager, TaskSpawner}; /// # use reth_node_core::{ /// # node_config::NodeConfig, /// # args::RpcServerArgs, @@ -58,6 +75,10 @@ use tracing::*; /// # use tokio::runtime::Handle; /// /// async fn t() { +/// let handle = Handle::current(); +/// let manager = TaskManager::new(handle); +/// let executor = manager.executor(); +/// /// // create the builder with a test database, using the `test` method /// let builder = NodeConfig::test(); /// @@ -263,6 +284,38 @@ impl NodeConfig { Ok(EnvKzgSettings::Default) } + /// Installs the prometheus recorder. + pub fn install_prometheus_recorder(&self) -> eyre::Result { + Ok(PROMETHEUS_RECORDER_HANDLE.clone()) + } + + /// Serves the prometheus endpoint over HTTP with the given database and prometheus handle. 
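+    ///
+    /// This is a no-op if no metrics listen address is configured on the node.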
+ pub async fn start_metrics_endpoint( + &self, + prometheus_handle: PrometheusHandle, + db: Metrics, + static_file_provider: StaticFileProvider, + task_executor: TaskExecutor, + ) -> eyre::Result<()> + where + Metrics: DatabaseMetrics + 'static + Send + Sync, + { + if let Some(listen_addr) = self.metrics { + info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint"); + prometheus_exporter::serve( + listen_addr, + prometheus_handle, + db, + static_file_provider, + metrics_process::Collector::default(), + task_executor, + ) + .await?; + } + + Ok(()) + } + /// Fetches the head block from the database. /// /// If the database is empty, returns the genesis block. diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index 78dbcfbcf5e9..adc922787189 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -20,12 +20,6 @@ pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, . /// The build timestamp. pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); -/// The target triple. -pub const VERGEN_CARGO_TARGET_TRIPLE: &str = env!("VERGEN_CARGO_TARGET_TRIPLE"); - -/// The build features. -pub const VERGEN_CARGO_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); - /// The short version information for reth. /// /// - The latest version from Cargo.toml @@ -79,8 +73,7 @@ pub const LONG_VERSION: &str = const_format::concatcp!( BUILD_PROFILE_NAME ); -/// The build profile name. -pub const BUILD_PROFILE_NAME: &str = { +pub(crate) const BUILD_PROFILE_NAME: &str = { // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime // We split on the path separator of the *host* machine, which may be different from // `std::path::MAIN_SEPARATOR_STR`. 
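`PROMETHEUS_RECORDER_HANDLE` is a process-wide static because a global metrics recorder can be installed only once; every later call to `install_prometheus_recorder` clones the same handle instead of reinstalling. A minimal sketch of that pattern, mirroring the `Stack`/`PrefixLayer` recorder this series removes from `reth-node-metrics`:

use eyre::WrapErr;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use metrics_util::layers::{PrefixLayer, Stack};
use once_cell::sync::Lazy;

// First access installs the recorder; later accesses reuse the handle,
// since installing a second global recorder would fail.
static PROMETHEUS_HANDLE: Lazy<PrometheusHandle> = Lazy::new(|| {
    let recorder = PrometheusBuilder::new().build_recorder();
    let handle = recorder.handle();
    // Prefix every metric name with "reth" before installing globally.
    Stack::new(recorder)
        .push(PrefixLayer::new("reth"))
        .install()
        .wrap_err("Couldn't set metrics recorder.")
        .unwrap();
    handle
});

// Cheap to call from multiple places: only clones the shared handle.
fn install_prometheus_recorder() -> PrometheusHandle {
    PROMETHEUS_HANDLE.clone()
}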
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs
index 3fe989ab0b54..bc0cfb1373a9 100644
--- a/crates/node/events/src/node.rs
+++ b/crates/node/events/src/node.rs
@@ -74,30 +74,28 @@ impl NodeState {
 self.peers_info.as_ref().map(|info| info.num_connected_peers()).unwrap_or_default()
 }
 
- fn build_current_stage(
- &self,
- stage_id: StageId,
- checkpoint: StageCheckpoint,
- target: Option,
- ) -> CurrentStage {
- let (eta, entities_checkpoint) = self
- .current_stage
- .as_ref()
- .filter(|current_stage| current_stage.stage_id == stage_id)
- .map_or_else(
- || (Eta::default(), None),
- |current_stage| (current_stage.eta, current_stage.entities_checkpoint),
- );
-
- CurrentStage { stage_id, eta, checkpoint, entities_checkpoint, target }
- }
-
 /// Processes an event emitted by the pipeline
 fn handle_pipeline_event(&mut self, event: PipelineEvent) {
 match event {
 PipelineEvent::Prepare { pipeline_stages_progress, stage_id, checkpoint, target } => {
 let checkpoint = checkpoint.unwrap_or_default();
- let current_stage = self.build_current_stage(stage_id, checkpoint, target);
+ let current_stage = CurrentStage {
+ stage_id,
+ eta: match &self.current_stage {
+ Some(current_stage) if current_stage.stage_id == stage_id => {
+ current_stage.eta
+ }
+ _ => Eta::default(),
+ },
+ checkpoint,
+ entities_checkpoint: match &self.current_stage {
+ Some(current_stage) if current_stage.stage_id == stage_id => {
+ current_stage.entities_checkpoint
+ }
+ _ => None,
+ },
+ target,
+ };
 
 info!(
 pipeline_stages = %pipeline_stages_progress,
@@ -111,7 +109,23 @@ impl NodeState {
 }
 PipelineEvent::Run { pipeline_stages_progress, stage_id, checkpoint, target } => {
 let checkpoint = checkpoint.unwrap_or_default();
- let current_stage = self.build_current_stage(stage_id, checkpoint, target);
+ let current_stage = CurrentStage {
+ stage_id,
+ eta: match &self.current_stage {
+ Some(current_stage) if current_stage.stage_id == stage_id => {
+ current_stage.eta
+ }
+ _ => Eta::default(),
+ },
+ checkpoint,
+ entities_checkpoint: match &self.current_stage {
+ Some(current_stage) if current_stage.stage_id == stage_id => {
+ current_stage.entities_checkpoint
+ }
+ _ => None,
+ },
+ target,
+ };
 
 if let Some(stage_eta) = current_stage.eta.fmt_for_stage(stage_id) {
 info!(
diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml
deleted file mode 100644
index aaa2cb3fcd86..000000000000
--- a/crates/node/metrics/Cargo.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-[package]
-name = "reth-node-metrics"
-version.workspace = true
-edition.workspace = true
-rust-version.workspace = true
-license.workspace = true
-homepage.workspace = true
-repository.workspace = true
-
-[dependencies]
-reth-db-api.workspace = true
-reth-provider.workspace = true
-reth-metrics.workspace = true
-reth-tasks.workspace = true
-
-metrics.workspace = true
-metrics-exporter-prometheus.workspace = true
-metrics-process.workspace = true
-metrics-util.workspace = true
-
-tokio.workspace = true
-
-once_cell.workspace = true
-
-jsonrpsee = { workspace = true, features = ["server"] }
-http.workspace = true
-tower.workspace = true
-
-tracing.workspace = true
-eyre.workspace = true
-
-[target.'cfg(unix)'.dependencies]
-tikv-jemalloc-ctl = { version = "0.5.0", optional = true }
-
-[target.'cfg(target_os = "linux")'.dependencies]
-procfs = "0.16.0"
-
-[dev-dependencies]
-reth-db = { workspace = true, features = ["test-utils"] }
-reqwest.workspace = true
-reth-chainspec.workspace = true
-socket2 = { version = "0.4", default-features = false }
-
-[lints]
-workspace = true
-
-[features]
-jemalloc = ["dep:tikv-jemalloc-ctl"]
-
-
-[build-dependencies]
-vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] }
diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs
deleted file mode 100644
index 18755717667c..000000000000
--- a/crates/node/metrics/src/hooks.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-use metrics_process::Collector;
-use reth_db_api::database_metrics::DatabaseMetrics;
-use reth_provider::providers::StaticFileProvider;
-use std::{fmt, sync::Arc};
-pub(crate) trait Hook: Fn() + Send + Sync {}
-impl Hook for T {}
-
-impl fmt::Debug for Hooks {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let hooks_len = self.inner.len();
- f.debug_struct("Hooks")
- .field("inner", &format!("Arc>>, len: {}", hooks_len))
- .finish()
- }
-}
-
-/// Helper type for managing hooks
-#[derive(Clone)]
-pub struct Hooks {
- inner: Arc>>>,
-}
-
-impl Hooks {
- /// Create a new set of hooks
- pub fn new(
- db: Metrics,
- static_file_provider: StaticFileProvider,
- ) -> Self {
- let hooks: Vec>> = vec![
- Box::new(move || db.report_metrics()),
- Box::new(move || {
- let _ = static_file_provider.report_metrics().map_err(
- |error| tracing::error!(%error, "Failed to report static file provider metrics"),
- );
- }),
- Box::new(move || Collector::default().collect()),
- Box::new(collect_memory_stats),
- Box::new(collect_io_stats),
- ];
- Self { inner: Arc::new(hooks) }
- }
-
- pub(crate) fn iter(&self) -> impl Iterator>> {
- self.inner.iter()
- }
-}
-
-#[cfg(all(feature = "jemalloc", unix))]
-fn collect_memory_stats() {
- use metrics::gauge;
- use tikv_jemalloc_ctl::{epoch, stats};
- use tracing::error;
-
- if epoch::advance().map_err(|error| error!(%error, "Failed to advance jemalloc epoch")).is_err()
- {
- return
- }
-
- if let Ok(value) = stats::active::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.active"))
- {
- gauge!("jemalloc.active").set(value as f64);
- }
-
- if let Ok(value) = stats::allocated::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.allocated"))
- {
- gauge!("jemalloc.allocated").set(value as f64);
- }
-
- if let Ok(value) = stats::mapped::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.mapped"))
- {
- gauge!("jemalloc.mapped").set(value as f64);
- }
-
- if let Ok(value) = stats::metadata::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.metadata"))
- {
- gauge!("jemalloc.metadata").set(value as f64);
- }
-
- if let Ok(value) = stats::resident::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.resident"))
- {
- gauge!("jemalloc.resident").set(value as f64);
- }
-
- if let Ok(value) = stats::retained::read()
- .map_err(|error| error!(%error, "Failed to read jemalloc.stats.retained"))
- {
- gauge!("jemalloc.retained").set(value as f64);
- }
-}
-
-#[cfg(not(all(feature = "jemalloc", unix)))]
-const fn collect_memory_stats() {}
-
-#[cfg(target_os = "linux")]
-fn collect_io_stats() {
- use metrics::counter;
- use tracing::error;
-
- let Ok(process) = procfs::process::Process::myself()
- .map_err(|error| error!(%error, "Failed to get currently running process"))
- else {
- return
- };
-
- let Ok(io) = process.io().map_err(
- |error| error!(%error, "Failed to get IO stats for the currently running process"),
- ) else {
- return
- };
-
- counter!("io.rchar").absolute(io.rchar);
- counter!("io.wchar").absolute(io.wchar);
- counter!("io.syscr").absolute(io.syscr);
- counter!("io.syscw").absolute(io.syscw); - counter!("io.read_bytes").absolute(io.read_bytes); - counter!("io.write_bytes").absolute(io.write_bytes); - counter!("io.cancelled_write_bytes").absolute(io.cancelled_write_bytes); -} - -#[cfg(not(target_os = "linux"))] -const fn collect_io_stats() {} diff --git a/crates/node/metrics/src/lib.rs b/crates/node/metrics/src/lib.rs deleted file mode 100644 index 4abc39a32dc3..000000000000 --- a/crates/node/metrics/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Metrics utilities for the node. -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -/// The metrics hooks for prometheus. -pub mod hooks; -pub mod recorder; -/// The metric server serving the metrics. -pub mod server; -pub mod version; - -pub use metrics_exporter_prometheus::*; -pub use metrics_process::*; diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs deleted file mode 100644 index 05047992faae..000000000000 --- a/crates/node/metrics/src/recorder.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Prometheus recorder - -use eyre::WrapErr; -use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; -use metrics_util::layers::{PrefixLayer, Stack}; -use once_cell::sync::Lazy; - -/// Installs the Prometheus recorder as the global recorder. -pub fn install_prometheus_recorder() -> &'static PrometheusHandle { - &PROMETHEUS_RECORDER_HANDLE -} - -/// The default Prometheus recorder handle. We use a global static to ensure that it is only -/// installed once. -static PROMETHEUS_RECORDER_HANDLE: Lazy = - Lazy::new(|| PrometheusRecorder::install().unwrap()); - -/// Prometheus recorder installer -#[derive(Debug)] -pub struct PrometheusRecorder; - -impl PrometheusRecorder { - /// Installs Prometheus as the metrics recorder. - pub fn install() -> eyre::Result { - let recorder = PrometheusBuilder::new().build_recorder(); - let handle = recorder.handle(); - - // Build metrics stack - Stack::new(recorder) - .push(PrefixLayer::new("reth")) - .install() - .wrap_err("Couldn't set metrics recorder.")?; - - Ok(handle) - } -} - -#[cfg(test)] -mod tests { - use super::*; - // Dependencies using different version of the `metrics` crate (to be exact, 0.21 vs 0.22) - // may not be able to communicate with each other through the global recorder. - // - // This test ensures that `metrics-process` dependency plays well with the current - // `metrics-exporter-prometheus` dependency version. 
- #[test] - fn process_metrics() { - // initialize the lazy handle - let _ = &*PROMETHEUS_RECORDER_HANDLE; - - let process = metrics_process::Collector::default(); - process.describe(); - process.collect(); - - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); - assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); - } -} diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs deleted file mode 100644 index 5eadaaab527a..000000000000 --- a/crates/node/metrics/src/server.rs +++ /dev/null @@ -1,270 +0,0 @@ -use crate::{ - hooks::{Hook, Hooks}, - recorder::install_prometheus_recorder, - version::VersionInfo, -}; -use eyre::WrapErr; -use http::{header::CONTENT_TYPE, HeaderValue, Response}; -use metrics::describe_gauge; -use metrics_process::Collector; -use reth_metrics::metrics::Unit; -use reth_tasks::TaskExecutor; -use std::{convert::Infallible, net::SocketAddr, sync::Arc}; -use tracing::info; - -/// Configuration for the [`MetricServer`] -#[derive(Debug)] -pub struct MetricServerConfig { - listen_addr: SocketAddr, - version_info: VersionInfo, - task_executor: TaskExecutor, - hooks: Hooks, -} - -impl MetricServerConfig { - /// Create a new [`MetricServerConfig`] with the given configuration - pub const fn new( - listen_addr: SocketAddr, - version_info: VersionInfo, - task_executor: TaskExecutor, - hooks: Hooks, - ) -> Self { - Self { listen_addr, hooks, task_executor, version_info } - } -} - -/// [`MetricServer`] responsible for serving the metrics endpoint -#[derive(Debug)] -pub struct MetricServer { - config: MetricServerConfig, -} - -impl MetricServer { - /// Create a new [`MetricServer`] with the given configuration - pub const fn new(config: MetricServerConfig) -> Self { - Self { config } - } - - /// Spawns the metrics server - pub async fn serve(&self) -> eyre::Result<()> { - let MetricServerConfig { listen_addr, hooks, task_executor, version_info } = &self.config; - - info!(target: "reth::cli", addr = %listen_addr, "Starting metrics endpoint"); - - let hooks = hooks.clone(); - self.start_endpoint( - *listen_addr, - Arc::new(move || hooks.iter().for_each(|hook| hook())), - task_executor.clone(), - ) - .await - .wrap_err("Could not start Prometheus endpoint")?; - - // Describe metrics after recorder installation - describe_db_metrics(); - describe_static_file_metrics(); - Collector::default().describe(); - describe_memory_stats(); - describe_io_stats(); - - version_info.register_version_metrics(); - - Ok(()) - } - - async fn start_endpoint( - &self, - listen_addr: SocketAddr, - hook: Arc, - task_executor: TaskExecutor, - ) -> eyre::Result<()> { - let listener = tokio::net::TcpListener::bind(listen_addr) - .await - .wrap_err("Could not bind to address")?; - - task_executor.spawn_with_graceful_shutdown_signal(|mut signal| async move { - loop { - let io = tokio::select! 
{ - _ = &mut signal => break, - io = listener.accept() => { - match io { - Ok((stream, _remote_addr)) => stream, - Err(err) => { - tracing::error!(%err, "failed to accept connection"); - continue; - } - } - } - }; - - let handle = install_prometheus_recorder(); - let hook = hook.clone(); - let service = tower::service_fn(move |_| { - (hook)(); - let metrics = handle.render(); - let mut response = Response::new(metrics); - response - .headers_mut() - .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); - async move { Ok::<_, Infallible>(response) } - }); - - let mut shutdown = signal.clone().ignore_guard(); - tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) - .await - { - tracing::debug!(%error, "failed to serve request") - } - }); - } - }); - - Ok(()) - } -} - -fn describe_db_metrics() { - describe_gauge!("db.table_size", Unit::Bytes, "The size of a database table (in bytes)"); - describe_gauge!("db.table_pages", "The number of database pages for a table"); - describe_gauge!("db.table_entries", "The number of entries for a table"); - describe_gauge!("db.freelist", "The number of pages on the freelist"); - describe_gauge!("db.page_size", Unit::Bytes, "The size of a database page (in bytes)"); - describe_gauge!( - "db.timed_out_not_aborted_transactions", - "Number of timed out transactions that were not aborted by the user yet" - ); -} - -fn describe_static_file_metrics() { - describe_gauge!("static_files.segment_size", Unit::Bytes, "The size of a static file segment"); - describe_gauge!("static_files.segment_files", "The number of files for a static file segment"); - describe_gauge!( - "static_files.segment_entries", - "The number of entries for a static file segment" - ); -} - -#[cfg(all(feature = "jemalloc", unix))] -fn describe_memory_stats() { - describe_gauge!( - "jemalloc.active", - Unit::Bytes, - "Total number of bytes in active pages allocated by the application" - ); - describe_gauge!( - "jemalloc.allocated", - Unit::Bytes, - "Total number of bytes allocated by the application" - ); - describe_gauge!( - "jemalloc.mapped", - Unit::Bytes, - "Total number of bytes in active extents mapped by the allocator" - ); - describe_gauge!( - "jemalloc.metadata", - Unit::Bytes, - "Total number of bytes dedicated to jemalloc metadata" - ); - describe_gauge!( - "jemalloc.resident", - Unit::Bytes, - "Total number of bytes in physically resident data pages mapped by the allocator" - ); - describe_gauge!( - "jemalloc.retained", - Unit::Bytes, - "Total number of bytes in virtual memory mappings that were retained rather than \ - being returned to the operating system via e.g. 
munmap(2)" - ); -} - -#[cfg(not(all(feature = "jemalloc", unix)))] -const fn describe_memory_stats() {} - -#[cfg(target_os = "linux")] -fn describe_io_stats() { - use metrics::describe_counter; - - describe_counter!("io.rchar", "Characters read"); - describe_counter!("io.wchar", "Characters written"); - describe_counter!("io.syscr", "Read syscalls"); - describe_counter!("io.syscw", "Write syscalls"); - describe_counter!("io.read_bytes", Unit::Bytes, "Bytes read"); - describe_counter!("io.write_bytes", Unit::Bytes, "Bytes written"); - describe_counter!("io.cancelled_write_bytes", Unit::Bytes, "Cancelled write bytes"); -} - -#[cfg(not(target_os = "linux"))] -const fn describe_io_stats() {} - -#[cfg(test)] -mod tests { - use super::*; - use reqwest::Client; - use reth_chainspec::MAINNET; - use reth_db::{ - test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, - DatabaseEnv, - }; - use reth_provider::{ - providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory, - }; - use reth_tasks::TaskManager; - use socket2::{Domain, Socket, Type}; - use std::net::{SocketAddr, TcpListener}; - - fn create_test_db() -> ProviderFactory>> { - let (_, static_dir_path) = create_test_static_files_dir(); - ProviderFactory::new( - create_test_rw_db(), - MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ) - } - - fn get_random_available_addr() -> SocketAddr { - let addr = &"127.0.0.1:0".parse::().unwrap().into(); - let socket = Socket::new(Domain::IPV4, Type::STREAM, None).unwrap(); - socket.set_reuse_address(true).unwrap(); - socket.bind(addr).unwrap(); - socket.listen(1).unwrap(); - let listener = TcpListener::from(socket); - listener.local_addr().unwrap() - } - - #[tokio::test] - async fn test_metrics_endpoint() { - let version_info = VersionInfo { - version: "test", - build_timestamp: "test", - cargo_features: "test", - git_sha: "test", - target_triple: "test", - build_profile: "test", - }; - - let tasks = TaskManager::current(); - let executor = tasks.executor(); - - let factory = create_test_db(); - let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); - - let listen_addr = get_random_available_addr(); - let config = MetricServerConfig::new(listen_addr, version_info, executor, hooks); - - MetricServer::new(config).serve().await.unwrap(); - - // Send request to the metrics endpoint - let url = format!("http://{}", listen_addr); - let response = Client::new().get(&url).send().await.unwrap(); - assert!(response.status().is_success()); - - // Check the response body - let body = response.text().await.unwrap(); - assert!(body.contains("reth_db_table_size")); - assert!(body.contains("reth_jemalloc_metadata")); - } -} diff --git a/crates/node/metrics/src/version_metrics.rs b/crates/node/metrics/src/version_metrics.rs deleted file mode 100644 index 63b5009fa088..000000000000 --- a/crates/node/metrics/src/version_metrics.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! This exposes reth's version information over prometheus. -use metrics::gauge; - -/// The build timestamp. -pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); -/// The cargo features enabled for the build. -pub const VERGEN_CARGO_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); -/// The target triple for the build. -pub const VERGEN_CARGO_TARGET_TRIPLE: &str = env!("VERGEN_CARGO_TARGET_TRIPLE"); -/// The full SHA of the latest commit. -pub const VERGEN_GIT_SHA_LONG: &str = env!("VERGEN_GIT_SHA"); -/// The 8 character short SHA of the latest commit. 
-pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, ..8); - -/// The build profile name. -pub const BUILD_PROFILE_NAME: &str = { - // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime - // We split on the path separator of the *host* machine, which may be different from - // `std::path::MAIN_SEPARATOR_STR`. - const OUT_DIR: &str = env!("OUT_DIR"); - let unix_parts = const_format::str_split!(OUT_DIR, '/'); - if unix_parts.len() >= 4 { - unix_parts[unix_parts.len() - 4] - } else { - let win_parts = const_format::str_split!(OUT_DIR, '\\'); - win_parts[win_parts.len() - 4] - } -}; - -/// Contains version information for the application. -#[derive(Debug, Clone)] -pub struct VersionInfo { - /// The version of the application. - pub version: &'static str, - /// The build timestamp of the application. - pub build_timestamp: &'static str, - /// The cargo features enabled for the build. - pub cargo_features: &'static str, - /// The Git SHA of the build. - pub git_sha: &'static str, - /// The target triple for the build. - pub target_triple: &'static str, - /// The build profile (e.g., debug or release). - pub build_profile: &'static str, -} - -impl Default for VersionInfo { - fn default() -> Self { - Self { - version: env!("CARGO_PKG_VERSION"), - build_timestamp: VERGEN_BUILD_TIMESTAMP, - cargo_features: VERGEN_CARGO_FEATURES, - git_sha: VERGEN_GIT_SHA, - target_triple: VERGEN_CARGO_TARGET_TRIPLE, - build_profile: BUILD_PROFILE_NAME, - } - } -} - -impl VersionInfo { - /// This exposes reth's version information over prometheus. - pub fn register_version_metrics(&self) { - let labels: [(&str, &str); 6] = [ - ("version", self.version), - ("build_timestamp", self.build_timestamp), - ("cargo_features", self.cargo_features), - ("git_sha", self.git_sha), - ("target_triple", self.target_triple), - ("build_profile", self.build_profile), - ]; - - let gauge = gauge!("info", &labels); - gauge.set(1) - } -} diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index f6b4a792ce24..fade01da44ca 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -16,8 +16,8 @@ use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_primitives::Receipts; use reth_provider::{ - writer::StorageWriter, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, + StaticFileProviderFactory, StaticFileWriter, StatsReader, }; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -140,7 +140,7 @@ where ); // We're reusing receipt writing code internal to - // `StorageWriter::append_receipts_from_blocks`, so we just use a default empty + // `ExecutionOutcome::write_to_storage`, so we just use a default empty // `BundleState`. 
let execution_outcome = ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default()); @@ -149,8 +149,11 @@ where static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)?; // finally, write the receipts - let mut storage_writer = StorageWriter::new(Some(&provider), Some(static_file_producer)); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + execution_outcome.write_to_storage( + &provider, + Some(static_file_producer), + OriginalValuesKnown::Yes, + )?; } provider.commit()?; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f2b4a2b83cb6..7c4371b699c0 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -111,13 +111,13 @@ impl ConfigureEvmEnv for OptimismEvmConfig { impl ConfigureEvm for OptimismEvmConfig { type DefaultExternalContext<'a> = (); - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default().with_db(db).optimism().build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> where - DB: Database, + DB: Database + 'a, I: GetInspector, { EvmBuilder::default() diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 0b163a571bfb..ddbc4916671c 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -80,6 +80,7 @@ optimism = [ "reth-beacon-consensus/optimism", "reth-revm/optimism", "reth-auto-seal-consensus/optimism", + "reth-rpc-eth-types/optimism", "reth-optimism-rpc/optimism" ] test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 1f0b15b6e38f..26d1ab5779b8 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -38,7 +38,6 @@ tokio.workspace = true # rpc jsonrpsee.workspace = true -jsonrpsee-types.workspace = true # misc thiserror.workspace = true @@ -60,5 +59,6 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", + "reth-rpc-eth-types/optimism", "revm/optimism" ] \ No newline at end of file diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 29a348ab741d..7b7d3bed92fa 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,85 +1,31 @@ //! RPC errors specific to OP. -use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; -use reth_rpc_eth_api::AsEthApiError; +use jsonrpsee::types::ErrorObject; use reth_rpc_eth_types::EthApiError; -use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; -use reth_rpc_types::error::EthRpcErrorCode; +use reth_rpc_server_types::result::internal_rpc_err; +use reth_rpc_types::ToRpcError; /// Optimism specific errors, that extend [`EthApiError`]. #[derive(Debug, thiserror::Error)] pub enum OpEthApiError { - /// L1 ethereum error. - #[error(transparent)] - Eth(#[from] EthApiError), /// Thrown when calculating L1 gas fee. #[error("failed to calculate l1 gas fee")] L1BlockFeeError, /// Thrown when calculating L1 gas used #[error("failed to calculate l1 gas used")] L1BlockGasError, - /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). 
- #[error(transparent)]
- InvalidTransaction(OptimismInvalidTransactionError),
 }
 
-impl AsEthApiError for OpEthApiError {
- fn as_err(&self) -> Option<&EthApiError> {
+impl ToRpcError for OpEthApiError {
+ fn to_rpc_error(&self) -> ErrorObject<'static> {
 match self {
- Self::Eth(err) => Some(err),
- _ => None,
+ Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()),
 }
 }
 }
 
-impl From for jsonrpsee_types::error::ErrorObject<'static> {
+impl From for EthApiError {
 fn from(err: OpEthApiError) -> Self {
- match err {
- OpEthApiError::Eth(err) => err.into(),
- OpEthApiError::L1BlockFeeError | OpEthApiError::L1BlockGasError => {
- internal_rpc_err(err.to_string())
- }
- OpEthApiError::InvalidTransaction(err) => err.into(),
- }
- }
-}
-
-/// Optimism specific invalid transaction errors
-#[derive(thiserror::Error, Debug)]
-pub enum OptimismInvalidTransactionError {
- /// A deposit transaction was submitted as a system transaction post-regolith.
- #[error("no system transactions allowed after regolith")]
- DepositSystemTxPostRegolith,
- /// A deposit transaction halted post-regolith
- #[error("deposit transaction halted after regolith")]
- HaltedDepositPostRegolith,
-}
-
-impl From for jsonrpsee_types::error::ErrorObject<'static> {
- fn from(err: OptimismInvalidTransactionError) -> Self {
- match err {
- OptimismInvalidTransactionError::DepositSystemTxPostRegolith |
- OptimismInvalidTransactionError::HaltedDepositPostRegolith => {
- rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None)
- }
- }
- }
-}
-
-impl TryFrom for OptimismInvalidTransactionError {
- type Error = InvalidTransaction;
-
- fn try_from(err: InvalidTransaction) -> Result {
- match err {
- InvalidTransaction::OptimismError(err) => match err {
- OptimismInvalidTransaction::DepositSystemTxPostRegolith => {
- Ok(Self::DepositSystemTxPostRegolith)
- }
- OptimismInvalidTransaction::HaltedDepositPostRegolith => {
- Ok(Self::HaltedDepositPostRegolith)
- }
- },
- _ => Err(err),
- }
+ Self::other(err)
 }
 }
diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs
index c1bdc6098ccd..c48d70907f10 100644
--- a/crates/optimism/rpc/src/eth/block.rs
+++ b/crates/optimism/rpc/src/eth/block.rs
@@ -2,11 +2,8 @@
 use reth_primitives::TransactionMeta;
 use reth_provider::{BlockReaderIdExt, HeaderProvider};
-use reth_rpc_eth_api::{
- helpers::{EthApiSpec, EthBlocks, LoadBlock, LoadReceipt, LoadTransaction},
- FromEthApiError,
-};
-use reth_rpc_eth_types::{EthStateCache, ReceiptBuilder};
+use reth_rpc_eth_api::helpers::{EthApiSpec, EthBlocks, LoadBlock, LoadReceipt, LoadTransaction};
+use reth_rpc_eth_types::{EthResult, EthStateCache, ReceiptBuilder};
 use reth_rpc_types::{AnyTransactionReceipt, BlockId};
 
 use crate::{op_receipt_fields, OpEthApi};
@@ -22,7 +19,7 @@ where
 async fn block_receipts(
 &self,
 block_id: BlockId,
- ) -> Result>, Self::Error>
+ ) -> EthResult>>
 where
 Self: LoadReceipt,
 {
@@ -55,13 +52,11 @@ where
 let optimism_tx_meta = self.build_op_tx_meta(tx, l1_block_info.clone(), timestamp)?;
 
- ReceiptBuilder::new(tx, meta, receipt, &receipts)
- .map(|builder| {
- op_receipt_fields(builder, tx, receipt, optimism_tx_meta).build()
- })
- .map_err(Self::Error::from_eth_err)
+ ReceiptBuilder::new(tx, meta, receipt, &receipts).map(|builder| {
+ op_receipt_fields(builder, tx, receipt, optimism_tx_meta).build()
+ })
 })
- .collect::, Self::Error>>();
+ .collect::>>();
 
 return receipts.map(Some)
 }
diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs
index d3bea8decdc1..03aa9a1f40a5 100644
--- a/crates/optimism/rpc/src/eth/call.rs
+++ b/crates/optimism/rpc/src/eth/call.rs
@@ -3,22 +3,13 @@ use reth_primitives::{
 revm_primitives::{BlockEnv, OptimismFields, TxEnv},
 Bytes,
 };
-use reth_rpc_eth_api::{
- helpers::{Call, EthCall},
- EthApiTypes, FromEthApiError,
-};
-use reth_rpc_eth_types::EthApiError;
+use reth_rpc_eth_api::helpers::Call;
+use reth_rpc_eth_types::EthResult;
 use reth_rpc_types::TransactionRequest;
 
 use crate::OpEthApi;
 
-impl EthCall for OpEthApi where EthApiError: From {}
-
-impl Call for OpEthApi
-where
- Eth: Call + EthApiTypes,
- EthApiError: From,
-{
+impl Call for OpEthApi {
 fn call_gas_limit(&self) -> u64 {
 self.inner.call_gas_limit()
 }
@@ -31,9 +22,8 @@ where
 &self,
 block_env: &BlockEnv,
 request: TransactionRequest,
- ) -> Result {
- let mut env =
- self.inner.create_txn_env(block_env, request).map_err(Self::Error::from_eth_err)?;
+ ) -> EthResult {
+ let mut env = Eth::create_txn_env(&self.inner, block_env, request)?;
 
 env.optimism = OptimismFields { enveloped_tx: Some(Bytes::new()), ..Default::default() };
 
diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs
index 1f2b27c86e8f..1178ac1a77da 100644
--- a/crates/optimism/rpc/src/eth/mod.rs
+++ b/crates/optimism/rpc/src/eth/mod.rs
@@ -18,10 +18,10 @@ use reth_provider::{BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StatePr
 use reth_rpc::eth::DevSigner;
 use reth_rpc_eth_api::{
 helpers::{
- AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadFee, LoadState, SpawnBlocking,
- Trace, UpdateRawTxForwarder,
+ AddDevSigners, EthApiSpec, EthCall, EthFees, EthSigner, EthState, LoadFee, LoadState,
+ SpawnBlocking, Trace, UpdateRawTxForwarder,
 },
- EthApiTypes, RawTransactionForwarder,
+ RawTransactionForwarder,
 };
 use reth_rpc_eth_types::EthStateCache;
 use reth_rpc_types::SyncStatus;
@@ -29,8 +29,6 @@ use reth_tasks::{pool::BlockingTaskPool, TaskSpawner};
 use reth_transaction_pool::TransactionPool;
 use tokio::sync::{AcquireError, OwnedSemaphorePermit};
 
-use crate::OpEthApiError;
-
 /// OP-Reth `Eth` API implementation.
 ///
 /// This type provides the functionality for handling `eth_` related requests.
@@ -53,13 +51,6 @@ impl OpEthApi {
 }
 }
 
-impl EthApiTypes for OpEthApi
-where
- Eth: Send + Sync,
-{
- type Error = OpEthApiError;
-}
-
 impl EthApiSpec for OpEthApi {
 fn protocol_version(&self) -> impl Future> + Send {
 self.inner.protocol_version()
@@ -151,6 +142,8 @@ impl EthState for OpEthApi {
 }
 }
 
+impl EthCall for OpEthApi {}
+
 impl EthFees for OpEthApi {}
 
 impl Trace for OpEthApi {
diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs
index bef18a716086..f11771d615f6 100644
--- a/crates/optimism/rpc/src/eth/receipt.rs
+++ b/crates/optimism/rpc/src/eth/receipt.rs
@@ -1,11 +1,8 @@
 //! Loads and formats OP receipt RPC response.
use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::{ - helpers::{EthApiSpec, LoadReceipt, LoadTransaction}, - FromEthApiError, -}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_api::helpers::{EthApiSpec, LoadReceipt, LoadTransaction}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, OptimismTransactionReceiptFields}; use crate::{OpEthApi, OptimismTxMeta}; @@ -24,19 +21,17 @@ where tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, - ) -> Result { + ) -> EthResult { let (block, receipts) = LoadReceipt::cache(self) .get_block_and_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(Self::Error::from_eth_err(EthApiError::UnknownBlockNumber))?; + .await? + .ok_or(EthApiError::UnknownBlockNumber)?; let block = block.unseal(); let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; - let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts) - .map_err(Self::Error::from_eth_err)?; + let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts)?; let resp_builder = op_receipt_fields(resp_builder, &tx, &receipt, optimism_tx_meta); Ok(resp_builder.build()) diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 6689e230f1ce..326c3c73d1c8 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -7,9 +7,9 @@ use reth_primitives::TransactionSigned; use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthSigner, EthTransactions, LoadTransaction}, - EthApiTypes, RawTransactionForwarder, + RawTransactionForwarder, }; -use reth_rpc_eth_types::EthStateCache; +use reth_rpc_eth_types::{EthResult, EthStateCache}; use revm::L1BlockInfo; use crate::{OpEthApi, OpEthApiError}; @@ -79,7 +79,7 @@ where tx: &TransactionSigned, l1_block_info: Option, block_timestamp: u64, - ) -> Result::Error> { + ) -> EthResult { let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; let (l1_fee, l1_data_gas) = if !tx.is_deposit() { diff --git a/crates/payload/builder/src/events.rs b/crates/payload/builder/src/events.rs index 6235ddf7fe22..271eb2267ec4 100644 --- a/crates/payload/builder/src/events.rs +++ b/crates/payload/builder/src/events.rs @@ -20,12 +20,11 @@ pub enum Events { /// Represents a receiver for various payload events. #[derive(Debug)] pub struct PayloadEvents { - /// The receiver for the payload events. pub receiver: broadcast::Receiver>, } impl PayloadEvents { - /// Convert this receiver into a stream of `PayloadEvents`. + // Convert this receiver into a stream of PayloadEvents. 
pub fn into_stream(self) -> BroadcastStream> { BroadcastStream::new(self.receiver) } diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 2a29fe916ead..b3baf11991de 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -113,11 +113,9 @@ pub mod noop; #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; -pub use events::{Events, PayloadEvents}; +pub use events::Events; pub use reth_rpc_types::engine::PayloadId; -pub use service::{ - PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, -}; +pub use service::{PayloadBuilderHandle, PayloadBuilderService, PayloadStore}; pub use traits::{KeepPayloadJobAlive, PayloadJob, PayloadJobGenerator}; // re-export the Ethereum engine primitives for convenience diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index ede1af20c487..b7eb8515f26b 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -44,6 +44,7 @@ alloy-consensus = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true test-fuzz.workspace = true rand.workspace = true serde_json.workspace = true diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 6099e713118c..8ecbf19a9159 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -61,10 +61,6 @@ impl Bytecode { /// Create new bytecode from raw bytes. /// /// No analysis will be performed. - /// - /// # Panics - /// - /// Panics if bytecode is EOF and has incorrect format. pub fn new_raw(bytes: Bytes) -> Self { Self(RevmBytecode::new_raw(bytes)) } @@ -91,10 +87,10 @@ impl Compact for Bytecode { buf.put_slice(map); 1 + 8 + map.len() } - RevmBytecode::Eof(eof) => { - buf.put_u8(3); - buf.put_slice(eof.raw().as_ref()); - 1 + eof.raw().as_ref().len() + RevmBytecode::Eof(_) => { + // buf.put_u8(3); + // TODO(EOF) + todo!("EOF") } }; len + bytecode.len() + 4 @@ -118,10 +114,8 @@ impl Compact for Bytecode { JumpTable::from_slice(buf), ) }), - 3 => { - // EOF bytecode object will be decoded from the raw bytecode - Self(RevmBytecode::new_raw(bytes)) - } + // TODO(EOF) + 3 => todo!("EOF"), _ => unreachable!("Junk data in database: unknown Bytecode variant"), }; (decoded, &[]) diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index c8c1cbd8dd95..6421e99b0d3c 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -25,7 +25,7 @@ pub mod account; pub use account::{Account, Bytecode}; mod integer_list; -pub use integer_list::{IntegerList, RoaringBitmapError}; +pub use integer_list::IntegerList; pub mod request; pub use request::{Request, Requests}; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 55ccf3b3325c..3f44fc62c0c8 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -51,6 +51,7 @@ zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } +# proptest-derive = { workspace = true, optional = true } [dev-dependencies] # eth @@ -66,6 +67,7 @@ assert_matches.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = 
true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true diff --git a/crates/primitives/src/transaction/eip4844.rs b/crates/primitives/src/transaction/eip4844.rs index b035b396c099..234c558a1ee0 100644 --- a/crates/primitives/src/transaction/eip4844.rs +++ b/crates/primitives/src/transaction/eip4844.rs @@ -5,8 +5,6 @@ use crate::{ }; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use core::mem; - -#[cfg(any(test, feature = "reth-codec"))] use reth_codecs::Compact; /// To be used with `Option` to place or replace one bit on the bitflag struct. diff --git a/crates/primitives/src/transaction/eip7702.rs b/crates/primitives/src/transaction/eip7702.rs index e98bd8344ad1..768d9cf28fb8 100644 --- a/crates/primitives/src/transaction/eip7702.rs +++ b/crates/primitives/src/transaction/eip7702.rs @@ -15,10 +15,7 @@ use reth_codecs::Compact; /// [EIP-7702 Set Code Transaction](https://eips.ethereum.org/EIPS/eip-7702) /// /// Set EOA account code for one transaction -#[cfg_attr( - any(test, feature = "reth-codec"), - reth_codecs::reth_codec(no_arbitrary, add_arbitrary_tests) -)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::reth_codec)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct TxEip7702 { /// Added as EIP-155: Simple replay attack protection @@ -248,73 +245,6 @@ impl TxEip7702 { } } -// TODO(onbjerg): This is temporary until we upstream `Arbitrary` to EIP-7702 types and `Signature` -// in alloy -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for TxEip7702 { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - use arbitrary::Arbitrary; - #[derive(Arbitrary)] - struct ArbitrarySignedAuth { - chain_id: ChainId, - address: alloy_primitives::Address, - nonce: Option, - parity: bool, - r: U256, - s: U256, - } - - let iter = u.arbitrary_iter::()?; - let mut authorization_list = Vec::new(); - for auth in iter { - let auth = auth?; - - let sig = alloy_primitives::Signature::from_rs_and_parity( - auth.r, - auth.s, - alloy_primitives::Parity::Parity(auth.parity), - ) - .unwrap_or_else(|_| { - // Give a default one if the randomly generated one failed - alloy_primitives::Signature::from_rs_and_parity( - alloy_primitives::b256!( - "1fd474b1f9404c0c5df43b7620119ffbc3a1c3f942c73b6e14e9f55255ed9b1d" - ) - .into(), - alloy_primitives::b256!( - "29aca24813279a901ec13b5f7bb53385fa1fc627b946592221417ff74a49600d" - ) - .into(), - false, - ) - .unwrap() - }); - - authorization_list.push( - alloy_eips::eip7702::Authorization { - chain_id: auth.chain_id, - address: auth.address, - nonce: auth.nonce.into(), - } - .into_signed(sig), - ); - } - - Ok(Self { - chain_id: Arbitrary::arbitrary(u)?, - nonce: Arbitrary::arbitrary(u)?, - gas_limit: Arbitrary::arbitrary(u)?, - max_fee_per_gas: Arbitrary::arbitrary(u)?, - max_priority_fee_per_gas: Arbitrary::arbitrary(u)?, - to: Arbitrary::arbitrary(u)?, - value: Arbitrary::arbitrary(u)?, - access_list: Arbitrary::arbitrary(u)?, - authorization_list, - input: Arbitrary::arbitrary(u)?, - }) - } -} - #[cfg(test)] mod tests { use super::TxEip7702; diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index 963d38ace9e2..d31cba06071b 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,5 +1,6 @@ use crate::{U64, U8}; use alloy_rlp::{Decodable, 
Encodable}; +use bytes::Buf; use serde::{Deserialize, Serialize}; #[cfg(test)] @@ -8,7 +9,6 @@ use reth_codecs::Compact; /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier /// parameter. In the case of a 3, the full transaction type is read from the buffer as a /// single byte. -#[cfg(any(test, feature = "reth-codec"))] const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Identifier for legacy transaction, however [`TxLegacy`](crate::TxLegacy) this is technically not @@ -166,7 +166,6 @@ impl reth_codecs::Compact for TxType { // parameter. In the case of a 3, the full transaction type is read from the buffer as a // single byte. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { - use bytes::Buf; ( match identifier { 0 => Self::Legacy, diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 13def8eaa8b0..4fd5b9336812 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -25,6 +25,7 @@ thiserror.workspace = true arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true +proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 400a3044e1ae..02ffba017bdf 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,14 +1,12 @@ //! Helper for handling execution of multiple blocks. -use crate::{ - precompile::{Address, HashSet}, - primitives::alloy_primitives::BlockNumber, -}; +use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; use core::time::Duration; use reth_execution_errors::BlockExecutionError; use reth_primitives::{Receipt, Receipts, Request, Requests}; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; +use std::collections::HashSet; use tracing::debug; #[cfg(not(feature = "std"))] @@ -218,12 +216,7 @@ mod tests { use super::*; use reth_primitives::{Address, Log, Receipt}; use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; - #[cfg(feature = "std")] use std::collections::BTreeMap; - #[cfg(not(feature = "std"))] - extern crate alloc; - #[cfg(not(feature = "std"))] - use alloc::collections::BTreeMap; #[test] fn test_save_receipts_empty() { diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index b8bd293de030..f0ca1255771c 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,4 +1,3 @@ -use crate::precompile::HashMap; use alloy_eips::eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus_common::calc; @@ -10,6 +9,15 @@ use revm::{ Database, DatabaseCommit, }; +// reuse revm's hashbrown implementation for no-std +#[cfg(not(feature = "std"))] +use crate::precompile::HashMap; +#[cfg(not(feature = "std"))] +use alloc::{boxed::Box, format, string::ToString, vec::Vec}; + +#[cfg(feature = "std")] +use std::collections::HashMap; + /// Collect all balance changes at the end of the block. 
/// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index b55cd3fd0117..09c66d588cb3 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,4 +1,3 @@ -use crate::precompile::HashMap; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; @@ -7,9 +6,7 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +use std::collections::HashMap; /// Mock state for testing #[derive(Debug, Default, Clone, Eq, PartialEq)] diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 28c0f6e8cb4f..6dff8a8afae0 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -32,7 +32,10 @@ use tower::{layer::util::Identity, Layer, Service}; use tracing::{debug, instrument, trace, warn, Instrument}; // re-export so can be used during builder setup use crate::{ - server::{connection::IpcConnDriver, rpc_service::RpcServiceCfg}, + server::{ + connection::IpcConnDriver, + rpc_service::{RpcService, RpcServiceCfg}, + }, stream_codec::StreamCodec, }; use tokio::sync::mpsc; @@ -43,8 +46,6 @@ mod connection; mod ipc; mod rpc_service; -pub use rpc_service::RpcService; - /// Ipc Server implementation /// /// This is an adapted `jsonrpsee` Server, but for `Ipc` connections. diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 25626e4f12d3..be904f6efc80 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -198,7 +198,7 @@ impl AuthRpcModule { /// Create a new `AuthRpcModule` with the given `engine_api`. pub fn new(engine: EngineApi) -> Self where - EngineT: EngineTypes, + EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, { let mut module = RpcModule::new(()); diff --git a/crates/rpc/rpc-builder/src/cors.rs b/crates/rpc/rpc-builder/src/cors.rs index c68cf84942c0..0d98b4411bfb 100644 --- a/crates/rpc/rpc-builder/src/cors.rs +++ b/crates/rpc/rpc-builder/src/cors.rs @@ -4,19 +4,10 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; /// Error thrown when parsing cors domains went wrong #[derive(Debug, thiserror::Error)] pub enum CorsDomainError { - /// Represents an invalid header value for a domain #[error("{domain} is an invalid header value")] - InvalidHeader { - /// The domain that caused the invalid header - domain: String, - }, - - /// Indicates that a wildcard origin was used incorrectly in a list + InvalidHeader { domain: String }, #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")] - WildCardNotAllowed { - /// The input string containing the incorrectly used wildcard - input: String, - }, + WildCardNotAllowed { input: String }, } /// Creates a [`CorsLayer`] from the given domains diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index d3346bcdc185..e92067011344 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -93,7 +93,7 @@ //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, -//! EngineT: EngineTypes, +//! EngineT: EngineTypes + 'static, //! EvmConfig: ConfigureEvm, //! { //! 
// configure the rpc module per transport @@ -177,9 +177,10 @@ use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; - -pub use cors::CorsDomainError; +use crate::{ + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, +}; // re-export for convenience pub use jsonrpsee::server::ServerBuilder; @@ -207,7 +208,6 @@ pub use eth::EthHandlers; // Rpc server metrics mod metrics; -pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; /// Convenience function for starting a server in one step. #[allow(clippy::too_many_arguments)] @@ -434,7 +434,7 @@ where RpcRegistryInner, ) where - EngineT: EngineTypes, + EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, EthApi: FullEthApiServer, { @@ -974,7 +974,7 @@ where /// Note: This does _not_ register the `engine_` in this registry. pub fn create_auth_module(&self, engine_api: EngineApi) -> AuthRpcModule where - EngineT: EngineTypes, + EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, { let mut module = RpcModule::new(()); @@ -1141,6 +1141,7 @@ pub struct RpcServerConfig { /// JWT secret for authentication jwt_secret: Option, /// Configurable RPC middleware + #[allow(dead_code)] rpc_middleware: RpcServiceBuilder, } @@ -1336,9 +1337,8 @@ impl RpcServerConfig { /// Returns the [`RpcServerHandle`] with the handle to the started servers. pub async fn start(self, modules: &TransportRpcModules) -> Result where - RpcMiddleware: Layer> + Clone + Send + 'static, - for<'a> >>::Service: - Send + Sync + 'static + RpcServiceT<'a>, + RpcMiddleware: for<'a> Layer> + Clone + Send + 'static, + >::Service: Send + std::marker::Sync, { let mut http_handle = None; let mut ws_handle = None; @@ -1396,7 +1396,7 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - self.rpc_middleware.clone().layer( + RpcServiceBuilder::new().layer( modules .http .as_ref() @@ -1444,8 +1444,7 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - self.rpc_middleware - .clone() + RpcServiceBuilder::new() .layer(modules.ws.as_ref().map(RpcRequestMetrics::ws).unwrap_or_default()), ) .build(ws_socket_addr) @@ -1469,7 +1468,7 @@ impl RpcServerConfig { .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( - self.rpc_middleware.clone().layer( + RpcServiceBuilder::new().layer( modules.http.as_ref().map(RpcRequestMetrics::http).unwrap_or_default(), ), ) diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index 08fd38898558..4b638a7b36d7 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -81,11 +81,9 @@ struct RpcServerMetricsInner { /// A [`RpcServiceT`] middleware that captures RPC metrics for the server. /// /// This is created per connection and captures metrics for each request. -#[derive(Clone, Debug)] -pub struct RpcRequestMetricsService { - /// The metrics collector for RPC requests +#[derive(Clone)] +pub(crate) struct RpcRequestMetricsService { metrics: RpcRequestMetrics, - /// The inner service being wrapped inner: S, } @@ -127,7 +125,7 @@ impl Drop for RpcRequestMetricsService { /// Response future to update the metrics for a single request/response pair. 
#[pin_project::pin_project] -pub struct MeteredRequestFuture { +pub(crate) struct MeteredRequestFuture { #[pin] fut: F, /// time when the request started diff --git a/crates/rpc/rpc-builder/tests/it/main.rs b/crates/rpc/rpc-builder/tests/it/main.rs index a64ad1da2f54..65ddebb3fd9c 100644 --- a/crates/rpc/rpc-builder/tests/it/main.rs +++ b/crates/rpc/rpc-builder/tests/it/main.rs @@ -1,6 +1,5 @@ mod auth; mod http; -mod middleware; mod serde; mod startup; pub mod utils; diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs deleted file mode 100644 index 59cc86d4dc86..000000000000 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::utils::{test_address, test_rpc_builder}; -use jsonrpsee::{ - server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, - types::Request, - MethodResponse, -}; -use reth_rpc::EthApi; -use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; -use reth_rpc_eth_api::EthApiClient; -use reth_rpc_server_types::RpcModuleSelection; -use std::{ - future::Future, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; -use tower::Layer; - -#[derive(Clone, Default)] -struct MyMiddlewareLayer { - count: Arc, -} - -impl Layer for MyMiddlewareLayer { - type Service = MyMiddlewareService; - - fn layer(&self, inner: S) -> Self::Service { - MyMiddlewareService { service: inner, count: self.count.clone() } - } -} - -#[derive(Clone)] -struct MyMiddlewareService { - service: S, - count: Arc, -} - -impl<'a, S> RpcServiceT<'a> for MyMiddlewareService -where - S: RpcServiceT<'a> + Send + Sync + Clone + 'static, -{ - type Future = Pin + Send + 'a>>; - - fn call(&self, req: Request<'a>) -> Self::Future { - tracing::info!("MyMiddleware processed call {}", req.method); - let count = self.count.clone(); - let service = self.service.clone(); - Box::pin(async move { - let rp = service.call(req).await; - // Modify the state. - count.fetch_add(1, Ordering::Relaxed); - rp - }) - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn test_rpc_middleware() { - let builder = test_rpc_builder(); - let modules = builder.build( - TransportRpcModuleConfig::set_http(RpcModuleSelection::All), - Box::new(EthApi::with_spawner), - ); - - let mylayer = MyMiddlewareLayer::default(); - - let handle = RpcServerConfig::http(Default::default()) - .with_http_address(test_address()) - .set_rpc_middleware(RpcServiceBuilder::new().layer(mylayer.clone())) - .start(&modules) - .await - .unwrap(); - - let client = handle.http_client().unwrap(); - EthApiClient::protocol_version(&client).await.unwrap(); - let count = mylayer.count.load(Ordering::Relaxed); - assert_eq!(count, 1); -} diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 862a8ca02c7f..881413210ae4 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -65,7 +65,7 @@ struct EngineApiInner { impl EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, - EngineT: EngineTypes, + EngineT: EngineTypes + 'static, { /// Create new instance of [`EngineApi`]. 
pub fn new( @@ -612,7 +612,7 @@ where impl EngineApiServer for EngineApi where Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, - EngineT: EngineTypes, + EngineT: EngineTypes + 'static, { /// Handler for `engine_newPayloadV1` /// See also diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 20e73908be69..b1295d69e5f6 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -35,7 +35,6 @@ alloy-dyn-abi = { workspace = true, features = ["eip712"] } # rpc jsonrpsee = { workspace = true, features = ["server", "macros"] } -jsonrpsee-types.workspace = true # async async-trait.workspace = true @@ -54,4 +53,5 @@ optimism = [ "reth-primitives/optimism", "revm/optimism", "reth-provider/optimism", + "reth-rpc-eth-types/optimism" ] diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index a86b5c956f16..3ba0a59e1000 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -3,7 +3,7 @@ use alloy_dyn_abi::TypedData; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Account, Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ serde_helpers::JsonStorageKey, @@ -245,10 +245,6 @@ pub trait EthApi { #[method(name = "gasPrice")] async fn gas_price(&self) -> RpcResult; - /// Returns the account details by specifying an address and a block number/tag - #[method(name = "getAccount")] - async fn get_account(&self, address: Address, block: BlockId) -> RpcResult; - /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. #[method(name = "maxPriorityFeePerGas")] async fn max_priority_fee_per_gas(&self) -> RpcResult; @@ -334,8 +330,7 @@ pub trait EthApi { #[async_trait::async_trait] impl EthApiServer for T where - T: FullEthApi, - jsonrpsee_types::error::ErrorObject<'static>: From, + Self: FullEthApi, { /// Handler for: `eth_protocolVersion` async fn protocol_version(&self) -> RpcResult { @@ -626,11 +621,6 @@ where return Ok(EthFees::gas_price(self).await?) 
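This `gas_price` handler shows the pattern the `core.rs` hunk above simplifies: with the concrete `EthApiError` there is no longer a `jsonrpsee` error-conversion bound on the blanket impl, and every handler stays a thin delegation to a helper trait. The shape of that blanket-impl pattern, with simplified stand-in traits:

```rust
/// Helper trait that carries the actual logic.
trait FullApi {
    fn gas_price(&self) -> u64;
}

/// Server-facing trait, implemented once for every `FullApi` type.
trait ApiServer {
    fn handle_gas_price(&self) -> Result<u64, String>;
}

impl<T: FullApi> ApiServer for T {
    fn handle_gas_price(&self) -> Result<u64, String> {
        // each handler is a thin wrapper over the helper trait
        Ok(self.gas_price())
    }
}
```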
} - /// Handler for: `eth_getAccount` - async fn get_account(&self, _address: Address, _block: BlockId) -> RpcResult { - Err(internal_rpc_err("unimplemented")) - } - /// Handler for: `eth_maxPriorityFeePerGas` async fn max_priority_fee_per_gas(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 837006a970ef..78f1ef9da66b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -5,12 +5,10 @@ use std::sync::Arc; use futures::Future; use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; use reth_rpc_types::{AnyTransactionReceipt, Header, Index, RichBlock}; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use crate::FromEthApiError; - use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the @@ -25,7 +23,7 @@ pub trait EthBlocks: LoadBlock { fn rpc_block_header( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -40,7 +38,7 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, full: bool, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -51,11 +49,10 @@ pub trait EthBlocks: LoadBlock { }; let block_hash = block.hash(); let total_difficulty = EthBlocks::provider(self) - .header_td_by_number(block.number) - .map_err(Self::Error::from_eth_err)? + .header_td_by_number(block.number)? .ok_or(EthApiError::UnknownBlockNumber)?; - let block = from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash)) - .map_err(Self::Error::from_eth_err)?; + let block = + from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; Ok(Some(block.into())) } } @@ -66,30 +63,19 @@ pub trait EthBlocks: LoadBlock { fn block_transaction_count( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - return Ok(LoadBlock::provider(self) - .pending_block() - .map_err(Self::Error::from_eth_err)? - .map(|block| block.body.len())) + return Ok(LoadBlock::provider(self).pending_block()?.map(|block| block.body.len())) } - let block_hash = match LoadBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? - { + let block_hash = match LoadBlock::provider(self).block_hash_for_id(block_id)? { Some(block_hash) => block_hash, None => return Ok(None), }; - Ok(self - .cache() - .get_block_transactions(block_hash) - .await - .map_err(Self::Error::from_eth_err)? 
- .map(|txs| txs.len())) + Ok(self.cache().get_block_transactions(block_hash).await?.map(|txs| txs.len())) } } @@ -99,7 +85,7 @@ pub trait EthBlocks: LoadBlock { fn block_receipts( &self, block_id: BlockId, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>>> + Send where Self: LoadReceipt, { @@ -130,9 +116,8 @@ pub trait EthBlocks: LoadBlock { ReceiptBuilder::new(&tx, meta, receipt, &receipts) .map(|builder| builder.build()) - .map_err(Self::Error::from_eth_err) }) - .collect::, Self::Error>>(); + .collect::>>(); return receipts.map(Some) } @@ -144,26 +129,19 @@ pub trait EthBlocks: LoadBlock { fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future>)>, Self::Error>> + Send + ) -> impl Future>)>>> + Send where Self: LoadReceipt, { async move { if block_id.is_pending() { return Ok(LoadBlock::provider(self) - .pending_block_and_receipts() - .map_err(Self::Error::from_eth_err)? + .pending_block_and_receipts()? .map(|(sb, receipts)| (sb, Arc::new(receipts)))) } - if let Some(block_hash) = LoadBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? - { - return LoadReceipt::cache(self) - .get_block_and_receipts(block_hash) - .await - .map_err(Self::Error::from_eth_err) + if let Some(block_hash) = LoadBlock::provider(self).block_hash_for_id(block_id)? { + return Ok(LoadReceipt::cache(self).get_block_and_receipts(block_hash).await?) } Ok(None) @@ -173,11 +151,8 @@ pub trait EthBlocks: LoadBlock { /// Returns uncle headers of given block. /// /// Returns an empty vec if there are none. - fn ommers( - &self, - block_id: BlockId, - ) -> Result>, Self::Error> { - LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err) + fn ommers(&self, block_id: BlockId) -> EthResult>> { + Ok(LoadBlock::provider(self).ommers_by_id(block_id)?) } /// Returns uncle block at given index in given block. @@ -187,18 +162,13 @@ pub trait EthBlocks: LoadBlock { &self, block_id: BlockId, index: Index, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { let uncles = if block_id.is_pending() { // Pending block can be fetched directly without need for caching - LoadBlock::provider(self) - .pending_block() - .map_err(Self::Error::from_eth_err)? - .map(|block| block.ommers) + LoadBlock::provider(self).pending_block()?.map(|block| block.ommers) } else { - LoadBlock::provider(self) - .ommers_by_id(block_id) - .map_err(Self::Error::from_eth_err)? + LoadBlock::provider(self).ommers_by_id(block_id)? 
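Nearly every hunk in this file follows the same pattern: with the trait methods returning `EthResult` again, provider and cache errors convert through `From`, so the explicit `map_err(Self::Error::from_eth_err)` chains collapse into a plain `?`. A self-contained sketch of that mechanism, with simplified types:

```rust
#[derive(Debug)]
struct ProviderError(String);

#[derive(Debug)]
enum EthApiError {
    Provider(ProviderError),
    UnknownBlockNumber,
}

type EthResult<T> = Result<T, EthApiError>;

impl From<ProviderError> for EthApiError {
    fn from(err: ProviderError) -> Self {
        Self::Provider(err)
    }
}

fn provider_lookup(id: u64) -> Result<Option<u64>, ProviderError> {
    Ok(Some(id))
}

fn block_number(id: u64) -> EthResult<u64> {
    // `?` applies the `From` impl above, so no `map_err` is needed
    provider_lookup(id)?.ok_or(EthApiError::UnknownBlockNumber)
}
```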
} .unwrap_or_default(); @@ -228,7 +198,7 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { fn block( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { self.block_with_senders(block_id) .await @@ -240,13 +210,12 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = LoadPendingBlock::provider(self) - .pending_block_with_senders() - .map_err(Self::Error::from_eth_err)?; + let maybe_pending = + LoadPendingBlock::provider(self).pending_block_with_senders()?; return if maybe_pending.is_some() { Ok(maybe_pending) } else { @@ -254,18 +223,12 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking { } } - let block_hash = match LoadPendingBlock::provider(self) - .block_hash_for_id(block_id) - .map_err(Self::Error::from_eth_err)? - { + let block_hash = match LoadPendingBlock::provider(self).block_hash_for_id(block_id)? { Some(block_hash) => block_hash, None => return Ok(None), }; - self.cache() - .get_sealed_block_with_senders(block_hash) - .await - .map_err(Self::Error::from_eth_err) + Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs index d23453b5ed83..4a2c81b0fdfe 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs @@ -2,14 +2,12 @@ //! are executed on the `tokio` runtime. use futures::Future; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_types::{EthApiError, EthResult}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; -use crate::EthApiTypes; - /// Executes code on a blocking thread. -pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { +pub trait SpawnBlocking: Clone + Send + Sync + 'static { /// Returns a handle for spawning IO heavy blocking tasks. /// /// Runtime access in default trait method implementations. @@ -35,9 +33,9 @@ pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { /// /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing). - fn spawn_blocking_io(&self, f: F) -> impl Future> + Send + fn spawn_blocking_io(&self, f: F) -> impl Future> + Send where - F: FnOnce(Self) -> Result + Send + 'static, + F: FnOnce(Self) -> EthResult + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -55,9 +53,9 @@ pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { /// Note: This is expected for futures that are predominantly CPU bound, as it uses `rayon` /// under the hood, for blocking IO futures use [`spawn_blocking`](Self::spawn_blocking_io). See /// . 
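`spawn_blocking_io` above hands its closure to a task spawner and awaits the result over a oneshot channel; `spawn_tracing` below does the same on the rayon-backed `BlockingTaskPool` for CPU-bound work. A minimal version of that channel handoff, with a plain thread standing in for the trait's task spawner (assumes the `tokio` dependency the crate already uses):

```rust
async fn spawn_blocking_io<F, R>(f: F) -> Result<R, &'static str>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = tokio::sync::oneshot::channel();
    // a plain thread stands in for the trait's task spawner
    std::thread::spawn(move || {
        let _ = tx.send(f());
    });
    // if the closure panics, the sender is dropped and this surfaces an error
    rx.await.map_err(|_| "blocking task panicked before sending a result")
}
```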
- fn spawn_tracing(&self, f: F) -> impl Future> + Send + fn spawn_tracing(&self, f: F) -> impl Future> + Send where - F: FnOnce(Self) -> Result + Send + 'static, + F: FnOnce(Self) -> EthResult + Send + 'static, R: Send + 'static, { let this = self.clone(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index aaf75a827f79..a8fc6d8e2305 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -19,11 +19,9 @@ use reth_rpc_eth_types::{ apply_block_overrides, apply_state_overrides, caller_gas_allowance, cap_tx_gas_limit_with_caller_allowance, get_precompiles, CallFees, }, - EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, -}; -use reth_rpc_server_types::constants::gas_oracle::{ - CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS, + EthApiError, EthResult, RevertError, RpcInvalidTransactionError, StateCacheDb, }; +use reth_rpc_server_types::constants::gas_oracle::{ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS}; use reth_rpc_types::{ state::{EvmOverrides, StateOverride}, AccessListWithGasUsed, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, @@ -33,8 +31,6 @@ use revm::{Database, DatabaseCommit}; use revm_inspectors::access_list::AccessListInspector; use tracing::trace; -use crate::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; - use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -46,7 +42,7 @@ pub trait EthCall: Call + LoadPendingBlock { request: TransactionRequest, at: BlockId, state_override: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { Call::estimate_gas_at(self, request, at, state_override) } @@ -56,12 +52,12 @@ pub trait EthCall: Call + LoadPendingBlock { request: TransactionRequest, block_number: Option, overrides: EvmOverrides, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let (res, _env) = self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; - ensure_success(res.result).map_err(Self::Error::from_eth_err) + ensure_success(res.result) } } @@ -72,16 +68,14 @@ pub trait EthCall: Call + LoadPendingBlock { bundle: Bundle, state_context: Option, mut state_override: Option, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadBlock, { async move { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { - return Err( - EthApiError::InvalidParams(String::from("transactions are empty.")).into() - ) + return Err(EthApiError::InvalidParams(String::from("transactions are empty."))) } let StateContext { transaction_index, block_number } = @@ -96,7 +90,7 @@ pub trait EthCall: Call + LoadPendingBlock { self.block_with_senders(target_block) )?; - let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber.into()) }; + let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber) }; let gas_limit = self.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the @@ -142,16 +136,14 @@ pub trait EthCall: Call + LoadPendingBlock { let state_overrides = state_override.take(); let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - let env = this - .prepare_call_env( - cfg.clone(), - block_env.clone(), - tx, - gas_limit, - &mut db, - overrides, - ) - .map(Into::into)?; + let env = 
this.prepare_call_env( + cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides, + )?; let (res, _) = this.transact(&mut db, env)?; match ensure_success(res.result) { @@ -185,7 +177,7 @@ pub trait EthCall: Call + LoadPendingBlock { &self, request: TransactionRequest, block_number: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: Trace, { @@ -208,7 +200,7 @@ pub trait EthCall: Call + LoadPendingBlock { block: BlockEnv, at: BlockId, mut request: TransactionRequest, - ) -> Result + ) -> EthResult where Self: Trace, { @@ -236,8 +228,7 @@ pub trait EthCall: Call + LoadPendingBlock { let to = if let Some(TxKind::Call(to)) = request.to { to } else { - let nonce = - db.basic_ref(from).map_err(Self::Error::from_eth_err)?.unwrap_or_default().nonce; + let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; from.create(nonce) }; @@ -257,8 +248,7 @@ pub trait EthCall: Call + LoadPendingBlock { Err(RpcInvalidTransactionError::Revert(RevertError::new(output))) } ExecutionResult::Success { .. } => Ok(()), - } - .map_err(Self::Error::from_eth_err)?; + }?; let access_list = inspector.into_access_list(); @@ -287,9 +277,9 @@ pub trait Call: LoadState + SpawnBlocking { fn evm_config(&self) -> &impl ConfigureEvm; /// Executes the closure with the state that corresponds to the given [`BlockId`]. - fn with_state_at_block(&self, at: BlockId, f: F) -> Result + fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult where - F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result, + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult, { let state = self.state_at_block_id(at)?; f(StateProviderTraitObjWrapper(&state)) @@ -301,13 +291,13 @@ pub trait Call: LoadState + SpawnBlocking { &self, db: DB, env: EnvWithHandlerCfg, - ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where DB: Database, - EthApiError: From, + ::Error: Into, { let mut evm = self.evm_config().evm_with_env(db, env); - let res = evm.transact().map_err(Self::Error::from_evm_err)?; + let res = evm.transact()?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env)) } @@ -318,7 +308,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, at: BlockId, overrides: EvmOverrides, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, { @@ -327,14 +317,14 @@ pub trait Call: LoadState + SpawnBlocking { } /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task - fn spawn_with_state_at_block( + fn spawn_with_state_at_block( &self, at: BlockId, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where - F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result + Send + 'static, - R: Send + 'static, + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult + Send + 'static, + T: Send + 'static, { self.spawn_tracing(move |this| { let state = this.state_at_block_id(at)?; @@ -353,10 +343,10 @@ pub trait Call: LoadState + SpawnBlocking { at: BlockId, overrides: EvmOverrides, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, - F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> Result + F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> EthResult + Send + 'static, R: Send + 'static, @@ -381,7 +371,7 @@ pub trait Call: LoadState + SpawnBlocking { f(StateCacheDbRefMutWrapper(&mut db), env) }) .await - .map_err(|_| EthApiError::InternalBlockingTaskError.into()) + 
.map_err(|_| EthApiError::InternalBlockingTaskError) } } @@ -398,10 +388,10 @@ pub trait Call: LoadState + SpawnBlocking { &self, hash: B256, f: F, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadBlock + LoadPendingBlock + LoadTransaction, - F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> Result + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> EthResult + Send + 'static, R: Send + 'static, @@ -461,10 +451,10 @@ pub trait Call: LoadState + SpawnBlocking { block_env: BlockEnv, transactions: impl IntoIterator, target_tx_hash: B256, - ) -> Result + ) -> Result where DB: DatabaseRef, - EthApiError: From, + EthApiError: From<::Error>, { let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); @@ -478,7 +468,7 @@ pub trait Call: LoadState + SpawnBlocking { let sender = tx.signer(); self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); - evm.transact_commit().map_err(Self::Error::from_evm_err)?; + evm.transact_commit()?; index += 1; } Ok(index) @@ -490,7 +480,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, at: BlockId, state_override: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, { @@ -515,7 +505,7 @@ pub trait Call: LoadState + SpawnBlocking { request: TransactionRequest, state: S, state_override: Option, - ) -> Result + ) -> EthResult where S: StateProvider, { @@ -545,7 +535,7 @@ pub trait Call: LoadState + SpawnBlocking { // Apply any state overrides if specified. if let Some(state_override) = state_override { - apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; + apply_state_overrides(state_override, &mut db)?; } // Optimize for simple transfer transactions, potentially reducing the gas estimate. @@ -576,8 +566,7 @@ pub trait Call: LoadState + SpawnBlocking { // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` if env.tx.gas_price > U256::ZERO { // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit - .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); + highest_gas_limit = highest_gas_limit.min(caller_gas_allowance(&mut db, &env.tx)?); } // We can now normalize the highest gas limit to a u64 @@ -595,9 +584,8 @@ pub trait Call: LoadState + SpawnBlocking { // If the gas price or gas limit was specified in the request, retry the transaction // with the block's gas limit to determine if the failure was due to // insufficient gas. - Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) + if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() => { return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) } @@ -610,7 +598,7 @@ pub trait Call: LoadState + SpawnBlocking { ExecutionResult::Halt { reason, gas_used } => { // here we don't check for invalid opcode because already executed with highest gas // limit - return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into()) } ExecutionResult::Revert { output, .. 
} => { // if price or limit was included in the request then we can execute the request @@ -619,18 +607,14 @@ pub trait Call: LoadState + SpawnBlocking { Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) } else { // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into()) } } }; // At this point we know the call succeeded but want to find the _best_ (lowest) gas the // transaction succeeds with. We find this by doing a binary search over the possible range. - - // we know the tx succeeded with the configured gas limit, so we can use that as the - // highest, in case we applied a gas cap due to caller allowance above - highest_gas_limit = env.tx.gas_limit; - + // // NOTE: this is the gas the transaction used, which is less than the // transaction requires to succeed. let mut gas_used = res.result.gas_used(); @@ -643,7 +627,7 @@ pub trait Call: LoadState + SpawnBlocking { // // Calculate the optimistic gas limit by adding gas used and gas refund, // then applying a 64/63 multiplier to account for gas forwarding rules. - let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63; + let optimistic_gas_limit = (gas_used + gas_refund) * 64 / 63; if optimistic_gas_limit < highest_gas_limit { // Set the transaction's gas limit to the calculated optimistic gas limit. env.tx.gas_limit = optimistic_gas_limit; @@ -685,7 +669,8 @@ pub trait Call: LoadState + SpawnBlocking { // Execute transaction and handle potential gas errors, adjusting limits accordingly. match self.transact(&mut db, env.clone()) { - Err(err) if err.is_gas_too_high() => { + // Check if the error is due to gas being too high. + Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) => { // Increase the lowest gas limit if gas is too high lowest_gas_limit = mid_gas_limit; } @@ -722,7 +707,7 @@ pub trait Call: LoadState + SpawnBlocking { tx_gas_limit: u64, highest_gas_limit: &mut u64, lowest_gas_limit: &mut u64, - ) -> Result<(), Self::Error> { + ) -> EthResult<()> { match result { ExecutionResult::Success { .. } => { // Cap the highest gas limit with the succeeding gas limit. @@ -750,7 +735,7 @@ pub trait Call: LoadState + SpawnBlocking { // These cases should be unreachable because we know the transaction // succeeds, but if they occur, treat them as an // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + return Err(RpcInvalidTransactionError::EvmHalt(err).into()) } } } @@ -767,7 +752,7 @@ pub trait Call: LoadState + SpawnBlocking { env_gas_limit: U256, mut env: EnvWithHandlerCfg, db: &mut CacheDB>, - ) -> Self::Error + ) -> EthApiError where S: StateProvider, { @@ -781,14 +766,14 @@ pub trait Call: LoadState + SpawnBlocking { ExecutionResult::Success { .. } => { // transaction succeeded by manually increasing the gas limit to // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() + RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into() } ExecutionResult::Revert { output, .. } => { // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() + RpcInvalidTransactionError::Revert(RevertError::new(output)).into() } ExecutionResult::Halt { reason, .. 
} => { - RpcInvalidTransactionError::EvmHalt(reason).into_eth_err() + RpcInvalidTransactionError::EvmHalt(reason).into() } } } @@ -801,10 +786,10 @@ pub trait Call: LoadState + SpawnBlocking { &self, block_env: &BlockEnv, request: TransactionRequest, - ) -> Result { + ) -> EthResult { // Ensure that if versioned hashes are set, they're not empty if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { - return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) + return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into()) } let TransactionRequest { @@ -842,18 +827,14 @@ pub trait Call: LoadState + SpawnBlocking { let env = TxEnv { gas_limit: gas_limit .try_into() - .map_err(|_| RpcInvalidTransactionError::GasUintOverflow) - .map_err(Self::Error::from_eth_err)?, + .map_err(|_| RpcInvalidTransactionError::GasUintOverflow)?, nonce, caller: from.unwrap_or_default(), gas_price, gas_priority_fee: max_priority_fee_per_gas, transact_to: to.unwrap_or(TxKind::Create), value: value.unwrap_or_default(), - data: input - .try_into_unique_input() - .map_err(Self::Error::from_eth_err)? - .unwrap_or_default(), + data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, access_list: access_list.unwrap_or_default().into(), // EIP-4844 fields @@ -876,7 +857,7 @@ pub trait Call: LoadState + SpawnBlocking { cfg: CfgEnvWithHandlerCfg, block: BlockEnv, request: TransactionRequest, - ) -> Result { + ) -> EthResult { let tx = self.create_txn_env(&block, request)?; Ok(EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx)) } @@ -898,7 +879,7 @@ pub trait Call: LoadState + SpawnBlocking { gas_limit: u64, db: &mut CacheDB, overrides: EvmOverrides, - ) -> Result + ) -> EthResult where DB: DatabaseRef, EthApiError: From<::Error>, diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-api/src/helpers/error.rs deleted file mode 100644 index 041a019052bd..000000000000 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ /dev/null @@ -1,88 +0,0 @@ -//! Helper traits to wrap generic l1 errors, in network specific error type configured in -//! [`EthApiTypes`](crate::EthApiTypes). - -use reth_rpc_eth_types::EthApiError; -use revm_primitives::EVMError; - -/// Helper trait to wrap core [`EthApiError`]. -pub trait FromEthApiError: From { - /// Converts from error via [`EthApiError`]. - fn from_eth_err(err: E) -> Self - where - EthApiError: From; -} - -impl FromEthApiError for T -where - T: From, -{ - fn from_eth_err(err: E) -> Self - where - EthApiError: From, - { - T::from(EthApiError::from(err)) - } -} - -/// Helper trait to wrap core [`EthApiError`]. -pub trait IntoEthApiError: Into { - /// Converts into error via [`EthApiError`]. - fn into_eth_err(self) -> E - where - E: FromEthApiError; -} - -impl IntoEthApiError for T -where - EthApiError: From, -{ - fn into_eth_err(self) -> E - where - E: FromEthApiError, - { - E::from_eth_err(self) - } -} - -/// Helper trait to access wrapped core error. -pub trait AsEthApiError { - /// Returns reference to [`EthApiError`], if this an error variant inherited from core - /// functionality. - fn as_err(&self) -> Option<&EthApiError>; - - /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). 
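With this helper file deleted, the gas estimator above matches the concrete `EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)` variant directly instead of going through `is_gas_too_high`. The estimator itself is a bisection over gas limits, seeded with the optimistic `(gas_used + gas_refund) * 64 / 63` guess that compensates for EIP-150's rule that a call forwards at most 63/64 of the available gas. A runnable sketch of the search, where the closure stands in for executing the transaction:

```rust
/// Returns the smallest gas limit in `(lo, hi]` at which `succeeds` returns
/// `true`, assuming success is monotone in the gas limit and `succeeds(hi)`.
fn estimate_gas(succeeds: impl Fn(u64) -> bool, mut lo: u64, mut hi: u64) -> u64 {
    assert!(succeeds(hi), "transaction must succeed at the upper bound");
    while lo + 1 < hi {
        let mid = lo + (hi - lo) / 2;
        if succeeds(mid) {
            hi = mid; // succeeded: the answer is at or below `mid`
        } else {
            lo = mid; // ran out of gas: the answer is above `mid`
        }
    }
    hi
}
```

The real loop also stops early once the remaining window is negligible relative to the limit (the `ESTIMATE_GAS_ERROR_RATIO` constant imported above), since further iterations barely change the estimate.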
- fn is_gas_too_high(&self) -> bool { - if let Some(err) = self.as_err() { - return err.is_gas_too_high() - } - - false - } -} - -impl AsEthApiError for EthApiError { - fn as_err(&self) -> Option<&EthApiError> { - Some(self) - } -} - -/// Helper trait to convert from revm errors. -pub trait FromEvmError: From { - /// Converts from a revm error. - fn from_evm_err(err: EVMError) -> Self - where - EthApiError: From; -} - -impl FromEvmError for T -where - T: From, -{ - fn from_evm_err(err: EVMError) -> Self - where - EthApiError: From, - { - err.into_eth_err() - } -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 290833eeca6e..54c577ea2504 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -4,14 +4,12 @@ use futures::Future; use reth_primitives::U256; use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, + fee_history::calculate_reward_percentiles_for_block, EthApiError, EthResult, EthStateCache, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use reth_rpc_types::{BlockNumberOrTag, FeeHistory}; use tracing::debug; -use crate::FromEthApiError; - use super::LoadBlock; /// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the @@ -20,7 +18,7 @@ pub trait EthFees: LoadFee { /// Returns a suggestion for a gas price for legacy transactions. /// /// See also: - fn gas_price(&self) -> impl Future> + Send + fn gas_price(&self) -> impl Future> + Send where Self: LoadBlock, { @@ -28,7 +26,7 @@ pub trait EthFees: LoadFee { } /// Returns a suggestion for a base fee for blob transactions. - fn blob_base_fee(&self) -> impl Future> + Send + fn blob_base_fee(&self) -> impl Future> + Send where Self: LoadBlock, { @@ -36,7 +34,7 @@ pub trait EthFees: LoadFee { } /// Returns a suggestion for the priority fee (the tip) - fn suggested_priority_fee(&self) -> impl Future> + Send + fn suggested_priority_fee(&self) -> impl Future> + Send where Self: 'static, { @@ -52,7 +50,7 @@ pub trait EthFees: LoadFee { mut block_count: u64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { if block_count == 0 { return Ok(FeeHistory::default()) @@ -74,11 +72,10 @@ pub trait EthFees: LoadFee { block_count = max_fee_history } - let Some(end_block) = LoadFee::provider(self) - .block_number_for_id(newest_block.into()) - .map_err(Self::Error::from_eth_err)? + let Some(end_block) = + LoadFee::provider(self).block_number_for_id(newest_block.into())? else { - return Err(EthApiError::UnknownBlockNumber.into()) + return Err(EthApiError::UnknownBlockNumber) }; // need to add 1 to the end block to get the correct (inclusive) range @@ -94,7 +91,7 @@ pub trait EthFees: LoadFee { // Note: The types used ensure that the percentiles are never < 0 if let Some(percentiles) = &reward_percentiles { if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
{ - return Err(EthApiError::InvalidRewardPercentiles.into()) + return Err(EthApiError::InvalidRewardPercentiles) } } @@ -119,7 +116,7 @@ pub trait EthFees: LoadFee { if let Some(fee_entries) = fee_entries { if fee_entries.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange.into()) + return Err(EthApiError::InvalidBlockRange) } for entry in &fee_entries { @@ -147,9 +144,9 @@ pub trait EthFees: LoadFee { base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block).map_err(Self::Error::from_eth_err)?; + let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block)?; if headers.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange.into()) + return Err(EthApiError::InvalidBlockRange) } for header in &headers { @@ -165,7 +162,7 @@ pub trait EthFees: LoadFee { if let Some(percentiles) = &reward_percentiles { let (transactions, receipts) = LoadFee::cache(self) .get_transactions_and_receipts(header.hash()) - .await.map_err(Self::Error::from_eth_err)? + .await? .ok_or(EthApiError::InvalidBlockRange)?; rewards.push( calculate_reward_percentiles_for_block( @@ -254,7 +251,7 @@ pub trait LoadFee: LoadBlock { fn legacy_gas_price( &self, gas_price: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { match gas_price { Some(gas_price) => Ok(gas_price), @@ -274,7 +271,7 @@ pub trait LoadFee: LoadBlock { &self, max_fee_per_gas: Option, max_priority_fee_per_gas: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let max_fee_per_gas = match max_fee_per_gas { Some(max_fee_per_gas) => max_fee_per_gas, @@ -306,7 +303,7 @@ pub trait LoadFee: LoadBlock { fn eip4844_blob_fee( &self, blob_fee: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { match blob_fee { Some(blob_fee) => Ok(blob_fee), @@ -318,7 +315,7 @@ pub trait LoadFee: LoadBlock { /// Returns a suggestion for a gas price for legacy transactions. /// /// See also: - fn gas_price(&self) -> impl Future> + Send { + fn gas_price(&self) -> impl Future> + Send { let header = self.block(BlockNumberOrTag::Latest.into()); let suggested_tip = self.suggested_priority_fee(); async move { @@ -329,21 +326,21 @@ pub trait LoadFee: LoadBlock { } /// Returns a suggestion for a base fee for blob transactions. - fn blob_base_fee(&self) -> impl Future> + Send { + fn blob_base_fee(&self) -> impl Future> + Send { async move { self.block(BlockNumberOrTag::Latest.into()) .await? 
.and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) - .ok_or(EthApiError::ExcessBlobGasNotSet.into()) + .ok_or(EthApiError::ExcessBlobGasNotSet) .map(U256::from) } } /// Returns a suggestion for the priority fee (the tip) - fn suggested_priority_fee(&self) -> impl Future> + Send + fn suggested_priority_fee(&self) -> impl Future> + Send where Self: 'static, { - async move { self.gas_oracle().suggest_tip_cap().await.map_err(Self::Error::from_eth_err) } + self.gas_oracle().suggest_tip_cap() } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index ecfd63388e3b..b82a621acaf4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,7 +17,6 @@ pub mod block; pub mod blocking_task; pub mod call; -pub mod error; pub mod fee; pub mod pending_block; pub mod receipt; @@ -26,7 +25,6 @@ pub mod spec; pub mod state; pub mod trace; pub mod transaction; -pub mod types; pub use block::{EthBlocks, LoadBlock}; pub use blocking_task::SpawnBlocking; @@ -40,8 +38,6 @@ pub use state::{EthState, LoadState}; pub use trace::Trace; pub use transaction::{EthTransactions, LoadTransaction, UpdateRawTxForwarder}; -use crate::EthApiTypes; - /// Extension trait that bundles traits needed for tracing transactions. pub trait TraceExt: LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call @@ -54,21 +50,12 @@ impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + /// /// This trait is automatically implemented for any type that implements all the `Eth` traits. pub trait FullEthApi: - EthApiTypes - + EthApiSpec - + EthTransactions - + EthBlocks - + EthState - + EthCall - + EthFees - + Trace - + LoadReceipt + EthApiSpec + EthTransactions + EthBlocks + EthState + EthCall + EthFees + Trace + LoadReceipt { } impl FullEthApi for T where - T: EthApiTypes - + EthApiSpec + T: EthApiSpec + EthTransactions + EthBlocks + EthState diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 183b1c791401..a17fbb43f2fe 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -19,29 +19,27 @@ use reth_primitives::{ EMPTY_OMMER_ROOT_HASH, U256, }; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, }; use reth_revm::{ database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, }; use reth_rpc_eth_types::{ - pending_block::pre_block_blockhashes_update, EthApiError, PendingBlock, PendingBlockEnv, - PendingBlockEnvOrigin, + pending_block::pre_block_blockhashes_update, EthApiError, EthResult, PendingBlock, + PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; use tokio::sync::Mutex; use tracing::debug; -use crate::{EthApiTypes, FromEthApiError, FromEvmError}; - use super::SpawnBlocking; /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. -pub trait LoadPendingBlock: EthApiTypes { +#[auto_impl::auto_impl(&, Arc)] +pub trait LoadPendingBlock { /// Returns a handle for reading data from disk. 
/// /// Data access in default (L1) trait method implementations. @@ -67,19 +65,16 @@ pub trait LoadPendingBlock: EthApiTypes { /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block - fn pending_block_env_and_cfg(&self) -> Result { + fn pending_block_env_and_cfg(&self) -> EthResult { let origin: PendingBlockEnvOrigin = if let Some(pending) = - self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)? + self.provider().pending_block_with_senders()? { PendingBlockEnvOrigin::ActualPending(pending) } else { // no pending block from the CL yet, so we use the latest block and modify the env // values that we can - let latest = self - .provider() - .latest_header() - .map_err(Self::Error::from_eth_err)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let latest = + self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?; let (mut latest_header, block_hash) = latest.split(); // child block @@ -107,14 +102,12 @@ pub trait LoadPendingBlock: EthApiTypes { let mut block_env = BlockEnv::default(); // Note: for the PENDING block we assume it is past the known merge block and thus this will // not fail when looking up the total difficulty value for the blockenv. - self.provider() - .fill_env_with_header( - &mut cfg, - &mut block_env, - origin.header(), - self.evm_config().clone(), - ) - .map_err(Self::Error::from_eth_err)?; + self.provider().fill_env_with_header( + &mut cfg, + &mut block_env, + origin.header(), + self.evm_config().clone(), + )?; Ok(PendingBlockEnv::new(cfg, block_env, origin)) } @@ -122,7 +115,7 @@ pub trait LoadPendingBlock: EthApiTypes { /// Returns the locally built pending block fn local_pending_block( &self, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: SpawnBlocking, { @@ -204,17 +197,11 @@ pub trait LoadPendingBlock: EthApiTypes { /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. - fn build_block(&self, env: PendingBlockEnv) -> Result - where - EthApiError: From, - { + fn build_block(&self, env: PendingBlockEnv) -> EthResult { let PendingBlockEnv { cfg, block_env, origin } = env; let parent_hash = origin.build_target_hash(); - let state_provider = self - .provider() - .history_by_block_hash(parent_hash) - .map_err(Self::Error::from_eth_err)?; + let state_provider = self.provider().history_by_block_hash(parent_hash)?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database(state).with_bundle_update().build(); @@ -329,7 +316,7 @@ pub trait LoadPendingBlock: EthApiTypes { } err => { // this is an error that we should treat as fatal for this attempt - return Err(Self::Error::from_evm_err(err)) + return Err(err.into()) } } } @@ -372,7 +359,7 @@ pub trait LoadPendingBlock: EthApiTypes { ); // increment account balances for withdrawals - db.increment_balances(balance_increments).map_err(Self::Error::from_eth_err)?; + db.increment_balances(balance_increments)?; // merge all transitions into bundle state. 
db.merge_transitions(BundleRetention::PlainState); @@ -391,9 +378,7 @@ pub trait LoadPendingBlock: EthApiTypes { // calculate the state root let state_provider = &db.database; - let state_root = state_provider - .state_root(execution_outcome.state()) - .map_err(Self::Error::from_eth_err)?; + let state_root = state_provider.state_root(execution_outcome.state())?; // create the block header let transactions_root = calculate_transaction_root(&executed_txs); diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 63016e3d2ec8..5cd6c03c4d9f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -3,15 +3,14 @@ use futures::Future; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; use reth_rpc_types::AnyTransactionReceipt; -use crate::{EthApiTypes, FromEthApiError}; - /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + Send + Sync { +#[auto_impl::auto_impl(&, Arc)] +pub trait LoadReceipt: Send + Sync { /// Returns a handle for reading data from memory. /// /// Data access in default (L1) trait method implementations. @@ -23,17 +22,12 @@ pub trait LoadReceipt: EthApiTypes + Send + Sync { tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { // get all receipts for the block - let all_receipts = match self - .cache() - .get_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - { + let all_receipts = match self.cache().get_receipts(meta.block_hash).await? { Some(recpts) => recpts, - None => return Err(EthApiError::UnknownBlockNumber.into()), + None => return Err(EthApiError::UnknownBlockNumber), }; Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 48d350e0e72c..0da2a49c3b18 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -8,15 +8,15 @@ use reth_primitives::{Address, BlockId, Bytes, Header, B256, U256}; use reth_provider::{ BlockIdReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; +use reth_rpc_eth_types::{ + EthApiError, EthResult, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError, +}; use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; use reth_rpc_types_compat::proof::from_primitive_account_proof; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::db::BundleState; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; -use crate::{EthApiTypes, FromEthApiError}; - use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking}; /// Helper methods for `eth_` methods relating to state (accounts). 
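`LoadReceipt` here and `LoadPendingBlock` above gain `#[auto_impl::auto_impl(&, Arc)]`, which derives forwarding implementations so the traits are usable behind references and `Arc`s without hand-written boilerplate. Roughly what the macro expands to, shown on a single-method stand-in trait:

```rust
use std::sync::Arc;

trait LoadReceipt {
    fn cache_len(&self) -> usize;
}

// approximately what `#[auto_impl(&)]` generates
impl<'a, T: LoadReceipt + ?Sized> LoadReceipt for &'a T {
    fn cache_len(&self) -> usize {
        (**self).cache_len()
    }
}

// approximately what `#[auto_impl(Arc)]` generates
impl<T: LoadReceipt + ?Sized> LoadReceipt for Arc<T> {
    fn cache_len(&self) -> usize {
        (**self).cache_len()
    }
}
```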
@@ -32,7 +32,7 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { LoadState::transaction_count(self, address, block_id) } @@ -41,12 +41,11 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(this .state_at_block_id_or_latest(block_id)? - .account_code(address) - .map_err(Self::Error::from_eth_err)? + .account_code(address)? .unwrap_or_default() .original_bytes()) }) @@ -57,12 +56,11 @@ pub trait EthState: LoadState + SpawnBlocking { &self, address: Address, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(this .state_at_block_id_or_latest(block_id)? - .account_balance(address) - .map_err(Self::Error::from_eth_err)? + .account_balance(address)? .unwrap_or_default()) }) } @@ -73,12 +71,11 @@ pub trait EthState: LoadState + SpawnBlocking { address: Address, index: JsonStorageKey, block_id: Option, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.spawn_blocking_io(move |this| { Ok(B256::new( this.state_at_block_id_or_latest(block_id)? - .storage(address, index.0) - .map_err(Self::Error::from_eth_err)? + .storage(address, index.0)? .unwrap_or_default() .to_be_bytes(), )) @@ -91,25 +88,21 @@ pub trait EthState: LoadState + SpawnBlocking { address: Address, keys: Vec, block_id: Option, - ) -> Result< - impl Future> + Send, - Self::Error, - > + ) -> EthResult> + Send> where Self: EthApiSpec, { - let chain_info = self.chain_info().map_err(Self::Error::from_eth_err)?; + let chain_info = self.chain_info()?; let block_id = block_id.unwrap_or_default(); // Check whether the distance to the block exceeds the maximum configured window. let block_number = self .provider() - .block_number_for_id(block_id) - .map_err(Self::Error::from_eth_err)? + .block_number_for_id(block_id)? .ok_or(EthApiError::UnknownBlockNumber)?; let max_window = self.max_proof_window(); if chain_info.best_number.saturating_sub(block_number) > max_window { - return Err(EthApiError::ExceedsMaxProofWindow.into()) + return Err(EthApiError::ExceedsMaxProofWindow) } Ok(async move { @@ -120,9 +113,7 @@ pub trait EthState: LoadState + SpawnBlocking { self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; let storage_keys = keys.iter().map(|key| key.0).collect::>(); - let proof = state - .proof(&BundleState::default(), address, &storage_keys) - .map_err(Self::Error::from_eth_err)?; + let proof = state.proof(&BundleState::default(), address, &storage_keys)?; Ok(from_primitive_account_proof(proof)) }) .await @@ -133,7 +124,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. -pub trait LoadState: EthApiTypes { +pub trait LoadState { /// Returns a handle for reading state from database. /// /// Data access in default trait method implementations. @@ -150,21 +141,21 @@ pub trait LoadState: EthApiTypes { fn pool(&self) -> impl TransactionPool; /// Returns the state at the given block number - fn state_at_hash(&self, block_hash: B256) -> Result { - self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) + fn state_at_hash(&self, block_hash: B256) -> EthResult { + Ok(self.provider().history_by_block_hash(block_hash)?) 
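`get_proof` above rejects requests whose target block is too far behind the chain head, because building an EIP-1186 proof against deep history is expensive without bound. The guard reduces to a saturating distance check; a standalone version (the function name is illustrative):

```rust
/// Rejects proof requests for blocks older than the configured window.
fn check_proof_window(best: u64, target: u64, max_window: u64) -> Result<(), &'static str> {
    if best.saturating_sub(target) > max_window {
        return Err("distance to target block exceeds maximum proof window");
    }
    Ok(())
}
```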
} /// Returns the state at the given [`BlockId`] enum. /// /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this /// will only return canonical state. See also - fn state_at_block_id(&self, at: BlockId) -> Result { - self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err) + fn state_at_block_id(&self, at: BlockId) -> EthResult { + Ok(self.provider().state_by_block_id(at)?) } /// Returns the _latest_ state - fn latest_state(&self) -> Result { - self.provider().latest().map_err(Self::Error::from_eth_err) + fn latest_state(&self) -> EthResult { + Ok(self.provider().latest()?) } /// Returns the state at the given [`BlockId`] enum or the latest. @@ -173,7 +164,7 @@ pub trait LoadState: EthApiTypes { fn state_at_block_id_or_latest( &self, block_id: Option, - ) -> Result { + ) -> EthResult { if let Some(block_id) = block_id { self.state_at_block_id(block_id) } else { @@ -190,7 +181,7 @@ pub trait LoadState: EthApiTypes { fn evm_env_at( &self, at: BlockId, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -202,14 +193,9 @@ pub trait LoadState: EthApiTypes { } else { // Use cached values if there is no pending block let block_hash = LoadPendingBlock::provider(self) - .block_hash_for_id(at) - .map_err(Self::Error::from_eth_err)? + .block_hash_for_id(at)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let (cfg, env) = self - .cache() - .get_evm_env(block_hash) - .await - .map_err(Self::Error::from_eth_err)?; + let (cfg, env) = self.cache().get_evm_env(block_hash).await?; Ok((cfg, env, block_hash.into())) } } @@ -221,7 +207,7 @@ pub trait LoadState: EthApiTypes { fn evm_env_for_raw_block( &self, header: &Header, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + SpawnBlocking, { @@ -244,7 +230,7 @@ pub trait LoadState: EthApiTypes { &self, address: Address, block_id: Option, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: SpawnBlocking, { @@ -254,20 +240,15 @@ pub trait LoadState: EthApiTypes { if let Some(highest_nonce) = address_txs.iter().map(|item| item.transaction.nonce()).max() { - let tx_count = highest_nonce.checked_add(1).ok_or(Self::Error::from( - EthApiError::InvalidTransaction(RpcInvalidTransactionError::NonceMaxValue), - ))?; + let tx_count = highest_nonce + .checked_add(1) + .ok_or(RpcInvalidTransactionError::NonceMaxValue)?; return Ok(U256::from(tx_count)) } } let state = this.state_at_block_id_or_latest(block_id)?; - Ok(U256::from( - state - .account_nonce(address) - .map_err(Self::Error::from_eth_err)? 
- .unwrap_or_default(), - )) + Ok(U256::from(state.account_nonce(address)?.unwrap_or_default())) }) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 09ad7f22fa21..d48e566ed51d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -6,15 +6,13 @@ use reth_primitives::B256; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, - EthApiError, + EthApiError, EthResult, }; use reth_rpc_types::{BlockId, TransactionInfo}; use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; -use crate::FromEvmError; - use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. @@ -31,10 +29,10 @@ pub trait Trace: LoadState { db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> where DB: Database, - EthApiError: From, + ::Error: Into, I: GetInspector, { self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) @@ -50,15 +48,14 @@ pub trait Trace: LoadState { db: DB, env: EnvWithHandlerCfg, inspector: I, - ) -> Result<(ResultAndState, EnvWithHandlerCfg, DB), Self::Error> + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)> where DB: Database, - EthApiError: From, - + ::Error: Into, I: GetInspector, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact().map_err(Self::Error::from_evm_err)?; + let res = evm.transact()?; let (db, env) = evm.into_db_and_env_with_handler_cfg(); Ok((res, env, db)) } @@ -76,10 +73,10 @@ pub trait Trace: LoadState { config: TracingInspectorConfig, at: BlockId, f: F, - ) -> Result + ) -> EthResult where Self: Call, - F: FnOnce(TracingInspector, ResultAndState) -> Result, + F: FnOnce(TracingInspector, ResultAndState) -> EthResult, { self.with_state_at_block(at, |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); @@ -102,10 +99,10 @@ pub trait Trace: LoadState { config: TracingInspectorConfig, at: BlockId, f: F, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock + Call, - F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> Result + F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> EthResult + Send + 'static, R: Send + 'static, @@ -133,7 +130,7 @@ pub trait Trace: LoadState { hash: B256, config: TracingInspectorConfig, f: F, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadPendingBlock + LoadTransaction + Call, F: FnOnce( @@ -141,7 +138,7 @@ pub trait Trace: LoadState { TracingInspector, ResultAndState, StateCacheDb<'_>, - ) -> Result + ) -> EthResult + Send + 'static, R: Send + 'static, @@ -163,15 +160,10 @@ pub trait Trace: LoadState { hash: B256, mut inspector: Insp, f: F, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadPendingBlock + LoadTransaction + Call, - F: FnOnce( - TransactionInfo, - Insp, - ResultAndState, - StateCacheDb<'_>, - ) -> Result + F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDb<'_>) -> EthResult + Send + 'static, Insp: for<'a, 'b> Inspector> + Send + 'static, @@ -230,7 +222,7 @@ 
pub trait Trace: LoadState { highest_index: Option, config: TracingInspectorConfig, f: F, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>>> + Send where Self: LoadBlock, F: Fn( @@ -239,7 +231,7 @@ pub trait Trace: LoadState { ExecutionResult, &EvmState, &StateCacheDb<'_>, - ) -> Result + ) -> EthResult + Send + 'static, R: Send + 'static, @@ -268,16 +260,10 @@ pub trait Trace: LoadState { highest_index: Option, mut inspector_setup: Setup, f: F, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>>> + Send where Self: LoadBlock, - F: Fn( - TransactionInfo, - Insp, - ExecutionResult, - &EvmState, - &StateCacheDb<'_>, - ) -> Result + F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, @@ -374,7 +360,7 @@ pub trait Trace: LoadState { block_id: BlockId, config: TracingInspectorConfig, f: F, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>>> + Send where Self: LoadBlock, // This is the callback that's invoked for each transaction with the inspector, the result, @@ -385,7 +371,7 @@ pub trait Trace: LoadState { ExecutionResult, &EvmState, &StateCacheDb<'_>, - ) -> Result + ) -> EthResult + Send + 'static, R: Send + 'static, @@ -412,18 +398,12 @@ pub trait Trace: LoadState { block_id: BlockId, insp_setup: Setup, f: F, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>>> + Send where Self: LoadBlock, // This is the callback that's invoked for each transaction with the inspector, the result, // state and db - F: Fn( - TransactionInfo, - Insp, - ExecutionResult, - &EvmState, - &StateCacheDb<'_>, - ) -> Result + F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index bd2b2ffd55f0..fa4c9be30787 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -24,11 +24,9 @@ use reth_rpc_types::{ use reth_rpc_types_compat::transaction::from_recovered_with_block_context; use reth_transaction_pool::{TransactionOrigin, TransactionPool}; -use crate::{FromEthApiError, IntoEthApiError}; +use super::EthSigner; -use super::{ - Call, EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking, -}; +use super::{Call, EthApiSpec, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. @@ -77,7 +75,7 @@ pub trait EthTransactions: LoadTransaction { fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { LoadTransaction::transaction_by_hash(self, hash) } @@ -87,10 +85,8 @@ pub trait EthTransactions: LoadTransaction { fn transactions_by_block( &self, block: B256, - ) -> impl Future>, Self::Error>> + Send { - async move { - self.cache().get_block_transactions(block).await.map_err(Self::Error::from_eth_err) - } + ) -> impl Future>>> + Send { + async move { Ok(self.cache().get_block_transactions(block).await?) } } /// Returns the EIP-2718 encoded transaction by hash. 
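Looking back at the trace helpers in the hunks above: they accept an inspector factory (`Setup: FnMut() -> Insp`) rather than a single inspector, so every transaction in a block is traced with fresh inspector state. The shape of that pattern, with placeholder types standing in for revm's inspectors:

```rust
/// Traces each transaction with a freshly built inspector; `setup` runs once
/// per transaction so no tracing state leaks between them.
fn trace_block<I>(
    txs: &[u64],
    mut setup: impl FnMut() -> I,
    mut trace_one: impl FnMut(u64, I) -> String,
) -> Vec<String> {
    txs.iter().map(|&tx| trace_one(tx, setup())).collect()
}
```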
@@ -103,7 +99,7 @@ pub trait EthTransactions: LoadTransaction { fn raw_transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { // Note: this is mostly used to fetch pooled transactions so we check the pool first if let Some(tx) = @@ -114,8 +110,7 @@ pub trait EthTransactions: LoadTransaction { self.spawn_blocking_io(move |ref this| { Ok(LoadTransaction::provider(this) - .transaction_by_hash(hash) - .map_err(Self::Error::from_eth_err)? + .transaction_by_hash(hash)? .map(|tx| tx.envelope_encoded())) }) .await @@ -126,7 +121,7 @@ pub trait EthTransactions: LoadTransaction { fn historical_transaction_by_hash_at( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { match self.transaction_by_hash_at(hash).await? { None => Ok(None), @@ -142,7 +137,7 @@ pub trait EthTransactions: LoadTransaction { fn transaction_receipt( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadReceipt + 'static, { @@ -162,26 +157,19 @@ pub trait EthTransactions: LoadTransaction { fn load_transaction_and_receipt( &self, hash: TxHash, - ) -> impl Future< - Output = Result, Self::Error>, - > + Send + ) -> impl Future>> + Send where Self: 'static, { let this = self.clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match LoadTransaction::provider(&this) - .transaction_by_hash_with_meta(hash) - .map_err(Self::Error::from_eth_err)? - { - Some((tx, meta)) => (tx, meta), - None => return Ok(None), - }; + let (tx, meta) = + match LoadTransaction::provider(&this).transaction_by_hash_with_meta(hash)? { + Some((tx, meta)) => (tx, meta), + None => return Ok(None), + }; - let receipt = match EthTransactions::provider(&this) - .receipt_by_hash(hash) - .map_err(Self::Error::from_eth_err)? - { + let receipt = match EthTransactions::provider(&this).receipt_by_hash(hash)? { Some(recpt) => recpt, None => return Ok(None), }; @@ -197,7 +185,7 @@ pub trait EthTransactions: LoadTransaction { &self, block_id: BlockId, index: usize, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadBlock, { @@ -228,7 +216,7 @@ pub trait EthTransactions: LoadTransaction { &self, block_id: BlockId, index: usize, - ) -> impl Future, Self::Error>> + Send + ) -> impl Future>> + Send where Self: LoadBlock, { @@ -246,10 +234,7 @@ pub trait EthTransactions: LoadTransaction { /// Decodes and recovers the transaction and submits it to the pool. /// /// Returns the hash of the transaction. - fn send_raw_transaction( - &self, - tx: Bytes, - ) -> impl Future> + Send { + fn send_raw_transaction(&self, tx: Bytes) -> impl Future> + Send { async move { // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. 
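// Editorial sketch, not part of the patch: the submission flow described in the
// comment above. On OP chains the raw transaction is handed to a configured sequencer
// forwarder before the local pool submission; the forwarder trait and pool below are
// simplified stand-ins for `RawTransactionForwarder` and the real transaction pool.
trait RawTxForwarder {
    fn forward_raw_transaction(&self, tx: &[u8]);
}

fn submit_raw_transaction(
    forwarder: Option<&dyn RawTxForwarder>,
    pool: &mut Vec<Vec<u8>>,
    tx: Vec<u8>,
) {
    if let Some(fwd) = forwarder {
        // best-effort forward so the sequencer can include the tx in its blocks
        fwd.forward_raw_transaction(&tx);
    }
    // the tx is still inserted locally with a `Local` origin
    pool.push(tx);
}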
@@ -265,11 +250,8 @@ pub trait EthTransactions: LoadTransaction { ); // submit the transaction to the pool with a `Local` origin - let hash = self - .pool() - .add_transaction(TransactionOrigin::Local, pool_transaction) - .await - .map_err(Self::Error::from_eth_err)?; + let hash = + self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; Ok(hash) } @@ -280,18 +262,18 @@ pub trait EthTransactions: LoadTransaction { fn send_transaction( &self, mut request: TransactionRequest, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: EthApiSpec + LoadBlock + LoadPendingBlock + LoadFee + Call, { async move { let from = match request.from { Some(from) => from, - None => return Err(SignError::NoAccount.into_eth_err()), + None => return Err(SignError::NoAccount.into()), }; if self.find_signer(&from).is_err() { - return Err(SignError::NoAccount.into_eth_err()); + return Err(SignError::NoAccount.into()); } // set nonce if not already set before @@ -465,7 +447,7 @@ pub trait EthTransactions: LoadTransaction { TypedTransactionRequest::EIP4844(req) } - None => return Err(EthApiError::ConflictingFeeFieldsInRequest.into()), + None => return Err(EthApiError::ConflictingFeeFieldsInRequest), }; let signed_tx = self.sign_request(&from, transaction)?; @@ -475,14 +457,13 @@ pub trait EthTransactions: LoadTransaction { let pool_transaction = match recovered.try_into() { Ok(converted) => <::Pool as TransactionPool>::Transaction::from_recovered_pooled_transaction(converted), - Err(_) => return Err(EthApiError::TransactionConversionError.into()), + Err(_) => return Err(EthApiError::TransactionConversionError), }; // submit the transaction to the pool with a `Local` origin let hash = LoadTransaction::pool(self) .add_transaction(TransactionOrigin::Local, pool_transaction) - .await - .map_err(Self::Error::from_eth_err)?; + .await?; Ok(hash) } @@ -493,16 +474,16 @@ pub trait EthTransactions: LoadTransaction { &self, from: &Address, request: TypedTransactionRequest, - ) -> Result { + ) -> EthResult { for signer in self.signers().read().iter() { if signer.is_signer_for(from) { return match signer.sign_transaction(request, from) { Ok(tx) => Ok(tx), - Err(e) => Err(e.into_eth_err()), + Err(e) => Err(e.into()), } } } - Err(EthApiError::InvalidTransactionSignature.into()) + Err(EthApiError::InvalidTransactionSignature) } /// Signs given message. Returns the signature. @@ -510,37 +491,23 @@ pub trait EthTransactions: LoadTransaction { &self, account: Address, message: Bytes, - ) -> impl Future> + Send { - async move { - Ok(self - .find_signer(&account)? - .sign(account, &message) - .await - .map_err(Self::Error::from_eth_err)? - .to_hex_bytes()) - } + ) -> impl Future> + Send { + async move { Ok(self.find_signer(&account)?.sign(account, &message).await?.to_hex_bytes()) } } /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. - fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result { - Ok(self - .find_signer(&account)? - .sign_typed_data(account, data) - .map_err(Self::Error::from_eth_err)? - .to_hex_bytes()) + fn sign_typed_data(&self, data: &TypedData, account: Address) -> EthResult { + Ok(self.find_signer(&account)?.sign_typed_data(account, data)?.to_hex_bytes()) } /// Returns the signer for the given account, if found in configured signers. 
- fn find_signer( - &self, - account: &Address, - ) -> Result, Self::Error> { + fn find_signer(&self, account: &Address) -> Result, SignError> { self.signers() .read() .iter() .find(|signer| signer.is_signer_for(account)) .map(|signer| dyn_clone::clone_box(&**signer)) - .ok_or_else(|| SignError::NoAccount.into_eth_err()) + .ok_or(SignError::NoAccount) } } @@ -576,16 +543,12 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future>> + Send { async move { // Try to find the transaction on disk let mut resp = self .spawn_blocking_io(move |this| { - match this - .provider() - .transaction_by_hash_with_meta(hash) - .map_err(Self::Error::from_eth_err)? - { + match this.provider().transaction_by_hash_with_meta(hash)? { None => Ok(None), Some((tx, meta)) => { // Note: we assume this transaction is valid, because it's mined (or @@ -627,8 +590,7 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_by_hash_at( &self, transaction_hash: B256, - ) -> impl Future, Self::Error>> + Send - { + ) -> impl Future>> + Send { async move { match self.transaction_by_hash(transaction_hash).await? { None => Ok(None), @@ -663,8 +625,8 @@ pub trait LoadTransaction: SpawnBlocking { fn transaction_and_block( &self, hash: B256, - ) -> impl Future, Self::Error>> - + Send { + ) -> impl Future>> + Send + { async move { let (transaction, at) = match self.transaction_by_hash_at(hash).await? { None => return Ok(None), @@ -676,11 +638,7 @@ pub trait LoadTransaction: SpawnBlocking { BlockId::Hash(hash) => hash.block_hash, _ => return Ok(None), }; - let block = self - .cache() - .get_block_with_senders(block_hash) - .await - .map_err(Self::Error::from_eth_err)?; + let block = self.cache().get_block_with_senders(block_hash).await?; Ok(block.map(|block| (transaction, block.seal(block_hash)))) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/types.rs b/crates/rpc/rpc-eth-api/src/helpers/types.rs deleted file mode 100644 index 088f9d9b69e9..000000000000 --- a/crates/rpc/rpc-eth-api/src/helpers/types.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Trait for specifying `eth` API types that may be network dependent. - -use std::error::Error; - -use crate::{AsEthApiError, FromEthApiError, FromEvmError}; - -/// Network specific `eth` API types. -pub trait EthApiTypes: Send + Sync { - /// Extension of [`EthApiError`](reth_rpc_eth_types::EthApiError), with network specific errors. 
-    type Error: Into<jsonrpsee_types::error::ErrorObject<'static>>
-        + FromEthApiError
-        + AsEthApiError
-        + FromEvmError
-        + Error
-        + Send
-        + Sync;
-}
diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs
index c707a94eef4f..1aed94d5cc6e 100644
--- a/crates/rpc/rpc-eth-api/src/lib.rs
+++ b/crates/rpc/rpc-eth-api/src/lib.rs
@@ -21,10 +21,6 @@ pub mod pubsub;
 pub use bundle::{EthBundleApiServer, EthCallBundleApiServer};
 pub use core::{EthApiServer, FullEthApiServer};
 pub use filter::EthFilterApiServer;
-pub use helpers::{
-    error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError},
-    types::EthApiTypes,
-};
 pub use pubsub::EthPubSubApiServer;
 
 pub use helpers::transaction::RawTransactionForwarder;
diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml
index 3fb20836e46f..b1c307191025 100644
--- a/crates/rpc/rpc-eth-types/Cargo.toml
+++ b/crates/rpc/rpc-eth-types/Cargo.toml
@@ -55,3 +55,13 @@ tracing.workspace = true
 
 [dev-dependencies]
 serde_json.workspace = true
+
+[features]
+optimism = [
+    "reth-primitives/optimism",
+    "reth-provider/optimism",
+    "reth-revm/optimism",
+    "reth-chainspec/optimism",
+    "reth-execution-types/optimism",
+    "revm/optimism"
+]
\ No newline at end of file
diff --git a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs
index 8ca7208d22bf..77d861343307 100644
--- a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs
+++ b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs
@@ -61,12 +61,15 @@ where
         }
     }
 
-    /// Remove consumers for a given key, this will also remove the key from the cache.
+    /// Remove consumers for a given key.
     pub fn remove(&mut self, key: &K) -> Option<Vec<S>> {
-        let _ = self.cache.remove(key);
-        self.queued
-            .remove(key)
-            .inspect(|removed| self.metrics.queued_consumers_count.decrement(removed.len() as f64))
+        match self.queued.remove(key) {
+            Some(removed) => {
+                self.metrics.queued_consumers_count.decrement(removed.len() as f64);
+                Some(removed)
+            }
+            None => None,
+        }
     }
 
     /// Returns a reference to the value for a given key and promotes that element to be the most
diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs
index 62e5d9d97ce9..67fd11eae2c0 100644
--- a/crates/rpc/rpc-eth-types/src/error.rs
+++ b/crates/rpc/rpc-eth-types/src/error.rs
@@ -17,7 +17,6 @@ use reth_transaction_pool::error::{
 };
 use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError};
 use revm_inspectors::tracing::{js::JsInspectorError, MuxError};
-use tracing::error;
 
 /// Result alias
 pub type EthResult<T> = Result<T, EthApiError>;
@@ -138,11 +137,6 @@ impl EthApiError {
     pub fn other<E: ToRpcError>(err: E) -> Self {
         Self::Other(Box::new(err))
     }
-
-    /// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`]
-    pub const fn is_gas_too_high(&self) -> bool {
-        matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh))
-    }
 }
 
 impl From<EthApiError> for jsonrpsee_types::error::ErrorObject<'static> {
@@ -378,11 +372,6 @@ pub enum RpcInvalidTransactionError {
     /// Any other error
     #[error("{0}")]
     Other(Box<dyn ToRpcError>),
-    /// Unexpected [`InvalidTransaction`](revm::primitives::InvalidTransaction) error, Optimism
-    /// errors should not be handled on this level.
- // TODO: Remove when optimism feature removed in revm - #[error("unexpected transaction error")] - UnexpectedTransactionError, } impl RpcInvalidTransactionError { @@ -392,6 +381,29 @@ impl RpcInvalidTransactionError { } } +/// Optimism specific invalid transaction errors +#[cfg(feature = "optimism")] +#[derive(thiserror::Error, Debug)] +pub enum OptimismInvalidTransactionError { + /// A deposit transaction was submitted as a system transaction post-regolith. + #[error("no system transactions allowed after regolith")] + DepositSystemTxPostRegolith, + /// A deposit transaction halted post-regolith + #[error("deposit transaction halted after regolith")] + HaltedDepositPostRegolith, +} + +#[cfg(feature = "optimism")] +impl ToRpcError for OptimismInvalidTransactionError { + fn to_rpc_error(&self) -> jsonrpsee_types::error::ErrorObject<'static> { + match self { + Self::DepositSystemTxPostRegolith | Self::HaltedDepositPostRegolith => { + rpc_err(EthRpcErrorCode::TransactionRejected.code(), self.to_string(), None) + } + } + } +} + impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. const fn error_code(&self) -> i32 { @@ -450,7 +462,7 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::InvalidChainId => Self::InvalidChainId, InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap, InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow, - InvalidTransaction::CallerGasLimitMoreThanBlock | + InvalidTransaction::CallerGasLimitMoreThanBlock => Self::GasTooHigh, InvalidTransaction::CallGasCostMoreThanGasLimit => Self::GasTooHigh, InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA, InvalidTransaction::LackOfFundForMaxFee { .. } => Self::InsufficientFunds, @@ -476,15 +488,17 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::AuthorizationListInvalidFields => { Self::AuthorizationListInvalidFields } - #[allow(unreachable_patterns)] - _ => { - error!(target: "rpc", - ?err, - "unexpected transaction error" - ); - - Self::UnexpectedTransactionError - } + #[cfg(feature = "optimism")] + InvalidTransaction::OptimismError(err) => match err { + revm_primitives::OptimismInvalidTransaction::DepositSystemTxPostRegolith => { + Self::other(OptimismInvalidTransactionError::DepositSystemTxPostRegolith) + } + revm_primitives::OptimismInvalidTransaction::HaltedDepositPostRegolith => { + Self::Other(Box::new( + OptimismInvalidTransactionError::HaltedDepositPostRegolith, + )) + } + }, } } } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 41ebce32dfb3..255273194a37 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -102,11 +102,9 @@ where } } -/// A future representing the response of an RPC request #[pin_project] #[allow(missing_debug_implementations)] pub struct ResponseFuture { - /// The kind of response future, error or pending #[pin] kind: Kind, } diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 8387bb160e8b..e4f7dbe06f16 100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -15,8 +15,6 @@ mod auth_client_layer; mod auth_layer; mod jwt_validator; -pub use auth_layer::{AuthService, ResponseFuture}; - // Export alloy JWT types pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index e433bda0d4a7..e3c129bf6e28 100644 --- 
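// Editorial sketch, not part of the patch: how both new Optimism variants surface
// over JSON-RPC. Both map to the conventional "transaction rejected" error code
// (-32003); `RpcError` is a stand-in for jsonrpsee's `ErrorObject<'static>`.
#[derive(Debug)]
enum OptimismInvalidTransactionError {
    DepositSystemTxPostRegolith,
    HaltedDepositPostRegolith,
}

struct RpcError {
    code: i32,
    message: String,
}

impl OptimismInvalidTransactionError {
    fn to_rpc_error(&self) -> RpcError {
        let message = match self {
            Self::DepositSystemTxPostRegolith => "no system transactions allowed after regolith",
            Self::HaltedDepositPostRegolith => "deposit transaction halted after regolith",
        };
        RpcError { code: -32003, message: message.to_string() }
    }
}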
a/crates/rpc/rpc-server-types/src/constants.rs
+++ b/crates/rpc/rpc-server-types/src/constants.rs
@@ -87,9 +87,6 @@ pub mod gas_oracle {
     /// Taken from Geth's implementation in order to pass the hive tests
     ///
     pub const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015;
-
-    /// Gas required at the beginning of a call.
-    pub const CALL_STIPEND_GAS: u64 = 2_300;
 }
 
 /// Cache specific constants
diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml
index 46f957d25d1a..2f52b907e144 100644
--- a/crates/rpc/rpc-types/Cargo.toml
+++ b/crates/rpc/rpc-types/Cargo.toml
@@ -33,6 +33,7 @@ jsonrpsee-types = { workspace = true, optional = true }
 alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde", "arbitrary"] }
 arbitrary = { workspace = true, features = ["derive"] }
 proptest.workspace = true
+proptest-derive.workspace = true
 rand.workspace = true
 similar-asserts.workspace = true
 bytes.workspace = true
@@ -40,4 +41,4 @@ serde_json.workspace = true
 
 [features]
 default = ["jsonrpsee-types"]
-arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"]
+arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"]
\ No newline at end of file
diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs
index 47c10e881579..6a73c740213a 100644
--- a/crates/rpc/rpc-types/src/lib.rs
+++ b/crates/rpc/rpc-types/src/lib.rs
@@ -11,11 +11,8 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #[allow(hidden_glob_reexports)]
 mod eth;
+mod peer;
 
-/// Alias for a peer identifier
-pub type PeerId = B512;
-
-use alloy_primitives::B512;
 // re-export for convenience
 pub use alloy_rpc_types::serde_helpers;
 
@@ -54,3 +51,5 @@ pub use eth::{
     error::ToRpcError,
     transaction::{self, TransactionRequest, TypedTransactionRequest},
 };
+
+pub use peer::*;
diff --git a/crates/rpc/rpc-types/src/net.rs b/crates/rpc/rpc-types/src/net.rs
new file mode 100644
index 000000000000..eb77ac7922d4
--- /dev/null
+++ b/crates/rpc/rpc-types/src/net.rs
@@ -0,0 +1,13 @@
+use alloy_rpc_types_admin::EthProtocolInfo;
+use serde::{Deserialize, Serialize};
+
+/// The status of the network being run by the local node.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct NetworkStatus {
+    /// The local node client version.
+    pub client_version: String,
+    /// The current Ethereum protocol version.
+    pub protocol_version: u64,
+    /// Information about the Ethereum Wire Protocol.
+ pub eth_protocol_info: EthProtocolInfo, +} diff --git a/crates/rpc/rpc-types/src/peer.rs b/crates/rpc/rpc-types/src/peer.rs new file mode 100644 index 000000000000..a07e61d00285 --- /dev/null +++ b/crates/rpc/rpc-types/src/peer.rs @@ -0,0 +1,4 @@ +use alloy_primitives::B512; + +/// Alias for a peer identifier +pub type PeerId = B512; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 78db90e81a09..5c2ebaa2357a 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -90,4 +90,5 @@ optimism = [ "reth-provider/optimism", "reth-rpc-eth-api/optimism", "reth-revm/optimism", + "reth-rpc-eth-types/optimism", ] diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index cbf35a7d51c5..847ab6ae5a33 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -14,11 +14,8 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, -}; -use reth_rpc_eth_types::{EthApiError, StateCacheDb}; +use reth_rpc_eth_api::helpers::{Call, EthApiSpec, EthTransactions, TraceExt}; +use reth_rpc_eth_types::{EthApiError, EthResult, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ state::EvmOverrides, @@ -71,7 +68,7 @@ where + StateProviderFactory + EvmEnvProvider + 'static, - Eth: EthApiTypes + TraceExt + 'static, + Eth: TraceExt + 'static, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -86,7 +83,7 @@ where cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, - ) -> Result, Eth::Error> { + ) -> EthResult> { if transactions.is_empty() { // nothing to trace return Ok(Vec::new()) @@ -144,10 +141,9 @@ where &self, rlp_block: Bytes, opts: GethDebugTracingOptions, - ) -> Result, Eth::Error> { - let block = Block::decode(&mut rlp_block.as_ref()) - .map_err(BlockError::RlpDecodeRawBlock) - .map_err(Eth::Error::from_eth_err)?; + ) -> EthResult> { + let block = + Block::decode(&mut rlp_block.as_ref()).map_err(BlockError::RlpDecodeRawBlock)?; let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; // we trace on top the block's parent block @@ -162,9 +158,8 @@ where .map(|tx| { tx.into_ecrecovered() .ok_or_else(|| EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) }) - .collect::, Eth::Error>>()? + .collect::>>()? } else { block .body @@ -172,9 +167,8 @@ where .map(|tx| { tx.into_ecrecovered_unchecked() .ok_or_else(|| EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) }) - .collect::, Eth::Error>>()? + .collect::>>()? }; self.trace_block(parent.into(), transactions, cfg, block_env, opts).await @@ -185,12 +179,11 @@ where &self, block_id: BlockId, opts: GethDebugTracingOptions, - ) -> Result, Eth::Error> { + ) -> EthResult> { let block_hash = self .inner .provider - .block_hash_for_id(block_id) - .map_err(Eth::Error::from_eth_err)? + .block_hash_for_id(block_id)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; let ((cfg, block_env, _), block) = futures::try_join!( @@ -220,9 +213,9 @@ where &self, tx_hash: B256, opts: GethDebugTracingOptions, - ) -> Result { + ) -> EthResult { let (transaction, block) = match self.inner.eth_api.transaction_and_block(tx_hash).await? 
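// Editorial sketch, not part of the patch: `NetworkStatus` as it would appear on the
// wire. `EthProtocolInfo` is replaced by a local stand-in so the example is
// self-contained; the real type comes from alloy-rpc-types-admin.
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize)]
struct EthProtocolInfo {
    network: u64,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct NetworkStatus {
    client_version: String,
    protocol_version: u64,
    eth_protocol_info: EthProtocolInfo,
}

fn main() {
    let status = NetworkStatus {
        client_version: "reth/v1.0.0".to_string(),
        protocol_version: 68,
        eth_protocol_info: EthProtocolInfo { network: 1 },
    };
    // e.g. {"client_version":"reth/v1.0.0","protocol_version":68,...}
    println!("{}", serde_json::to_string(&status).expect("serializes"));
}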
{ - None => return Err(EthApiError::TransactionNotFound.into()), + None => return Err(EthApiError::TransactionNotFound), Some(res) => res, }; let (cfg, block_env, _) = self.inner.eth_api.evm_env_at(block.hash().into()).await?; @@ -284,7 +277,7 @@ where call: TransactionRequest, block_id: Option, opts: GethDebugTracingCallOptions, - ) -> Result { + ) -> EthResult { let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { tracing_options, state_overrides, block_overrides } = opts; @@ -320,9 +313,8 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; + let (res, _) = this.eth_api().inspect(db, env, &mut inspector)?; let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() .geth_call_traces(call_config, res.result.gas_used()); Ok(frame.into()) @@ -338,24 +330,22 @@ where TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let frame = self - .inner - .eth_api - .spawn_with_call_at(call, at, overrides, move |db, env| { - // wrapper is hack to get around 'higher-ranked lifetime error', - // see - let db = db.0; - - let (res, env) = - this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, db) - .map_err(Eth::Error::from_eth_err)?; - Ok(frame) - }) - .await?; + let frame = + self.inner + .eth_api + .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', + // see + let db = db.0; + + let (res, _) = + this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let frame = inspector + .into_geth_builder() + .geth_prestate_traces(&res, prestate_config, db)?; + Ok(frame) + }) + .await?; return Ok(frame.into()) } GethDebugBuiltInTracerType::NoopTracer => Ok(NoopFrame::default().into()), @@ -364,8 +354,7 @@ where .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(Eth::Error::from_eth_err)?; + let mut inspector = MuxInspector::try_from_config(mux_config)?; let frame = self .inner @@ -377,9 +366,7 @@ where let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector - .try_into_mux_frame(&res, db) - .map_err(Eth::Error::from_eth_err)?; + let frame = inspector.try_into_mux_frame(&res, db)?; Ok(frame.into()) }) .await?; @@ -399,11 +386,10 @@ where // let db = db.0; - let mut inspector = - JsInspector::new(code, config).map_err(Eth::Error::from_eth_err)?; + let mut inspector = JsInspector::new(code, config)?; let (res, _) = this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; - inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err) + Ok(inspector.json_result(res, &env, db)?) 
}) .await?; @@ -417,20 +403,17 @@ where let mut inspector = TracingInspector::new(inspector_config); - let (res, tx_gas_limit, inspector) = self + let (res, inspector) = self .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; - Ok((res, env.tx.gas_limit, inspector)) + let (res, _) = this.eth_api().inspect(db, env, &mut inspector)?; + Ok((res, inspector)) }) .await?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(tx_gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); Ok(frame.into()) } @@ -443,9 +426,9 @@ where bundles: Vec, state_context: Option, opts: Option, - ) -> Result>, Eth::Error> { + ) -> EthResult>> { if bundles.is_empty() { - return Err(EthApiError::InvalidParams(String::from("bundles are empty.")).into()) + return Err(EthApiError::InvalidParams(String::from("bundles are empty."))) } let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); @@ -563,7 +546,7 @@ where env: EnvWithHandlerCfg, db: &mut StateCacheDb<'_>, transaction_context: Option, - ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { + ) -> EthResult<(GethTrace, revm_primitives::EvmState)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; if let Some(tracer) = tracer { @@ -583,10 +566,9 @@ where TracingInspectorConfig::from_geth_call_config(&call_config), ); - let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) .into_geth_builder() .geth_call_traces(call_config, res.result.gas_used()); @@ -600,13 +582,13 @@ where let mut inspector = TracingInspector::new( TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ); - let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_prestate_traces(&res, prestate_config, db) - .map_err(Eth::Error::from_eth_err)?; + let frame = inspector.into_geth_builder().geth_prestate_traces( + &res, + prestate_config, + db, + )?; return Ok((frame.into(), res.state)) } @@ -618,13 +600,10 @@ where .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(Eth::Error::from_eth_err)?; + let mut inspector = MuxInspector::try_from_config(mux_config)?; let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; - let frame = inspector - .try_into_mux_frame(&res, db) - .map_err(Eth::Error::from_eth_err)?; + let frame = inspector.try_into_mux_frame(&res, db)?; return Ok((frame.into(), res.state)) } }, @@ -634,13 +613,11 @@ where code, config, transaction_context.unwrap_or_default(), - ) - .map_err(Eth::Error::from_eth_err)?; + )?; let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let state = res.state.clone(); - let result = - inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err)?; + let result = inspector.json_result(res, &env, db)?; Ok((GethTrace::JS(result), state)) } } @@ -651,13 +628,10 @@ where let mut inspector = 
TracingInspector::new(inspector_config); - let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config); Ok((frame.into(), res.state)) } @@ -716,7 +690,7 @@ where /// /// Returns the bytes of the transaction for the given hash. async fn raw_transaction(&self, hash: B256) -> RpcResult> { - self.inner.eth_api.raw_transaction_by_hash(hash).await.map_err(Into::into) + Ok(self.inner.eth_api.raw_transaction_by_hash(hash).await?) } /// Handler for `debug_getRawTransactions` @@ -765,9 +739,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()) - .await - .map_err(Into::into) + Ok(Self::debug_trace_raw_block(self, rlp_block, opts.unwrap_or_default()).await?) } /// Handler for `debug_traceBlockByHash` @@ -777,9 +749,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()) - .await - .map_err(Into::into) + Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) } /// Handler for `debug_traceBlockByNumber` @@ -789,9 +759,7 @@ where opts: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()) - .await - .map_err(Into::into) + Ok(Self::debug_trace_block(self, block.into(), opts.unwrap_or_default()).await?) } /// Handler for `debug_traceTransaction` @@ -801,9 +769,7 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()) - .await - .map_err(Into::into) + Ok(Self::debug_trace_transaction(self, tx_hash, opts.unwrap_or_default()).await?) } /// Handler for `debug_traceCall` @@ -814,9 +780,7 @@ where opts: Option, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()) - .await - .map_err(Into::into) + Ok(Self::debug_trace_call(self, request, block_number, opts.unwrap_or_default()).await?) } async fn debug_trace_call_many( @@ -826,7 +790,7 @@ where opts: Option, ) -> RpcResult>> { let _permit = self.acquire_trace_permit().await; - Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into) + Ok(Self::debug_trace_call_many(self, bundles, state_context, opts).await?) 
} async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 9cabc1f6f5fd..d28013822ee1 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -10,7 +10,6 @@ use reth_primitives::{ PooledTransactionsElement, U256, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; use reth_rpc_types::mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ @@ -24,7 +23,9 @@ use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, EthCallBundleApiServer, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; +use reth_rpc_eth_types::{ + utils::recover_raw_transaction, EthApiError, EthResult, RpcInvalidTransactionError, +}; /// `Eth` bundle implementation. pub struct EthBundle { @@ -47,10 +48,7 @@ where /// another (or the same) block. This can be used to simulate future blocks with the current /// state, or it can be used to simulate a past block. The sender is responsible for signing the /// transactions and using the correct nonce and ensuring validity - pub async fn call_bundle( - &self, - bundle: EthCallBundle, - ) -> Result { + pub async fn call_bundle(&self, bundle: EthCallBundle) -> EthResult { let EthCallBundle { txs, block_number, @@ -63,14 +61,12 @@ where if txs.is_empty() { return Err(EthApiError::InvalidParams( EthBundleError::EmptyBundleTransactions.to_string(), - ) - .into()) + )) } if block_number == 0 { return Err(EthApiError::InvalidParams( EthBundleError::BundleMissingBlockNumber.to_string(), - ) - .into()) + )) } let transactions = txs @@ -97,8 +93,7 @@ where { return Err(EthApiError::InvalidParams( EthBundleError::Eip4844BlobGasExceeded.to_string(), - ) - .into()) + )) } let block_id: reth_rpc_types::BlockId = state_block_number.into(); @@ -126,8 +121,7 @@ where let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block let parent = LoadPendingBlock::provider(&self.inner.eth_api) - .header_by_number(parent_block) - .map_err(Eth::Error::from_eth_err)? + .header_by_number(parent_block)? .ok_or_else(|| EthApiError::UnknownBlockNumber)?; if let Some(base_fee) = parent.next_block_base_fee( LoadPendingBlock::provider(&self.inner.eth_api) @@ -152,8 +146,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); let db = CacheDB::new(StateProviderDatabase::new(state)); - let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase) - .map_err(Eth::Error::from_eth_err)? + let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase)? .map(|acc| acc.balance) .unwrap_or_default(); let mut coinbase_balance_before_tx = initial_coinbase; @@ -171,9 +164,8 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. 
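// Editorial sketch, not part of the patch: the `call_bundle` pre-checks shown above,
// modeled with a plain string error in place of `EthApiError::InvalidParams(..)`.
// The variant names in the comments refer to `EthBundleError` in the hunk.
fn validate_bundle(tx_count: usize, block_number: u64) -> Result<(), String> {
    if tx_count == 0 {
        // EthBundleError::EmptyBundleTransactions
        return Err("bundle must contain at least one transaction".to_string());
    }
    if block_number == 0 {
        // EthBundleError::BundleMissingBlockNumber
        return Err("bundle must target a non-zero block number".to_string());
    }
    Ok(())
}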
if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(EnvKzgSettings::Default.get()).map_err(|e| { - Eth::Error::from_eth_err(EthApiError::InvalidParams(e.to_string())) - })?; + tx.validate(EnvKzgSettings::Default.get()) + .map_err(|e| EthApiError::InvalidParams(e.to_string()))?; } let tx = tx.into_transaction(); @@ -181,11 +173,9 @@ where hash_bytes.extend_from_slice(tx.hash().as_slice()); let gas_price = tx .effective_tip_per_gas(basefee) - .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) - .map_err(Eth::Error::from_eth_err)?; + .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)?; Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); - let ResultAndState { result, state } = - evm.transact().map_err(Eth::Error::from_evm_err)?; + let ResultAndState { result, state } = evm.transact()?; let gas_used = result.gas_used(); total_gas_used += gas_used; @@ -264,7 +254,7 @@ where Eth: EthTransactions + LoadPendingBlock + Call + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Self::call_bundle(self, request).await.map_err(Into::into) + Ok(Self::call_bundle(self, request).await?) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 590d73f7f7e4..09065dfc1a12 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,11 +10,10 @@ use reth_primitives::{BlockNumberOrTag, U256}; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{transaction::UpdateRawTxForwarder, EthSigner, SpawnBlocking}, - EthApiTypes, RawTransactionForwarder, + RawTransactionForwarder, }; use reth_rpc_eth_types::{ - EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, - PendingBlock, + EthApiBuilderCtx, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, }; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, @@ -115,13 +114,6 @@ where } } -impl EthApiTypes for EthApi -where - Self: Send + Sync, -{ - type Error = EthApiError; -} - impl std::fmt::Debug for EthApi { @@ -139,7 +131,7 @@ impl Clone for EthApi SpawnBlocking for EthApi where - Self: EthApiTypes + Clone + Send + Sync + 'static, + Self: Clone + Send + Sync + 'static, { #[inline] fn io_task_spawner(&self) -> impl reth_tasks::TaskSpawner { diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index eb4483705f73..db1fee781fd3 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,13 +1,13 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
-use reth_rpc_eth_api::{helpers::LoadReceipt, EthApiTypes}; +use reth_rpc_eth_api::helpers::LoadReceipt; use reth_rpc_eth_types::EthStateCache; use crate::EthApi; impl LoadReceipt for EthApi where - Self: EthApiTypes, + Self: Send + Sync, { #[inline] fn cache(&self) -> &EthStateCache { diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index f76be9d88758..b291eb8a2016 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -3,10 +3,7 @@ use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::{ - helpers::{EthState, LoadState, SpawnBlocking}, - EthApiTypes, -}; +use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; use reth_rpc_eth_types::EthStateCache; use crate::EthApi; @@ -22,7 +19,6 @@ where impl LoadState for EthApi where - Self: EthApiTypes, Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, { diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 635281c08e76..872af0cee451 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -1,7 +1,5 @@ //! Contains RPC handler implementations specific to transactions -use std::sync::Arc; - use reth_provider::{BlockReaderIdExt, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -25,7 +23,7 @@ where } #[inline] - fn raw_tx_forwarder(&self) -> Option> { + fn raw_tx_forwarder(&self) -> Option> { self.inner.raw_tx_forwarder() } @@ -45,7 +43,7 @@ where type Pool = Pool; #[inline] - fn provider(&self) -> impl TransactionsProvider { + fn provider(&self) -> impl reth_provider::TransactionsProvider { self.inner.provider() } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bf8279719fec..320c6856de06 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -85,8 +85,7 @@ where TransferInspector::new(false), |_tx_info, inspector, _, _| Ok(inspector.into_transfers()), ) - .await - .map_err(Into::into)? + .await? .map(|transfer_operations| { transfer_operations .iter() @@ -116,8 +115,7 @@ where _ => Ok(None), }) .await - .map(Option::flatten) - .map_err(Into::into)?; + .map(Option::flatten)?; Ok(maybe_revert) } @@ -130,8 +128,7 @@ where TracingInspectorConfig::default_parity(), move |_tx_info, inspector, _, _| Ok(inspector.into_traces().into_nodes()), ) - .await - .map_err(Into::into)? + .await? .map(|traces| { traces .into_iter() @@ -328,8 +325,7 @@ where Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info)) }, ) - .await - .map_err(Into::into)? + .await? 
.map(|traces| { traces .into_iter() diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 461a1cad5fcd..fd0174a4e174 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,7 +1,7 @@ use std::{collections::HashSet, sync::Arc}; use async_trait::async_trait; -use jsonrpsee::core::RpcResult; +use jsonrpsee::core::RpcResult as Result; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, @@ -11,11 +11,11 @@ use reth_primitives::{BlockId, Bytes, Header, B256, U256}; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, TraceExt}, - FromEthApiError, +use reth_rpc_eth_api::helpers::{Call, TraceExt}; +use reth_rpc_eth_types::{ + error::{EthApiError, EthResult}, + utils::recover_raw_transaction, }; -use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_rpc_types::{ state::{EvmOverrides, StateOverride}, trace::{ @@ -79,10 +79,7 @@ where Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. - pub async fn trace_call( - &self, - trace_request: TraceCallRequest, - ) -> Result { + pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); let overrides = @@ -96,10 +93,11 @@ where let db = db.0; let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; - let trace_res = inspector - .into_parity_builder() - .into_trace_results_with_state(&res, &trace_request.trace_types, &db) - .map_err(Eth::Error::from_eth_err)?; + let trace_res = inspector.into_parity_builder().into_trace_results_with_state( + &res, + &trace_request.trace_types, + &db, + )?; Ok(trace_res) }) .await @@ -111,7 +109,7 @@ where tx: Bytes, trace_types: HashSet, block_id: Option, - ) -> Result { + ) -> EthResult { let tx = recover_raw_transaction(tx)?; let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; @@ -127,10 +125,11 @@ where self.inner .eth_api .spawn_trace_at_with_state(env, config, at, move |inspector, res, db| { - inspector - .into_parity_builder() - .into_trace_results_with_state(&res, &trace_types, &db) - .map_err(Eth::Error::from_eth_err) + Ok(inspector.into_parity_builder().into_trace_results_with_state( + &res, + &trace_types, + &db, + )?) 
}) .await } @@ -143,7 +142,7 @@ where &self, calls: Vec<(TransactionRequest, HashSet)>, block_id: Option, - ) -> Result, Eth::Error> { + ) -> EthResult> { let at = block_id.unwrap_or(BlockId::pending()); let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; @@ -170,10 +169,11 @@ where let mut inspector = TracingInspector::new(config); let (res, _) = this.eth_api().inspect(&mut db, env, &mut inspector)?; - let trace_res = inspector - .into_parity_builder() - .into_trace_results_with_state(&res, &trace_types, &db) - .map_err(Eth::Error::from_eth_err)?; + let trace_res = inspector.into_parity_builder().into_trace_results_with_state( + &res, + &trace_types, + &db, + )?; results.push(trace_res); @@ -196,15 +196,16 @@ where &self, hash: B256, trace_types: HashSet, - ) -> Result { + ) -> EthResult { let config = TracingInspectorConfig::from_parity_config(&trace_types); self.inner .eth_api .spawn_trace_transaction_in_block(hash, config, move |_, inspector, res, db| { - let trace_res = inspector - .into_parity_builder() - .into_trace_results_with_state(&res, &trace_types, &db) - .map_err(Eth::Error::from_eth_err)?; + let trace_res = inspector.into_parity_builder().into_trace_results_with_state( + &res, + &trace_types, + &db, + )?; Ok(trace_res) }) .await @@ -222,7 +223,7 @@ where &self, hash: B256, indices: Vec, - ) -> Result, Eth::Error> { + ) -> EthResult> { if indices.len() != 1 { // The OG impl failed if it gets more than a single index return Ok(None) @@ -237,7 +238,7 @@ where &self, hash: B256, index: usize, - ) -> Result, Eth::Error> { + ) -> EthResult> { Ok(self.trace_transaction(hash).await?.and_then(|traces| traces.into_iter().nth(index))) } @@ -248,21 +249,20 @@ where pub async fn trace_filter( &self, filter: TraceFilter, - ) -> Result, Eth::Error> { + ) -> EthResult> { let matcher = filter.matcher(); - let TraceFilter { from_block, to_block, after, count, .. } = filter; + let TraceFilter { from_block, to_block, .. } = filter; let start = from_block.unwrap_or(0); let end = if let Some(to_block) = to_block { to_block } else { - self.provider().best_block_number().map_err(Eth::Error::from_eth_err)? + self.provider().best_block_number()? 
}; if start > end { return Err(EthApiError::InvalidParams( "invalid parameters: fromBlock cannot be greater than toBlock".to_string(), - ) - .into()) + )) } // ensure that the range is not too large, since we need to fetch all blocks in the range @@ -270,12 +270,11 @@ where if distance > 100 { return Err(EthApiError::InvalidParams( "Block range too large; currently limited to 100 blocks".to_string(), - ) - .into()) + )) } // fetch all blocks in that range - let blocks = self.provider().block_range(start..=end).map_err(Eth::Error::from_eth_err)?; + let blocks = self.provider().block_range(start..=end)?; // find relevant blocks to trace let mut target_blocks = Vec::new(); @@ -283,10 +282,7 @@ where let mut transaction_indices = HashSet::new(); let mut highest_matching_index = 0; for (tx_idx, tx) in block.body.iter().enumerate() { - let from = tx - .recover_signer_unchecked() - .ok_or(BlockError::InvalidSignature) - .map_err(Eth::Error::from_eth_err)?; + let from = tx.recover_signer_unchecked().ok_or(BlockError::InvalidSignature)?; let to = tx.to(); if matcher.matches(from, to) { let idx = tx_idx as u64; @@ -306,15 +302,17 @@ where num.into(), Some(highest_idx), TracingInspectorConfig::default_parity(), - move |tx_info, inspector, _, _, _| { + move |tx_info, inspector, res, _, _| { if let Some(idx) = tx_info.index { if !indices.contains(&idx) { // only record traces for relevant transactions return Ok(None) } } - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + let traces = inspector + .with_transaction_gas_used(res.gas_used()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(Some(traces)) }, ); @@ -343,20 +341,6 @@ where } } - // apply after and count to traces if specified, this allows for a pagination style. 
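// Editorial sketch, not part of the patch: the `trace_filter` block-range validation
// retained above (the `after`/`count` pagination handling is removed by this hunk).
// The error strings mirror the hunk; the error type stands in for
// `EthApiError::InvalidParams`.
fn validate_trace_filter_range(start: u64, end: u64) -> Result<(), String> {
    if start > end {
        return Err("invalid parameters: fromBlock cannot be greater than toBlock".to_string());
    }
    // every block in the range must be fetched, so the span is capped
    if end - start > 100 {
        return Err("Block range too large; currently limited to 100 blocks".to_string());
    }
    Ok(())
}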
- // only consider traces after - if let Some(after) = after.map(|a| a as usize).filter(|a| *a < all_traces.len()) { - all_traces = all_traces.split_off(after); - } - - // at most, return count of traces - if let Some(count) = count { - let count = count as usize; - if count < all_traces.len() { - all_traces.truncate(count); - } - }; - Ok(all_traces) } @@ -364,15 +348,17 @@ where pub async fn trace_transaction( &self, hash: B256, - ) -> Result>, Eth::Error> { + ) -> EthResult>> { self.inner .eth_api .spawn_trace_transaction_in_block( hash, TracingInspectorConfig::default_parity(), - move |tx_info, inspector, _, _| { - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + move |tx_info, inspector, res, _| { + let traces = inspector + .with_transaction_gas_used(res.result.gas_used()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ) @@ -383,13 +369,15 @@ where pub async fn trace_block( &self, block_id: BlockId, - ) -> Result>, Eth::Error> { + ) -> EthResult>> { let traces = self.inner.eth_api.trace_block_with( block_id, TracingInspectorConfig::default_parity(), - |tx_info, inspector, _, _, _| { - let traces = - inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + |tx_info, inspector, res, _, _| { + let traces = inspector + .with_transaction_gas_used(res.gas_used()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ); @@ -418,7 +406,7 @@ where &self, block_id: BlockId, trace_types: HashSet, - ) -> Result>, Eth::Error> { + ) -> EthResult>> { self.inner .eth_api .trace_block_with( @@ -431,8 +419,7 @@ where // If statediffs were requested, populate them with the account balance and // nonce from pre-state if let Some(ref mut state_diff) = full_trace.state_diff { - populate_state_diff(state_diff, db, state.iter()) - .map_err(Eth::Error::from_eth_err)?; + populate_state_diff(state_diff, db, state.iter())?; } let trace = TraceResultsWithTransactionHash { @@ -450,7 +437,7 @@ where pub async fn trace_transaction_opcode_gas( &self, tx_hash: B256, - ) -> Result, Eth::Error> { + ) -> EthResult> { self.inner .eth_api .spawn_trace_transaction_in_block_with_inspector( @@ -474,7 +461,7 @@ where pub async fn trace_block_opcode_gas( &self, block_id: BlockId, - ) -> Result, Eth::Error> { + ) -> EthResult> { let res = self .inner .eth_api @@ -507,7 +494,7 @@ where /// - if Paris hardfork is activated, no block rewards are given /// - if Paris hardfork is not activated, calculate block rewards with block number only /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd - fn calculate_base_block_reward(&self, header: &Header) -> Result, Eth::Error> { + fn calculate_base_block_reward(&self, header: &Header) -> EthResult> { let chain_spec = self.provider().chain_spec(); let is_paris_activated = chain_spec.is_paris_active_at_block(header.number); @@ -517,11 +504,7 @@ where None => { // if Paris hardfork is unknown, we need to fetch the total difficulty at the // block's height and check if it is pre-merge to calculate the base block reward - if let Some(header_td) = self - .provider() - .header_td_by_number(header.number) - .map_err(Eth::Error::from_eth_err)? - { + if let Some(header_td) = self.provider().header_td_by_number(header.number)? 
{ base_block_reward( chain_spec.as_ref(), header.number, @@ -587,11 +570,11 @@ where block_id: Option, state_overrides: Option, block_overrides: Option>, - ) -> RpcResult { + ) -> Result { let _permit = self.acquire_trace_permit().await; let request = TraceCallRequest { call, trace_types, block_id, state_overrides, block_overrides }; - Ok(Self::trace_call(self, request).await.map_err(Into::into)?) + Ok(Self::trace_call(self, request).await?) } /// Handler for `trace_callMany` @@ -599,9 +582,9 @@ where &self, calls: Vec<(TransactionRequest, HashSet)>, block_id: Option, - ) -> RpcResult> { + ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_call_many(self, calls, block_id).await.map_err(Into::into)?) + Ok(Self::trace_call_many(self, calls, block_id).await?) } /// Handler for `trace_rawTransaction` @@ -610,11 +593,9 @@ where data: Bytes, trace_types: HashSet, block_id: Option, - ) -> RpcResult { + ) -> Result { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_raw_transaction(self, data, trace_types, block_id) - .await - .map_err(Into::into)?) + Ok(Self::trace_raw_transaction(self, data, trace_types, block_id).await?) } /// Handler for `trace_replayBlockTransactions` @@ -622,11 +603,9 @@ where &self, block_id: BlockId, trace_types: HashSet, - ) -> RpcResult>> { + ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::replay_block_transactions(self, block_id, trace_types) - .await - .map_err(Into::into)?) + Ok(Self::replay_block_transactions(self, block_id, trace_types).await?) } /// Handler for `trace_replayTransaction` @@ -634,18 +613,18 @@ where &self, transaction: B256, trace_types: HashSet, - ) -> RpcResult { + ) -> Result { let _permit = self.acquire_trace_permit().await; - Ok(Self::replay_transaction(self, transaction, trace_types).await.map_err(Into::into)?) + Ok(Self::replay_transaction(self, transaction, trace_types).await?) } /// Handler for `trace_block` async fn trace_block( &self, block_id: BlockId, - ) -> RpcResult>> { + ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_block(self, block_id).await.map_err(Into::into)?) + Ok(Self::trace_block(self, block_id).await?) } /// Handler for `trace_filter` @@ -654,8 +633,8 @@ where /// /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. - async fn trace_filter(&self, filter: TraceFilter) -> RpcResult> { - Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?) + async fn trace_filter(&self, filter: TraceFilter) -> Result> { + Ok(Self::trace_filter(self, filter).await?) } /// Returns transaction trace at given index. @@ -664,35 +643,33 @@ where &self, hash: B256, indices: Vec, - ) -> RpcResult> { + ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect()) - .await - .map_err(Into::into)?) + Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect()).await?) } /// Handler for `trace_transaction` async fn trace_transaction( &self, hash: B256, - ) -> RpcResult>> { + ) -> Result>> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_transaction(self, hash).await.map_err(Into::into)?) + Ok(Self::trace_transaction(self, hash).await?) 
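// Editorial sketch, not part of the patch: the three-way decision documented above for
// `calculate_base_block_reward`. The 2 ETH constant assumes a post-Constantinople
// chain; the real code delegates to `reth_consensus_common::calc::base_block_reward*`.
const BASE_REWARD_WEI: u128 = 2_000_000_000_000_000_000; // 2 ETH

fn base_block_reward_sketch(
    paris_active: Option<bool>, // None: Paris activation unknown at this height
    block_td: u128,
    terminal_total_difficulty: u128,
) -> Option<u128> {
    match paris_active {
        // post-merge blocks receive no base reward
        Some(true) => None,
        Some(false) => Some(BASE_REWARD_WEI),
        // unknown: fall back to comparing the block's total difficulty with the TTD
        None => (block_td < terminal_total_difficulty).then_some(BASE_REWARD_WEI),
    }
}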
} /// Handler for `trace_transactionOpcodeGas` async fn trace_transaction_opcode_gas( &self, tx_hash: B256, - ) -> RpcResult> { + ) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await.map_err(Into::into)?) + Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await?) } /// Handler for `trace_blockOpcodeGas` - async fn trace_block_opcode_gas(&self, block_id: BlockId) -> RpcResult> { + async fn trace_block_opcode_gas(&self, block_id: BlockId) -> Result> { let _permit = self.acquire_trace_permit().await; - Ok(Self::trace_block_opcode_gas(self, block_id).await.map_err(Into::into)?) + Ok(Self::trace_block_opcode_gas(self, block_id).await?) } } diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 161857552451..8fc64c2ab708 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -4,7 +4,7 @@ use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. /// /// See [`Pipeline::run_loop`](crate::Pipeline::run_loop) for more information. -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Eq, PartialEq)] pub enum ControlFlow { /// An unwind was requested and must be performed before continuing. Unwind { diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index baa9b0f3fcda..99de4a06b278 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -190,22 +190,20 @@ where /// # Panics /// /// Panics if the stage is not in this set. - #[track_caller] pub fn disable(mut self, stage_id: StageId) -> Self { - let entry = self - .stages - .get_mut(&stage_id) - .unwrap_or_else(|| panic!("Cannot disable a stage that is not in the set: {stage_id}")); + let entry = + self.stages.get_mut(&stage_id).expect("Cannot disable a stage that is not in the set."); entry.enabled = false; self } /// Disables all given stages. See [`disable`](Self::disable). - /// - /// If any of the stages is not in this set, it is ignored. pub fn disable_all(mut self, stages: &[StageId]) -> Self { for stage_id in stages { - let Some(entry) = self.stages.get_mut(stage_id) else { continue }; + let entry = self + .stages + .get_mut(stage_id) + .expect("Cannot disable a stage that is not in the set."); entry.enabled = false; } self @@ -214,7 +212,6 @@ where /// Disables the given stage if the given closure returns true. /// /// See [`Self::disable`] - #[track_caller] pub fn disable_if(self, stage_id: StageId, f: F) -> Self where F: FnOnce() -> bool, @@ -228,7 +225,6 @@ where /// Disables all given stages if the given closure returns true. 
/// /// See [`Self::disable`] - #[track_caller] pub fn disable_all_if(self, stages: &[StageId], f: F) -> Self where F: FnOnce() -> bool, diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index fce1df25cf20..757f4dcaf26a 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -33,7 +33,6 @@ reth-storage-errors.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index e5ec504ecd4b..0f2dd2acf692 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -6,7 +6,6 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, Address, SealedBlock, B256, U256}; -use reth_provider::TrieWriter; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -27,7 +26,6 @@ mod constants; mod account_hashing; pub use account_hashing::*; use reth_stages_api::{ExecInput, Stage, UnwindInput}; -use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); @@ -140,10 +138,12 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let offset = transitions.len() as u64; - let provider_rw = db.factory.provider_rw().unwrap(); db.insert_changesets(transitions, None).unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); - provider_rw.commit().unwrap(); + db.commit(|tx| { + updates.write_to_database(tx)?; + Ok(()) + }) + .unwrap(); let (transitions, final_state) = random_changeset_range( &mut rng, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index cd42dd12601d..6776597a6065 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -206,7 +206,7 @@ impl Stage for BodyStage { // Write transactions for transaction in block.body { let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, &transaction.into())?; + .append_transaction(next_tx_num, transaction.into())?; if appended_tx_number != next_tx_num { // This scenario indicates a critical error in the logic of adding new @@ -740,7 +740,7 @@ mod tests { body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); static_file_producer - .append_transaction(tx_num, &transaction.into()) + .append_transaction(tx_num, transaction.into()) .map(drop) })?; diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 43eaf45d5745..f526a030a198 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -10,7 +10,6 @@ use reth_primitives::{BlockNumber, Header, StaticFileSegment}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - writer::StorageWriter, BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, StatsReader, TransactionVariant, }; @@ -359,11 +358,8 @@ where } let time = Instant::now(); - // write output - let mut writer = StorageWriter::new(Some(provider), static_file_producer); - writer.write_to_storage(state, 
OriginalValuesKnown::Yes)?; - + state.write_to_storage(provider, static_file_producer, OriginalValuesKnown::Yes)?; let db_write_duration = time.elapsed(); debug!( target: "sync::stages::execution", diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index dbefa4b0e483..662f1d1a7728 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -535,7 +535,7 @@ mod tests { storage_cursor.delete_current()?; } - if !entry.value.is_zero() { + if entry.value != U256::ZERO { storage_cursor.upsert(bn_address.address(), entry)?; } } diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 46130d76013d..6b326034f7d2 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -138,7 +138,7 @@ where })?; // Append to Headers segment - writer.append_header(&header, td, &header_hash)?; + writer.append_header(header, td, header_hash)?; } info!(target: "sync::stages::headers", total = total_headers, "Writing headers hash index"); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index f85ef565f4bf..9bbd68ed3515 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -7,15 +7,14 @@ use reth_db_api::{ }; use reth_primitives::{BlockNumber, GotExpected, SealedHeader, B256}; use reth_provider::{ - writer::StorageWriter, DatabaseProviderRW, HeaderProvider, ProviderError, - StageCheckpointReader, StageCheckpointWriter, StatsReader, + DatabaseProviderRW, HeaderProvider, ProviderError, StageCheckpointReader, + StageCheckpointWriter, StatsReader, }; use reth_stages_api::{ BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress, StoredSubNode}; -use reth_trie_db::DatabaseStateRoot; use std::fmt::Debug; use tracing::*; @@ -218,8 +217,7 @@ impl Stage for MerkleStage { })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - let writer = StorageWriter::new(Some(provider), None); - writer.write_trie_updates(&updates)?; + updates.write_to_database(tx)?; let checkpoint = MerkleCheckpoint::new( to_block, @@ -239,8 +237,7 @@ impl Stage for MerkleStage { }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - let writer = StorageWriter::new(Some(provider), None); - writer.write_trie_updates(&updates)?; + updates.write_to_database(tx)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -255,8 +252,7 @@ impl Stage for MerkleStage { error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - let writer = StorageWriter::new(Some(provider), None); - writer.write_trie_updates(&updates)?; + updates.write_to_database(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? + provider.count_entries::()?) @@ -329,8 +325,7 @@ impl Stage for MerkleStage { validate_state_root(block_root, target.seal_slow(), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
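+            // The trie updates collected during the unwind are flushed through the
+            // provider's open transaction, mirroring how the forward pass above
+            // persists each `StateRootProgress` batch via `write_to_database`.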
- let writer = StorageWriter::new(Some(provider), None); - writer.write_trie_updates(&updates)?; + updates.write_to_database(provider.tx_ref())?; // TODO(alexey): update entities checkpoint } else { @@ -567,7 +562,7 @@ mod tests { } let storage = storage_entries .into_iter() - .filter(|v| !v.value.is_zero()) + .filter(|v| v.value != U256::ZERO) .map(|v| (v.key, v.value)) .collect::>(); accounts.insert(key, (account, storage)); @@ -585,7 +580,7 @@ mod tests { let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); writer.commit().unwrap(); - writer.append_header(&last_header, U256::ZERO, &hash).unwrap(); + writer.append_header(last_header, U256::ZERO, hash).unwrap(); writer.commit().unwrap(); Ok(blocks) @@ -641,7 +636,7 @@ mod tests { storage_cursor.delete_current().unwrap(); } - if !value.is_zero() { + if value != U256::ZERO { let storage_entry = StorageEntry { key: hashed_slot, value }; storage_cursor.upsert(hashed_address, storage_entry).unwrap(); } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 15e88a284011..3b623c358e55 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -186,7 +186,7 @@ where Ok(()) } -/// Shard and insert the indices list according to [`LoadMode`] and its length. +/// Shard and insert the indice list according to [`LoadMode`] and its length. pub(crate) fn load_indices( cursor: &mut C, partial_key: P, diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 0ee61355e64c..8f72b5aab225 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -156,11 +156,11 @@ impl TestStageDB { for block_number in 0..header.number { let mut prev = header.clone().unseal(); prev.number = block_number; - writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; + writer.append_header(prev, U256::ZERO, B256::ZERO)?; } } - writer.append_header(header.header(), td, &header.hash())?; + writer.append_header(header.header().clone(), td, header.hash())?; } else { tx.put::(header.number, header.hash())?; tx.put::(header.number, td.into())?; @@ -266,7 +266,7 @@ impl TestStageDB { let res = block.body.iter().try_for_each(|body_tx| { if let Some(txs_writer) = &mut txs_writer { - txs_writer.append_transaction(next_tx_num, &body_tx.clone().into())?; + txs_writer.append_transaction(next_tx_num, body_tx.clone().into())?; } else { tx.put::(next_tx_num, body_tx.clone().into())? } @@ -386,7 +386,7 @@ impl TestStageDB { tx.put::(hashed_address, account)?; // Insert into storage tables. 
- storage.into_iter().filter(|e| !e.value.is_zero()).try_for_each(|entry| { + storage.into_iter().filter(|e| e.value != U256::ZERO).try_for_each(|entry| { let hashed_entry = StorageEntry { key: keccak256(entry.key), ..entry }; let mut cursor = tx.cursor_dup_write::()?; diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 54b14b335cb9..76bb9f4292c2 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -23,6 +23,7 @@ serde.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 3212c0cd8894..5824d1d1ac7d 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -50,7 +50,7 @@ impl Segment for Headers { debug_assert_eq!(header_td_block, canonical_header_block); let _static_file_block = - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; + static_file_writer.append_header(header, header_td.0, canonical_header)?; debug_assert_eq!(_static_file_block, header_block); } diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 19b6aeb579a8..4361f8ca661e 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -47,7 +47,7 @@ impl Segment for Transactions { for entry in transactions_walker { let (tx_number, transaction) = entry?; - static_file_writer.append_transaction(tx_number, &transaction)?; + static_file_writer.append_transaction(tx_number, transaction)?; } } diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index dea9972816a8..4789ff6e1971 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -38,6 +38,7 @@ serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true +proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true [features] diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index 2dd7b8713ca4..7286e03f2da4 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-storage-errors.workspace = true @@ -58,6 +58,7 @@ iai-callgrind.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true paste.workspace = true diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 5c453df1cf67..d80236defd32 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -16,7 +16,6 @@ reth-db-api.workspace = true reth-provider.workspace = true reth-config.workspace = true reth-trie.workspace = true -reth-trie-db.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true diff --git 
a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 63a1760ea5c0..bbaf61cf3e64 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -11,17 +11,15 @@ use reth_primitives::{ Account, Address, Bytecode, Receipts, StaticFileSegment, StorageEntry, B256, U256, }; use reth_provider::{ + bundle_state::{BundleStateInit, RevertsInit}, errors::provider::ProviderResult, providers::{StaticFileProvider, StaticFileWriter}, - writer::StorageWriter, - BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DatabaseProviderRW, - ExecutionOutcome, HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, - ProviderFactory, RevertsInit, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + BlockHashReader, BlockNumReader, ChainSpecProvider, DatabaseProviderRW, ExecutionOutcome, + HashingWriter, HistoryWriter, OriginalValuesKnown, ProviderError, ProviderFactory, + StageCheckpointWriter, StateWriter, StaticFileProviderFactory, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; -use reth_trie_db::DatabaseStateRoot; use serde::{Deserialize, Serialize}; use std::{ collections::{BTreeMap, HashMap}, @@ -66,7 +64,7 @@ pub enum InitDatabaseError { #[error( "state root mismatch, state dump: {expected_state_root}, computed: {computed_state_root}" )] - StateRootMismatch { + SateRootMismatch { /// Expected state root. expected_state_root: B256, /// Actual state root. @@ -203,8 +201,7 @@ pub fn insert_state<'a, 'b, DB: Database>( Vec::new(), ); - let mut storage_writer = StorageWriter::new(Some(provider), None); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + execution_outcome.write_to_storage(provider, None, OriginalValuesKnown::Yes)?; trace!(target: "reth::cli", "Inserted state"); @@ -285,7 +282,7 @@ pub fn insert_genesis_header( Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { let (difficulty, hash) = (header.difficulty, block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(&header, difficulty, &hash)?; + writer.append_header(header, difficulty, hash)?; } Ok(Some(_)) => {} Err(e) => return Err(e), @@ -336,7 +333,7 @@ pub fn init_from_state_dump( "Computed state root does not match state root in state dump" ); - Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })? + Err(InitDatabaseError::SateRootMismatch { expected_state_root, computed_state_root })? } else { info!(target: "reth::cli", ?computed_state_root, @@ -464,7 +461,7 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: .root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = updates.write_to_database(tx)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", @@ -484,7 +481,7 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: } } StateRootProgress::Complete(root, _, updates) => { - let updated_len = provider.write_trie_updates(&updates)?; + let updated_len = updates.write_to_database(tx)?; total_flushed_updates += updated_len; trace!(target: "reth::cli", diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 619942f38ae1..117ec5ccc7b6 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-db-api.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true @@ -35,21 +35,21 @@ eyre = { workspace = true, optional = true } serde = { workspace = true, default-features = false } # metrics -reth-metrics = { workspace = true, optional = true } -metrics = { workspace = true, optional = true } +reth-metrics.workspace = true +metrics.workspace = true # misc bytes.workspace = true -page_size = { version = "0.6.0", optional = true } +page_size = "0.6.0" thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true paste.workspace = true -rustc-hash = { workspace = true, optional = true } +rustc-hash.workspace = true sysinfo = { version = "0.30", default-features = false } # arbitrary utils -strum = { workspace = true, features = ["derive"], optional = true } +strum = { workspace = true, features = ["derive"] } [dev-dependencies] # reth libs with arbitrary @@ -77,15 +77,7 @@ assert_matches.workspace = true [features] default = ["mdbx"] -mdbx = [ - "dep:reth-libmdbx", - "dep:eyre", - "dep:page_size", - "reth-metrics", - "dep:metrics", - "dep:strum", - "dep:rustc-hash", -] +mdbx = ["dep:reth-libmdbx", "dep:eyre"] test-utils = ["dep:tempfile", "arbitrary"] bench = [] arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c16f2b73c4e9..e5414b574328 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -17,11 +17,9 @@ mod implementation; pub mod lockfile; -#[cfg(feature = "mdbx")] mod metrics; pub mod static_file; pub mod tables; -#[cfg(feature = "mdbx")] mod utils; pub mod version; @@ -30,7 +28,6 @@ pub mod mdbx; pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation}; pub use tables::*; -#[cfg(feature = "mdbx")] pub use utils::is_database_empty; #[cfg(feature = "mdbx")] diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index fb64fa86fcf8..c3c0d0b3f8ae 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -16,7 +16,6 @@ pub mod codecs; mod raw; pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; -#[cfg(feature = "mdbx")] pub(crate) mod utils; use reth_db_api::{ diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 5ef6f15771ef..d8e699f8df40 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -11,7 +11,6 @@ repository.workspace = true workspace = true [dependencies] -alloy-rlp.workspace = true reth-primitives.workspace = 
true reth-fs-util.workspace = true diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 0979156ca042..c3d47aa0bd1b 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -21,9 +21,6 @@ pub enum ProviderError { /// Database error. #[error(transparent)] Database(#[from] crate::db::DatabaseError), - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), /// Filesystem path error. #[error("{0}")] FsPathError(String), diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 48058084e7af..bca77d0c5486 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives.workspace = true reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -27,11 +27,9 @@ reth-db-api.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } reth-nippy-jar.workspace = true reth-codecs.workspace = true reth-evm.workspace = true -reth-chain-state.workspace = true # ethereum alloy-rpc-types-engine.workspace = true @@ -39,6 +37,7 @@ revm.workspace = true # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } +tokio-stream = { workspace = true, features = ["sync"] } # tracing tracing.workspace = true @@ -49,7 +48,9 @@ metrics.workspace = true # misc auto_impl.workspace = true +derive_more.workspace = true itertools.workspace = true +pin-project.workspace = true parking_lot.workspace = true dashmap = { workspace = true, features = ["inline"] } strum.workspace = true @@ -75,4 +76,4 @@ rand.workspace = true [features] optimism = ["reth-primitives/optimism", "reth-execution-types/optimism"] serde = ["reth-execution-types/serde"] -test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils", "reth-trie/test-utils", "reth-chain-state/test-utils", "reth-db/test-utils"] +test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] diff --git a/crates/storage/provider/src/bundle_state/execution_outcome.rs b/crates/storage/provider/src/bundle_state/execution_outcome.rs new file mode 100644 index 000000000000..ebb69201ea89 --- /dev/null +++ b/crates/storage/provider/src/bundle_state/execution_outcome.rs @@ -0,0 +1,1036 @@ +use crate::{ + providers::StaticFileProviderRWRefMut, writer::StorageWriter, DatabaseProviderRW, StateChanges, + StateReverts, StateWriter, +}; +use reth_db::Database; +pub use reth_execution_types::*; +use reth_storage_errors::provider::ProviderResult; +pub use revm::db::states::OriginalValuesKnown; + +impl StateWriter for ExecutionOutcome { + fn write_to_storage( + self, + provider_rw: &DatabaseProviderRW, + static_file_producer: Option>, + is_value_known: OriginalValuesKnown, + ) -> ProviderResult<()> + where + DB: Database, + { + let (plain_state, reverts) = self.bundle.into_plain_state_and_reverts(is_value_known); + + StateReverts(reverts).write_to_db(provider_rw, self.first_block)?; + + StorageWriter::new(Some(provider_rw), static_file_producer) + .append_receipts_from_blocks(self.first_block, self.receipts.into_iter())?; + + 
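+        // The reverts above must be written before the plain state below: wiped
+        // storage reverts read the pre-existing slots out of the plain state
+        // tables (see `StateReverts::write_to_db`), so ordering matters here.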
StateChanges(plain_state).write_to_db(provider_rw)?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{test_utils::create_test_provider_factory, AccountReader}; + use reth_db::{tables, test_utils::create_test_rw_db}; + use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRO}, + database::Database, + models::{AccountBeforeTx, BlockNumberAddress}, + transaction::{DbTx, DbTxMut}, + }; + use reth_primitives::{ + keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, + }; + use reth_trie::{test_utils::state_root, StateRoot}; + use revm::{ + db::{ + states::{ + bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, + }, + BundleState, EmptyDB, + }, + primitives::{ + Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, EvmStorageSlot, + }, + DatabaseCommit, State, + }; + use std::collections::{BTreeMap, HashMap}; + + #[test] + fn write_to_db_account_info() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address_a = Address::ZERO; + let address_b = Address::repeat_byte(0xff); + + let account_a = RevmAccountInfo { balance: U256::from(1), nonce: 1, ..Default::default() }; + let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; + let account_b_changed = + RevmAccountInfo { balance: U256::from(3), nonce: 3, ..Default::default() }; + + let mut state = State::builder().with_bundle_update().build(); + state.insert_not_existing(address_a); + state.insert_account(address_b, account_b.clone()); + + // 0x00.. is created + state.commit(HashMap::from([( + address_a, + RevmAccount { + info: account_a.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + storage: HashMap::default(), + }, + )])); + + // 0xff.. is changed (balance + 1, nonce + 1) + state.commit(HashMap::from([( + address_b, + RevmAccount { + info: account_b_changed.clone(), + status: AccountStatus::Touched, + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let mut revm_bundle_state = state.take_bundle(); + + // Write plain state and reverts separately. 
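+        // `take_all_reverts` must come first: `into_plain_state` consumes the
+        // bundle, so the reverts have to be extracted before the rest of the
+        // bundle is flattened into a plain state changeset.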
+ let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); + let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + assert!(plain_state.storage.is_empty()); + assert!(plain_state.contracts.is_empty()); + StateChanges(plain_state) + .write_to_db(&provider) + .expect("Could not write plain state to DB"); + + assert_eq!(reverts.storage, [[]]); + StateReverts(reverts).write_to_db(&provider, 1).expect("Could not write reverts to DB"); + + let reth_account_a = account_a.into(); + let reth_account_b = account_b.into(); + let reth_account_b_changed = account_b_changed.clone().into(); + + // Check plain state + assert_eq!( + provider.basic_account(address_a).expect("Could not read account state"), + Some(reth_account_a), + "Account A state is wrong" + ); + assert_eq!( + provider.basic_account(address_b).expect("Could not read account state"), + Some(reth_account_b_changed), + "Account B state is wrong" + ); + + // Check change set + let mut changeset_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open changeset cursor"); + assert_eq!( + changeset_cursor.seek_exact(1).expect("Could not read account change set"), + Some((1, AccountBeforeTx { address: address_a, info: None })), + "Account A changeset is wrong" + ); + assert_eq!( + changeset_cursor.next_dup().expect("Changeset table is malformed"), + Some((1, AccountBeforeTx { address: address_b, info: Some(reth_account_b) })), + "Account B changeset is wrong" + ); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account(address_b, account_b_changed.clone()); + + // 0xff.. is destroyed + state.commit(HashMap::from([( + address_b, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_b_changed, + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + let mut revm_bundle_state = state.take_bundle(); + + // Write plain state and reverts separately. + let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); + let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + // Account B selfdestructed so flag for it should be present. 
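+        // The `wipe_storage: true` flag is what makes `StateChanges::write_to_db`
+        // clear the account's remaining plain storage via `delete_current_duplicates`.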
+ assert_eq!( + plain_state.storage, + [PlainStorageChangeset { address: address_b, wipe_storage: true, storage: vec![] }] + ); + assert!(plain_state.contracts.is_empty()); + StateChanges(plain_state) + .write_to_db(&provider) + .expect("Could not write plain state to DB"); + + assert_eq!( + reverts.storage, + [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]] + ); + StateReverts(reverts).write_to_db(&provider, 2).expect("Could not write reverts to DB"); + + // Check new plain state for account B + assert_eq!( + provider.basic_account(address_b).expect("Could not read account state"), + None, + "Account B should be deleted" + ); + + // Check change set + assert_eq!( + changeset_cursor.seek_exact(2).expect("Could not read account change set"), + Some((2, AccountBeforeTx { address: address_b, info: Some(reth_account_b_changed) })), + "Account B changeset is wrong after deletion" + ); + } + + #[test] + fn write_to_db_storage() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address_a = Address::ZERO; + let address_b = Address::repeat_byte(0xff); + + let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; + + let mut state = State::builder().with_bundle_update().build(); + state.insert_not_existing(address_a); + state.insert_account_with_storage( + address_b, + account_b.clone(), + HashMap::from([(U256::from(1), U256::from(1))]), + ); + + state.commit(HashMap::from([ + ( + address_a, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: RevmAccountInfo::default(), + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::from(0), + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + ), + ( + address_b, + RevmAccount { + status: AccountStatus::Touched, + info: account_b, + // 0x01 => 1 => 2 + storage: HashMap::from([( + U256::from(1), + EvmStorageSlot { + present_value: U256::from(2), + original_value: U256::from(1), + ..Default::default() + }, + )]), + }, + ), + ])); + + state.merge_transitions(BundleRetention::Reverts); + + ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()) + .write_to_storage(&provider, None, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + // Check plain storage state + let mut storage_cursor = provider + .tx_ref() + .cursor_dup_read::() + .expect("Could not open plain storage state cursor"); + + assert_eq!( + storage_cursor.seek_exact(address_a).unwrap(), + Some((address_a, StorageEntry { key: B256::ZERO, value: U256::from(1) })), + "Slot 0 for account A should be 1" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + Some(( + address_a, + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account A should be 2" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + None, + "Account A should only have 2 storage slots" + ); + + assert_eq!( + storage_cursor.seek_exact(address_b).unwrap(), + Some(( + address_b, + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account B should be 2" + ); + assert_eq!( + storage_cursor.next_dup().unwrap(), + None, + "Account B should only have 1 storage slot" + ); + + // Check change set + let mut changeset_cursor = provider + .tx_ref() + .cursor_dup_read::() + 
.expect("Could not open storage changeset cursor"); + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), + Some(( + BlockNumberAddress((1, address_a)), + StorageEntry { key: B256::ZERO, value: U256::from(0) } + )), + "Slot 0 for account A should have changed from 0" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + Some(( + BlockNumberAddress((1, address_a)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(0) } + )), + "Slot 1 for account A should have changed from 0" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account A should only be in the changeset 2 times" + ); + + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(), + Some(( + BlockNumberAddress((1, address_b)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(1) } + )), + "Slot 1 for account B should have changed from 1" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account B should only be in the changeset 1 time" + ); + + // Delete account A + let mut state = State::builder().with_bundle_update().build(); + state.insert_account(address_a, RevmAccountInfo::default()); + + state.commit(HashMap::from([( + address_a, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: RevmAccountInfo::default(), + storage: HashMap::default(), + }, + )])); + + state.merge_transitions(BundleRetention::Reverts); + ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()) + .write_to_storage(&provider, None, OriginalValuesKnown::Yes) + .expect("Could not write bundle state to DB"); + + assert_eq!( + storage_cursor.seek_exact(address_a).unwrap(), + None, + "Account A should have no storage slots after deletion" + ); + + assert_eq!( + changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(), + Some(( + BlockNumberAddress((2, address_a)), + StorageEntry { key: B256::ZERO, value: U256::from(1) } + )), + "Slot 0 for account A should have changed from 1 on deletion" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + Some(( + BlockNumberAddress((2, address_a)), + StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } + )), + "Slot 1 for account A should have changed from 2 on deletion" + ); + assert_eq!( + changeset_cursor.next_dup().unwrap(), + None, + "Account A should only be in the changeset 2 times on deletion" + ); + } + + #[test] + fn write_to_db_multiple_selfdestructs() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address1 = Address::random(); + let account_info = RevmAccountInfo { nonce: 1, ..Default::default() }; + + // Block #0: initial state. 
+ let mut init_state = State::builder().with_bundle_update().build(); + init_state.insert_not_existing(address1); + init_state.commit(HashMap::from([( + address1, + RevmAccount { + info: account_info.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + )])); + init_state.merge_transitions(BundleRetention::Reverts); + ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()) + .write_to_storage(&provider, None, OriginalValuesKnown::Yes) + .expect("Could not write init bundle state to DB"); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account_with_storage( + address1, + account_info.clone(), + HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + ); + + // Block #1: change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 1 => 2 + storage: HashMap::from([( + U256::ZERO, + EvmStorageSlot { + original_value: U256::from(1), + present_value: U256::from(2), + ..Default::default() + }, + )]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #2: destroy account. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #3: re-create account and change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #4: change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account_info.clone(), + // 0x00 => 0 => 2 + // 0x02 => 0 => 4 + // 0x06 => 0 => 6 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ( + U256::from(2), + EvmStorageSlot { present_value: U256::from(4), ..Default::default() }, + ), + ( + U256::from(6), + EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, + ), + ]), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #5: Destroy account again. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account_info.clone(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::Reverts); + + // Block #6: Create, change, destroy and re-create in the same block. 
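+        // All four commits below land in the same block: `merge_transitions` only
+        // runs once, so they collapse into a single per-block transition and, as
+        // asserted further down, block #6 leaves no storage changeset entries.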
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched | AccountStatus::Created,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched,
+                info: account_info.clone(),
+                // 0x00 => 0 => 2
+                storage: HashMap::from([(
+                    U256::ZERO,
+                    EvmStorageSlot { present_value: U256::from(2), ..Default::default() },
+                )]),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched | AccountStatus::Created,
+                info: account_info.clone(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.merge_transitions(BundleRetention::Reverts);
+
+        // Block #7: Change storage.
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched,
+                info: account_info,
+                // 0x00 => 0 => 9
+                storage: HashMap::from([(
+                    U256::ZERO,
+                    EvmStorageSlot { present_value: U256::from(9), ..Default::default() },
+                )]),
+            },
+        )]));
+        state.merge_transitions(BundleRetention::Reverts);
+
+        let bundle = state.take_bundle();
+
+        ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new())
+            .write_to_storage(&provider, None, OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        let mut storage_changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSets>()
+            .expect("Could not open storage changeset cursor");
+        let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap();
+
+        // Iterate through all storage changes:
+
+        // Block #<block number>
+        // <slot>: <value before change>
+        // ...
+ + // Block #0 + // 0x00: 0 + // 0x01: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((0, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((0, address1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::ZERO } + ))) + ); + + // Block #1 + // 0x00: 1 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((1, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) } + ))) + ); + + // Block #2 (destroyed) + // 0x00: 2 + // 0x01: 2 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((2, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((2, address1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) } + ))) + ); + + // Block #3 + // no storage changes + + // Block #4 + // 0x00: 0 + // 0x02: 0 + // 0x06: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::ZERO } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((4, address1)), + StorageEntry { key: B256::with_last_byte(6), value: U256::ZERO } + ))) + ); + + // Block #5 (destroyed) + // 0x00: 2 + // 0x02: 4 + // 0x06: 6 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(4) } + ))) + ); + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((5, address1)), + StorageEntry { key: B256::with_last_byte(6), value: U256::from(6) } + ))) + ); + + // Block #6 + // no storage changes (only inter block changes) + + // Block #7 + // 0x00: 0 + assert_eq!( + storage_changes.next(), + Some(Ok(( + BlockNumberAddress((7, address1)), + StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } + ))) + ); + assert_eq!(storage_changes.next(), None); + } + + #[test] + fn storage_change_after_selfdestruct_within_block() { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + + let address1 = Address::random(); + let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; + + // Block #0: initial state. 
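+        // After this block #0 seed, block #1 below destroys, re-creates and then
+        // writes to the account; the changeset must still capture the slot values
+        // from before the selfdestruct (1 and 2), which the final asserts verify.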
+ let mut init_state = State::builder().with_bundle_update().build(); + init_state.insert_not_existing(address1); + init_state.commit(HashMap::from([( + address1, + RevmAccount { + info: account1.clone(), + status: AccountStatus::Touched | AccountStatus::Created, + // 0x00 => 0 => 1 + // 0x01 => 0 => 2 + storage: HashMap::from([ + ( + U256::ZERO, + EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, + ), + ( + U256::from(1), + EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, + ), + ]), + }, + )])); + init_state.merge_transitions(BundleRetention::Reverts); + ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()) + .write_to_storage(&provider, None, OriginalValuesKnown::Yes) + .expect("Could not write init bundle state to DB"); + + let mut state = State::builder().with_bundle_update().build(); + state.insert_account_with_storage( + address1, + account1.clone(), + HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), + ); + + // Block #1: Destroy, re-create, change storage. + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::SelfDestructed, + info: account1.clone(), + storage: HashMap::default(), + }, + )])); + + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1.clone(), + storage: HashMap::default(), + }, + )])); + + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched, + info: account1, + // 0x01 => 0 => 5 + storage: HashMap::from([( + U256::from(1), + EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, + )]), + }, + )])); + + // Commit block #1 changes to the database. 
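+        // `merge_transitions` has to run before `take_bundle`, otherwise the
+        // pending block #1 transitions (and their reverts) would not be part of
+        // the bundle that gets written below.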
+        state.merge_transitions(BundleRetention::Reverts);
+        ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new())
+            .write_to_storage(&provider, None, OriginalValuesKnown::Yes)
+            .expect("Could not write bundle state to DB");
+
+        let mut storage_changeset_cursor = provider
+            .tx_ref()
+            .cursor_dup_read::<tables::StorageChangeSets>()
+            .expect("Could not open storage changeset cursor");
+        let range = BlockNumberAddress::range(1..=1);
+        let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap();
+
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) }
+            )))
+        );
+        assert_eq!(
+            storage_changes.next(),
+            Some(Ok((
+                BlockNumberAddress((1, address1)),
+                StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) }
+            )))
+        );
+        assert_eq!(storage_changes.next(), None);
+    }
+
+    #[test]
+    fn revert_to_indices() {
+        let base = ExecutionOutcome {
+            bundle: BundleState::default(),
+            receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(),
+            first_block: 10,
+            requests: Vec::new(),
+        };
+
+        let mut this = base.clone();
+        assert!(this.revert_to(10));
+        assert_eq!(this.receipts.len(), 1);
+
+        let mut this = base.clone();
+        assert!(!this.revert_to(9));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(15));
+        assert_eq!(this.receipts.len(), 6);
+
+        let mut this = base.clone();
+        assert!(this.revert_to(16));
+        assert_eq!(this.receipts.len(), 7);
+
+        let mut this = base;
+        assert!(!this.revert_to(17));
+        assert_eq!(this.receipts.len(), 7);
+    }
+
+    #[test]
+    fn bundle_state_state_root() {
+        type PreState = BTreeMap<Address, (Account, BTreeMap<B256, U256>)>;
+        let mut prestate: PreState = (0..10)
+            .map(|key| {
+                let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None };
+                let storage =
+                    (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect();
+                (Address::with_last_byte(key), (account, storage))
+            })
+            .collect();
+
+        let db = create_test_rw_db();
+
+        // insert initial state to the database
+        db.update(|tx| {
+            for (address, (account, storage)) in &prestate {
+                let hashed_address = keccak256(address);
+                tx.put::<tables::HashedAccounts>(hashed_address, *account).unwrap();
+                for (slot, value) in storage {
+                    tx.put::<tables::HashedStorages>(
+                        hashed_address,
+                        StorageEntry { key: keccak256(slot), value: *value },
+                    )
+                    .unwrap();
+                }
+            }
+
+            let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap();
+            updates.write_to_database(tx).unwrap();
+        })
+        .unwrap();
+
+        let tx = db.tx().unwrap();
+        let mut state = State::builder().with_bundle_update().build();
+
+        let assert_state_root = |state: &State<EmptyDB>, expected: &PreState, msg| {
+            assert_eq!(
+                ExecutionOutcome::new(
+                    state.bundle_state.clone(),
+                    Receipts::default(),
+                    0,
+                    Vec::new()
+                )
+                .hash_state_slow()
+                .state_root(&tx)
+                .unwrap(),
+                state_root(expected.clone().into_iter().map(|(address, (account, storage))| (
+                    address,
+                    (account, storage.into_iter())
+                ))),
+                "{msg}"
+            );
+        };
+
+        // database only state root is correct
+        assert_state_root(&state, &prestate, "empty");
+
+        // destroy account 1
+        let address1 = Address::with_last_byte(1);
+        let account1_old = prestate.remove(&address1).unwrap();
+        state.insert_account(address1, account1_old.0.into());
+        state.commit(HashMap::from([(
+            address1,
+            RevmAccount {
+                status: AccountStatus::Touched | AccountStatus::SelfDestructed,
+                info: RevmAccountInfo::default(),
+                storage: HashMap::default(),
+            },
+        )]));
+        state.merge_transitions(BundleRetention::PlainState);
+
assert_state_root(&state, &prestate, "destroyed account"); + + // change slot 2 in account 2 + let address2 = Address::with_last_byte(2); + let slot2 = U256::from(2); + let slot2_key = B256::from(slot2); + let account2 = prestate.get_mut(&address2).unwrap(); + let account2_slot2_old_value = *account2.1.get(&slot2_key).unwrap(); + state.insert_account_with_storage( + address2, + account2.0.into(), + HashMap::from([(slot2, account2_slot2_old_value)]), + ); + + let account2_slot2_new_value = U256::from(100); + account2.1.insert(slot2_key, account2_slot2_new_value); + state.commit(HashMap::from([( + address2, + RevmAccount { + status: AccountStatus::Touched, + info: account2.0.into(), + storage: HashMap::from_iter([( + slot2, + EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), + )]), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed storage"); + + // change balance of account 3 + let address3 = Address::with_last_byte(3); + let account3 = prestate.get_mut(&address3).unwrap(); + state.insert_account(address3, account3.0.into()); + + account3.0.balance = U256::from(24); + state.commit(HashMap::from([( + address3, + RevmAccount { + status: AccountStatus::Touched, + info: account3.0.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed balance"); + + // change nonce of account 4 + let address4 = Address::with_last_byte(4); + let account4 = prestate.get_mut(&address4).unwrap(); + state.insert_account(address4, account4.0.into()); + + account4.0.nonce = 128; + state.commit(HashMap::from([( + address4, + RevmAccount { + status: AccountStatus::Touched, + info: account4.0.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "changed nonce"); + + // recreate account 1 + let account1_new = + Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(B256::random()) }; + prestate.insert(address1, (account1_new, BTreeMap::default())); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1_new.into(), + storage: HashMap::default(), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "recreated"); + + // update storage for account 1 + let slot20 = U256::from(20); + let slot20_key = B256::from(slot20); + let account1_slot20_value = U256::from(12345); + prestate.get_mut(&address1).unwrap().1.insert(slot20_key, account1_slot20_value); + state.commit(HashMap::from([( + address1, + RevmAccount { + status: AccountStatus::Touched | AccountStatus::Created, + info: account1_new.into(), + storage: HashMap::from_iter([( + slot20, + EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), + )]), + }, + )])); + state.merge_transitions(BundleRetention::PlainState); + assert_state_root(&state, &prestate, "recreated changed storage"); + } + + #[test] + fn prepend_state() { + let address1 = Address::random(); + let address2 = Address::random(); + + let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; + let account1_changed = RevmAccountInfo { nonce: 1, ..Default::default() }; + let account2 = RevmAccountInfo { nonce: 1, ..Default::default() }; + + let present_state = BundleState::builder(2..=2) + .state_present_account_info(address1, account1_changed.clone()) + .build(); + 
assert_eq!(present_state.reverts.len(), 1); + let previous_state = BundleState::builder(1..=1) + .state_present_account_info(address1, account1) + .state_present_account_info(address2, account2.clone()) + .build(); + assert_eq!(previous_state.reverts.len(), 1); + + let mut test = ExecutionOutcome { + bundle: present_state, + receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), + first_block: 2, + requests: Vec::new(), + }; + + test.prepend_state(previous_state); + + assert_eq!(test.receipts.len(), 1); + let end_state = test.state(); + assert_eq!(end_state.state.len(), 2); + // reverts num should stay the same. + assert_eq!(end_state.reverts.len(), 1); + // account1 is not overwritten. + assert_eq!(end_state.state.get(&address1).unwrap().info, Some(account1_changed)); + // account2 got inserted + assert_eq!(end_state.state.get(&address2).unwrap().info, Some(account2)); + } +} diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index 58b76f1eacf7..3dad9389f67d 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -1,5 +1,10 @@ //! Bundle state module. //! This module contains all the logic related to bundle state. +mod execution_outcome; +mod state_changes; mod state_reverts; -pub use state_reverts::StorageRevertsIter; + +pub use execution_outcome::{AccountRevertInit, BundleStateInit, OriginalValuesKnown, RevertsInit}; +pub use state_changes::StateChanges; +pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs new file mode 100644 index 000000000000..ba9acfcccfa6 --- /dev/null +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -0,0 +1,88 @@ +use crate::DatabaseProviderRW; +use rayon::slice::ParallelSliceMut; +use reth_db::{tables, Database}; +use reth_db_api::{ + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, + transaction::DbTxMut, +}; +use reth_primitives::{Bytecode, StorageEntry, U256}; +use reth_storage_errors::db::DatabaseError; +use revm::db::states::{PlainStorageChangeset, StateChangeset}; + +/// A change to the state of the world. +#[derive(Debug, Default)] +pub struct StateChanges(pub StateChangeset); + +impl From for StateChanges { + fn from(revm: StateChangeset) -> Self { + Self(revm) + } +} + +impl StateChanges { + /// Write the bundle state to the database. + pub fn write_to_db(mut self, provider: &DatabaseProviderRW) -> Result<(), DatabaseError> + where + DB: Database, + { + // sort all entries so they can be written to database in more performant way. + // and take smaller memory footprint. + self.0.accounts.par_sort_by_key(|a| a.0); + self.0.storage.par_sort_by_key(|a| a.address); + self.0.contracts.par_sort_by_key(|a| a.0); + + // Write new account state + tracing::trace!(target: "provider::bundle_state", len = self.0.accounts.len(), "Writing new account state"); + let mut accounts_cursor = provider.tx_ref().cursor_write::()?; + // write account to database. 
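+        // `None` means the account no longer exists in the post-state, so any
+        // existing plain state entry is deleted; `Some` values are upserted.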
+ for (address, account) in self.0.accounts { + if let Some(account) = account { + tracing::trace!(target: "provider::bundle_state", ?address, "Updating plain state account"); + accounts_cursor.upsert(address, account.into())?; + } else if accounts_cursor.seek_exact(address)?.is_some() { + tracing::trace!(target: "provider::bundle_state", ?address, "Deleting plain state account"); + accounts_cursor.delete_current()?; + } + } + + // Write bytecode + tracing::trace!(target: "provider::bundle_state", len = self.0.contracts.len(), "Writing bytecodes"); + let mut bytecodes_cursor = provider.tx_ref().cursor_write::()?; + for (hash, bytecode) in self.0.contracts { + bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; + } + + // Write new storage state and wipe storage if needed. + tracing::trace!(target: "provider::bundle_state", len = self.0.storage.len(), "Writing new storage state"); + let mut storages_cursor = + provider.tx_ref().cursor_dup_write::()?; + for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage { + // Wiping of storage. + if wipe_storage && storages_cursor.seek_exact(address)?.is_some() { + storages_cursor.delete_current_duplicates()?; + } + // cast storages to B256. + let mut storage = storage + .into_iter() + .map(|(k, value)| StorageEntry { key: k.into(), value }) + .collect::>(); + // sort storage slots by key. + storage.par_sort_unstable_by_key(|a| a.key); + + for entry in storage { + tracing::trace!(target: "provider::bundle_state", ?address, ?entry.key, "Updating plain state storage"); + if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? { + if db_entry.key == entry.key { + storages_cursor.delete_current()?; + } + } + + if entry.value != U256::ZERO { + storages_cursor.upsert(address, entry)?; + } + } + } + + Ok(()) + } +} diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 37d44cde51de..b5bb77bc13a0 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -1,7 +1,103 @@ -use reth_primitives::{B256, U256}; -use revm::db::states::RevertToSlot; +use crate::DatabaseProviderRW; +use rayon::slice::ParallelSliceMut; +use reth_db::{tables, Database}; +use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, + models::{AccountBeforeTx, BlockNumberAddress}, + transaction::DbTxMut, +}; +use reth_primitives::{BlockNumber, StorageEntry, B256, U256}; +use reth_storage_errors::db::DatabaseError; +use revm::db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot}; use std::iter::Peekable; +/// Revert of the state. +#[derive(Debug, Default)] +pub struct StateReverts(pub PlainStateReverts); + +impl From for StateReverts { + fn from(revm: PlainStateReverts) -> Self { + Self(revm) + } +} + +impl StateReverts { + /// Write reverts to database. + /// + /// `Note::` Reverts will delete all wiped storage from plain state. 
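+    ///
+    /// Reverts are keyed by block: entry `i` of the inner vectors belongs to
+    /// block `first_block + i`. Changesets are sorted and appended in ascending
+    /// (block, address) order so that the `append_dup` writes remain valid.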
+ pub fn write_to_db( + self, + provider: &DatabaseProviderRW, + first_block: BlockNumber, + ) -> Result<(), DatabaseError> + where + DB: Database, + { + // Write storage changes + tracing::trace!(target: "provider::reverts", "Writing storage changes"); + let mut storages_cursor = + provider.tx_ref().cursor_dup_write::()?; + let mut storage_changeset_cursor = + provider.tx_ref().cursor_dup_write::()?; + for (block_index, mut storage_changes) in self.0.storage.into_iter().enumerate() { + let block_number = first_block + block_index as BlockNumber; + + tracing::trace!(target: "provider::reverts", block_number, "Writing block change"); + // sort changes by address. + storage_changes.par_sort_unstable_by_key(|a| a.address); + for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { + let storage_id = BlockNumberAddress((block_number, address)); + + let mut storage = storage_revert + .into_iter() + .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) + .collect::>(); + // sort storage slots by key. + storage.par_sort_unstable_by_key(|a| a.0); + + // If we are writing the primary storage wipe transition, the pre-existing plain + // storage state has to be taken from the database and written to storage history. + // See [StorageWipe::Primary] for more details. + let mut wiped_storage = Vec::new(); + if wiped { + tracing::trace!(target: "provider::reverts", ?address, "Wiping storage"); + if let Some((_, entry)) = storages_cursor.seek_exact(address)? { + wiped_storage.push((entry.key, entry.value)); + while let Some(entry) = storages_cursor.next_dup_val()? { + wiped_storage.push((entry.key, entry.value)) + } + } + } + + tracing::trace!(target: "provider::reverts", ?address, ?storage, "Writing storage reverts"); + for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { + storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; + } + } + } + + // Write account changes + tracing::trace!(target: "provider::reverts", "Writing account changes"); + let mut account_changeset_cursor = + provider.tx_ref().cursor_dup_write::()?; + + for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { + let block_number = first_block + block_index as BlockNumber; + // Sort accounts by address. + account_block_reverts.par_sort_by_key(|a| a.0); + + for (address, info) in account_block_reverts { + account_changeset_cursor.append_dup( + block_number, + AccountBeforeTx { address, info: info.map(Into::into) }, + )?; + } + } + + Ok(()) + } +} + /// Iterator over storage reverts. /// See [`StorageRevertsIter::next`] for more details. #[allow(missing_debug_implementations)] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 894a41620c52..a578fa09d562 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -34,18 +34,11 @@ pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; pub use reth_execution_types::*; pub mod bundle_state; - -/// Re-export `OriginalValuesKnown` -pub use revm::db::states::OriginalValuesKnown; +pub use bundle_state::{OriginalValuesKnown, StateChanges, StateReverts}; /// Writer standalone type. 
pub mod writer; -pub use reth_chain_state::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, - CanonStateNotifications, CanonStateSubscriptions, -}; - pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs deleted file mode 100644 index 952cdff4bc54..000000000000 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ /dev/null @@ -1,826 +0,0 @@ -use crate::{ - providers::{BundleStateProvider, StaticFileProvider}, - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, - DatabaseProviderRO, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, -}; -use alloy_rpc_types_engine::ForkchoiceState; -use reth_chain_state::CanonicalInMemoryState; -use reth_chainspec::{ChainInfo, ChainSpec}; -use reth_db_api::{ - database::Database, - models::{AccountBeforeTx, StoredBlockBodyIndices}, -}; -use reth_evm::ConfigureEvmEnv; -use reth_primitives::{ - Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumber, - BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, - Withdrawal, Withdrawals, B256, U256, -}; -use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; -use std::{ - ops::{RangeBounds, RangeInclusive}, - sync::Arc, - time::Instant, -}; -use tracing::trace; - -/// The main type for interacting with the blockchain. -/// -/// This type serves as the main entry point for interacting with the blockchain and provides data -/// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper -/// type that holds an instance of the database and the blockchain tree. -#[allow(missing_debug_implementations)] -pub struct BlockchainProvider2 { - /// Provider type used to access the database. - database: ProviderFactory, - /// Tracks the chain info wrt forkchoice updates and in memory canonical - /// state. - canonical_in_memory_state: CanonicalInMemoryState, -} - -impl Clone for BlockchainProvider2 { - fn clone(&self) -> Self { - Self { - database: self.database.clone(), - canonical_in_memory_state: self.canonical_in_memory_state.clone(), - } - } -} - -impl BlockchainProvider2 { - /// Create new provider instance that wraps the database and the blockchain tree, using the - /// provided latest header to initialize the chain info tracker. 
- pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> Self { - Self { database, canonical_in_memory_state: CanonicalInMemoryState::with_head(latest) } - } -} - -impl BlockchainProvider2 -where - DB: Database, -{ - /// Create a new provider using only the database, fetching the latest header from - /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { - let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; - match provider.header_by_number(best.best_number)? { - Some(header) => { - drop(provider); - Ok(Self::with_latest(database, header.seal(best.best_hash))) - } - None => Err(ProviderError::HeaderNotFound(best.best_number.into())), - } - } - - /// Gets a clone of `canonical_in_memory_state`. - pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { - self.canonical_in_memory_state.clone() - } -} - -impl BlockchainProvider2 -where - DB: Database, -{ - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. - #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } - } -} - -impl DatabaseProviderFactory for BlockchainProvider2 -where - DB: Database, -{ - fn database_provider_ro(&self) -> ProviderResult> { - self.database.provider() - } -} - -impl StaticFileProviderFactory for BlockchainProvider2 { - fn static_file_provider(&self) -> StaticFileProvider { - self.database.static_file_provider() - } -} - -impl HeaderProvider for BlockchainProvider2 -where - DB: Database, -{ - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.database.header(block_hash) - } - - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.database.header_by_number(num) - } - - fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - self.database.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.database.header_td_by_number(number) - } - - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.database.headers_range(range) - } - - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.database.sealed_header(number) - } - - fn sealed_headers_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.sealed_headers_range(range) - } - - fn sealed_headers_while( - &self, - range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { - self.database.sealed_headers_while(range, predicate) - } -} - -impl BlockHashReader for BlockchainProvider2 -where - DB: Database, -{ - fn block_hash(&self, number: u64) -> ProviderResult> { - self.database.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - self.database.canonical_hashes_range(start, end) - } -} - -impl BlockNumReader for BlockchainProvider2 -where - DB: Database, -{ - fn 
chain_info(&self) -> ProviderResult { - Ok(self.canonical_in_memory_state.chain_info()) - } - - fn best_block_number(&self) -> ProviderResult { - Ok(self.canonical_in_memory_state.get_canonical_block_number()) - } - - fn last_block_number(&self) -> ProviderResult { - self.database.last_block_number() - } - - fn block_number(&self, hash: B256) -> ProviderResult> { - self.database.block_number(hash) - } -} - -impl BlockIdReader for BlockchainProvider2 -where - DB: Database, -{ - fn pending_block_num_hash(&self) -> ProviderResult> { - Ok(self.canonical_in_memory_state.pending_block_num_hash()) - } - - fn safe_block_num_hash(&self) -> ProviderResult> { - Ok(self.canonical_in_memory_state.get_safe_num_hash()) - } - - fn finalized_block_num_hash(&self) -> ProviderResult> { - Ok(self.canonical_in_memory_state.get_finalized_num_hash()) - } -} - -impl BlockReader for BlockchainProvider2 -where - DB: Database, -{ - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - let block = match source { - BlockSource::Any | BlockSource::Canonical => { - // check in memory first - // Note: it's fine to return the unsealed block because the caller already has - // the hash - let mut block = self - .canonical_in_memory_state - .state_by_hash(hash) - .map(|block_state| block_state.block().block().clone().unseal()); - - if block.is_none() { - block = self.database.block_by_hash(hash)?; - } - block - } - BlockSource::Pending => { - self.canonical_in_memory_state.pending_block().map(|block| block.unseal()) - } - }; - - Ok(block) - } - - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), - BlockHashOrNumber::Number(num) => self.database.block_by_number(num), - } - } - - fn pending_block(&self) -> ProviderResult> { - Ok(self.canonical_in_memory_state.pending_block()) - } - - fn pending_block_with_senders(&self) -> ProviderResult> { - Ok(self.canonical_in_memory_state.pending_block_with_senders()) - } - - fn pending_block_and_receipts(&self) -> ProviderResult)>> { - Ok(self.canonical_in_memory_state.pending_block_and_receipts()) - } - - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.database.ommers(id) - } - - fn block_body_indices( - &self, - number: BlockNumber, - ) -> ProviderResult> { - self.database.block_body_indices(number) - } - - /// Returns the block with senders with matching number or hash from database. - /// - /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid - /// hashes, since they would need to be calculated on the spot, and we want fast querying.** - /// - /// Returns `None` if block is not found. 
- fn block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult> { - self.database.block_with_senders(id, transaction_kind) - } - - fn sealed_block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult> { - self.database.sealed_block_with_senders(id, transaction_kind) - } - - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.database.block_range(range) - } - - fn block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - self.database.block_with_senders_range(range) - } - - fn sealed_block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - self.database.sealed_block_with_senders_range(range) - } -} - -impl TransactionsProvider for BlockchainProvider2 -where - DB: Database, -{ - fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.database.transaction_id(tx_hash) - } - - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_by_id(id) - } - - fn transaction_by_id_no_hash( - &self, - id: TxNumber, - ) -> ProviderResult> { - self.database.transaction_by_id_no_hash(id) - } - - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.transaction_by_hash(hash) - } - - fn transaction_by_hash_with_meta( - &self, - tx_hash: TxHash, - ) -> ProviderResult> { - self.database.transaction_by_hash_with_meta(tx_hash) - } - - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_block(id) - } - - fn transactions_by_block( - &self, - id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.database.transactions_by_block(id) - } - - fn transactions_by_block_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult>> { - self.database.transactions_by_block_range(range) - } - - fn transactions_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.transactions_by_tx_range(range) - } - - fn senders_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.senders_by_tx_range(range) - } - - fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_sender(id) - } -} - -impl ReceiptProvider for BlockchainProvider2 -where - DB: Database, -{ - fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.database.receipt(id) - } - - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.receipt_by_hash(hash) - } - - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - self.database.receipts_by_block(block) - } - - fn receipts_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.receipts_by_tx_range(range) - } -} - -impl ReceiptProviderIdExt for BlockchainProvider2 -where - DB: Database, -{ - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - let block_state = self - .canonical_in_memory_state - .state_by_hash(rpc_block_hash.block_hash) - .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; - receipts = Some(block_state.executed_block_receipts()); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self - .canonical_in_memory_state 
- .pending_state() - .map(|block_state| block_state.executed_block_receipts())), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } - } -} - -impl WithdrawalsProvider for BlockchainProvider2 -where - DB: Database, -{ - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.withdrawals_by_block(id, timestamp) - } - - fn latest_withdrawal(&self) -> ProviderResult> { - self.database.latest_withdrawal() - } -} - -impl RequestsProvider for BlockchainProvider2 -where - DB: Database, -{ - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.requests_by_block(id, timestamp) - } -} - -impl StageCheckpointReader for BlockchainProvider2 -where - DB: Database, -{ - fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) - } - - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) - } - - fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() - } -} - -impl EvmEnvProvider for BlockchainProvider2 -where - DB: Database, -{ - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv, - { - self.database.provider()?.fill_env_at(cfg, block_env, at, evm_config) - } - - fn fill_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - header: &Header, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv, - { - self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv, - { - self.database.provider()?.fill_cfg_env_at(cfg, at, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv, - { - self.database.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) - } -} - -impl PruneCheckpointReader for BlockchainProvider2 -where - DB: Database, -{ - fn get_prune_checkpoint( - &self, - segment: PruneSegment, - ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) - } - - fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() - } -} - -impl ChainSpecProvider for BlockchainProvider2 -where - DB: Send + Sync, -{ - fn chain_spec(&self) -> Arc { - self.database.chain_spec() - } -} - -impl StateProviderFactory for BlockchainProvider2 -where - DB: Database, -{ - /// Storage provider for latest block - fn latest(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting latest block state provider"); - self.database.latest() - } - - fn history_by_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult { - trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - self.database.history_by_block_number(block_number) - } - - fn history_by_block_hash(&self, block_hash: BlockHash) 
-> ProviderResult { - trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.database.history_by_block_hash(block_hash) - } - - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { - trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); - let mut state = self.history_by_block_hash(block); - - // we failed to get the state by hash, from disk, hash block be the pending block - if state.is_err() { - if let Ok(Some(pending)) = self.pending_state_by_hash(block) { - // we found pending block by hash - state = Ok(pending) - } - } - - state - } - - /// Returns the state provider for pending state. - /// - /// If there's no pending block available then the latest state provider is returned: - /// [`Self::latest`] - fn pending(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting provider for pending state"); - - if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { - let historical = self.database.history_by_block_hash(block.hash)?; - let pending_provider = - self.canonical_in_memory_state.state_provider(block.hash, historical); - - return Ok(Box::new(pending_provider)); - } - - // fallback to latest state if the pending block is not available - self.latest() - } - - fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { - let historical = self.database.history_by_block_hash(block_hash)?; - if let Some(block) = self.canonical_in_memory_state.pending_block_num_hash() { - if block.hash == block_hash { - let pending_provider = - self.canonical_in_memory_state.state_provider(block_hash, historical); - - return Ok(Some(Box::new(pending_provider))) - } - } - Ok(None) - } - - fn pending_with_provider( - &self, - bundle_state_data: Box, - ) -> ProviderResult { - let state_provider = self.pending()?; - - let bundle_state_provider = BundleStateProvider::new(state_provider, bundle_state_data); - Ok(Box::new(bundle_state_provider)) - } -} - -impl CanonChainTracker for BlockchainProvider2 -where - DB: Send + Sync, - Self: BlockReader, -{ - fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { - // update timestamp - self.canonical_in_memory_state.on_forkchoice_update_received(); - } - - fn last_received_update_timestamp(&self) -> Option { - self.canonical_in_memory_state.last_received_update_timestamp() - } - - fn on_transition_configuration_exchanged(&self) { - self.canonical_in_memory_state.on_transition_configuration_exchanged(); - } - - fn last_exchanged_transition_configuration_timestamp(&self) -> Option { - self.canonical_in_memory_state.last_exchanged_transition_configuration_timestamp() - } - - fn set_canonical_head(&self, header: SealedHeader) { - self.canonical_in_memory_state.set_canonical_head(header); - } - - fn set_safe(&self, header: SealedHeader) { - self.canonical_in_memory_state.set_safe(header); - } - - fn set_finalized(&self, header: SealedHeader) { - self.canonical_in_memory_state.set_finalized(header); - } -} - -impl BlockReaderIdExt for BlockchainProvider2 -where - Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, -{ - fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? 
- // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - self.block_by_hash(hash.block_hash) - } - } - } - } - - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => { - Some(self.canonical_in_memory_state.get_canonical_head().unseal()) - } - BlockNumberOrTag::Finalized => { - self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => { - self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), - - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) - } - - fn sealed_header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult> { - match id { - BlockNumberOrTag::Latest => { - Ok(Some(self.canonical_in_memory_state.get_canonical_head())) - } - BlockNumberOrTag::Finalized => { - Ok(self.canonical_in_memory_state.get_finalized_header()) - } - BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => { - self.header_by_number(0)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) - } - BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => { - self.header_by_number(num)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) - } - } - } - - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| h.seal_slow()), - }) - } - - fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) - } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } - } -} - -impl BlockchainTreePendingStateProvider for BlockchainProvider2 -where - DB: Send + Sync, -{ - fn find_pending_state_provider( - &self, - _block_hash: BlockHash, - ) -> Option> { - // TODO: check in memory overlay https://github.com/paradigmxyz/reth/issues/9614 - None - } -} - -impl CanonStateSubscriptions for BlockchainProvider2 -where - DB: Send + Sync, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.canonical_in_memory_state.subscribe_canon_state() - } -} - -impl ChangeSetReader for BlockchainProvider2 -where - DB: Database, -{ - fn account_block_changeset( - &self, - block_number: BlockNumber, - ) -> ProviderResult> { - self.database.provider()?.account_block_changeset(block_number) - } -} - -impl AccountReader for BlockchainProvider2 -where - DB: Database + Sync + Send, -{ - /// Get basic account information. 
- fn basic_account(&self, address: Address) -> ProviderResult> { - self.database.provider()?.basic_account(address) - } -} diff --git a/crates/chain-state/src/chain_info.rs b/crates/storage/provider/src/providers/chain_info.rs similarity index 100% rename from crates/chain-state/src/chain_info.rs rename to crates/storage/provider/src/providers/chain_info.rs diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index ed4c1498fafc..bfd0a39bcbb3 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -512,9 +512,6 @@ impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { self.provider()?.get_stage_checkpoint_progress(id) } - fn get_all_checkpoints(&self) -> ProviderResult> { - self.provider()?.get_all_checkpoints() - } } impl EvmEnvProvider for ProviderFactory { @@ -774,7 +771,7 @@ mod tests { // Checkpoint and no gap let mut static_file_writer = provider.static_file_provider().latest_writer(StaticFileSegment::Headers).unwrap(); - static_file_writer.append_header(head.header(), U256::ZERO, &head.hash()).unwrap(); + static_file_writer.append_header(head.header().clone(), U256::ZERO, head.hash()).unwrap(); static_file_writer.commit().unwrap(); drop(static_file_writer); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index a688f125899e..80246013924e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,5 +1,5 @@ use crate::{ - bundle_state::StorageRevertsIter, + bundle_state::{BundleStateInit, RevertsInit}, providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, to_range, traits::{ @@ -7,20 +7,16 @@ use crate::{ }, writer::StorageWriter, AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, - BlockReader, BlockWriter, BundleStateInit, EvmEnvProvider, FinalizedBlockReader, - FinalizedBlockWriter, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, - HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, - ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateWriter, StatsReader, - StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + BlockReader, BlockWriter, EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, + HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, + HistoryWriter, LatestStateProvider, OriginalValuesKnown, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, RequestsProvider, StageCheckpointReader, StateProviderBox, StateWriter, + StatsReader, StorageReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, + WithdrawalsProvider, }; use itertools::{izip, Itertools}; -use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpec, EthereumHardforks}; -use reth_db::{ - cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, -}; +use reth_db::{tables, BlockNumberList, PlainAccountState, PlainStorageState}; use reth_db_api::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, 
RangeWalker}, @@ -38,25 +34,20 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, - BlockWithSenders, Bytecode, GotExpected, Header, Receipt, Requests, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, - Withdrawal, Withdrawals, B256, U256, + BlockWithSenders, GotExpected, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, + SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, + Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, - trie_cursor::DatabaseStorageTrieCursor, - updates::{StorageTrieUpdates, TrieUpdates}, - HashedPostStateSorted, Nibbles, StateRoot, StoredNibbles, -}; -use reth_trie_db::DatabaseStateRoot; -use revm::{ - db::states::{PlainStateReverts, PlainStorageChangeset, PlainStorageRevert, StateChangeset}, - primitives::{BlockEnv, CfgEnvWithHandlerCfg}, + updates::TrieUpdates, + HashedPostStateSorted, Nibbles, StateRoot, }; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -210,11 +201,11 @@ impl DatabaseProviderRW { for block_number in 0..block.number { let mut prev = block.header.clone().unseal(); prev.number = block_number; - writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; + writer.append_header(prev, U256::ZERO, B256::ZERO)?; } } - writer.append_header(block.header.as_ref(), ttd, &block.hash())?; + writer.append_header(block.header.as_ref().clone(), ttd, block.hash())?; self.insert_block(block) } @@ -1010,7 +1001,7 @@ impl DatabaseProvider { } // insert value if needed - if !old_storage_value.is_zero() { + if *old_storage_value != U256::ZERO { plain_storage_cursor.upsert(*address, storage_entry)?; } } @@ -1108,7 +1099,7 @@ impl DatabaseProvider { } // insert value if needed - if !old_storage_value.is_zero() { + if *old_storage_value != U256::ZERO { plain_storage_cursor.upsert(*address, storage_entry)?; } } @@ -1957,7 +1948,7 @@ impl BlockNumReader for DatabaseProvider { impl BlockReader for DatabaseProvider { fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - if source.is_canonical() { + if source.is_database() { self.block(hash.into()) } else { Ok(None) @@ -2517,14 +2508,6 @@ impl StageCheckpointReader for DatabaseProvider { Ok(self.tx.get::(id.to_string())?) } - fn get_all_checkpoints(&self) -> ProviderResult> { - self.tx - .cursor_read::()? - .walk(None)? - .collect::, _>>() - .map_err(ProviderError::Database) - } - /// Get stage checkpoint progress. fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { Ok(self.tx.get::(id.to_string())?) 
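The `*old_storage_value != U256::ZERO` guards in the hunks above encode the plain-state convention that a zero slot value means the entry is absent: the unwind deletes the current duplicate entry first and only re-inserts a non-zero pre-state value. A hedged sketch of that rule as a standalone helper; the `pre_state_entry` function is hypothetical and not part of the patch:

```rust
use reth_primitives::{StorageEntry, B256, U256};

// Hypothetical helper mirroring the unwind guard above: a zero pre-state
// value means the slot stays deleted, a non-zero value is upserted back.
fn pre_state_entry(key: B256, old_value: U256) -> Option<StorageEntry> {
    (old_value != U256::ZERO).then_some(StorageEntry { key, value: old_value })
}

// pre_state_entry(key, U256::ZERO)    -> None       (slot remains deleted)
// pre_state_entry(key, U256::from(1)) -> Some(entry) (re-inserted via upsert)
```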
@@ -2635,224 +2618,6 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { - fn write_state_reverts( - &self, - reverts: PlainStateReverts, - first_block: BlockNumber, - ) -> ProviderResult<()> { - // Write storage changes - tracing::trace!("Writing storage changes"); - let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; - let mut storage_changeset_cursor = - self.tx_ref().cursor_dup_write::()?; - for (block_index, mut storage_changes) in reverts.storage.into_iter().enumerate() { - let block_number = first_block + block_index as BlockNumber; - - tracing::trace!(block_number, "Writing block change"); - // sort changes by address. - storage_changes.par_sort_unstable_by_key(|a| a.address); - for PlainStorageRevert { address, wiped, storage_revert } in storage_changes { - let storage_id = BlockNumberAddress((block_number, address)); - - let mut storage = storage_revert - .into_iter() - .map(|(k, v)| (B256::new(k.to_be_bytes()), v)) - .collect::>(); - // sort storage slots by key. - storage.par_sort_unstable_by_key(|a| a.0); - - // If we are writing the primary storage wipe transition, the pre-existing plain - // storage state has to be taken from the database and written to storage history. - // See [StorageWipe::Primary] for more details. - let mut wiped_storage = Vec::new(); - if wiped { - tracing::trace!(?address, "Wiping storage"); - if let Some((_, entry)) = storages_cursor.seek_exact(address)? { - wiped_storage.push((entry.key, entry.value)); - while let Some(entry) = storages_cursor.next_dup_val()? { - wiped_storage.push((entry.key, entry.value)) - } - } - } - - tracing::trace!(?address, ?storage, "Writing storage reverts"); - for (key, value) in StorageRevertsIter::new(storage, wiped_storage) { - storage_changeset_cursor.append_dup(storage_id, StorageEntry { key, value })?; - } - } - } - - // Write account changes - tracing::trace!("Writing account changes"); - let mut account_changeset_cursor = - self.tx_ref().cursor_dup_write::()?; - - for (block_index, mut account_block_reverts) in reverts.accounts.into_iter().enumerate() { - let block_number = first_block + block_index as BlockNumber; - // Sort accounts by address. - account_block_reverts.par_sort_by_key(|a| a.0); - - for (address, info) in account_block_reverts { - account_changeset_cursor.append_dup( - block_number, - AccountBeforeTx { address, info: info.map(Into::into) }, - )?; - } - } - - Ok(()) - } - - fn write_state_changes(&self, mut changes: StateChangeset) -> ProviderResult<()> { - // sort all entries so they can be written to database in more performant way. - // and take smaller memory footprint. - changes.accounts.par_sort_by_key(|a| a.0); - changes.storage.par_sort_by_key(|a| a.address); - changes.contracts.par_sort_by_key(|a| a.0); - - // Write new account state - tracing::trace!(len = changes.accounts.len(), "Writing new account state"); - let mut accounts_cursor = self.tx_ref().cursor_write::()?; - // write account to database. 
- for (address, account) in changes.accounts { - if let Some(account) = account { - tracing::trace!(?address, "Updating plain state account"); - accounts_cursor.upsert(address, account.into())?; - } else if accounts_cursor.seek_exact(address)?.is_some() { - tracing::trace!(?address, "Deleting plain state account"); - accounts_cursor.delete_current()?; - } - } - - // Write bytecode - tracing::trace!(len = changes.contracts.len(), "Writing bytecodes"); - let mut bytecodes_cursor = self.tx_ref().cursor_write::()?; - for (hash, bytecode) in changes.contracts { - bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; - } - - // Write new storage state and wipe storage if needed. - tracing::trace!(len = changes.storage.len(), "Writing new storage state"); - let mut storages_cursor = self.tx_ref().cursor_dup_write::()?; - for PlainStorageChangeset { address, wipe_storage, storage } in changes.storage { - // Wiping of storage. - if wipe_storage && storages_cursor.seek_exact(address)?.is_some() { - storages_cursor.delete_current_duplicates()?; - } - // cast storages to B256. - let mut storage = storage - .into_iter() - .map(|(k, value)| StorageEntry { key: k.into(), value }) - .collect::>(); - // sort storage slots by key. - storage.par_sort_unstable_by_key(|a| a.key); - - for entry in storage { - tracing::trace!(?address, ?entry.key, "Updating plain state storage"); - if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? { - if db_entry.key == entry.key { - storages_cursor.delete_current()?; - } - } - - if !entry.value.is_zero() { - storages_cursor.upsert(address, entry)?; - } - } - } - - Ok(()) - } -} - -impl TrieWriter for DatabaseProvider { - /// Writes trie updates. Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { - if trie_updates.is_empty() { - return Ok(0) - } - - // Track the number of inserted entries. - let mut num_entries = 0; - - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut account_updates = trie_updates - .removed_nodes_ref() - .iter() - .filter_map(|n| { - (!trie_updates.account_nodes_ref().contains_key(n)).then_some((n, None)) - }) - .collect::>(); - account_updates.extend( - trie_updates.account_nodes_ref().iter().map(|(nibbles, node)| (nibbles, Some(node))), - ); - // Sort trie node updates. - account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - - let tx = self.tx_ref(); - let mut account_trie_cursor = tx.cursor_write::()?; - for (key, updated_node) in account_updates { - let nibbles = StoredNibbles(key.clone()); - match updated_node { - Some(node) => { - if !nibbles.0.is_empty() { - num_entries += 1; - account_trie_cursor.upsert(nibbles, node.clone())?; - } - } - None => { - num_entries += 1; - if account_trie_cursor.seek_exact(nibbles)?.is_some() { - account_trie_cursor.delete_current()?; - } - } - } - } - - num_entries += self.write_storage_trie_updates(trie_updates.storage_tries_ref())?; - - Ok(num_entries) - } -} - -impl StorageTrieWriter for DatabaseProvider { - /// Writes storage trie updates from the given storage trie map. First sorts the storage trie - /// updates by the hashed address, writing in sorted order. 
- fn write_storage_trie_updates( - &self, - storage_tries: &HashMap, - ) -> ProviderResult { - let mut num_entries = 0; - let mut storage_tries = Vec::from_iter(storage_tries); - storage_tries.sort_unstable_by(|a, b| a.0.cmp(b.0)); - let mut cursor = self.tx_ref().cursor_dup_write::()?; - for (hashed_address, storage_trie_updates) in storage_tries { - let mut db_storage_trie_cursor = - DatabaseStorageTrieCursor::new(cursor, *hashed_address); - num_entries += - db_storage_trie_cursor.write_storage_trie_updates(storage_trie_updates)?; - cursor = db_storage_trie_cursor.cursor; - } - - Ok(num_entries) - } - - fn write_individual_storage_trie_updates( - &self, - hashed_address: B256, - updates: &StorageTrieUpdates, - ) -> ProviderResult { - if updates.is_empty() { - return Ok(0) - } - - let cursor = self.tx_ref().cursor_dup_write::()?; - let mut trie_db_cursor = DatabaseStorageTrieCursor::new(cursor, hashed_address); - Ok(trie_db_cursor.write_storage_trie_updates(updates)?) - } -} - impl HashingWriter for DatabaseProvider { fn unwind_account_hashing( &self, @@ -2931,7 +2696,7 @@ impl HashingWriter for DatabaseProvider { hashed_storage.delete_current()?; } - if !value.is_zero() { + if value != U256::ZERO { hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; } } @@ -2971,7 +2736,7 @@ impl HashingWriter for DatabaseProvider { hashed_storage_cursor.delete_current()?; } - if !value.is_zero() { + if value != U256::ZERO { hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value })?; } Ok(()) @@ -3048,7 +2813,7 @@ impl HashingWriter for DatabaseProvider { block_hash: end_block_hash, }))) } - self.write_trie_updates(&trie_updates)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertMerkleTree); @@ -3257,7 +3022,7 @@ impl BlockExecutionWriter for DatabaseProviderRW { block_hash: parent_hash, }))) } - self.write_trie_updates(&trie_updates)?; + trie_updates.write_to_database(&self.tx)?; // get blocks let blocks = self.take_block_range(range.clone())?; @@ -3345,7 +3110,7 @@ impl BlockExecutionWriter for DatabaseProviderRW { block_hash: parent_hash, }))) } - self.write_trie_updates(&trie_updates)?; + trie_updates.write_to_database(&self.tx)?; // get blocks let blocks = self.take_block_range(range.clone())?; @@ -3368,27 +3133,6 @@ impl BlockExecutionWriter for DatabaseProviderRW { } impl BlockWriter for DatabaseProviderRW { - /// Inserts the block into the database, always modifying the following tables: - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`Headers`](tables::Headers) - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// - /// If there are transactions in the block, the following tables will be modified: - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - /// - /// If ommers are not empty, this will modify [`BlockOmmers`](tables::BlockOmmers). - /// If withdrawals are not empty, this will modify - /// [`BlockWithdrawals`](tables::BlockWithdrawals). - /// If requests are not empty, this will modify [`BlockRequests`](tables::BlockRequests). - /// - /// If the provider has __not__ configured full sender pruning, this will modify - /// [`TransactionSenders`](tables::TransactionSenders). 
- /// - /// If the provider has __not__ configured full transaction lookup pruning, this will modify - /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, block: SealedBlockWithSenders, @@ -3559,16 +3303,14 @@ impl BlockWriter for DatabaseProviderRW { // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. - // TODO: should _these_ be moved to storagewriter? seems like storagewriter should be - // _above_ db provider - let mut storage_writer = StorageWriter::new(Some(self), None); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + execution_outcome.write_to_storage(self, None, OriginalValuesKnown::No)?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes { + let storage_writer = StorageWriter::new(Some(self), None); storage_writer.write_hashed_state(&hashed_state)?; - self.write_trie_updates(&trie_updates)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertHashes); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index be6db2dcadb1..330c880c7eb7 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,18 +1,17 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, ChainSpecProvider, - ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, - HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, - StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, - WithdrawalsProvider, + BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, + EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, ProviderError, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, + StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, + TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, }; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_chain_state::{CanonStateNotifications, CanonStateSubscriptions, ChainInfoTracker}; use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db_api::{ database::Database, @@ -55,13 +54,13 @@ pub use state::{ mod bundle_state_provider; pub use bundle_state_provider::BundleStateProvider; +mod chain_info; +pub use chain_info::ChainInfoTracker; + mod consistent_view; use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; -mod blockchain_provider; -pub use blockchain_provider::BlockchainProvider2; - /// The main type for interacting with the blockchain. 
/// /// This type serves as the main entry point for interacting with the blockchain and provides data @@ -291,7 +290,7 @@ where block } BlockSource::Pending => self.tree.block_by_hash(hash).map(|block| block.unseal()), - BlockSource::Canonical => self.database.block_by_hash(hash)?, + BlockSource::Database => self.database.block_by_hash(hash)?, }; Ok(block) @@ -527,10 +526,6 @@ where fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { self.database.provider()?.get_stage_checkpoint_progress(id) } - - fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() - } } impl EvmEnvProvider for BlockchainProvider @@ -825,7 +820,7 @@ where // trait impl if Some(true) == hash.require_canonical { // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) + self.find_block_by_hash(hash.block_hash, BlockSource::Database) } else { self.block_by_hash(hash.block_hash) } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index cbef08dcee27..268f5c6d3593 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -15,8 +15,7 @@ use reth_primitives::{ }; use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::ProviderResult; -use reth_trie::{proof::Proof, updates::TrieUpdates, AccountProof, HashedPostState, StateRoot}; -use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; +use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; use std::fmt::Debug; /// State provider for a given block number which takes a tx reference. @@ -131,7 +130,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) + Ok(HashedPostState::from_revert_range(self.tx, self.block_number..=tip)?) 
} fn history_info( @@ -260,8 +259,7 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - StateRoot::overlay_root(self.tx, revert_state) - .map_err(|err| ProviderError::Database(err.into())) + revert_state.state_root(self.tx).map_err(|err| ProviderError::Database(err.into())) } fn hashed_state_root_with_updates( @@ -270,7 +268,8 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - StateRoot::overlay_root_with_updates(self.tx, revert_state) + revert_state + .state_root_with_updates(self.tx) .map_err(|err| ProviderError::Database(err.into())) } } @@ -285,8 +284,9 @@ impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { ) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state.clone()); - Proof::overlay_account_proof(self.tx, revert_state, address, slots) - .map_err(Into::::into) + revert_state + .account_proof(self.tx, address, slots) + .map_err(|err| ProviderError::Database(err.into())) } } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 8c95c8c26174..6bca0d69d467 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -12,8 +12,7 @@ use reth_primitives::{ }; use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::{proof::Proof, updates::TrieUpdates, AccountProof, HashedPostState, StateRoot}; -use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; +use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; /// State provider over latest state that takes tx reference. #[derive(Debug)] @@ -76,15 +75,15 @@ impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { fn hashed_state_root(&self, hashed_state: &HashedPostState) -> ProviderResult { - StateRoot::overlay_root(self.tx, hashed_state.clone()) - .map_err(|err| ProviderError::Database(err.into())) + hashed_state.state_root(self.tx).map_err(|err| ProviderError::Database(err.into())) } fn hashed_state_root_with_updates( &self, hashed_state: &HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_with_updates(self.tx, hashed_state.clone()) + hashed_state + .state_root_with_updates(self.tx) .map_err(|err| ProviderError::Database(err.into())) } } @@ -96,8 +95,9 @@ impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx, hashed_state.clone(), address, slots) - .map_err(Into::::into) + Ok(hashed_state + .account_proof(self.tx, address, slots) + .map_err(Into::::into)?) 
} } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 65304b4854f1..548b395d74ea 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -532,18 +532,6 @@ impl StaticFileProvider { provider: &DatabaseProvider, has_receipt_pruning: bool, ) -> ProviderResult> { - // OVM chain contains duplicate transactions, so is inconsistent by default since reth db - // not designed for duplicate transactions (see ). Undefined behaviour for queries - // to OVM chain is also in op-erigon. - if provider.chain_spec().is_optimism_mainnet() { - info!(target: "reth::cli", - "Skipping storage verification for OP mainnet, expected inconsistency in OVM chain" - ); - return Ok(None); - } - - info!(target: "reth::cli", "Verifying storage consistency."); - let mut unwind_target: Option = None; let mut update_unwind_target = |new_target: BlockNumber| { if let Some(target) = unwind_target.as_mut() { diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index abbc774c7b40..c5abdbe00c31 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -107,7 +107,7 @@ mod tests { for header in headers.clone() { td += header.header().difficulty; let hash = header.hash(); - writer.append_header(&header.unseal(), td, &hash).unwrap(); + writer.append_header(header.unseal(), td, hash).unwrap(); } writer.commit().unwrap(); } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index f973afde6e2c..df4417ace2bc 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -13,7 +13,6 @@ use reth_primitives::{ }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ - borrow::Borrow, path::{Path, PathBuf}, sync::{Arc, Weak}, time::Instant, @@ -467,9 +466,9 @@ impl StaticFileProviderRW { /// Returns the current [`BlockNumber`] as seen in the static file. pub fn append_header( &mut self, - header: &Header, + header: Header, total_difficulty: U256, - hash: &BlockHash, + hash: BlockHash, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -502,7 +501,7 @@ impl StaticFileProviderRW { pub fn append_transaction( &mut self, tx_num: TxNumber, - tx: &TransactionSignedNoHash, + tx: TransactionSignedNoHash, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -529,7 +528,7 @@ impl StaticFileProviderRW { pub fn append_receipt( &mut self, tx_num: TxNumber, - receipt: &Receipt, + receipt: Receipt, ) -> ProviderResult { let start = Instant::now(); self.ensure_no_queued_prune()?; @@ -550,10 +549,9 @@ impl StaticFileProviderRW { /// Appends multiple receipts to the static file. /// /// Returns the current [`TxNumber`] as seen in the static file, if any. 
- pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> + pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> where - I: Iterator>, - R: Borrow, + I: IntoIterator>, { let mut receipts_iter = receipts.into_iter().peekable(); // If receipts are empty, we can simply return None @@ -570,8 +568,7 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = - self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt.borrow())?; + tx_number = self.append_with_tx_number(StaticFileSegment::Receipts, tx_num, receipt)?; count += 1; } diff --git a/crates/storage/provider/src/test_utils/events.rs b/crates/storage/provider/src/test_utils/events.rs new file mode 100644 index 000000000000..39e53772ca5d --- /dev/null +++ b/crates/storage/provider/src/test_utils/events.rs @@ -0,0 +1,35 @@ +use std::sync::{Arc, Mutex}; +use tokio::sync::broadcast::{self, Sender}; + +use crate::{CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, Chain}; + +/// A test `ChainEventSubscriptions` +#[derive(Clone, Debug, Default)] +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>, +} + +impl TestCanonStateSubscriptions { + /// Adds new block commit to the queue that can be consumed with + /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] + pub fn add_next_commit(&self, new: Arc) { + let event = CanonStateNotification::Commit { new }; + self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) + } + + /// Adds reorg to the queue that can be consumed with + /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`] + pub fn add_next_reorg(&self, old: Arc, new: Arc) { + let event = CanonStateNotification::Reorg { old, new }; + self.canon_notif_tx.lock().as_mut().unwrap().retain(|tx| tx.send(event.clone()).is_ok()) + } +} + +impl CanonStateSubscriptions for TestCanonStateSubscriptions { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + let (canon_notif_tx, canon_notif_rx) = broadcast::channel(100); + self.canon_notif_tx.lock().as_mut().unwrap().push(canon_notif_tx); + + canon_notif_rx + } +} diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index edbbe4582926..4d40ad54e990 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,22 +1,19 @@ -use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use crate::{providers::StaticFileProvider, ProviderFactory}; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, - Database, DatabaseEnv, + DatabaseEnv, }; -use reth_errors::ProviderResult; -use reth_primitives::{Account, StorageEntry, B256}; -use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; use std::sync::Arc; pub mod blocks; +mod events; mod mock; mod noop; +pub use events::TestCanonStateSubscriptions; pub use mock::{ExtendedAccount, MockEthProvider}; pub use noop::NoopProvider; -pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Creates test provider factory with mainnet chain spec. 
pub fn create_test_provider_factory() -> ProviderFactory>> { @@ -35,39 +32,3 @@ pub fn create_test_provider_factory_with_chain_spec( StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), ) } - -/// Inserts the genesis alloc from the provided chain spec into the trie. -pub fn insert_genesis( - provider_factory: &ProviderFactory, - chain_spec: Arc, -) -> ProviderResult { - let provider = provider_factory.provider_rw()?; - - // Hash accounts and insert them into hashing table. - let genesis = chain_spec.genesis(); - let alloc_accounts = genesis - .alloc - .iter() - .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); - provider.insert_account_for_hashing(alloc_accounts).unwrap(); - - let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { - // Only return `Some` if there is storage. - account.storage.map(|storage| { - ( - addr, - storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }), - ) - }) - }); - provider.insert_storage_for_hashing(alloc_storage)?; - - let (root, updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .map_err(Into::::into)?; - provider.write_trie_updates(&updates).unwrap(); - - provider.commit()?; - - Ok(root) -} diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 1f0dfb4d161e..877c4d7afca2 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -3,7 +3,6 @@ use std::{ sync::Arc, }; -use reth_chain_state::{CanonStateNotifications, CanonStateSubscriptions}; use reth_chainspec::{ChainInfo, ChainSpec, MAINNET}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; @@ -25,10 +24,11 @@ use crate::{ providers::StaticFileProvider, traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, - StateProviderFactory, StateRootProvider, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, HeaderProvider, PruneCheckpointReader, ReceiptProviderIdExt, RequestsProvider, + StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, + StateRootProvider, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; /// Supports various api interfaces for testing purposes. @@ -442,10 +442,6 @@ impl StageCheckpointReader for NoopProvider { fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { Ok(None) } - - fn get_all_checkpoints(&self) -> ProviderResult> { - Ok(Vec::new()) - } } impl WithdrawalsProvider for NoopProvider { diff --git a/crates/chain-state/src/notifications.rs b/crates/storage/provider/src/traits/chain.rs similarity index 98% rename from crates/chain-state/src/notifications.rs rename to crates/storage/provider/src/traits/chain.rs index d0279b5bc80b..878e67a9f237 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/storage/provider/src/traits/chain.rs @@ -1,8 +1,8 @@ //! Canonical chain state notification trait and types. 
+use crate::{BlockReceipts, Chain}; use auto_impl::auto_impl; use derive_more::{Deref, DerefMut}; -use reth_execution_types::{BlockReceipts, Chain}; use reth_primitives::{SealedBlockWithSenders, SealedHeader}; use std::{ pin::Pin, @@ -61,7 +61,7 @@ impl Stream for CanonStateNotificationStream { } /// Chain action that is triggered when a new block is imported or old block is reverted. -/// and will return all `ExecutionOutcome` and +/// and will return all [`crate::ExecutionOutcome`] and /// [`reth_primitives::SealedBlockWithSenders`] of both reverted and committed blocks. #[derive(Clone, Debug)] pub enum CanonStateNotification { diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index f47bd3efd2e3..c53150560d3a 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -1,11 +1,10 @@ //! Helper provider traits to encapsulate all provider traits for simplicity. use crate::{ - AccountReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, - EvmEnvProvider, HeaderProvider, StageCheckpointReader, StateProviderFactory, - StaticFileProviderFactory, TransactionsProvider, + AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, StageCheckpointReader, + StateProviderFactory, StaticFileProviderFactory, TransactionsProvider, }; -use reth_chain_state::CanonStateSubscriptions; use reth_db_api::database::Database; /// Helper trait to unify all provider traits for simplicity. diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c89815a9f670..466a9e2908d0 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -16,7 +16,14 @@ mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::{StateChangeWriter, StateWriter}; +pub use state::StateWriter; + +mod chain; +pub use chain::{ + CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, + CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceStream, + ForkChoiceSubscriptions, +}; mod spec; pub use spec::ChainSpecProvider; @@ -24,9 +31,6 @@ pub use spec::ChainSpecProvider; mod hashing; pub use hashing::HashingWriter; -mod trie; -pub use trie::{StorageTrieWriter, TrieWriter}; - mod history; pub use history::HistoryWriter; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index eec2ee11ff7b..b445892a060a 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,33 +1,19 @@ -use reth_execution_types::ExecutionOutcome; -use reth_primitives::BlockNumber; +use crate::{providers::StaticFileProviderRWRefMut, DatabaseProviderRW}; +use reth_db::Database; use reth_storage_errors::provider::ProviderResult; -use revm::db::{ - states::{PlainStateReverts, StateChangeset}, - OriginalValuesKnown, -}; +use revm::db::OriginalValuesKnown; -/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. +/// A helper trait for [`ExecutionOutcome`](reth_execution_types::ExecutionOutcome) to +/// write state and receipts to storage. pub trait StateWriter { /// Write the data and receipts to the database or static files if `static_file_producer` is /// `Some`. 
It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, + fn write_to_storage( + self, + provider_rw: &DatabaseProviderRW, + static_file_producer: Option>, is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()>; -} - -/// A trait specifically for writing state changes or reverts -pub trait StateChangeWriter { - /// Write state reverts to the database. - /// - /// NOTE: Reverts will delete all wiped storage from plain state. - fn write_state_reverts( - &self, - reverts: PlainStateReverts, - first_block: BlockNumber, - ) -> ProviderResult<()>; - - /// Write state changes to the database. - fn write_state_changes(&self, changes: StateChangeset) -> ProviderResult<()>; + ) -> ProviderResult<()> + where + DB: Database; } diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs index f75dbae24d22..a8eea44a6928 100644 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ b/crates/storage/provider/src/traits/tree_viewer.rs @@ -1,6 +1,5 @@ -use crate::BlockchainTreePendingStateProvider; +use crate::{BlockchainTreePendingStateProvider, CanonStateSubscriptions}; use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; -use reth_chain_state::CanonStateSubscriptions; /// Helper trait to combine all the traits we need for the `BlockchainProvider` /// diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs deleted file mode 100644 index 960af93c8547..000000000000 --- a/crates/storage/provider/src/traits/trie.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::collections::HashMap; - -use auto_impl::auto_impl; -use reth_primitives::B256; -use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::{StorageTrieUpdates, TrieUpdates}; - -/// Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait TrieWriter: Send + Sync { - /// Writes trie updates to the database. - /// - /// Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; -} - -/// Storage Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait StorageTrieWriter: Send + Sync { - /// Writes storage trie updates from the given storage trie map. - /// - /// First sorts the storage trie updates by the hashed address key, writing in sorted order. - /// - /// Returns the number of entries modified. - fn write_storage_trie_updates( - &self, - storage_tries: &HashMap, - ) -> ProviderResult; - - /// Writes storage trie updates for the given hashed address. 
- fn write_individual_storage_trie_updates( - &self, - hashed_address: B256, - updates: &StorageTrieUpdates, - ) -> ProviderResult; -} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index eb21e90991d7..f5a3554d3b6b 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,7 +1,4 @@ -use crate::{ - providers::StaticFileProviderRWRefMut, DatabaseProvider, DatabaseProviderRO, - DatabaseProviderRW, StateChangeWriter, StateWriter, TrieWriter, -}; +use crate::{providers::StaticFileProviderRWRefMut, DatabaseProviderRW}; use itertools::Itertools; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, @@ -10,16 +7,11 @@ use reth_db::{ Database, }; use reth_errors::{ProviderError, ProviderResult}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - BlockNumber, Header, StaticFileSegment, StorageEntry, TransactionSignedNoHash, B256, U256, -}; +use reth_primitives::{BlockNumber, StorageEntry, U256}; use reth_storage_api::ReceiptWriter; use reth_storage_errors::writer::StorageWriterError; -use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -use revm::db::OriginalValuesKnown; +use reth_trie::HashedPostStateSorted; use static_file::StaticFileWriter; -use std::borrow::Borrow; mod database; mod static_file; @@ -33,24 +25,29 @@ enum StorageType { /// [`StorageWriter`] is responsible for managing the writing to either database, static file or /// both. #[derive(Debug)] -pub struct StorageWriter<'a, 'b, TX> { - database_writer: Option<&'a DatabaseProvider>, +pub struct StorageWriter<'a, 'b, DB: Database> { + database_writer: Option<&'a DatabaseProviderRW>, static_file_writer: Option>, } -impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> { +impl<'a, 'b, DB: Database> StorageWriter<'a, 'b, DB> { /// Creates a new instance of [`StorageWriter`]. /// /// # Parameters /// - `database_writer`: An optional reference to a database writer. /// - `static_file_writer`: An optional mutable reference to a static file writer. pub const fn new( - database_writer: Option<&'a DatabaseProvider>, + database_writer: Option<&'a DatabaseProviderRW>, static_file_writer: Option>, ) -> Self { Self { database_writer, static_file_writer } } + /// Creates a new instance of [`StorageWriter`] from a database writer. + pub const fn from_database_writer(database_writer: &'a DatabaseProviderRW) -> Self { + Self::new(Some(database_writer), None) + } + /// Creates a new instance of [`StorageWriter`] from a static file writer. pub const fn from_static_file_writer( static_file_writer: StaticFileProviderRWRefMut<'b>, @@ -58,31 +55,11 @@ impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> { Self::new(None, Some(static_file_writer)) } - /// Creates a new instance of [`StorageWriter`] from a read-only database provider. - pub const fn from_database_provider_ro( - database: &'a DatabaseProviderRO, - ) -> StorageWriter<'_, '_, ::TX> - where - DB: Database, - { - StorageWriter::new(Some(database), None) - } - - /// Creates a new instance of [`StorageWriter`] from a read-write database provider. - pub fn from_database_provider_rw( - database: &'a DatabaseProviderRW, - ) -> StorageWriter<'_, '_, ::TXMut> - where - DB: Database, - { - StorageWriter::new(Some(database), None) - } - /// Returns a reference to the database writer. /// /// # Panics /// If the database writer is not set. 
- fn database_writer(&self) -> &DatabaseProvider { + fn database_writer(&self) -> &DatabaseProviderRW { self.database_writer.as_ref().expect("should exist") } @@ -117,108 +94,7 @@ impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> { } Ok(()) } -} - -impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> -where - TX: DbTx, -{ - /// Appends headers to static files, using the - /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the - /// total difficulty of the parent block during header insertion. - /// - /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for - /// the Headers segment. - pub fn append_headers_from_blocks( - &mut self, - initial_block_number: BlockNumber, - headers: impl Iterator, - ) -> ProviderResult<()> - where - I: Borrow<(H, B256)>, - H: Borrow
, - { - self.ensure_database_writer()?; - self.ensure_static_file_writer()?; - let mut td_cursor = - self.database_writer().tx_ref().cursor_read::()?; - - let first_td = if initial_block_number == 0 { - U256::ZERO - } else { - td_cursor - .seek_exact(initial_block_number - 1)? - .map(|(_, td)| td.0) - .ok_or_else(|| ProviderError::TotalDifficultyNotFound(initial_block_number))? - }; - - for pair in headers { - let (header, hash) = pair.borrow(); - let header = header.borrow(); - let td = first_td + header.difficulty; - self.static_file_writer().append_header(header, td, hash)?; - } - - Ok(()) - } - - /// Appends transactions to static files, using the - /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number - /// when appending to static files. - /// - /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for - /// the Transactions segment. - pub fn append_transactions_from_blocks( - &mut self, - initial_block_number: BlockNumber, - transactions: impl Iterator, - ) -> ProviderResult<()> - where - T: Borrow>, - { - self.ensure_database_writer()?; - self.ensure_static_file_writer()?; - - let mut bodies_cursor = - self.database_writer().tx_ref().cursor_read::()?; - - let mut last_tx_idx = None; - for (idx, transactions) in transactions.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - let mut tx_index = first_tx_index - .or(last_tx_idx) - .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; - - for tx in transactions.borrow() { - self.static_file_writer().append_transaction(tx_index, tx)?; - tx_index += 1; - } - - self.static_file_writer() - .increment_block(StaticFileSegment::Transactions, block_number)?; - - // update index - last_tx_idx = Some(tx_index); - } - Ok(()) - } -} - -impl<'a, 'b, TX> StorageWriter<'a, 'b, TX> -where - TX: DbTxMut + DbTx, -{ /// Writes the hashed state changes to the database pub fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()> { self.ensure_database_writer()?; @@ -253,7 +129,7 @@ where } } - if !entry.value.is_zero() { + if entry.value != U256::ZERO { hashed_storage_cursor.upsert(*hashed_address, entry)?; } } @@ -267,15 +143,12 @@ where /// ATTENTION: If called from [`StorageWriter`] without a static file producer, it will always /// write them to database. Otherwise, it will look into the pruning configuration to decide. /// - /// NOTE: The static file writer used to construct this [`StorageWriter`] MUST be a writer for - /// the Receipts segment. - /// /// # Parameters /// - `initial_block_number`: The starting block number. /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If /// `receipt` is `None`, it has been pruned. 
pub fn append_receipts_from_blocks( - &mut self, + mut self, initial_block_number: BlockNumber, blocks: impl Iterator>>, ) -> ProviderResult<()> { @@ -298,27 +171,14 @@ where StorageType::StaticFile(self.static_file_writer()) }; - let mut last_tx_idx = None; for (idx, receipts) in blocks.enumerate() { let block_number = initial_block_number + idx as u64; - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let first_tx_index = first_tx_index - .or(last_tx_idx) + let first_tx_index = bodies_cursor + .seek_exact(block_number)? + .map(|(_, indices)| indices.first_tx_num()) .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(block_number))?; - // update for empty blocks - last_tx_idx = Some(first_tx_index); - match &mut storage_type { StorageType::Database(cursor) => { DatabaseWriter(cursor).append_block_receipts( @@ -339,70 +199,15 @@ where Ok(()) } - - /// Writes trie updates. Returns the number of entries modified. - pub fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { - self.ensure_database_writer()?; - self.database_writer().write_trie_updates(trie_updates) - } -} - -impl<'a, 'b, TX> StateWriter for StorageWriter<'a, 'b, TX> -where - TX: DbTxMut + DbTx, -{ - /// Write the data and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - self.ensure_database_writer()?; - let (plain_state, reverts) = - execution_outcome.bundle.into_plain_state_and_reverts(is_value_known); - - self.database_writer().write_state_reverts(reverts, execution_outcome.first_block)?; - - self.append_receipts_from_blocks( - execution_outcome.first_block, - execution_outcome.receipts.into_iter(), - )?; - - self.database_writer().write_state_changes(plain_state)?; - - Ok(()) - } } #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::create_test_provider_factory, AccountReader, TrieWriter}; - use reth_db::tables; - use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO}, - models::{AccountBeforeTx, BlockNumberAddress}, - transaction::{DbTx, DbTxMut}, - }; - use reth_primitives::{ - keccak256, Account, Address, Receipt, Receipts, StorageEntry, B256, U256, - }; - use reth_trie::{test_utils::state_root, HashedPostState, HashedStorage, StateRoot}; - use reth_trie_db::DatabaseStateRoot; - use revm::{ - db::{ - states::{ - bundle_state::BundleRetention, changes::PlainStorageRevert, PlainStorageChangeset, - }, - BundleState, EmptyDB, - }, - primitives::{ - Account as RevmAccount, AccountInfo as RevmAccountInfo, AccountStatus, EvmStorageSlot, - }, - DatabaseCommit, State, - }; - use std::collections::{BTreeMap, HashMap}; + use crate::test_utils::create_test_provider_factory; + use reth_db_api::transaction::DbTx; + use reth_primitives::{keccak256, Account, Address, B256}; + use reth_trie::{HashedPostState, HashedStorage}; #[test] fn wiped_entries_are_removed() { @@ -455,993 +260,4 @@ mod tests { Ok(None) ); } - - #[test] - fn write_to_db_account_info() { - let factory = create_test_provider_factory(); - let provider = 
factory.provider_rw().unwrap(); - - let address_a = Address::ZERO; - let address_b = Address::repeat_byte(0xff); - - let account_a = RevmAccountInfo { balance: U256::from(1), nonce: 1, ..Default::default() }; - let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; - let account_b_changed = - RevmAccountInfo { balance: U256::from(3), nonce: 3, ..Default::default() }; - - let mut state = State::builder().with_bundle_update().build(); - state.insert_not_existing(address_a); - state.insert_account(address_b, account_b.clone()); - - // 0x00.. is created - state.commit(HashMap::from([( - address_a, - RevmAccount { - info: account_a.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - storage: HashMap::default(), - }, - )])); - - // 0xff.. is changed (balance + 1, nonce + 1) - state.commit(HashMap::from([( - address_b, - RevmAccount { - info: account_b_changed.clone(), - status: AccountStatus::Touched, - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - let mut revm_bundle_state = state.take_bundle(); - - // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); - assert!(plain_state.storage.is_empty()); - assert!(plain_state.contracts.is_empty()); - provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); - - assert_eq!(reverts.storage, [[]]); - provider.write_state_reverts(reverts, 1).expect("Could not write reverts to DB"); - - let reth_account_a = account_a.into(); - let reth_account_b = account_b.into(); - let reth_account_b_changed = account_b_changed.clone().into(); - - // Check plain state - assert_eq!( - provider.basic_account(address_a).expect("Could not read account state"), - Some(reth_account_a), - "Account A state is wrong" - ); - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - Some(reth_account_b_changed), - "Account B state is wrong" - ); - - // Check change set - let mut changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(1).expect("Could not read account change set"), - Some((1, AccountBeforeTx { address: address_a, info: None })), - "Account A changeset is wrong" - ); - assert_eq!( - changeset_cursor.next_dup().expect("Changeset table is malformed"), - Some((1, AccountBeforeTx { address: address_b, info: Some(reth_account_b) })), - "Account B changeset is wrong" - ); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account(address_b, account_b_changed.clone()); - - // 0xff.. is destroyed - state.commit(HashMap::from([( - address_b, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_b_changed, - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - let mut revm_bundle_state = state.take_bundle(); - - // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); - // Account B selfdestructed so flag for it should be present. 
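// The selfdestruct shows up in two places: the plain-state changeset carries
// `wipe_storage: true` (clearing account B's plain storage), while the revert
// carries `wiped: true` so an unwind can restore the wiped entries.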
- assert_eq!( - plain_state.storage, - [PlainStorageChangeset { address: address_b, wipe_storage: true, storage: vec![] }] - ); - assert!(plain_state.contracts.is_empty()); - provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); - - assert_eq!( - reverts.storage, - [[PlainStorageRevert { address: address_b, wiped: true, storage_revert: vec![] }]] - ); - provider.write_state_reverts(reverts, 2).expect("Could not write reverts to DB"); - - // Check new plain state for account B - assert_eq!( - provider.basic_account(address_b).expect("Could not read account state"), - None, - "Account B should be deleted" - ); - - // Check change set - assert_eq!( - changeset_cursor.seek_exact(2).expect("Could not read account change set"), - Some((2, AccountBeforeTx { address: address_b, info: Some(reth_account_b_changed) })), - "Account B changeset is wrong after deletion" - ); - } - - #[test] - fn write_to_db_storage() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address_a = Address::ZERO; - let address_b = Address::repeat_byte(0xff); - - let account_b = RevmAccountInfo { balance: U256::from(2), nonce: 2, ..Default::default() }; - - let mut state = State::builder().with_bundle_update().build(); - state.insert_not_existing(address_a); - state.insert_account_with_storage( - address_b, - account_b.clone(), - HashMap::from([(U256::from(1), U256::from(1))]), - ); - - state.commit(HashMap::from([ - ( - address_a, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: RevmAccountInfo::default(), - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::from(0), - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - ), - ( - address_b, - RevmAccount { - status: AccountStatus::Touched, - info: account_b, - // 0x01 => 1 => 2 - storage: HashMap::from([( - U256::from(1), - EvmStorageSlot { - present_value: U256::from(2), - original_value: U256::from(1), - ..Default::default() - }, - )]), - }, - ), - ])); - - state.merge_transitions(BundleRetention::Reverts); - - let outcome = - ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - // Check plain storage state - let mut storage_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - Some((address_a, StorageEntry { key: B256::ZERO, value: U256::from(1) })), - "Slot 0 for account A should be 1" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - Some(( - address_a, - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account A should only have 2 storage slots" - ); - - assert_eq!( - storage_cursor.seek_exact(address_b).unwrap(), - Some(( - address_b, - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account B should be 2" - ); - assert_eq!( - storage_cursor.next_dup().unwrap(), - None, - "Account B should only have 1 storage slot" - ); - - // Check change set - let mut 
changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open storage changeset cursor"); - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: B256::ZERO, value: U256::from(0) } - )), - "Slot 0 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((1, address_a)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(0) } - )), - "Slot 1 for account A should have changed from 0" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((1, address_b))).unwrap(), - Some(( - BlockNumberAddress((1, address_b)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(1) } - )), - "Slot 1 for account B should have changed from 1" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account B should only be in the changeset 1 time" - ); - - // Delete account A - let mut state = State::builder().with_bundle_update().build(); - state.insert_account(address_a, RevmAccountInfo::default()); - - state.commit(HashMap::from([( - address_a, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: RevmAccountInfo::default(), - storage: HashMap::default(), - }, - )])); - - state.merge_transitions(BundleRetention::Reverts); - let outcome = - ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - assert_eq!( - storage_cursor.seek_exact(address_a).unwrap(), - None, - "Account A should have no storage slots after deletion" - ); - - assert_eq!( - changeset_cursor.seek_exact(BlockNumberAddress((2, address_a))).unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: B256::ZERO, value: U256::from(1) } - )), - "Slot 0 for account A should have changed from 1 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - Some(( - BlockNumberAddress((2, address_a)), - StorageEntry { key: B256::from(U256::from(1).to_be_bytes()), value: U256::from(2) } - )), - "Slot 1 for account A should have changed from 2 on deletion" - ); - assert_eq!( - changeset_cursor.next_dup().unwrap(), - None, - "Account A should only be in the changeset 2 times on deletion" - ); - } - - #[test] - fn write_to_db_multiple_selfdestructs() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address1 = Address::random(); - let account_info = RevmAccountInfo { nonce: 1, ..Default::default() }; - - // Block #0: initial state. 
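// Block #0 seeds slots 0x00 and 0x01 so the destroy/re-create blocks below have
// pre-state to revert to; its own changeset entries record zero "before" values.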
- let mut init_state = State::builder().with_bundle_update().build(); - init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( - address1, - RevmAccount { - info: account_info.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - )])); - init_state.merge_transitions(BundleRetention::Reverts); - - let outcome = - ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account_with_storage( - address1, - account_info.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), - ); - - // Block #1: change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info.clone(), - // 0x00 => 1 => 2 - storage: HashMap::from([( - U256::ZERO, - EvmStorageSlot { - original_value: U256::from(1), - present_value: U256::from(2), - ..Default::default() - }, - )]), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #2: destroy account. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #3: re-create account and change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #4: change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info.clone(), - // 0x00 => 0 => 2 - // 0x02 => 0 => 4 - // 0x06 => 0 => 6 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ( - U256::from(2), - EvmStorageSlot { present_value: U256::from(4), ..Default::default() }, - ), - ( - U256::from(6), - EvmStorageSlot { present_value: U256::from(6), ..Default::default() }, - ), - ]), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #5: Destroy account again. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #6: Create, change, destroy and re-create in the same block. 
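// All four transitions below happen within a single block, so `merge_transitions`
// collapses them and block #6 ends up contributing no storage changeset entries
// (asserted further down).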
- state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info.clone(), - // 0x00 => 0 => 2 - storage: HashMap::from([( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - )]), - }, - )])); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account_info.clone(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - // Block #7: Change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account_info, - // 0x00 => 0 => 9 - storage: HashMap::from([( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(9), ..Default::default() }, - )]), - }, - )])); - state.merge_transitions(BundleRetention::Reverts); - - let bundle = state.take_bundle(); - - let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - let mut storage_changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap(); - - // Iterate through all storage changes - - // Block - // : - // ... 
- - // Block #0 - // 0x00: 0 - // 0x01: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((0, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((0, address1)), - StorageEntry { key: B256::with_last_byte(1), value: U256::ZERO } - ))) - ); - - // Block #1 - // 0x00: 1 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((1, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) } - ))) - ); - - // Block #2 (destroyed) - // 0x00: 2 - // 0x01: 2 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((2, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((2, address1)), - StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) } - ))) - ); - - // Block #3 - // no storage changes - - // Block #4 - // 0x00: 0 - // 0x02: 0 - // 0x06: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: B256::with_last_byte(2), value: U256::ZERO } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((4, address1)), - StorageEntry { key: B256::with_last_byte(6), value: U256::ZERO } - ))) - ); - - // Block #5 (destroyed) - // 0x00: 2 - // 0x02: 4 - // 0x06: 6 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::from(2) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: B256::with_last_byte(2), value: U256::from(4) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((5, address1)), - StorageEntry { key: B256::with_last_byte(6), value: U256::from(6) } - ))) - ); - - // Block #6 - // no storage changes (only inter block changes) - - // Block #7 - // 0x00: 0 - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((7, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::ZERO } - ))) - ); - assert_eq!(storage_changes.next(), None); - } - - #[test] - fn storage_change_after_selfdestruct_within_block() { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - - let address1 = Address::random(); - let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; - - // Block #0: initial state. 
- let mut init_state = State::builder().with_bundle_update().build(); - init_state.insert_not_existing(address1); - init_state.commit(HashMap::from([( - address1, - RevmAccount { - info: account1.clone(), - status: AccountStatus::Touched | AccountStatus::Created, - // 0x00 => 0 => 1 - // 0x01 => 0 => 2 - storage: HashMap::from([ - ( - U256::ZERO, - EvmStorageSlot { present_value: U256::from(1), ..Default::default() }, - ), - ( - U256::from(1), - EvmStorageSlot { present_value: U256::from(2), ..Default::default() }, - ), - ]), - }, - )])); - init_state.merge_transitions(BundleRetention::Reverts); - let outcome = - ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - let mut state = State::builder().with_bundle_update().build(); - state.insert_account_with_storage( - address1, - account1.clone(), - HashMap::from([(U256::ZERO, U256::from(1)), (U256::from(1), U256::from(2))]), - ); - - // Block #1: Destroy, re-create, change storage. - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::SelfDestructed, - info: account1.clone(), - storage: HashMap::default(), - }, - )])); - - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1.clone(), - storage: HashMap::default(), - }, - )])); - - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched, - info: account1, - // 0x01 => 0 => 5 - storage: HashMap::from([( - U256::from(1), - EvmStorageSlot { present_value: U256::from(5), ..Default::default() }, - )]), - }, - )])); - - // Commit block #1 changes to the database. 
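// Even though block #1 destroys and re-creates the account before writing slot
// 0x01, its changeset must still record the pre-destroy values (0x00 => 1,
// 0x01 => 2), which the walk over `BlockNumberAddress::range(1..=1)` below asserts.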
- state.merge_transitions(BundleRetention::Reverts); - let outcome = - ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = StorageWriter::new(Some(&provider), None); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) - .expect("Could not write bundle state to DB"); - - let mut storage_changeset_cursor = provider - .tx_ref() - .cursor_dup_read::() - .expect("Could not open plain storage state cursor"); - let range = BlockNumberAddress::range(1..=1); - let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap(); - - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((1, address1)), - StorageEntry { key: B256::with_last_byte(0), value: U256::from(1) } - ))) - ); - assert_eq!( - storage_changes.next(), - Some(Ok(( - BlockNumberAddress((1, address1)), - StorageEntry { key: B256::with_last_byte(1), value: U256::from(2) } - ))) - ); - assert_eq!(storage_changes.next(), None); - } - - #[test] - fn revert_to_indices() { - let base = ExecutionOutcome { - bundle: BundleState::default(), - receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(), - first_block: 10, - requests: Vec::new(), - }; - - let mut this = base.clone(); - assert!(this.revert_to(10)); - assert_eq!(this.receipts.len(), 1); - - let mut this = base.clone(); - assert!(!this.revert_to(9)); - assert_eq!(this.receipts.len(), 7); - - let mut this = base.clone(); - assert!(this.revert_to(15)); - assert_eq!(this.receipts.len(), 6); - - let mut this = base.clone(); - assert!(this.revert_to(16)); - assert_eq!(this.receipts.len(), 7); - - let mut this = base; - assert!(!this.revert_to(17)); - assert_eq!(this.receipts.len(), 7); - } - - #[test] - fn bundle_state_state_root() { - type PreState = BTreeMap)>; - let mut prestate: PreState = (0..10) - .map(|key| { - let account = Account { nonce: 1, balance: U256::from(key), bytecode_hash: None }; - let storage = - (1..11).map(|key| (B256::with_last_byte(key), U256::from(key))).collect(); - (Address::with_last_byte(key), (account, storage)) - }) - .collect(); - - let provider_factory = create_test_provider_factory(); - let provider_rw = provider_factory.provider_rw().unwrap(); - - // insert initial state to the database - let tx = provider_rw.tx_ref(); - for (address, (account, storage)) in &prestate { - let hashed_address = keccak256(address); - tx.put::(hashed_address, *account).unwrap(); - for (slot, value) in storage { - tx.put::( - hashed_address, - StorageEntry { key: keccak256(slot), value: *value }, - ) - .unwrap(); - } - } - - let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); - - let mut state = State::builder().with_bundle_update().build(); - - let assert_state_root = |state: &State, expected: &PreState, msg| { - assert_eq!( - StateRoot::overlay_root( - tx, - ExecutionOutcome::new( - state.bundle_state.clone(), - Receipts::default(), - 0, - Vec::new() - ) - .hash_state_slow() - ) - .unwrap(), - state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( - address, - (account, storage.into_iter()) - ))), - "{msg}" - ); - }; - - // database only state root is correct - assert_state_root(&state, &prestate, "empty"); - - // destroy account 1 - let address1 = Address::with_last_byte(1); - let account1_old = prestate.remove(&address1).unwrap(); - state.insert_account(address1, account1_old.0.into()); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | 
AccountStatus::SelfDestructed, - info: RevmAccountInfo::default(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "destroyed account"); - - // change slot 2 in account 2 - let address2 = Address::with_last_byte(2); - let slot2 = U256::from(2); - let slot2_key = B256::from(slot2); - let account2 = prestate.get_mut(&address2).unwrap(); - let account2_slot2_old_value = *account2.1.get(&slot2_key).unwrap(); - state.insert_account_with_storage( - address2, - account2.0.into(), - HashMap::from([(slot2, account2_slot2_old_value)]), - ); - - let account2_slot2_new_value = U256::from(100); - account2.1.insert(slot2_key, account2_slot2_new_value); - state.commit(HashMap::from([( - address2, - RevmAccount { - status: AccountStatus::Touched, - info: account2.0.into(), - storage: HashMap::from_iter([( - slot2, - EvmStorageSlot::new_changed(account2_slot2_old_value, account2_slot2_new_value), - )]), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed storage"); - - // change balance of account 3 - let address3 = Address::with_last_byte(3); - let account3 = prestate.get_mut(&address3).unwrap(); - state.insert_account(address3, account3.0.into()); - - account3.0.balance = U256::from(24); - state.commit(HashMap::from([( - address3, - RevmAccount { - status: AccountStatus::Touched, - info: account3.0.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed balance"); - - // change nonce of account 4 - let address4 = Address::with_last_byte(4); - let account4 = prestate.get_mut(&address4).unwrap(); - state.insert_account(address4, account4.0.into()); - - account4.0.nonce = 128; - state.commit(HashMap::from([( - address4, - RevmAccount { - status: AccountStatus::Touched, - info: account4.0.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "changed nonce"); - - // recreate account 1 - let account1_new = - Account { nonce: 56, balance: U256::from(123), bytecode_hash: Some(B256::random()) }; - prestate.insert(address1, (account1_new, BTreeMap::default())); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1_new.into(), - storage: HashMap::default(), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "recreated"); - - // update storage for account 1 - let slot20 = U256::from(20); - let slot20_key = B256::from(slot20); - let account1_slot20_value = U256::from(12345); - prestate.get_mut(&address1).unwrap().1.insert(slot20_key, account1_slot20_value); - state.commit(HashMap::from([( - address1, - RevmAccount { - status: AccountStatus::Touched | AccountStatus::Created, - info: account1_new.into(), - storage: HashMap::from_iter([( - slot20, - EvmStorageSlot::new_changed(U256::ZERO, account1_slot20_value), - )]), - }, - )])); - state.merge_transitions(BundleRetention::PlainState); - assert_state_root(&state, &prestate, "recreated changed storage"); - } - - #[test] - fn prepend_state() { - let address1 = Address::random(); - let address2 = Address::random(); - - let account1 = RevmAccountInfo { nonce: 1, ..Default::default() }; - let account1_changed = RevmAccountInfo { nonce: 1, ..Default::default() }; - let account2 = RevmAccountInfo 
{ nonce: 1, ..Default::default() }; - - let present_state = BundleState::builder(2..=2) - .state_present_account_info(address1, account1_changed.clone()) - .build(); - assert_eq!(present_state.reverts.len(), 1); - let previous_state = BundleState::builder(1..=1) - .state_present_account_info(address1, account1) - .state_present_account_info(address2, account2.clone()) - .build(); - assert_eq!(previous_state.reverts.len(), 1); - - let mut test = ExecutionOutcome { - bundle: present_state, - receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), - first_block: 2, - requests: Vec::new(), - }; - - test.prepend_state(previous_state); - - assert_eq!(test.receipts.len(), 1); - let end_state = test.state(); - assert_eq!(end_state.state.len(), 2); - // reverts num should stay the same. - assert_eq!(end_state.reverts.len(), 1); - // account1 is not overwritten. - assert_eq!(end_state.state.get(&address1).unwrap().info, Some(account1_changed)); - // account2 got inserted - assert_eq!(end_state.state.get(&address2).unwrap().info, Some(account2)); - } } diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs index 54d9bf5b98a5..b31b7dabd311 100644 --- a/crates/storage/provider/src/writer/static_file.rs +++ b/crates/storage/provider/src/writer/static_file.rs @@ -14,12 +14,10 @@ impl<'a> ReceiptWriter for StaticFileWriter<'a, StaticFileProviderRWRefMut<'_>> ) -> ProviderResult<()> { // Increment block on static file header. self.0.increment_block(StaticFileSegment::Receipts, block_number)?; - let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| { + let receipts = receipts.into_iter().enumerate().map(|(tx_idx, receipt)| { Ok(( first_tx_index + tx_idx as u64, - receipt - .as_ref() - .expect("receipt should not be filtered when saving to static files."), + receipt.expect("receipt should not be filtered when saving to static files."), )) }); self.0.append_receipts(receipts)?; diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index fe97fb3713de..3dc22de8ae4f 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -23,10 +23,10 @@ pub enum BlockSource { #[default] Any, /// The block was fetched from the pending block source, the blockchain tree that buffers - /// blocks that are not yet part of the canonical chain. + /// blocks that are not yet finalized. Pending, - /// The block must be part of the canonical chain. - Canonical, + /// The block was fetched from the database. + Database, } impl BlockSource { @@ -35,9 +35,9 @@ impl BlockSource { matches!(self, Self::Pending | Self::Any) } - /// Returns `true` if the block source is `Canonical` or `Any`. - pub const fn is_canonical(&self) -> bool { - matches!(self, Self::Canonical | Self::Any) + /// Returns `true` if the block source is `Database` or `Any`. + pub const fn is_database(&self) -> bool { + matches!(self, Self::Database | Self::Any) } } diff --git a/crates/storage/storage-api/src/stage_checkpoint.rs b/crates/storage/storage-api/src/stage_checkpoint.rs index d59f3dfb2554..3815239be2c2 100644 --- a/crates/storage/storage-api/src/stage_checkpoint.rs +++ b/crates/storage/storage-api/src/stage_checkpoint.rs @@ -10,10 +10,6 @@ pub trait StageCheckpointReader: Send + Sync { /// Get stage checkpoint progress. fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>>; - - /// Reads all stage checkpoints and returns a list with the name of the stage and the checkpoint - /// data. 
- fn get_all_checkpoints(&self) -> ProviderResult>; } /// The trait for updating stage checkpoint related data. diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 459784b61017..77edd6f3e541 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-eth-wire-types.workspace = true -reth-primitives = { workspace = true, features = ["c-kzg"] } +reth-primitives.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-provider.workspace = true diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 3812016fffc8..da5d5a828cbf 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -38,6 +38,7 @@ arbitrary = { workspace = true, features = ["derive"] } assert_matches.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true serde_json.workspace = true test-fuzz.workspace = true toml.workspace = true diff --git a/crates/trie/common/src/account.rs b/crates/trie/common/src/account.rs index 269202601182..64860ab78b31 100644 --- a/crates/trie/common/src/account.rs +++ b/crates/trie/common/src/account.rs @@ -35,7 +35,7 @@ impl From for TrieAccount { storage_root_unhashed( storage .into_iter() - .filter(|(_, value)| !value.is_zero()) + .filter(|(_, value)| *value != B256::ZERO) .map(|(slot, value)| (slot, U256::from_be_bytes(*value))), ) }) diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bdec36028b94..ee19b7ed91a2 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -26,7 +26,7 @@ pub use subnode::StoredSubNode; mod proofs; #[cfg(any(test, feature = "test-utils"))] pub use proofs::triehash; -pub use proofs::*; +pub use proofs::{AccountProof, StorageProof}; pub mod root; diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 8fa72e2395ae..11953a48decf 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -2,121 +2,12 @@ use crate::{Nibbles, TrieAccount}; use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; -use alloy_rlp::{encode_fixed_size, Decodable}; +use alloy_rlp::encode_fixed_size; use alloy_trie::{ - nodes::TrieNode, proof::{verify_proof, ProofVerificationError}, EMPTY_ROOT_HASH, }; -use reth_primitives_traits::{constants::KECCAK_EMPTY, Account}; -use std::collections::{BTreeMap, HashMap}; - -/// The state multiproof of target accounts and multiproofs of their storage tries. -#[derive(Clone, Default, Debug)] -pub struct MultiProof { - /// State trie multiproof for requested accounts. - pub account_subtree: BTreeMap, - /// Storage trie multiproofs. - pub storage_multiproofs: HashMap, -} - -impl MultiProof { - /// Construct the account proof from the multiproof. - pub fn account_proof( - &self, - address: Address, - slots: &[B256], - ) -> Result { - let hashed_address = keccak256(address); - let nibbles = Nibbles::unpack(hashed_address); - - // Retrieve the account proof. - let proof = self - .account_subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) - .map(|(_, node)| node.clone()) - .collect::>(); - - // Inspect the last node in the proof. If it's a leaf node with matching suffix, - // then the node contains the encoded trie account. - let info = 'info: { - if let Some(last) = proof.last() { - if let TrieNode::Leaf(leaf) = TrieNode::decode(&mut &last[..])? 
{ - if nibbles.ends_with(&leaf.key) { - let account = TrieAccount::decode(&mut &leaf.value[..])?; - break 'info Some(Account { - balance: account.balance, - nonce: account.nonce, - bytecode_hash: (account.code_hash != KECCAK_EMPTY) - .then_some(account.code_hash), - }) - } - } - } - None - }; - - // Retrieve proofs for requested storage slots. - let storage_multiproof = self.storage_multiproofs.get(&hashed_address); - let storage_root = storage_multiproof.map(|m| m.root).unwrap_or(EMPTY_ROOT_HASH); - let mut storage_proofs = Vec::with_capacity(slots.len()); - for slot in slots { - let proof = if let Some(multiproof) = &storage_multiproof { - multiproof.storage_proof(*slot)? - } else { - StorageProof::new(*slot) - }; - storage_proofs.push(proof); - } - Ok(AccountProof { address, info, proof, storage_root, storage_proofs }) - } -} - -/// The merkle multiproof of storage trie. -#[derive(Clone, Debug)] -pub struct StorageMultiProof { - /// Storage trie root. - pub root: B256, - /// Storage multiproof for requested slots. - pub subtree: BTreeMap, -} - -impl Default for StorageMultiProof { - fn default() -> Self { - Self { root: EMPTY_ROOT_HASH, subtree: BTreeMap::default() } - } -} - -impl StorageMultiProof { - /// Return storage proofs for the target storage slot (unhashed). - pub fn storage_proof(&self, slot: B256) -> Result { - let nibbles = Nibbles::unpack(keccak256(slot)); - - // Retrieve the storage proof. - let proof = self - .subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) - .map(|(_, node)| node.clone()) - .collect::>(); - - // Inspect the last node in the proof. If it's a leaf node with matching suffix, - // then the node contains the encoded slot value. - let value = 'value: { - if let Some(last) = proof.last() { - if let TrieNode::Leaf(leaf) = TrieNode::decode(&mut &last[..])? { - if nibbles.ends_with(&leaf.key) { - break 'value U256::decode(&mut &leaf.value[..])? - } - } - } - U256::ZERO - }; - - Ok(StorageProof { key: slot, nibbles, value, proof }) - } -} +use reth_primitives_traits::Account; /// The merkle proof with the relevant account info. #[derive(PartialEq, Eq, Debug)] @@ -146,6 +37,23 @@ impl AccountProof { } } + /// Set account info, storage root and requested storage proofs. + pub fn set_account( + &mut self, + info: Account, + storage_root: B256, + storage_proofs: Vec, + ) { + self.info = Some(info); + self.storage_root = storage_root; + self.storage_proofs = storage_proofs; + } + + /// Set proof path. + pub fn set_proof(&mut self, proof: Vec) { + self.proof = proof; + } + /// Verify the storage proofs and account proof against the provided state root. pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { // Verify storage proofs. @@ -198,6 +106,16 @@ impl StorageProof { Self { key, nibbles, ..Default::default() } } + /// Set storage value. + pub fn set_value(&mut self, value: U256) { + self.value = value; + } + + /// Set proof path. + pub fn set_proof(&mut self, proof: Vec) { + self.proof = proof; + } + /// Verify the proof against the provided storage root. 
pub fn verify(&self, root: B256) -> Result<(), ProofVerificationError> { let expected = diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml deleted file mode 100644 index 3c479072b658..000000000000 --- a/crates/trie/db/Cargo.toml +++ /dev/null @@ -1,78 +0,0 @@ -[package] -name = "reth-trie-db" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Database integration with merkle trie implementation" - -[lints] -workspace = true - -[dependencies] -# reth -reth-primitives.workspace = true -reth-execution-errors.workspace = true -reth-db.workspace = true -reth-db-api.workspace = true -reth-stages-types.workspace = true -reth-trie-common.workspace = true -reth-trie.workspace = true - -revm.workspace = true - -# alloy -alloy-rlp.workspace = true - -# tracing -tracing.workspace = true - -# misc -rayon.workspace = true -derive_more.workspace = true -auto_impl.workspace = true -itertools.workspace = true - -# `metrics` feature -reth-metrics = { workspace = true, optional = true } -metrics = { workspace = true, optional = true } - -# `test-utils` feature -triehash = { version = "0.8", optional = true } - -# `serde` feature -serde = { workspace = true, optional = true } - -[dev-dependencies] -# reth -reth-chainspec.workspace = true -reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-db = { workspace = true, features = ["test-utils"] } -reth-provider = { workspace = true, features = ["test-utils"] } -reth-storage-errors.workspace = true -reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-trie = { workspace = true, features = ["test-utils"] } - -# trie -triehash = "0.8" - -# misc -proptest.workspace = true -proptest-arbitrary-interop.workspace = true -tokio = { workspace = true, default-features = false, features = [ - "sync", - "rt", - "macros", -] } -tokio-stream.workspace = true -once_cell.workspace = true -serde_json.workspace = true -similar-asserts.workspace = true -criterion.workspace = true - -[features] -metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] -serde = ["dep:serde"] -test-utils = ["triehash", "reth-trie-common/test-utils"] diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs deleted file mode 100644 index 21deac82d27c..000000000000 --- a/crates/trie/db/src/lib.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! An integration of [`reth-trie`] with [`reth-db`]. - -mod proof; -mod state; -mod storage; - -pub use proof::DatabaseProof; -pub use state::DatabaseStateRoot; -pub use storage::DatabaseStorageRoot; diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs deleted file mode 100644 index 09f8098fe2c9..000000000000 --- a/crates/trie/db/src/proof.rs +++ /dev/null @@ -1,46 +0,0 @@ -use reth_db_api::transaction::DbTx; -use reth_execution_errors::StateProofError; -use reth_primitives::{Address, B256}; -use reth_trie::{ - hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, - proof::Proof, - HashedPostState, -}; -use reth_trie_common::AccountProof; - -/// Extends [`Proof`] with operations specific for working with a database transaction. -pub trait DatabaseProof<'a, TX> { - /// Create a new [Proof] from database transaction. - fn from_tx(tx: &'a TX) -> Self; - - /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. 
- fn overlay_account_proof( - tx: &'a TX, - post_state: HashedPostState, - address: Address, - slots: &[B256], - ) -> Result; -} - -impl<'a, TX: DbTx> DatabaseProof<'a, TX> for Proof<&'a TX, DatabaseHashedCursorFactory<'a, TX>> { - /// Create a new [Proof] instance from database transaction. - fn from_tx(tx: &'a TX) -> Self { - Self::new(tx, DatabaseHashedCursorFactory::new(tx)) - } - - fn overlay_account_proof( - tx: &'a TX, - post_state: HashedPostState, - address: Address, - slots: &[B256], - ) -> Result { - let prefix_sets = post_state.construct_prefix_sets(); - let sorted = post_state.into_sorted(); - let hashed_cursor_factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted); - Proof::from_tx(tx) - .with_hashed_cursor_factory(hashed_cursor_factory) - .with_prefix_sets_mut(prefix_sets) - .account_proof(address, slots) - } -} diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs deleted file mode 100644 index 8c72825e1d31..000000000000 --- a/crates/trie/db/src/state.rs +++ /dev/null @@ -1,208 +0,0 @@ -use reth_db_api::transaction::DbTx; -use reth_execution_errors::StateRootError; -use reth_primitives::{BlockNumber, B256}; -use reth_trie::{ - hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, - prefix_set::PrefixSetLoader, - updates::TrieUpdates, - HashedPostState, StateRoot, StateRootProgress, -}; -use std::ops::RangeInclusive; -use tracing::debug; - -/// Extends [`StateRoot`] with operations specific for working with a database transaction. -pub trait DatabaseStateRoot<'a, TX>: Sized { - /// Create a new [`StateRoot`] instance. - fn from_tx(tx: &'a TX) -> Self; - - /// Given a block number range, identifies all the accounts and storage keys that - /// have changed. - /// - /// # Returns - /// - /// An instance of state root calculator with account and storage prefixes loaded. - fn incremental_root_calculator( - tx: &'a TX, - range: RangeInclusive, - ) -> Result; - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes. - /// - /// # Returns - /// - /// The updated state root. - fn incremental_root( - tx: &'a TX, - range: RangeInclusive, - ) -> Result; - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes collecting updates in the process. - /// - /// Ignores the threshold. - /// - /// # Returns - /// - /// The updated state root and the trie updates. - fn incremental_root_with_updates( - tx: &'a TX, - range: RangeInclusive, - ) -> Result<(B256, TrieUpdates), StateRootError>; - - /// Computes the state root of the trie with the changed account and storage prefixes and - /// existing trie nodes collecting updates in the process. - /// - /// # Returns - /// - /// The intermediate progress of state root computation. - fn incremental_root_with_progress( - tx: &'a TX, - range: RangeInclusive, - ) -> Result; - - /// Calculate the state root for this [`HashedPostState`]. - /// Internally, this method retrieves prefixsets and uses them - /// to calculate incremental state root. 
- /// - /// # Example - /// - /// ``` - /// use reth_db::test_utils::create_test_rw_db; - /// use reth_db_api::database::Database; - /// use reth_primitives::{Account, U256}; - /// use reth_trie::{HashedPostState, StateRoot}; - /// use reth_trie_db::DatabaseStateRoot; - /// - /// // Initialize the database - /// let db = create_test_rw_db(); - /// - /// // Initialize hashed post state - /// let mut hashed_state = HashedPostState::default(); - /// hashed_state.accounts.insert( - /// [0x11; 32].into(), - /// Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }), - /// ); - /// - /// // Calculate the state root - /// let tx = db.tx().expect("failed to create transaction"); - /// let state_root = StateRoot::overlay_root(&tx, hashed_state); - /// ``` - /// - /// # Returns - /// - /// The state root for this [`HashedPostState`]. - fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result; - - /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie - /// updates. See [`Self::overlay_root`] for more info. - fn overlay_root_with_updates( - tx: &'a TX, - post_state: HashedPostState, - ) -> Result<(B256, TrieUpdates), StateRootError>; -} - -impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> - for StateRoot<&'a TX, DatabaseHashedCursorFactory<'a, TX>> -{ - fn from_tx(tx: &'a TX) -> Self { - Self::new(tx, DatabaseHashedCursorFactory::new(tx)) - } - - fn incremental_root_calculator( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?; - Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets)) - } - - fn incremental_root( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - debug!(target: "trie::loader", ?range, "incremental state root"); - Self::incremental_root_calculator(tx, range)?.root() - } - - fn incremental_root_with_updates( - tx: &'a TX, - range: RangeInclusive, - ) -> Result<(B256, TrieUpdates), StateRootError> { - debug!(target: "trie::loader", ?range, "incremental state root"); - Self::incremental_root_calculator(tx, range)?.root_with_updates() - } - - fn incremental_root_with_progress( - tx: &'a TX, - range: RangeInclusive, - ) -> Result { - debug!(target: "trie::loader", ?range, "incremental state root with progress"); - Self::incremental_root_calculator(tx, range)?.root_with_progress() - } - - fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result { - let prefix_sets = post_state.construct_prefix_sets().freeze(); - let sorted = post_state.into_sorted(); - StateRoot::new( - tx, - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted), - ) - .with_prefix_sets(prefix_sets) - .root() - } - - fn overlay_root_with_updates( - tx: &'a TX, - post_state: HashedPostState, - ) -> Result<(B256, TrieUpdates), StateRootError> { - let prefix_sets = post_state.construct_prefix_sets().freeze(); - let sorted = post_state.into_sorted(); - StateRoot::new( - tx, - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &sorted), - ) - .with_prefix_sets(prefix_sets) - .root_with_updates() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_db::test_utils::create_test_rw_db; - use reth_db_api::database::Database; - use reth_primitives::{hex, revm_primitives::AccountInfo, Address, U256}; - use revm::db::BundleState; - use std::collections::HashMap; - - #[test] - fn from_bundle_state_with_rayon() { - let address1 = Address::with_last_byte(1); - let address2 = Address::with_last_byte(2); - let slot1 = U256::from(1015); - let 
slot2 = U256::from(2015); - - let account1 = AccountInfo { nonce: 1, ..Default::default() }; - let account2 = AccountInfo { nonce: 2, ..Default::default() }; - - let bundle_state = BundleState::builder(2..=2) - .state_present_account_info(address1, account1) - .state_present_account_info(address2, account2) - .state_storage(address1, HashMap::from([(slot1, (U256::ZERO, U256::from(10)))])) - .state_storage(address2, HashMap::from([(slot2, (U256::ZERO, U256::from(20)))])) - .build(); - assert_eq!(bundle_state.reverts.len(), 1); - - let post_state = HashedPostState::from_bundle_state(&bundle_state.state); - assert_eq!(post_state.accounts.len(), 2); - assert_eq!(post_state.storages.len(), 2); - - let db = create_test_rw_db(); - let tx = db.tx().expect("failed to create transaction"); - assert_eq!( - StateRoot::overlay_root(&tx, post_state).unwrap(), - hex!("b464525710cafcf5d4044ac85b72c08b1e76231b8d91f288fe438cc41d8eaafd") - ); - } -} diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs deleted file mode 100644 index b4c31dbe343b..000000000000 --- a/crates/trie/db/src/storage.rs +++ /dev/null @@ -1,39 +0,0 @@ -use reth_db_api::transaction::DbTx; -use reth_primitives::{Address, B256}; -use reth_trie::{hashed_cursor::DatabaseHashedCursorFactory, StorageRoot}; - -#[cfg(feature = "metrics")] -use reth_trie::metrics::{TrieRootMetrics, TrieType}; - -/// Extends [`StorageRoot`] with operations specific for working with a database transaction. -pub trait DatabaseStorageRoot<'a, TX> { - /// Create a new storage root calculator from database transaction and raw address. - fn from_tx(tx: &'a TX, address: Address) -> Self; - - /// Create a new storage root calculator from database transaction and hashed address. - fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self; -} - -impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX> - for StorageRoot<&'a TX, DatabaseHashedCursorFactory<'a, TX>> -{ - fn from_tx(tx: &'a TX, address: Address) -> Self { - Self::new( - tx, - DatabaseHashedCursorFactory::new(tx), - address, - #[cfg(feature = "metrics")] - TrieRootMetrics::new(TrieType::Storage), - ) - } - - fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self { - Self::new_hashed( - tx, - DatabaseHashedCursorFactory::new(tx), - hashed_address, - #[cfg(feature = "metrics")] - TrieRootMetrics::new(TrieType::Storage), - ) - } -} diff --git a/crates/trie/db/tests/fuzz_in_memory_nodes.rs b/crates/trie/db/tests/fuzz_in_memory_nodes.rs deleted file mode 100644 index 5c213924b421..000000000000 --- a/crates/trie/db/tests/fuzz_in_memory_nodes.rs +++ /dev/null @@ -1,59 +0,0 @@ -use proptest::prelude::*; -use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; -use reth_primitives::{Account, B256, U256}; -use reth_provider::test_utils::create_test_provider_factory; -use reth_trie::{ - prefix_set::{PrefixSetMut, TriePrefixSets}, - test_utils::state_root_prehashed, - trie_cursor::InMemoryTrieCursorFactory, - StateRoot, -}; -use reth_trie_common::Nibbles; -use reth_trie_db::DatabaseStateRoot; -use std::collections::BTreeMap; - -proptest! 
{ - #![proptest_config(ProptestConfig { - cases: 128, ..ProptestConfig::default() - })] - - #[test] - fn fuzz_in_memory_nodes(mut init_state: BTreeMap, mut updated_state: BTreeMap) { - let factory = create_test_provider_factory(); - let provider = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = provider.tx_ref().cursor_write::().unwrap(); - - // Insert init state into database - for (hashed_address, balance) in init_state.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - } - - // Compute initial root and updates - let (_, trie_updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .unwrap(); - - // Insert state updates into database - let mut changes = PrefixSetMut::default(); - for (hashed_address, balance) in updated_state.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - changes.insert(Nibbles::unpack(hashed_address)); - } - - // Compute root with in-memory trie nodes overlay - let (state_root, _) = StateRoot::from_tx(provider.tx_ref()) - .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new(provider.tx_ref(), &trie_updates.into_sorted())) - .root_with_updates() - .unwrap(); - - // Verify the result - let mut state = BTreeMap::default(); - state.append(&mut init_state); - state.append(&mut updated_state); - let expected_root = state_root_prehashed( - state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) - ); - assert_eq!(expected_root, state_root); - } -} diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs deleted file mode 100644 index fbc40254bab7..000000000000 --- a/crates/trie/db/tests/proof.rs +++ /dev/null @@ -1,288 +0,0 @@ -use once_cell::sync::Lazy; -use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; -use reth_db_api::database::Database; -use reth_primitives::{ - constants::EMPTY_ROOT_HASH, keccak256, Account, Address, Bytes, StorageEntry, B256, U256, -}; -use reth_provider::{ - test_utils::create_test_provider_factory, HashingWriter, ProviderFactory, TrieWriter, -}; -use reth_storage_errors::provider::ProviderResult; -use reth_trie::{proof::Proof, Nibbles, StateRoot}; -use reth_trie_common::{AccountProof, StorageProof}; -use reth_trie_db::{DatabaseProof, DatabaseStateRoot}; -use std::{str::FromStr, sync::Arc}; - -/* - World State (sampled from ) - | address | prefix | hash | balance - |--------------------------------------------|-----------|--------------------------------------------------------------------|-------- - | 0x2031f89b3ea8014eb51a78c316e42af3e0d7695f | 0xa711355 | 0xa711355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40 | 45 eth - | 0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2 | 0xa77d337 | 0xa77d337781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37a | 1 wei - | 0x62b0dd4aab2b1a0a04e279e2b828791a10755528 | 0xa7f9365 | 0xa7f936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446e | 1.1 eth - | 0x1ed9b1dd266b607ee278726d324b855a093394a6 | 0xa77d397 | 0xa77d397a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3 | .12 eth - - All expected testspec results were obtained from querying proof RPC on the running geth instance `geth init crates/trie/testdata/proof-genesis.json && geth --http`. 
-*/ -static TEST_SPEC: Lazy> = Lazy::new(|| { - ChainSpec { - chain: Chain::from_id(12345), - genesis: serde_json::from_str(include_str!("../../trie/testdata/proof-genesis.json")) - .expect("Can't deserialize test genesis json"), - ..Default::default() - } - .into() -}); - -fn convert_to_proof<'a>(path: impl IntoIterator) -> Vec { - path.into_iter().map(Bytes::from_str).collect::, _>>().unwrap() -} - -fn insert_genesis( - provider_factory: &ProviderFactory, - chain_spec: Arc, -) -> ProviderResult { - let provider = provider_factory.provider_rw()?; - - // Hash accounts and insert them into hashing table. - let genesis = chain_spec.genesis(); - let alloc_accounts = genesis - .alloc - .iter() - .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); - provider.insert_account_for_hashing(alloc_accounts).unwrap(); - - let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { - // Only return `Some` if there is storage. - account.storage.map(|storage| { - ( - addr, - storage.into_iter().map(|(key, value)| StorageEntry { key, value: value.into() }), - ) - }) - }); - provider.insert_storage_for_hashing(alloc_storage)?; - - let (root, updates) = StateRoot::from_tx(provider.tx_ref()) - .root_with_updates() - .map_err(Into::::into)?; - provider.write_trie_updates(&updates).unwrap(); - - provider.commit()?; - - Ok(root) -} - -#[test] -fn testspec_proofs() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); - - let data = Vec::from([ - ( - "0x2031f89b3ea8014eb51a78c316e42af3e0d7695f", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xf8719f31355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40b84ff84d80890270801d946c940000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", - "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", - "0xf8679e207781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37ab846f8448001a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x62b0dd4aab2b1a0a04e279e2b828791a10755528", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - 
"0xf8709f3936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446eb84ef84c80880f43fc2c04ee0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ( - "0x1ed9b1dd266b607ee278726d324b855a093394a6", - convert_to_proof([ - "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", - "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", - "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", - "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", - "0xf86f9e207a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3b84ef84c808801aa535d3d0c0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]) - ), - ]); - - let provider = factory.provider().unwrap(); - for (target, expected_proof) in data { - let target = Address::from_str(target).unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!( - account_proof.proof, - expected_proof, - "proof for {target:?} does not match" - ); - assert_eq!(account_proof.verify(root), Ok(())); - } -} - -#[test] -fn testspec_empty_storage_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); - - let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); - let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); - assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); - - assert_eq!(slots.len(), account_proof.storage_proofs.len()); - for (idx, slot) in slots.into_iter().enumerate() { - let proof = account_proof.storage_proofs.get(idx).unwrap(); - assert_eq!(proof, &StorageProof::new(slot)); - assert_eq!(proof.verify(account_proof.storage_root), Ok(())); - } - assert_eq!(account_proof.verify(root), Ok(())); -} - -#[test] -fn mainnet_genesis_account_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); - - // Address from mainnet genesis allocation. 
- // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` - let target = Address::from_str("0x000d836201318ec6899a67540690382780743280").unwrap(); - - // `cast proof 0x000d836201318ec6899a67540690382780743280 --block 0` - let expected_account_proof = convert_to_proof([ - "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", - "0xf90211a0dae48f5b47930c28bb116fbd55e52cd47242c71bf55373b55eb2805ee2e4a929a00f1f37f337ec800e2e5974e2e7355f10f1a4832b39b846d916c3597a460e0676a0da8f627bb8fbeead17b318e0a8e4f528db310f591bb6ab2deda4a9f7ca902ab5a0971c662648d58295d0d0aa4b8055588da0037619951217c22052802549d94a2fa0ccc701efe4b3413fd6a61a6c9f40e955af774649a8d9fd212d046a5a39ddbb67a0d607cdb32e2bd635ee7f2f9e07bc94ddbd09b10ec0901b66628e15667aec570ba05b89203dc940e6fa70ec19ad4e01d01849d3a5baa0a8f9c0525256ed490b159fa0b84227d48df68aecc772939a59afa9e1a4ab578f7b698bdb1289e29b6044668ea0fd1c992070b94ace57e48cbf6511a16aa770c645f9f5efba87bbe59d0a042913a0e16a7ccea6748ae90de92f8aef3b3dc248a557b9ac4e296934313f24f7fced5fa042373cf4a00630d94de90d0a23b8f38ced6b0f7cb818b8925fee8f0c2a28a25aa05f89d2161c1741ff428864f7889866484cef622de5023a46e795dfdec336319fa07597a017664526c8c795ce1da27b8b72455c49657113e0455552dbc068c5ba31a0d5be9089012fda2c585a1b961e988ea5efcd3a06988e150a8682091f694b37c5a0f7b0352e38c315b2d9a14d51baea4ddee1770974c806e209355233c3c89dce6ea049bf6e8df0acafd0eff86defeeb305568e44d52d2235cf340ae15c6034e2b24180", - "0xf901f1a0cf67e0f5d5f8d70e53a6278056a14ddca46846f5ef69c7bde6810d058d4a9eda80a06732ada65afd192197fe7ce57792a7f25d26978e64e954b7b84a1f7857ac279da05439f8d011683a6fc07efb90afca198fd7270c795c835c7c85d91402cda992eaa0449b93033b6152d289045fdb0bf3f44926f831566faa0e616b7be1abaad2cb2da031be6c3752bcd7afb99b1bb102baf200f8567c394d464315323a363697646616a0a40e3ed11d906749aa501279392ffde868bd35102db41364d9c601fd651f974aa0044bfa4fe8dd1a58e6c7144da79326e94d1331c0b00373f6ae7f3662f45534b7a098005e3e48db68cb1dc9b9f034ff74d2392028ddf718b0f2084133017da2c2e7a02a62bc40414ee95b02e202a9e89babbabd24bef0abc3fc6dcd3e9144ceb0b725a0239facd895bbf092830390a8676f34b35b29792ae561f196f86614e0448a5792a0a4080f88925daff6b4ce26d188428841bd65655d8e93509f2106020e76d41eefa04918987904be42a6894256ca60203283d1b89139cf21f09f5719c44b8cdbb8f7a06201fc3ef0827e594d953b5e3165520af4fceb719e11cc95fd8d3481519bfd8ca05d0e353d596bd725b09de49c01ede0f29023f0153d7b6d401556aeb525b2959ba0cd367d0679950e9c5f2aa4298fd4b081ade2ea429d71ff390c50f8520e16e30880", - 
"0xf87180808080808080a0dbee8b33c73b86df839f309f7ac92eee19836e08b39302ffa33921b3c6a09f66a06068b283d51aeeee682b8fb5458354315d0b91737441ede5e137c18b4775174a8080808080a0fe7779c7d58c2fda43eba0a6644043c86ebb9ceb4836f89e30831f23eb059ece8080", - "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" - ]); - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); - assert_eq!(account_proof.verify(root), Ok(())); -} - -#[test] -fn mainnet_genesis_account_proof_nonexistent() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); - - // Address that does not exist in mainnet genesis allocation. - // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` - let target = Address::from_str("0x000d836201318ec6899a67540690382780743281").unwrap(); - - // `cast proof 0x000d836201318ec6899a67540690382780743281 --block 0` - let expected_account_proof = convert_to_proof([ - "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", - "0xf90211a0586b1ddec8db4824154209d355a1989b6c43aa69aba36e9d70c9faa53e7452baa0f86db47d628c73764d74b9ccaed73b8486d97a7731d57008fc9efaf417411860a0d9faed7b9ea107b5d98524246c977e782377f976e34f70717e8b1207f2f9b981a00218f59ccedf797c95e27c56405b9bf16845050fb43e773b66b26bc6992744f5a0dbf396f480c4e024156644adea7c331688d03742369e9d87ab8913bc439ff975a0aced524f39b22c62a5be512ddbca89f0b89b47c311065ccf423dee7013c7ea83a0c06b05f80b237b403adc019c0bc95b5de935021b14a75cbc18509eec60dfd83aa085339d45c4a52b7d523c301701f1ab339964e9c907440cff0a871c98dcf8811ea03ae9f6b8e227ec9be9461f0947b01696f78524c4519a6dee9fba14d209952cf9a0af17f551f9fa1ba4be41d0b342b160e2e8468d7e98a65a2dbf9d5fe5d6928024a0b850ac3bc03e9a309cc59ce5f1ab8db264870a7a22786081753d1db91897b8e6a09e796a4904bd78cb2655b5f346c94350e2d5f0dbf2bc00ac00871cd7ba46b241a0f6f0377427b900529caf32abf32ba1eb93f5f70153aa50b90bf55319a434c252a0725eaf27c8ee07e9b2511a6d6a0d71c649d855e8a9ed26e667903e2e94ae47cba0e4139fb48aa1a524d47f6e0df80314b88b52202d7e853da33c276aa8572283a8a05e9003d54a45935fdebae3513dc7cd16626dc05e1d903ae7f47f1a35aa6e234580", - 
"0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" - ]); - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); - similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); - assert_eq!(account_proof.verify(root), Ok(())); -} - -#[test] -fn holesky_deposit_contract_proof() { - // Create test database and insert genesis accounts. - let factory = create_test_provider_factory(); - let root = insert_genesis(&factory, HOLESKY.clone()).unwrap(); - - let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); - // existent - let slot_22 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000022") - .unwrap(); - let slot_23 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000023") - .unwrap(); - let slot_24 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000024") - .unwrap(); - // non-existent - let slot_100 = - B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000100") - .unwrap(); - let slots = Vec::from([slot_22, slot_23, slot_24, slot_100]); - - // `cast proof 0x4242424242424242424242424242424242424242 0x22 0x23 0x24 0x100 --block 0` - let expected = AccountProof { - address: target, - info: Some(Account { - balance: U256::ZERO, - nonce: 0, - bytecode_hash: Some(B256::from_str("0x2034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc").unwrap()) - }), - storage_root: B256::from_str("0x556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599").unwrap(), - proof: convert_to_proof([ - 
"0xf90211a0ea92fb71507739d5afe328d607b2c5e98322b7aa7cdfeccf817543058b54af70a0bd0c2525b5bee47abf7120c9e01ec3249699d687f80ebb96ed9ad9de913dbab0a0ab4b14b89416eb23c6b64204fa45cfcb39d4220016a9cd0815ebb751fe45eb71a0986ae29c2148b9e61f9a7543f44a1f8d029f1c5095b359652e9ec94e64b5d393a0555d54aa23ed990b0488153418637df7b2c878b604eb761aa2673b609937b0eba0140afb6a3909cc6047b3d44af13fc83f161a7e4c4ddba430a2841862912eb222a031b1185c1f455022d9e42ce04a71f174eb9441b1ada67449510500f4d85b3b22a051ecd01e18113b23cc65e62f67d69b33ee15d20bf81a6b524f7df90ded00ca15a0703769d6a7befad000bc2b4faae3e41b809b1b1241fe2964262554e7e3603488a0e5de7f600e4e6c3c3e5630e0c66f50506a17c9715642fccb63667e81397bbf93a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606a04225fda3b89f0c59bf40129d1d5e5c3bf67a2129f0c55e53ffdd2cebf185d644a078e0f7fd3ae5a9bc90f66169614211b48fe235eb64818b3935d3e69c53523b9aa0a870e00e53ebaa1e9ec16e5f36606fd7d21d3a3c96894c0a2a23550949d4fdf7a0809226b69cee1f4f22ced1974e7805230da1909036a49a7652428999431afac2a0f11593b2407e86e11997325d8df2d22d937bbe0aef8302ba40c6be0601b04fc380", - "0xf901f1a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5fa05f5f09855df46330aa310e8d6be5fb82d1a4b975782d9b29acf06ac8d3e72b1ca0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06ba04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83a004f8c7cc4f1335182a1709fb28fc67d52e59878480210abcba864d5d1fd4a066a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d80a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86fa02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fffa0f3b039a4f32349e85c782d1164c1890e5bf16badc9ee4cf827db6afd2229dde6a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b280", - "0xf869a0202a47fc6863b89a6b51890ef3c1550d560886c027141d2058ba1e2d4c66d99ab846f8448080a0556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599a02034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc" - ]), - storage_proofs: Vec::from([ - StorageProof { - key: slot_22, - nibbles: Nibbles::unpack(keccak256(slot_22)), - value: U256::from_str("0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf85180a0776aa456ba9c5008e03b82b841a9cf2fc1e8578cfacd5c9015804eae315f17fb80808080808080808080808080a072e3e284d47badbb0a5ca1421e1179d3ea90cc10785b26b74fb8a81f0f9e841880", - 
"0xf843a020035b26e3e9eee00e0d72fd1ee8ddca6894550dca6916ea2ac6baa90d11e510a1a0f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b" - ]) - }, - StorageProof { - key: slot_23, - nibbles: Nibbles::unpack(keccak256(slot_23)), - value: U256::from_str("0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf8518080808080a0d546c4ca227a267d29796643032422374624ed109b3d94848c5dc06baceaee76808080808080a027c48e210ccc6e01686be2d4a199d35f0e1e8df624a8d3a17c163be8861acd6680808080", - "0xf843a0207b2b5166478fd4318d2acc6cc2c704584312bdd8781b32d5d06abda57f4230a1a0db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71" - ]) - }, - StorageProof { - key: slot_24, - nibbles: Nibbles::unpack(keccak256(slot_24)), - value: U256::from_str("0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c").unwrap(), - proof: convert_to_proof([ - "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf85180808080a030263404acfee103d0b1019053ff3240fce433c69b709831673285fa5887ce4c80808080808080a0f8f1fbb1f7b482d9860480feebb83ff54a8b6ec1ead61cc7d2f25d7c01659f9c80808080", - "0xf843a020d332d19b93bcabe3cce7ca0c18a052f57e5fd03b4758a09f30f5ddc4b22ec4a1a0c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c" - ]) - }, - StorageProof { - key: slot_100, - nibbles: Nibbles::unpack(keccak256(slot_100)), - value: U256::ZERO, - proof: convert_to_proof([ - 
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", - "0xf891a090bacef44b189ddffdc5f22edc70fe298c58e5e523e6e1dfdf7dbc6d657f7d1b80a026eed68746028bc369eb456b7d3ee475aa16f34e5eaa0c98fdedb9c59ebc53b0808080a09ce86197173e14e0633db84ce8eea32c5454eebe954779255644b45b717e8841808080a0328c7afb2c58ef3f8c4117a8ebd336f1a61d24591067ed9c5aae94796cac987d808080808080" - ]) - }, - ]) - }; - - let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); - similar_asserts::assert_eq!(account_proof, expected); - assert_eq!(account_proof.verify(root), Ok(())); -} diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs deleted file mode 100644 index 8a9dbee9b2ac..000000000000 --- a/crates/trie/db/tests/trie.rs +++ /dev/null @@ -1,773 +0,0 @@ -use proptest::{prelude::ProptestConfig, proptest}; -use proptest_arbitrary_interop::arb; -use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - transaction::DbTxMut, -}; -use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256}; -use reth_provider::{ - test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter, -}; -use reth_trie::{ - prefix_set::PrefixSetMut, - test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, - BranchNodeCompact, StateRoot, StorageRoot, TrieMask, -}; -use reth_trie_common::triehash::KeccakHasher; -use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; -use std::{ - collections::{BTreeMap, HashMap}, - ops::Mul, - str::FromStr, - sync::Arc, -}; - -use alloy_rlp::Encodable; -use reth_db_api::transaction::DbTx; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; -use reth_trie::{ - prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, - IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, -}; - -fn insert_account( - tx: &impl DbTxMut, - address: Address, - account: Account, - storage: &BTreeMap, -) { - let hashed_address = keccak256(address); - tx.put::(hashed_address, account).unwrap(); - insert_storage(tx, hashed_address, storage); -} - -fn insert_storage(tx: &impl DbTxMut, hashed_address: B256, storage: &BTreeMap) { - for (k, v) in storage { - tx.put::( - hashed_address, - StorageEntry { key: keccak256(k), value: *v }, - ) - .unwrap(); - } -} - -fn incremental_vs_full_root(inputs: &[&str], modified: &str) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - let hashed_address = B256::with_last_byte(1); - - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - let data = inputs.iter().map(|x| B256::from_str(x).unwrap()); - let 
value = U256::from(0); - for key in data { - hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap(); - } - - // Generate the intermediate nodes on the receiving end of the channel - let (_, _, trie_updates) = - StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); - - // 1. Some state transition happens, update the hashed storage to the new value - let modified_key = B256::from_str(modified).unwrap(); - let value = U256::from(1); - if hashed_storage_cursor.seek_by_key_subkey(hashed_address, modified_key).unwrap().is_some() { - hashed_storage_cursor.delete_current().unwrap(); - } - hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: modified_key, value }) - .unwrap(); - - // 2. Calculate full merkle root - let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address); - let modified_root = loader.root().unwrap(); - - // Update the intermediate roots table so that we can run the incremental verification - tx.write_individual_storage_trie_updates(hashed_address, &trie_updates).unwrap(); - - // 3. Calculate the incremental root - let mut storage_changes = PrefixSetMut::default(); - storage_changes.insert(Nibbles::unpack(modified_key)); - let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address) - .with_prefix_set(storage_changes.freeze()); - let incremental_root = loader.root().unwrap(); - - assert_eq!(modified_root, incremental_root); -} - -#[test] -fn branch_node_child_changes() { - incremental_vs_full_root( - &[ - "1000000000000000000000000000000000000000000000000000000000000000", - "1100000000000000000000000000000000000000000000000000000000000000", - "1110000000000000000000000000000000000000000000000000000000000000", - "1200000000000000000000000000000000000000000000000000000000000000", - "1220000000000000000000000000000000000000000000000000000000000000", - "1320000000000000000000000000000000000000000000000000000000000000", - ], - "1200000000000000000000000000000000000000000000000000000000000000", - ); -} - -#[test] -fn arbitrary_storage_root() { - proptest!(ProptestConfig::with_cases(10), |(item in arb::<(Address, std::collections::BTreeMap)>())| { - let (address, storage) = item; - - let hashed_address = keccak256(address); - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - for (key, value) in &storage { - tx.tx_ref().put::( - hashed_address, - StorageEntry { key: keccak256(key), value: *value }, - ) - .unwrap(); - } - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - let expected = storage_root(storage.into_iter()); - assert_eq!(expected, got); - }); -} - -#[test] -// This ensures we dont add empty accounts to the trie -fn test_empty_account() { - let state: State = BTreeMap::from([ - ( - Address::random(), - ( - Account { nonce: 0, balance: U256::from(0), bytecode_hash: None }, - BTreeMap::from([(B256::with_last_byte(0x4), U256::from(12))]), - ), - ), - ( - Address::random(), - ( - Account { nonce: 0, balance: U256::from(0), bytecode_hash: None }, - BTreeMap::default(), - ), - ), - ( - Address::random(), - ( - Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256("test")), - }, - BTreeMap::from([ - (B256::ZERO, U256::from(3)), - (B256::with_last_byte(2), U256::from(1)), - ]), - ), - ), - ]); - test_state_root_with_state(state); -} - -#[test] -// This ensures we return an empty root when there are no storage entries -fn 
test_empty_storage_root() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let address = Address::random(); - let code = "el buen fla"; - let account = Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256(code)), - }; - insert_account(tx.tx_ref(), address, account, &Default::default()); - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - assert_eq!(got, EMPTY_ROOT_HASH); -} - -#[test] -// This ensures that the walker goes over all the storage slots -fn test_storage_root() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let address = Address::random(); - let storage = - BTreeMap::from([(B256::ZERO, U256::from(3)), (B256::with_last_byte(2), U256::from(1))]); - - let code = "el buen fla"; - let account = Account { - nonce: 155, - balance: U256::from(414241124u32), - bytecode_hash: Some(keccak256(code)), - }; - - insert_account(tx.tx_ref(), address, account, &storage); - tx.commit().unwrap(); - - let tx = factory.provider_rw().unwrap(); - let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); - - assert_eq!(storage_root(storage.into_iter()), got); -} - -type State = BTreeMap)>; - -#[test] -fn arbitrary_state_root() { - proptest!( - ProptestConfig::with_cases(10), | (state in arb::()) | { - test_state_root_with_state(state); - } - ); -} - -#[test] -fn arbitrary_state_root_with_progress() { - proptest!( - ProptestConfig::with_cases(10), | (state in arb::()) | { - let hashed_entries_total = state.len() + - state.values().map(|(_, slots)| slots.len()).sum::(); - - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - for (address, (account, storage)) in &state { - insert_account(tx.tx_ref(), *address, *account, storage) - } - tx.commit().unwrap(); - let tx = factory.provider_rw().unwrap(); - - let expected = state_root(state); - - let threshold = 10; - let mut got = None; - let mut hashed_entries_walked = 0; - - let mut intermediate_state: Option> = None; - while got.is_none() { - let calculator = StateRoot::from_tx(tx.tx_ref()) - .with_threshold(threshold) - .with_intermediate_state(intermediate_state.take().map(|state| *state)); - match calculator.root_with_progress().unwrap() { - StateRootProgress::Progress(state, walked, _) => { - intermediate_state = Some(state); - hashed_entries_walked += walked; - }, - StateRootProgress::Complete(root, walked, _) => { - got = Some(root); - hashed_entries_walked += walked; - }, - }; - } - assert_eq!(expected, got.unwrap()); - assert_eq!(hashed_entries_total, hashed_entries_walked) - } - ); -} - -fn test_state_root_with_state(state: State) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - for (address, (account, storage)) in &state { - insert_account(tx.tx_ref(), *address, *account, storage) - } - tx.commit().unwrap(); - let expected = state_root(state); - - let tx = factory.provider_rw().unwrap(); - let got = StateRoot::from_tx(tx.tx_ref()).root().unwrap(); - assert_eq!(expected, got); -} - -fn encode_account(account: Account, storage_root: Option) -> Vec { - let account = TrieAccount::from((account, storage_root.unwrap_or(EMPTY_ROOT_HASH))); - let mut account_rlp = Vec::with_capacity(account.length()); - account.encode(&mut account_rlp); - account_rlp -} - -#[test] -fn storage_root_regression() { - let factory = 
create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - // Some address whose hash starts with 0xB041 - let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); - let key3 = keccak256(address3); - assert_eq!(key3[0], 0xB0); - assert_eq!(key3[1], 0x41); - - let storage = BTreeMap::from( - [ - ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), - ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), - ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), - ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), - ] - .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), - ); - - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - for (hashed_slot, value) in storage.clone() { - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); - } - tx.commit().unwrap(); - let tx = factory.provider_rw().unwrap(); - - let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); - let expected_root = storage_root_prehashed(storage); - assert_eq!(expected_root, account3_storage_root); -} - -#[test] -fn account_and_storage_trie() { - let ether = U256::from(1e18); - let storage = BTreeMap::from( - [ - ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), - ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), - ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), - ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), - ] - .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), - ); - - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); - let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); - - let mut hash_builder = HashBuilder::default(); - - // Insert first account - let key1 = - B256::from_str("b000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key1, account1).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None)); - - // Some address whose hash starts with 0xB040 - let address2 = Address::from_str("7db3e81b72d2695e19764583f6d219dbee0f35ca").unwrap(); - let key2 = keccak256(address2); - assert_eq!(key2[0], 0xB0); - assert_eq!(key2[1], 0x40); - let account2 = Account { nonce: 0, balance: ether, ..Default::default() }; - hashed_account_cursor.upsert(key2, account2).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None)); - - // Some address whose hash starts with 0xB041 - let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); - let key3 = keccak256(address3); - assert_eq!(key3[0], 0xB0); - assert_eq!(key3[1], 0x41); - let code_hash = - B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd").unwrap(); - let account3 = - Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) }; - hashed_account_cursor.upsert(key3, account3).unwrap(); - for (hashed_slot, value) in storage { - if hashed_storage_cursor - .seek_by_key_subkey(key3, hashed_slot) - .unwrap() - .filter(|e| e.key == hashed_slot) - .is_some() - { - 
hashed_storage_cursor.delete_current().unwrap(); - } - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); - } - let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); - hash_builder - .add_leaf(Nibbles::unpack(key3), &encode_account(account3, Some(account3_storage_root))); - - let key4a = - B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000").unwrap(); - let account4a = Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key4a, account4a).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None)); - - let key5 = - B256::from_str("B310000000000000000000000000000000000000000000000000000000000000").unwrap(); - let account5 = Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key5, account5).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None)); - - let key6 = - B256::from_str("B340000000000000000000000000000000000000000000000000000000000000").unwrap(); - let account6 = Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key6, account6).unwrap(); - hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None)); - - // Populate account & storage trie DB tables - let expected_root = - B256::from_str("72861041bc90cd2f93777956f058a545412b56de79af5eb6b8075fe2eabbe015").unwrap(); - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - (key2, encode_account(account2, None)), - (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - // Check computed trie root to ensure correctness - assert_eq!(computed_expected_root, expected_root); - - // Check hash builder root - assert_eq!(hash_builder.root(), computed_expected_root); - - // Check state root calculation from scratch - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(root, computed_expected_root); - - // Check account trie - let account_updates = trie_updates.clone().into_sorted(); - let account_updates = account_updates.account_nodes_ref(); - assert_eq!(account_updates.len(), 2); - - let (nibbles1a, node1a) = account_updates.first().unwrap(); - assert_eq!(nibbles1a[..], [0xB]); - assert_eq!(node1a.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1a.tree_mask, TrieMask::new(0b0001)); - assert_eq!(node1a.hash_mask, TrieMask::new(0b1001)); - assert_eq!(node1a.root_hash, None); - assert_eq!(node1a.hashes.len(), 2); - - let (nibbles2a, node2a) = account_updates.last().unwrap(); - assert_eq!(nibbles2a[..], [0xB, 0x0]); - assert_eq!(node2a.state_mask, TrieMask::new(0b10001)); - assert_eq!(node2a.tree_mask, TrieMask::new(0b00000)); - assert_eq!(node2a.hash_mask, TrieMask::new(0b10000)); - assert_eq!(node2a.root_hash, None); - assert_eq!(node2a.hashes.len(), 1); - - // Check storage trie - let mut updated_storage_trie = - trie_updates.storage_tries_ref().iter().filter(|(_, u)| !u.storage_nodes_ref().is_empty()); - assert_eq!(updated_storage_trie.clone().count(), 1); - let (_, storage_trie_updates) = updated_storage_trie.next().unwrap(); - assert_eq!(storage_trie_updates.storage_nodes_ref().len(), 1); - - let (nibbles3, node3) = 
storage_trie_updates.storage_nodes_ref().iter().next().unwrap(); - assert!(nibbles3.is_empty()); - assert_eq!(node3.state_mask, TrieMask::new(0b1010)); - assert_eq!(node3.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node3.hash_mask, TrieMask::new(0b0010)); - - assert_eq!(node3.hashes.len(), 1); - assert_eq!(node3.root_hash, Some(account3_storage_root)); - - // Add an account - // Some address whose hash starts with 0xB1 - let address4b = Address::from_str("4f61f2d5ebd991b85aa1677db97307caf5215c91").unwrap(); - let key4b = keccak256(address4b); - assert_eq!(key4b.0[0], key4a.0[0]); - let account4b = Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key4b, account4b).unwrap(); - - let mut prefix_set = PrefixSetMut::default(); - prefix_set.insert(Nibbles::unpack(key4b)); - - let expected_state_root = - B256::from_str("8e263cd4eefb0c3cbbb14e5541a66a755cad25bcfab1e10dd9d706263e811b28").unwrap(); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - account_prefix_set: prefix_set.freeze(), - ..Default::default() - }) - .root_with_updates() - .unwrap(); - assert_eq!(root, expected_state_root); - - let account_updates = trie_updates.into_sorted(); - let account_updates = account_updates.account_nodes_ref(); - assert_eq!(account_updates.len(), 2); - - let (nibbles1b, node1b) = account_updates.first().unwrap(); - assert_eq!(nibbles1b[..], [0xB]); - assert_eq!(node1b.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1b.tree_mask, TrieMask::new(0b0001)); - assert_eq!(node1b.hash_mask, TrieMask::new(0b1011)); - assert_eq!(node1b.root_hash, None); - assert_eq!(node1b.hashes.len(), 3); - assert_eq!(node1a.hashes[0], node1b.hashes[0]); - assert_eq!(node1a.hashes[1], node1b.hashes[2]); - - let (nibbles2b, node2b) = account_updates.last().unwrap(); - assert_eq!(nibbles2b[..], [0xB, 0x0]); - assert_eq!(node2a, node2b); - tx.commit().unwrap(); - - { - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); - - let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - - let mut account_prefix_set = PrefixSetMut::default(); - account_prefix_set.insert(Nibbles::unpack(account.0)); - - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - // DELETED: (key2, encode_account(account2, None)), - (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key4b, encode_account(account4b, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - ..Default::default() - }) - .root_with_updates() - .unwrap(); - assert_eq!(root, computed_expected_root); - assert_eq!( - trie_updates.account_nodes_ref().len() + trie_updates.removed_nodes_ref().len(), - 1 - ); - - assert_eq!(trie_updates.account_nodes_ref().len(), 1); - - let (nibbles1c, node1c) = trie_updates.account_nodes_ref().iter().next().unwrap(); - assert_eq!(nibbles1c[..], [0xB]); - - assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1c.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node1c.hash_mask, TrieMask::new(0b1011)); - - assert_eq!(node1c.root_hash, None); - - assert_eq!(node1c.hashes.len(), 3); - 
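[Editor's note] For reviewers decoding the mask assertions in this test: a `BranchNodeCompact` carries three 16-bit bitmaps with one bit per child nibble. `state_mask` marks which children exist, `tree_mask` marks children persisted as whole subtrees in the database, and `hash_mask` marks children whose hashes are collected in `hashes`. A small illustration of how the literals decode, assuming only `TrieMask::new` and the `is_bit_set` accessor re-exported by `reth_trie`:

```rust
use reth_trie::TrieMask;

#[test]
fn decode_branch_masks() {
    // 0b1011 sets bits 0, 1 and 3: the branch at nibble path [0xB] has
    // children under nibbles 0x0, 0x1 and 0x3.
    let state_mask = TrieMask::new(0b1011);
    assert!(state_mask.is_bit_set(0x0));
    assert!(state_mask.is_bit_set(0x1));
    assert!(!state_mask.is_bit_set(0x2));
    assert!(state_mask.is_bit_set(0x3));
}
```

Read this way, a `hash_mask` of `0b1011` with three set bits is exactly why the assertions above expect `hashes.len() == 3`: one collected child hash per set bit, in ascending nibble order.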
assert_ne!(node1c.hashes[0], node1b.hashes[0]); - assert_eq!(node1c.hashes[1], node1b.hashes[1]); - assert_eq!(node1c.hashes[2], node1b.hashes[2]); - } - - { - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); - - let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - let account3 = hashed_account_cursor.seek_exact(key3).unwrap().unwrap(); - hashed_account_cursor.delete_current().unwrap(); - - let mut account_prefix_set = PrefixSetMut::default(); - account_prefix_set.insert(Nibbles::unpack(account2.0)); - account_prefix_set.insert(Nibbles::unpack(account3.0)); - - let computed_expected_root: B256 = triehash::trie_root::([ - (key1, encode_account(account1, None)), - // DELETED: (key2, encode_account(account2, None)), - // DELETED: (key3, encode_account(account3, Some(account3_storage_root))), - (key4a, encode_account(account4a, None)), - (key4b, encode_account(account4b, None)), - (key5, encode_account(account5, None)), - (key6, encode_account(account6, None)), - ]); - - let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - ..Default::default() - }) - .root_with_updates() - .unwrap(); - assert_eq!(root, computed_expected_root); - assert_eq!( - trie_updates.account_nodes_ref().len() + trie_updates.removed_nodes_ref().len(), - 1 - ); - assert!(!trie_updates - .storage_tries_ref() - .iter() - .any(|(_, u)| !u.storage_nodes_ref().is_empty() || !u.removed_nodes_ref().is_empty())); // no storage root update - - assert_eq!(trie_updates.account_nodes_ref().len(), 1); - - let (nibbles1d, node1d) = trie_updates.account_nodes_ref().iter().next().unwrap(); - assert_eq!(nibbles1d[..], [0xB]); - - assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); - assert_eq!(node1d.tree_mask, TrieMask::new(0b0000)); - assert_eq!(node1d.hash_mask, TrieMask::new(0b1010)); - - assert_eq!(node1d.root_hash, None); - - assert_eq!(node1d.hashes.len(), 2); - assert_eq!(node1d.hashes[0], node1b.hashes[1]); - assert_eq!(node1d.hashes[1], node1b.hashes[2]); - } -} - -#[test] -fn account_trie_around_extension_node() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let expected = extension_node_trie(&tx); - - let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(expected, got); - assert_trie_updates(updates.account_nodes_ref()); -} - -#[test] -fn account_trie_around_extension_node_with_dbtrie() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let expected = extension_node_trie(&tx); - - let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); - assert_eq!(expected, got); - tx.write_trie_updates(&updates).unwrap(); - - // read the account updates from the db - let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); - let walker = accounts_trie.walk(None).unwrap(); - let account_updates = walker - .into_iter() - .map(|item| { - let (key, node) = item.unwrap(); - (key.0, node) - }) - .collect(); - assert_trie_updates(&account_updates); -} - -proptest! 
{ - #![proptest_config(ProptestConfig { - cases: 128, ..ProptestConfig::default() - })] - - #[test] - fn fuzz_state_root_incremental(account_changes: [BTreeMap; 5]) { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); - - let mut state = BTreeMap::default(); - for accounts in account_changes { - let should_generate_changeset = !state.is_empty(); - let mut changes = PrefixSetMut::default(); - for (hashed_address, balance) in accounts.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); - if should_generate_changeset { - changes.insert(Nibbles::unpack(hashed_address)); - } - } - - let (state_root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) - .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) - .root_with_updates() - .unwrap(); - - state.append(&mut accounts.clone()); - let expected_root = state_root_prehashed( - state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) - ); - assert_eq!(expected_root, state_root); - tx.write_trie_updates(&trie_updates).unwrap(); - } - } -} - -#[test] -fn storage_trie_around_extension_node() { - let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap(); - - let hashed_address = B256::random(); - let (expected_root, expected_updates) = extension_node_storage_trie(&tx, hashed_address); - - let (got, _, updates) = - StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); - assert_eq!(expected_root, got); - assert_eq!(expected_updates, updates); - assert_trie_updates(updates.storage_nodes_ref()); -} - -fn extension_node_storage_trie( - tx: &DatabaseProviderRW>>, - hashed_address: B256, -) -> (B256, StorageTrieUpdates) { - let value = U256::from(1); - - let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); - - let mut hb = HashBuilder::default().with_updates(true); - - for key in [ - hex!("30af561000000000000000000000000000000000000000000000000000000000"), - hex!("30af569000000000000000000000000000000000000000000000000000000000"), - hex!("30af650000000000000000000000000000000000000000000000000000000000"), - hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), - hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), - hex!("3100000000000000000000000000000000000000000000000000000000000000"), - ] { - hashed_storage.upsert(hashed_address, StorageEntry { key: B256::new(key), value }).unwrap(); - hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value)); - } - - let root = hb.root(); - let (_, updates) = hb.split(); - let trie_updates = StorageTrieUpdates::new(updates); - (root, trie_updates) -} - -fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { - let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; - let val = encode_account(a, None); - - let mut hashed_accounts = tx.tx_ref().cursor_write::().unwrap(); - let mut hb = HashBuilder::default(); - - for key in [ - hex!("30af561000000000000000000000000000000000000000000000000000000000"), - hex!("30af569000000000000000000000000000000000000000000000000000000000"), - hex!("30af650000000000000000000000000000000000000000000000000000000000"), - hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), - 
hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), - hex!("3100000000000000000000000000000000000000000000000000000000000000"), - ] { - hashed_accounts.upsert(B256::new(key), a).unwrap(); - hb.add_leaf(Nibbles::unpack(key), &val); - } - - hb.root() -} - -fn assert_trie_updates(account_updates: &HashMap) { - assert_eq!(account_updates.len(), 2); - - let node = account_updates.get(&[0x3][..]).unwrap(); - let expected = BranchNodeCompact::new(0b0011, 0b0001, 0b0000, vec![], None); - assert_eq!(node, &expected); - - let node = account_updates.get(&[0x3, 0x0, 0xA, 0xF][..]).unwrap(); - assert_eq!(node.state_mask, TrieMask::new(0b101100000)); - assert_eq!(node.tree_mask, TrieMask::new(0b000000000)); - assert_eq!(node.hash_mask, TrieMask::new(0b001000000)); - - assert_eq!(node.root_hash, None); - assert_eq!(node.hashes.len(), 1); -} diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 92f939dd0cb3..36b7cbdc4a28 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -17,7 +17,6 @@ reth-primitives.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-trie.workspace = true -reth-trie-db.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index bbd2ff228f80..66d0593da178 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -6,14 +6,11 @@ use rayon::ThreadPoolBuilder; use reth_primitives::{Account, B256, U256}; use reth_provider::{ providers::ConsistentDbView, test_utils::create_test_provider_factory, writer::StorageWriter, - TrieWriter, }; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ - hashed_cursor::{DatabaseHashedCursorFactory, HashedPostStateCursorFactory}, - HashedPostState, HashedStorage, StateRoot, + hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, }; -use reth_trie_db::DatabaseStateRoot; use reth_trie_parallel::{async_root::AsyncStateRoot, parallel_root::ParallelStateRoot}; use std::collections::HashMap; @@ -33,7 +30,7 @@ pub fn calculate_state_root(c: &mut Criterion) { storage_writer.write_hashed_state(&db_state.into_sorted()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - provider_rw.write_trie_updates(&updates).unwrap(); + updates.write_to_database(provider_rw.tx_ref()).unwrap(); provider_rw.commit().unwrap(); } @@ -49,12 +46,11 @@ pub fn calculate_state_root(c: &mut Criterion) { (provider, sorted_state, prefix_sets) }, |(provider, sorted_state, prefix_sets)| async move { - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider.tx_ref()), - &sorted_state, - ); StateRoot::from_tx(provider.tx_ref()) - .with_hashed_cursor_factory(hashed_cursor_factory) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + provider.tx_ref(), + &sorted_state, + )) .with_prefix_sets(prefix_sets) .root() }, diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index cf3eabcdc32c..db6152b6a2cf 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -7,9 +7,7 @@ use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_tasks::pool::BlockingTaskPool; use reth_trie::{ - hashed_cursor::{ - DatabaseHashedCursorFactory, 
HashedCursorFactory, HashedPostStateCursorFactory, - }, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, @@ -109,13 +107,9 @@ where let handle = self.blocking_pool.spawn_fifo(move || -> Result<_, AsyncStateRootError> { let provider = view.provider_ro()?; - let hashed_state = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider.tx_ref()), - &hashed_state_sorted, - ); Ok(StorageRoot::new_hashed( provider.tx_ref(), - hashed_state, + HashedPostStateCursorFactory::new(provider.tx_ref(), &hashed_state_sorted), hashed_address, #[cfg(feature = "metrics")] metrics, @@ -131,10 +125,7 @@ where let provider_ro = self.view.provider_ro()?; let tx = provider_ro.tx_ref(); - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &hashed_state_sorted, - ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_state_sorted); let trie_cursor_factory = tx; let walker = TrieWalker::new( diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index b95d38fa422f..0983fd47e5a3 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -6,9 +6,7 @@ use reth_execution_errors::StorageRootError; use reth_primitives::B256; use reth_provider::{providers::ConsistentDbView, DatabaseProviderFactory, ProviderError}; use reth_trie::{ - hashed_cursor::{ - DatabaseHashedCursorFactory, HashedCursorFactory, HashedPostStateCursorFactory, - }, + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::TrieCursorFactory, updates::TrieUpdates, @@ -93,13 +91,9 @@ where .into_par_iter() .map(|(hashed_address, prefix_set)| { let provider_ro = self.view.provider_ro()?; - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); let storage_root_result = StorageRoot::new_hashed( provider_ro.tx_ref(), - hashed_cursor_factory, + HashedPostStateCursorFactory::new(provider_ro.tx_ref(), &hashed_state_sorted), hashed_address, #[cfg(feature = "metrics")] self.metrics.storage_trie.clone(), @@ -114,10 +108,8 @@ where let mut trie_updates = TrieUpdates::default(); let provider_ro = self.view.provider_ro()?; - let hashed_cursor_factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), - &hashed_state_sorted, - ); + let hashed_cursor_factory = + HashedPostStateCursorFactory::new(provider_ro.tx_ref(), &hashed_state_sorted); let trie_cursor_factory = provider_ro.tx_ref(); let walker = TrieWalker::new( @@ -210,7 +202,7 @@ impl From for ProviderError { fn from(error: ParallelStateRootError) -> Self { match error { ParallelStateRootError::Provider(error) => error, - ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => { + ParallelStateRootError::StorageRoot(StorageRootError::DB(error)) => { Self::Database(error) } } diff --git a/crates/trie/trie/src/hashed_cursor/default.rs b/crates/trie/trie/src/hashed_cursor/default.rs index e667f4723173..197fd7ecde76 100644 --- a/crates/trie/trie/src/hashed_cursor/default.rs +++ b/crates/trie/trie/src/hashed_cursor/default.rs @@ -6,30 +6,13 @@ use reth_db_api::{ }; use reth_primitives::{Account, B256, U256}; -/// A struct wrapping database transaction that implements [`HashedCursorFactory`]. 
-#[derive(Debug)]
-pub struct DatabaseHashedCursorFactory<'a, TX>(&'a TX);
-
-impl<'a, TX> Clone for DatabaseHashedCursorFactory<'a, TX> {
-    fn clone(&self) -> Self {
-        Self(self.0)
-    }
-}
-
-impl<'a, TX> DatabaseHashedCursorFactory<'a, TX> {
-    /// Create new database hashed cursor factory.
-    pub const fn new(tx: &'a TX) -> Self {
-        Self(tx)
-    }
-}
-
-impl<'a, TX: DbTx> HashedCursorFactory for DatabaseHashedCursorFactory<'a, TX> {
-    type AccountCursor = DatabaseHashedAccountCursor<<TX as DbTx>::Cursor<tables::HashedAccounts>>;
+impl<'a, TX: DbTx> HashedCursorFactory for &'a TX {
+    type AccountCursor = <TX as DbTx>::Cursor<tables::HashedAccounts>;
     type StorageCursor = DatabaseHashedStorageCursor<<TX as DbTx>::DupCursor<tables::HashedStorages>>;
 
     fn hashed_account_cursor(&self) -> Result<Self::AccountCursor, reth_db::DatabaseError> {
-        Ok(DatabaseHashedAccountCursor(self.0.cursor_read::<tables::HashedAccounts>()?))
+        self.cursor_read::<tables::HashedAccounts>()
     }
 
     fn hashed_storage_cursor(
@@ -37,36 +20,24 @@ impl<'a, TX: DbTx> HashedCursorFactory for DatabaseHashedCursorFactory<'a, TX> {
         hashed_address: B256,
     ) -> Result<Self::StorageCursor, reth_db::DatabaseError> {
         Ok(DatabaseHashedStorageCursor::new(
-            self.0.cursor_dup_read::<tables::HashedStorages>()?,
+            self.cursor_dup_read::<tables::HashedStorages>()?,
             hashed_address,
         ))
     }
 }
 
-/// A struct wrapping database cursor over hashed accounts implementing [`HashedCursor`] for
-/// iterating over accounts.
-#[derive(Debug)]
-pub struct DatabaseHashedAccountCursor<C>(C);
-
-impl<C> DatabaseHashedAccountCursor<C> {
-    /// Create new database hashed account cursor.
-    pub const fn new(cursor: C) -> Self {
-        Self(cursor)
-    }
-}
-
-impl<C> HashedCursor for DatabaseHashedAccountCursor<C>
+impl<C> HashedCursor for C
 where
     C: DbCursorRO<tables::HashedAccounts>,
 {
     type Value = Account;
 
     fn seek(&mut self, key: B256) -> Result<Option<(B256, Account)>, reth_db::DatabaseError> {
-        self.0.seek(key)
+        self.seek(key)
     }
 
     fn next(&mut self) -> Result<Option<(B256, Account)>, reth_db::DatabaseError> {
-        self.0.next()
+        self.next()
     }
 }
diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs
index 053836e826d4..05de76721d59 100644
--- a/crates/trie/trie/src/hashed_cursor/mod.rs
+++ b/crates/trie/trie/src/hashed_cursor/mod.rs
@@ -2,7 +2,7 @@ use reth_primitives::{Account, B256, U256};
 
 /// Default implementation of the hashed state cursor traits.
 mod default;
-pub use default::*;
+pub use default::DatabaseHashedStorageCursor;
 
 /// Implementation of hashed state cursor traits for the post state.
 mod post_state;
diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs
index fffd66a73f6b..ac262f3d44fc 100644
--- a/crates/trie/trie/src/hashed_cursor/post_state.rs
+++ b/crates/trie/trie/src/hashed_cursor/post_state.rs
@@ -8,7 +8,7 @@ use reth_primitives::{Account, B256, U256};
 use std::collections::HashSet;
 
 /// The hashed cursor factory for the post state.
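With the blanket impls above, a plain `&TX` now satisfies the hashed cursor factory traits, so the `DatabaseHashedCursorFactory` wrapper disappears from every call site touched below. A minimal sketch of the resulting usage, assuming a read-only `DbTx` named `tx` and a sorted post state `sorted` as in the tests further down:

```
let factory = HashedPostStateCursorFactory::new(&tx, &sorted);
let mut accounts = factory.hashed_account_cursor()?;
let first_account = accounts.seek(B256::ZERO)?;
```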
-#[derive(Clone, Debug)] +#[derive(Debug, Clone)] pub struct HashedPostStateCursorFactory<'a, CF> { cursor_factory: CF, post_state: &'a HashedPostStateSorted, @@ -328,7 +328,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{hashed_cursor::DatabaseHashedCursorFactory, HashedPostState, HashedStorage}; + use crate::{HashedPostState, HashedStorage}; use proptest::prelude::*; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::create_test_rw_db}; @@ -387,8 +387,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -407,10 +406,7 @@ mod tests { let sorted_post_state = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(&tx), - &sorted_post_state, - ); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted_post_state); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -435,8 +431,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -466,8 +461,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let expected = accounts.into_iter().filter(|x| !removed_keys.contains(&x.0)); assert_account_cursor_order(&factory, expected); } @@ -494,8 +488,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); assert_account_cursor_order(&factory, accounts.into_iter()); } @@ -527,7 +520,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); assert_account_cursor_order(&factory, expected.into_iter()); } ); @@ -542,8 +535,7 @@ mod tests { { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -566,8 +558,7 @@ mod tests { { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(!cursor.is_storage_empty().unwrap()); } @@ -582,8 +573,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - 
HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -599,8 +589,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(cursor.is_storage_empty().unwrap()); } @@ -616,8 +605,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let mut cursor = factory.hashed_storage_cursor(address).unwrap(); assert!(!cursor.is_storage_empty().unwrap()); } @@ -655,8 +643,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let expected = std::iter::once((address, db_storage.into_iter().chain(post_state_storage).collect())); assert_storage_cursor_order(&factory, expected); @@ -692,8 +679,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let expected = std::iter::once(( address, post_state_storage.into_iter().filter(|(_, value)| *value > U256::ZERO).collect(), @@ -730,8 +716,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let expected = std::iter::once((address, post_state_storage)); assert_storage_cursor_order(&factory, expected); } @@ -766,8 +751,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); let expected = std::iter::once((address, storage)); assert_storage_cursor_order(&factory, expected); } @@ -814,7 +798,7 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); - let factory = HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let factory = HashedPostStateCursorFactory::new(&tx, &sorted); assert_storage_cursor_order(&factory, expected.into_iter()); }); } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 85a254f70b8b..eb492f81f4f6 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -2,17 +2,16 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, prefix_set::TriePrefixSetsMut, - trie_cursor::TrieCursorFactory, + trie_cursor::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, walker::TrieWalker, HashBuilder, Nibbles, }; use alloy_rlp::{BufMut, Encodable}; -use reth_execution_errors::trie::StateProofError; -use 
reth_primitives::{keccak256, Address, B256}; -use reth_trie_common::{ - proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, -}; -use std::collections::HashMap; +use reth_db::tables; +use reth_db_api::transaction::DbTx; +use reth_execution_errors::{StateRootError, StorageRootError}; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; +use reth_trie_common::{proof::ProofRetainer, AccountProof, StorageProof, TrieAccount}; /// A struct for generating merkle proofs. /// @@ -20,36 +19,24 @@ use std::collections::HashMap; /// on the hash builder and follows the same algorithm as the state root calculator. /// See `StateRoot::root` for more info. #[derive(Debug)] -pub struct Proof { +pub struct Proof<'a, TX, H> { + /// A reference to the database transaction. + tx: &'a TX, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// Creates cursor for traversing trie entities. - trie_cursor_factory: T, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, - /// Proof targets. - targets: HashMap>, } -impl Proof { - /// Create a new [Proof] instance. - pub fn new(t: T, h: H) -> Self { - Self { - trie_cursor_factory: t, - hashed_cursor_factory: h, - prefix_sets: TriePrefixSetsMut::default(), - targets: HashMap::default(), - } +impl<'a, TX, H> Proof<'a, TX, H> { + /// Creates a new proof generator. + pub fn new(tx: &'a TX, hashed_cursor_factory: H) -> Self { + Self { tx, hashed_cursor_factory, prefix_sets: TriePrefixSetsMut::default() } } /// Set the hashed cursor factory. - pub fn with_hashed_cursor_factory(self, hashed_cursor_factory: HF) -> Proof { - Proof { - trie_cursor_factory: self.trie_cursor_factory, - hashed_cursor_factory, - prefix_sets: self.prefix_sets, - targets: self.targets, - } + pub fn with_hashed_cursor_factory(self, hashed_cursor_factory: HF) -> Proof<'a, TX, HF> { + Proof { tx: self.tx, hashed_cursor_factory, prefix_sets: self.prefix_sets } } /// Set the prefix sets. They have to be mutable in order to allow extension with proof target. @@ -57,49 +44,43 @@ impl Proof { self.prefix_sets = prefix_sets; self } +} - /// Set the target accounts and slots. - pub fn with_targets(mut self, targets: HashMap>) -> Self { - self.targets = targets; - self +impl<'a, TX> Proof<'a, TX, &'a TX> { + /// Create a new [Proof] instance from database transaction. + pub fn from_tx(tx: &'a TX) -> Self { + Self::new(tx, tx) } } -impl Proof +impl<'a, TX, H> Proof<'a, TX, H> where - T: TrieCursorFactory, + TX: DbTx, H: HashedCursorFactory + Clone, { /// Generate an account proof from intermediate nodes. pub fn account_proof( - self, + &self, address: Address, slots: &[B256], - ) -> Result { - Ok(self - .with_targets(HashMap::from([( - keccak256(address), - slots.iter().map(keccak256).collect(), - )])) - .multi_proof()? - .account_proof(address, slots)?) - } + ) -> Result { + let target_hashed_address = keccak256(address); + let target_nibbles = Nibbles::unpack(target_hashed_address); + let mut account_proof = AccountProof::new(address); - /// Generate a state multiproof according to specified targets. - pub fn multi_proof(&self) -> Result { let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; - let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; + let trie_cursor = + DatabaseAccountTrieCursor::new(self.tx.cursor_read::()?); // Create the walker. 
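        // The prefix set seeded with the target nibbles below forces the walker to
        // descend along the target path instead of short-circuiting on stored
        // intermediate nodes, while the `ProofRetainer` keeps every node emitted on
        // that path; `hash_builder.take_proofs()` later returns those retained
        // nodes as the proof.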
let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); - prefix_set.extend(self.targets.keys().map(Nibbles::unpack)); + prefix_set.insert(target_nibbles.clone()); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ProofRetainer::from_iter(self.targets.keys().map(Nibbles::unpack)); + let retainer = ProofRetainer::from_iter([target_nibbles]); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storage_multiproofs = HashMap::default(); let mut account_rlp = Vec::with_capacity(128); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? { @@ -108,44 +89,62 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { - let storage_multiproof = self.storage_multiproof(hashed_address)?; + let storage_root = if hashed_address == target_hashed_address { + let (storage_root, storage_proofs) = + self.storage_root_with_proofs(hashed_address, slots)?; + account_proof.set_account(account, storage_root, storage_proofs); + storage_root + } else { + self.storage_root(hashed_address)? + }; - // Encode account account_rlp.clear(); - let account = TrieAccount::from((account, storage_multiproof.root)); + let account = TrieAccount::from((account, storage_root)); account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - storage_multiproofs.insert(hashed_address, storage_multiproof); } } } + let _ = hash_builder.root(); - Ok(MultiProof { account_subtree: hash_builder.take_proofs(), storage_multiproofs }) + + let proofs = hash_builder.take_proofs(); + account_proof.set_proof(proofs.values().cloned().collect()); + + Ok(account_proof) + } + + /// Compute storage root. + pub fn storage_root(&self, hashed_address: B256) -> Result { + let (storage_root, _) = self.storage_root_with_proofs(hashed_address, &[])?; + Ok(storage_root) } - /// Generate a storage multiproof according to specified targets. - pub fn storage_multiproof( + /// Compute the storage root and retain proofs for requested slots. + pub fn storage_root_with_proofs( &self, hashed_address: B256, - ) -> Result { + slots: &[B256], + ) -> Result<(B256, Vec), StorageRootError> { let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; + let mut proofs = slots.iter().copied().map(StorageProof::new).collect::>(); + // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? 
{ - return Ok(StorageMultiProof::default()) + return Ok((EMPTY_ROOT_HASH, proofs)) } - let target_nibbles = self - .targets - .get(&hashed_address) - .map_or(Vec::new(), |slots| slots.iter().map(Nibbles::unpack).collect()); - + let target_nibbles = proofs.iter().map(|p| p.nibbles.clone()).collect::>(); let mut prefix_set = self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default(); prefix_set.extend(target_nibbles.clone()); - let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(hashed_address)?; + let trie_cursor = DatabaseStorageTrieCursor::new( + self.tx.cursor_dup_read::()?, + hashed_address, + ); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); @@ -157,15 +156,320 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_slot, value) => { - hash_builder.add_leaf( - Nibbles::unpack(hashed_slot), - alloy_rlp::encode_fixed_size(&value).as_ref(), - ); + let nibbles = Nibbles::unpack(hashed_slot); + if let Some(proof) = proofs.iter_mut().find(|proof| proof.nibbles == nibbles) { + proof.set_value(value); + } + hash_builder.add_leaf(nibbles, alloy_rlp::encode_fixed_size(&value).as_ref()); } } } let root = hash_builder.root(); - Ok(StorageMultiProof { root, subtree: hash_builder.take_proofs() }) + + let all_proof_nodes = hash_builder.take_proofs(); + for proof in &mut proofs { + // Iterate over all proof nodes and find the matching ones. + // The filtered results are guaranteed to be in order. + let matching_proof_nodes = all_proof_nodes + .iter() + .filter(|(path, _)| proof.nibbles.starts_with(path)) + .map(|(_, node)| node.clone()); + proof.set_proof(matching_proof_nodes.collect()); + } + + Ok((root, proofs)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::StateRoot; + use once_cell::sync::Lazy; + use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; + use reth_db_api::database::Database; + use reth_primitives::{Account, Bytes, StorageEntry, U256}; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter, ProviderFactory}; + use reth_storage_errors::provider::ProviderResult; + use std::{str::FromStr, sync::Arc}; + + /* + World State (sampled from ) + | address | prefix | hash | balance + |--------------------------------------------|-----------|--------------------------------------------------------------------|-------- + | 0x2031f89b3ea8014eb51a78c316e42af3e0d7695f | 0xa711355 | 0xa711355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40 | 45 eth + | 0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2 | 0xa77d337 | 0xa77d337781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37a | 1 wei + | 0x62b0dd4aab2b1a0a04e279e2b828791a10755528 | 0xa7f9365 | 0xa7f936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446e | 1.1 eth + | 0x1ed9b1dd266b607ee278726d324b855a093394a6 | 0xa77d397 | 0xa77d397a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3 | .12 eth + + All expected testspec results were obtained from querying proof RPC on the running geth instance `geth init crates/trie/testdata/proof-genesis.json && geth --http`. 
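    Each expected vector in the tests below is the proof-node array returned by the
    quoted `cast proof <address> [slots...] --block 0` invocation against that geth
    instance; the tests assert byte-for-byte equality with reth's output and then
    check `AccountProof::verify` (and `StorageProof::verify`) against the computed
    root.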
+ */ + static TEST_SPEC: Lazy> = Lazy::new(|| { + ChainSpec { + chain: Chain::from_id(12345), + genesis: serde_json::from_str(include_str!("../testdata/proof-genesis.json")) + .expect("Can't deserialize test genesis json"), + ..Default::default() + } + .into() + }); + + fn convert_to_proof<'a>(path: impl IntoIterator) -> Vec { + path.into_iter().map(Bytes::from_str).collect::, _>>().unwrap() + } + + fn insert_genesis( + provider_factory: &ProviderFactory, + chain_spec: Arc, + ) -> ProviderResult { + let mut provider = provider_factory.provider_rw()?; + + // Hash accounts and insert them into hashing table. + let genesis = chain_spec.genesis(); + let alloc_accounts = genesis + .alloc + .iter() + .map(|(addr, account)| (*addr, Some(Account::from_genesis_account(account)))); + provider.insert_account_for_hashing(alloc_accounts).unwrap(); + + let alloc_storage = genesis.alloc.clone().into_iter().filter_map(|(addr, account)| { + // Only return `Some` if there is storage. + account.storage.map(|storage| { + ( + addr, + storage + .into_iter() + .map(|(key, value)| StorageEntry { key, value: value.into() }), + ) + }) + }); + provider.insert_storage_for_hashing(alloc_storage)?; + + let (root, updates) = StateRoot::from_tx(provider.tx_ref()) + .root_with_updates() + .map_err(Into::::into)?; + updates.write_to_database(provider.tx_mut())?; + + provider.commit()?; + + Ok(root) + } + + #[test] + fn testspec_proofs() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); + + let data = Vec::from([ + ( + "0x2031f89b3ea8014eb51a78c316e42af3e0d7695f", + convert_to_proof([ + "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", + "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", + "0xf8719f31355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40b84ff84d80890270801d946c940000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ( + "0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2", + convert_to_proof([ + "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", + "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", + "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", + "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", + "0xf8679e207781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37ab846f8448001a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ( + "0x62b0dd4aab2b1a0a04e279e2b828791a10755528", + convert_to_proof([ + "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", + "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", + 
"0xf8709f3936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446eb84ef84c80880f43fc2c04ee0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ( + "0x1ed9b1dd266b607ee278726d324b855a093394a6", + convert_to_proof([ + "0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3", + "0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880", + "0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54", + "0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080", + "0xf86f9e207a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3b84ef84c808801aa535d3d0c0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]) + ), + ]); + + let provider = factory.provider().unwrap(); + for (target, expected_proof) in data { + let target = Address::from_str(target).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!( + account_proof.proof, + expected_proof, + "proof for {target:?} does not match" + ); + assert_eq!(account_proof.verify(root), Ok(())); + } + } + + #[test] + fn testspec_empty_storage_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); + + let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); + let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); + + let provider = factory.provider().unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); + + assert_eq!(slots.len(), account_proof.storage_proofs.len()); + for (idx, slot) in slots.into_iter().enumerate() { + let proof = account_proof.storage_proofs.get(idx).unwrap(); + assert_eq!(proof, &StorageProof::new(slot)); + assert_eq!(proof.verify(account_proof.storage_root), Ok(())); + } + assert_eq!(account_proof.verify(root), Ok(())); + } + + #[test] + fn mainnet_genesis_account_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); + + // Address from mainnet genesis allocation. 
+ // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743280").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743280 --block 0` + let expected_account_proof = convert_to_proof([ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0dae48f5b47930c28bb116fbd55e52cd47242c71bf55373b55eb2805ee2e4a929a00f1f37f337ec800e2e5974e2e7355f10f1a4832b39b846d916c3597a460e0676a0da8f627bb8fbeead17b318e0a8e4f528db310f591bb6ab2deda4a9f7ca902ab5a0971c662648d58295d0d0aa4b8055588da0037619951217c22052802549d94a2fa0ccc701efe4b3413fd6a61a6c9f40e955af774649a8d9fd212d046a5a39ddbb67a0d607cdb32e2bd635ee7f2f9e07bc94ddbd09b10ec0901b66628e15667aec570ba05b89203dc940e6fa70ec19ad4e01d01849d3a5baa0a8f9c0525256ed490b159fa0b84227d48df68aecc772939a59afa9e1a4ab578f7b698bdb1289e29b6044668ea0fd1c992070b94ace57e48cbf6511a16aa770c645f9f5efba87bbe59d0a042913a0e16a7ccea6748ae90de92f8aef3b3dc248a557b9ac4e296934313f24f7fced5fa042373cf4a00630d94de90d0a23b8f38ced6b0f7cb818b8925fee8f0c2a28a25aa05f89d2161c1741ff428864f7889866484cef622de5023a46e795dfdec336319fa07597a017664526c8c795ce1da27b8b72455c49657113e0455552dbc068c5ba31a0d5be9089012fda2c585a1b961e988ea5efcd3a06988e150a8682091f694b37c5a0f7b0352e38c315b2d9a14d51baea4ddee1770974c806e209355233c3c89dce6ea049bf6e8df0acafd0eff86defeeb305568e44d52d2235cf340ae15c6034e2b24180", + "0xf901f1a0cf67e0f5d5f8d70e53a6278056a14ddca46846f5ef69c7bde6810d058d4a9eda80a06732ada65afd192197fe7ce57792a7f25d26978e64e954b7b84a1f7857ac279da05439f8d011683a6fc07efb90afca198fd7270c795c835c7c85d91402cda992eaa0449b93033b6152d289045fdb0bf3f44926f831566faa0e616b7be1abaad2cb2da031be6c3752bcd7afb99b1bb102baf200f8567c394d464315323a363697646616a0a40e3ed11d906749aa501279392ffde868bd35102db41364d9c601fd651f974aa0044bfa4fe8dd1a58e6c7144da79326e94d1331c0b00373f6ae7f3662f45534b7a098005e3e48db68cb1dc9b9f034ff74d2392028ddf718b0f2084133017da2c2e7a02a62bc40414ee95b02e202a9e89babbabd24bef0abc3fc6dcd3e9144ceb0b725a0239facd895bbf092830390a8676f34b35b29792ae561f196f86614e0448a5792a0a4080f88925daff6b4ce26d188428841bd65655d8e93509f2106020e76d41eefa04918987904be42a6894256ca60203283d1b89139cf21f09f5719c44b8cdbb8f7a06201fc3ef0827e594d953b5e3165520af4fceb719e11cc95fd8d3481519bfd8ca05d0e353d596bd725b09de49c01ede0f29023f0153d7b6d401556aeb525b2959ba0cd367d0679950e9c5f2aa4298fd4b081ade2ea429d71ff390c50f8520e16e30880", + 
"0xf87180808080808080a0dbee8b33c73b86df839f309f7ac92eee19836e08b39302ffa33921b3c6a09f66a06068b283d51aeeee682b8fb5458354315d0b91737441ede5e137c18b4775174a8080808080a0fe7779c7d58c2fda43eba0a6644043c86ebb9ceb4836f89e30831f23eb059ece8080", + "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ]); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); + } + + #[test] + fn mainnet_genesis_account_proof_nonexistent() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, MAINNET.clone()).unwrap(); + + // Address that does not exist in mainnet genesis allocation. + // keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` + let target = Address::from_str("0x000d836201318ec6899a67540690382780743281").unwrap(); + + // `cast proof 0x000d836201318ec6899a67540690382780743281 --block 0` + let expected_account_proof = convert_to_proof([ + "0xf90211a090dcaf88c40c7bbc95a912cbdde67c175767b31173df9ee4b0d733bfdd511c43a0babe369f6b12092f49181ae04ca173fb68d1a5456f18d20fa32cba73954052bda0473ecf8a7e36a829e75039a3b055e51b8332cbf03324ab4af2066bbd6fbf0021a0bbda34753d7aa6c38e603f360244e8f59611921d9e1f128372fec0d586d4f9e0a04e44caecff45c9891f74f6a2156735886eedf6f1a733628ebc802ec79d844648a0a5f3f2f7542148c973977c8a1e154c4300fec92f755f7846f1b734d3ab1d90e7a0e823850f50bf72baae9d1733a36a444ab65d0a6faaba404f0583ce0ca4dad92da0f7a00cbe7d4b30b11faea3ae61b7f1f2b315b61d9f6bd68bfe587ad0eeceb721a07117ef9fc932f1a88e908eaead8565c19b5645dc9e5b1b6e841c5edbdfd71681a069eb2de283f32c11f859d7bcf93da23990d3e662935ed4d6b39ce3673ec84472a0203d26456312bbc4da5cd293b75b840fc5045e493d6f904d180823ec22bfed8ea09287b5c21f2254af4e64fca76acc5cd87399c7f1ede818db4326c98ce2dc2208a06fc2d754e304c48ce6a517753c62b1a9c1d5925b89707486d7fc08919e0a94eca07b1c54f15e299bd58bdfef9741538c7828b5d7d11a489f9c20d052b3471df475a051f9dd3739a927c89e357580a4c97b40234aa01ed3d5e0390dc982a7975880a0a089d613f26159af43616fd9455bb461f4869bfede26f2130835ed067a8b967bfb80", + "0xf90211a0586b1ddec8db4824154209d355a1989b6c43aa69aba36e9d70c9faa53e7452baa0f86db47d628c73764d74b9ccaed73b8486d97a7731d57008fc9efaf417411860a0d9faed7b9ea107b5d98524246c977e782377f976e34f70717e8b1207f2f9b981a00218f59ccedf797c95e27c56405b9bf16845050fb43e773b66b26bc6992744f5a0dbf396f480c4e024156644adea7c331688d03742369e9d87ab8913bc439ff975a0aced524f39b22c62a5be512ddbca89f0b89b47c311065ccf423dee7013c7ea83a0c06b05f80b237b403adc019c0bc95b5de935021b14a75cbc18509eec60dfd83aa085339d45c4a52b7d523c301701f1ab339964e9c907440cff0a871c98dcf8811ea03ae9f6b8e227ec9be9461f0947b01696f78524c4519a6dee9fba14d209952cf9a0af17f551f9fa1ba4be41d0b342b160e2e8468d7e98a65a2dbf9d5fe5d6928024a0b850ac3bc03e9a309cc59ce5f1ab8db264870a7a22786081753d1db91897b8e6a09e796a4904bd78cb2655b5f346c94350e2d5f0dbf2bc00ac00871cd7ba46b241a0f6f0377427b900529caf32abf32ba1eb93f5f70153aa50b90bf55319a434c252a0725eaf27c8ee07e9b2511a6d6a0d71c649d855e8a9ed26e667903e2e94ae47cba0e4139fb48aa1a524d47f6e0df80314b88b52202d7e853da33c276aa8572283a8a05e9003d54a45935fdebae3513dc7cd16626dc05e1d903ae7f47f1a35aa6e234580", + 
"0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" + ]); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); + assert_eq!(account_proof.verify(root), Ok(())); + } + + #[test] + fn holesky_deposit_contract_proof() { + // Create test database and insert genesis accounts. + let factory = create_test_provider_factory(); + let root = insert_genesis(&factory, HOLESKY.clone()).unwrap(); + + let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); + // existent + let slot_22 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000022") + .unwrap(); + let slot_23 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000023") + .unwrap(); + let slot_24 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000024") + .unwrap(); + // non-existent + let slot_100 = + B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000100") + .unwrap(); + let slots = Vec::from([slot_22, slot_23, slot_24, slot_100]); + + // `cast proof 0x4242424242424242424242424242424242424242 0x22 0x23 0x24 0x100 --block 0` + let expected = AccountProof { + address: target, + info: Some(Account { + balance: U256::ZERO, + nonce: 0, + bytecode_hash: Some(B256::from_str("0x2034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc").unwrap()) + }), + storage_root: B256::from_str("0x556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599").unwrap(), + proof: convert_to_proof([ + 
"0xf90211a0ea92fb71507739d5afe328d607b2c5e98322b7aa7cdfeccf817543058b54af70a0bd0c2525b5bee47abf7120c9e01ec3249699d687f80ebb96ed9ad9de913dbab0a0ab4b14b89416eb23c6b64204fa45cfcb39d4220016a9cd0815ebb751fe45eb71a0986ae29c2148b9e61f9a7543f44a1f8d029f1c5095b359652e9ec94e64b5d393a0555d54aa23ed990b0488153418637df7b2c878b604eb761aa2673b609937b0eba0140afb6a3909cc6047b3d44af13fc83f161a7e4c4ddba430a2841862912eb222a031b1185c1f455022d9e42ce04a71f174eb9441b1ada67449510500f4d85b3b22a051ecd01e18113b23cc65e62f67d69b33ee15d20bf81a6b524f7df90ded00ca15a0703769d6a7befad000bc2b4faae3e41b809b1b1241fe2964262554e7e3603488a0e5de7f600e4e6c3c3e5630e0c66f50506a17c9715642fccb63667e81397bbf93a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606a04225fda3b89f0c59bf40129d1d5e5c3bf67a2129f0c55e53ffdd2cebf185d644a078e0f7fd3ae5a9bc90f66169614211b48fe235eb64818b3935d3e69c53523b9aa0a870e00e53ebaa1e9ec16e5f36606fd7d21d3a3c96894c0a2a23550949d4fdf7a0809226b69cee1f4f22ced1974e7805230da1909036a49a7652428999431afac2a0f11593b2407e86e11997325d8df2d22d937bbe0aef8302ba40c6be0601b04fc380", + "0xf901f1a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5fa05f5f09855df46330aa310e8d6be5fb82d1a4b975782d9b29acf06ac8d3e72b1ca0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06ba04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83a004f8c7cc4f1335182a1709fb28fc67d52e59878480210abcba864d5d1fd4a066a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d80a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86fa02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fffa0f3b039a4f32349e85c782d1164c1890e5bf16badc9ee4cf827db6afd2229dde6a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b280", + "0xf869a0202a47fc6863b89a6b51890ef3c1550d560886c027141d2058ba1e2d4c66d99ab846f8448080a0556a482068355939c95a3412bdb21213a301483edb1b64402fb66ac9f3583599a02034f79e0e33b0ae6bef948532021baceb116adf2616478703bec6b17329f1cc" + ]), + storage_proofs: Vec::from([ + StorageProof { + key: slot_22, + nibbles: Nibbles::unpack(keccak256(slot_22)), + value: U256::from_str("0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf85180a0776aa456ba9c5008e03b82b841a9cf2fc1e8578cfacd5c9015804eae315f17fb80808080808080808080808080a072e3e284d47badbb0a5ca1421e1179d3ea90cc10785b26b74fb8a81f0f9e841880", + 
"0xf843a020035b26e3e9eee00e0d72fd1ee8ddca6894550dca6916ea2ac6baa90d11e510a1a0f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b" + ]) + }, + StorageProof { + key: slot_23, + nibbles: Nibbles::unpack(keccak256(slot_23)), + value: U256::from_str("0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf8518080808080a0d546c4ca227a267d29796643032422374624ed109b3d94848c5dc06baceaee76808080808080a027c48e210ccc6e01686be2d4a199d35f0e1e8df624a8d3a17c163be8861acd6680808080", + "0xf843a0207b2b5166478fd4318d2acc6cc2c704584312bdd8781b32d5d06abda57f4230a1a0db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71" + ]) + }, + StorageProof { + key: slot_24, + nibbles: Nibbles::unpack(keccak256(slot_24)), + value: U256::from_str("0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c").unwrap(), + proof: convert_to_proof([ + "0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf85180808080a030263404acfee103d0b1019053ff3240fce433c69b709831673285fa5887ce4c80808080808080a0f8f1fbb1f7b482d9860480feebb83ff54a8b6ec1ead61cc7d2f25d7c01659f9c80808080", + "0xf843a020d332d19b93bcabe3cce7ca0c18a052f57e5fd03b4758a09f30f5ddc4b22ec4a1a0c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c" + ]) + }, + StorageProof { + key: slot_100, + nibbles: Nibbles::unpack(keccak256(slot_100)), + value: U256::ZERO, + proof: convert_to_proof([ + 
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080", + "0xf891a090bacef44b189ddffdc5f22edc70fe298c58e5e523e6e1dfdf7dbc6d657f7d1b80a026eed68746028bc369eb456b7d3ee475aa16f34e5eaa0c98fdedb9c59ebc53b0808080a09ce86197173e14e0633db84ce8eea32c5454eebe954779255644b45b717e8841808080a0328c7afb2c58ef3f8c4117a8ebd336f1a61d24591067ed9c5aae94796cac987d808080808080" + ]) + }, + ]) + }; + + let provider = factory.provider().unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + similar_asserts::assert_eq!(account_proof, expected); + assert_eq!(account_proof.verify(root), Ok(())); } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 65222fdc0a8e..84bfb8fd6f5f 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -1,6 +1,9 @@ use crate::{ + hashed_cursor::HashedPostStateCursorFactory, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, - Nibbles, + proof::Proof, + updates::TrieUpdates, + Nibbles, StateRoot, }; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; @@ -10,9 +13,14 @@ use reth_db_api::{ models::{AccountBeforeTx, BlockNumberAddress}, transaction::DbTx, }; +use reth_execution_errors::StateRootError; use reth_primitives::{keccak256, Account, Address, BlockNumber, B256, U256}; +use reth_trie_common::AccountProof; use revm::db::BundleAccount; -use std::collections::{hash_map, HashMap, HashSet}; +use std::{ + collections::{hash_map, HashMap, HashSet}, + ops::RangeInclusive, +}; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -54,13 +62,20 @@ impl HashedPostState { Self { accounts, storages } } - /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts from the specified - /// block up to the current tip and aggregates them into hashed state in reverse. - pub fn from_reverts(tx: &TX, from: BlockNumber) -> Result { + /// Initialize [`HashedPostState`] from revert range. + /// Iterate over state reverts in the specified block range and + /// apply them to hashed state in reverse. + /// + /// NOTE: In order to have the resulting [`HashedPostState`] be a correct + /// overlay of the plain state, the end of the range must be the current tip. + pub fn from_revert_range( + tx: &TX, + range: RangeInclusive, + ) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::>::default(); let mut account_changesets_cursor = tx.cursor_read::()?; - for entry in account_changesets_cursor.walk_range(from..)? { + for entry in account_changesets_cursor.walk_range(range.clone())? 
{
             let (_, AccountBeforeTx { address, info }) = entry?;
             if let hash_map::Entry::Vacant(entry) = accounts.entry(address) {
                 entry.insert(info);
@@ -70,9 +85,7 @@ impl HashedPostState {
         // Iterate over storage changesets and record value before first occurring storage change.
         let mut storages = HashMap::<Address, HashMap<B256, U256>>::default();
         let mut storage_changesets_cursor = tx.cursor_read::<tables::StorageChangeSets>()?;
-        for entry in
-            storage_changesets_cursor.walk_range(BlockNumberAddress((from, Address::ZERO))..)?
-        {
+        for entry in storage_changesets_cursor.walk_range(BlockNumberAddress::range(range))? {
             let (BlockNumberAddress((_, address)), storage) = entry?;
             let account_storage = storages.entry(address).or_default();
             if let hash_map::Entry::Vacant(entry) = account_storage.entry(storage.key) {
@@ -189,6 +202,74 @@ impl HashedPostState {
         TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts }
     }
+
+    /// Calculate the state root for this [`HashedPostState`].
+    /// Internally, this method retrieves prefix sets and uses them
+    /// to calculate an incremental state root.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use reth_db::test_utils::create_test_rw_db;
+    /// use reth_db_api::database::Database;
+    /// use reth_primitives::{Account, U256};
+    /// use reth_trie::HashedPostState;
+    ///
+    /// // Initialize the database
+    /// let db = create_test_rw_db();
+    ///
+    /// // Initialize hashed post state
+    /// let mut hashed_state = HashedPostState::default();
+    /// hashed_state.accounts.insert(
+    ///     [0x11; 32].into(),
+    ///     Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }),
+    /// );
+    ///
+    /// // Calculate the state root
+    /// let tx = db.tx().expect("failed to create transaction");
+    /// let state_root = hashed_state.state_root(&tx);
+    /// ```
+    ///
+    /// # Returns
+    ///
+    /// The state root for this [`HashedPostState`].
+    pub fn state_root<TX: DbTx>(&self, tx: &TX) -> Result<B256, StateRootError> {
+        let sorted = self.clone().into_sorted();
+        let prefix_sets = self.construct_prefix_sets().freeze();
+        StateRoot::from_tx(tx)
+            .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted))
+            .with_prefix_sets(prefix_sets)
+            .root()
+    }
+
+    /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie
+    /// updates. See [`Self::state_root`] for more info.
+    pub fn state_root_with_updates<TX: DbTx>(
+        &self,
+        tx: &TX,
+    ) -> Result<(B256, TrieUpdates), StateRootError> {
+        let sorted = self.clone().into_sorted();
+        let prefix_sets = self.construct_prefix_sets().freeze();
+        StateRoot::from_tx(tx)
+            .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted))
+            .with_prefix_sets(prefix_sets)
+            .root_with_updates()
+    }
+
+    /// Generates the state proof for target account and slots on top of this [`HashedPostState`].
+    pub fn account_proof<TX: DbTx>(
+        &self,
+        tx: &TX,
+        address: Address,
+        slots: &[B256],
+    ) -> Result<AccountProof, StateRootError> {
+        let sorted = self.clone().into_sorted();
+        let prefix_sets = self.construct_prefix_sets();
+        Proof::from_tx(tx)
+            .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted))
+            .with_prefix_sets_mut(prefix_sets)
+            .account_proof(address, slots)
+    }
 }
 
 /// Representation of in-memory hashed storage.
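A minimal end-to-end sketch of the overlay API added above (test-style; the account values and the zero slot are hypothetical, and the imports mirror the doc example in this hunk):

```
use reth_db::test_utils::create_test_rw_db;
use reth_db_api::database::Database;
use reth_primitives::{keccak256, Account, Address, B256, U256};
use reth_trie::HashedPostState;

// Overlay with a single changed account (hypothetical values).
let address = Address::with_last_byte(1);
let mut hashed_state = HashedPostState::default();
hashed_state.accounts.insert(
    keccak256(address),
    Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }),
);

let db = create_test_rw_db();
let tx = db.tx().expect("failed to create transaction");

// State root plus the trie updates needed to persist the overlay.
let (root, _trie_updates) = hashed_state.state_root_with_updates(&tx).unwrap();

// Merkle proof for the same account, generated on top of the overlay.
let proof = hashed_state.account_proof(&tx, address, &[B256::ZERO]).unwrap();
assert_eq!(proof.verify(root), Ok(()));
```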
@@ -228,7 +309,7 @@ impl HashedStorage { let mut non_zero_valued_slots = Vec::new(); let mut zero_valued_slots = HashSet::default(); for (hashed_slot, value) in self.storage { - if value.is_zero() { + if value == U256::ZERO { zero_valued_slots.insert(hashed_slot); } else { non_zero_valued_slots.push((hashed_slot, value)); @@ -311,6 +392,13 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { use super::*; + use reth_db::test_utils::create_test_rw_db; + use reth_db_api::database::Database; + use reth_primitives::hex; + use revm::{ + db::states::BundleState, + primitives::{AccountInfo, HashMap}, + }; #[test] fn hashed_state_wiped_extension() { @@ -385,4 +473,34 @@ mod tests { ); assert_eq!(account_storage.map(|st| st.wiped), Some(true)); } + + #[test] + fn from_bundle_state_with_rayon() { + let address1 = Address::with_last_byte(1); + let address2 = Address::with_last_byte(2); + let slot1 = U256::from(1015); + let slot2 = U256::from(2015); + + let account1 = AccountInfo { nonce: 1, ..Default::default() }; + let account2 = AccountInfo { nonce: 2, ..Default::default() }; + + let bundle_state = BundleState::builder(2..=2) + .state_present_account_info(address1, account1) + .state_present_account_info(address2, account2) + .state_storage(address1, HashMap::from([(slot1, (U256::ZERO, U256::from(10)))])) + .state_storage(address2, HashMap::from([(slot2, (U256::ZERO, U256::from(20)))])) + .build(); + assert_eq!(bundle_state.reverts.len(), 1); + + let post_state = HashedPostState::from_bundle_state(&bundle_state.state); + assert_eq!(post_state.accounts.len(), 2); + assert_eq!(post_state.storages.len(), 2); + + let db = create_test_rw_db(); + let tx = db.tx().expect("failed to create transaction"); + assert_eq!( + post_state.state_root(&tx).unwrap(), + hex!("b464525710cafcf5d4044ac85b72c08b1e76231b8d91f288fe438cc41d8eaafd") + ); + } } diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 2b5c6d0b63c7..c444a305638d 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{PrefixSet, TriePrefixSets}, + prefix_set::{PrefixSet, PrefixSetLoader, TriePrefixSets}, progress::{IntermediateStateRootState, StateRootProgress}, stats::TrieTracker, trie_cursor::TrieCursorFactory, @@ -10,12 +10,14 @@ use crate::{ HashBuilder, Nibbles, TrieAccount, }; use alloy_rlp::{BufMut, Encodable}; +use reth_db_api::transaction::DbTx; use reth_execution_errors::{StateRootError, StorageRootError}; -use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; -use tracing::trace; +use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, BlockNumber, B256}; +use std::ops::RangeInclusive; +use tracing::{debug, trace}; #[cfg(feature = "metrics")] -use crate::metrics::{StateRootMetrics, TrieRootMetrics}; +use crate::metrics::{StateRootMetrics, TrieRootMetrics, TrieType}; /// `StateRoot` is used to compute the root node of a state trie. #[derive(Debug)] @@ -36,23 +38,6 @@ pub struct StateRoot { } impl StateRoot { - /// Creates [`StateRoot`] with `trie_cursor_factory` and `hashed_cursor_factory`. All other - /// parameters are set to reasonable defaults. - /// - /// The cursors created by given factories are then used to walk through the accounts and - /// calculate the state root value with. 
-    pub fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self {
-        Self {
-            trie_cursor_factory,
-            hashed_cursor_factory,
-            prefix_sets: TriePrefixSets::default(),
-            previous_state: None,
-            threshold: 100_000,
-            #[cfg(feature = "metrics")]
-            metrics: StateRootMetrics::default(),
-        }
-    }
-
     /// Set the prefix sets.
     pub fn with_prefix_sets(mut self, prefix_sets: TriePrefixSets) -> Self {
         self.prefix_sets = prefix_sets;
@@ -104,6 +89,79 @@ impl<T, H> StateRoot<T, H> {
     }
 }
 
+impl<'a, TX: DbTx> StateRoot<&'a TX, &'a TX> {
+    /// Create a new [`StateRoot`] instance.
+    pub fn from_tx(tx: &'a TX) -> Self {
+        Self {
+            trie_cursor_factory: tx,
+            hashed_cursor_factory: tx,
+            prefix_sets: TriePrefixSets::default(),
+            previous_state: None,
+            threshold: 100_000,
+            #[cfg(feature = "metrics")]
+            metrics: StateRootMetrics::default(),
+        }
+    }
+
+    /// Given a block number range, identifies all the accounts and storage keys that
+    /// have changed.
+    ///
+    /// # Returns
+    ///
+    /// An instance of state root calculator with account and storage prefixes loaded.
+    pub fn incremental_root_calculator(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<Self, StateRootError> {
+        let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?;
+        Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets))
+    }
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes.
+    ///
+    /// # Returns
+    ///
+    /// The updated state root.
+    pub fn incremental_root(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<B256, StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root");
+        Self::incremental_root_calculator(tx, range)?.root()
+    }
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes collecting updates in the process.
+    ///
+    /// Ignores the threshold.
+    ///
+    /// # Returns
+    ///
+    /// The updated state root and the trie updates.
+    pub fn incremental_root_with_updates(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<(B256, TrieUpdates), StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root");
+        Self::incremental_root_calculator(tx, range)?.root_with_updates()
+    }
+
+    /// Computes the state root of the trie with the changed account and storage prefixes and
+    /// existing trie nodes collecting updates in the process.
+    ///
+    /// # Returns
+    ///
+    /// The intermediate progress of state root computation.
+    pub fn incremental_root_with_progress(
+        tx: &'a TX,
+        range: RangeInclusive<BlockNumber>,
+    ) -> Result<StateRootProgress, StateRootError> {
+        debug!(target: "trie::loader", ?range, "incremental state root with progress");
+        Self::incremental_root_calculator(tx, range)?.root_with_progress()
+    }
+}
+
 impl<T, H> StateRoot<T, H>
 where
     T: TrieCursorFactory + Clone,
@@ -362,6 +420,30 @@ impl<T, H> StorageRoot<T, H> {
     }
 }
 
+impl<'a, TX: DbTx> StorageRoot<&'a TX, &'a TX> {
+    /// Create a new storage root calculator from database transaction and raw address.
+    pub fn from_tx(tx: &'a TX, address: Address) -> Self {
+        Self::new(
+            tx,
+            tx,
+            address,
+            #[cfg(feature = "metrics")]
+            TrieRootMetrics::new(TrieType::Storage),
+        )
+    }
+
+    /// Create a new storage root calculator from database transaction and hashed address.
+    pub fn from_tx_hashed(tx: &'a TX, hashed_address: B256) -> Self {
+        Self::new_hashed(
+            tx,
+            tx,
+            hashed_address,
+            #[cfg(feature = "metrics")]
+            TrieRootMetrics::new(TrieType::Storage),
+        )
+    }
+}
+
 impl<T, H> StorageRoot<T, H>
 where
     T: TrieCursorFactory,
 {
@@ -454,3 +536,780 @@ where
     Ok((root, storage_slots_walked, trie_updates))
 }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        prefix_set::PrefixSetMut,
+        test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed},
+        BranchNodeCompact, TrieMask,
+    };
+    use proptest::{prelude::ProptestConfig, proptest};
+    use proptest_arbitrary_interop::arb;
+    use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv};
+    use reth_db_api::{
+        cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
+        transaction::DbTxMut,
+    };
+    use reth_primitives::{hex_literal::hex, Account, StorageEntry, U256};
+    use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderRW};
+    use reth_trie_common::triehash::KeccakHasher;
+    use std::{
+        collections::{BTreeMap, HashMap},
+        ops::Mul,
+        str::FromStr,
+        sync::Arc,
+    };
+
+    fn insert_account(
+        tx: &impl DbTxMut,
+        address: Address,
+        account: Account,
+        storage: &BTreeMap<B256, U256>,
+    ) {
+        let hashed_address = keccak256(address);
+        tx.put::<tables::HashedAccounts>(hashed_address, account).unwrap();
+        insert_storage(tx, hashed_address, storage);
+    }
+
+    fn insert_storage(tx: &impl DbTxMut, hashed_address: B256, storage: &BTreeMap<B256, U256>) {
+        for (k, v) in storage {
+            tx.put::<tables::HashedStorages>(
+                hashed_address,
+                StorageEntry { key: keccak256(k), value: *v },
+            )
+            .unwrap();
+        }
+    }
+
+    fn incremental_vs_full_root(inputs: &[&str], modified: &str) {
+        let factory = create_test_provider_factory();
+        let tx = factory.provider_rw().unwrap();
+        let hashed_address = B256::with_last_byte(1);
+
+        let mut hashed_storage_cursor =
+            tx.tx_ref().cursor_dup_write::<tables::HashedStorages>().unwrap();
+        let data = inputs.iter().map(|x| B256::from_str(x).unwrap());
+        let value = U256::from(0);
+        for key in data {
+            hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap();
+        }
+
+        // Generate the intermediate nodes on the receiving end of the channel
+        let (_, _, trie_updates) =
+            StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap();
+
+        // 1. Some state transition happens, update the hashed storage to the new value
+        let modified_key = B256::from_str(modified).unwrap();
+        let value = U256::from(1);
+        if hashed_storage_cursor.seek_by_key_subkey(hashed_address, modified_key).unwrap().is_some()
+        {
+            hashed_storage_cursor.delete_current().unwrap();
+        }
+        hashed_storage_cursor
+            .upsert(hashed_address, StorageEntry { key: modified_key, value })
+            .unwrap();
+
+        // 2. Calculate full merkle root
+        let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address);
+        let modified_root = loader.root().unwrap();
+
+        // Update the intermediate roots table so that we can run the incremental verification
+        trie_updates.write_to_database(tx.tx_ref(), hashed_address).unwrap();
+
+        // 3. Calculate the incremental root
+        let mut storage_changes = PrefixSetMut::default();
+        storage_changes.insert(Nibbles::unpack(modified_key));
+        let loader = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address)
+            .with_prefix_set(storage_changes.freeze());
+        let incremental_root = loader.root().unwrap();
+
+        assert_eq!(modified_root, incremental_root);
+    }
+
+    #[test]
+    fn branch_node_child_changes() {
+        incremental_vs_full_root(
+            &[
+                "1000000000000000000000000000000000000000000000000000000000000000",
+                "1100000000000000000000000000000000000000000000000000000000000000",
+                "1110000000000000000000000000000000000000000000000000000000000000",
+                "1200000000000000000000000000000000000000000000000000000000000000",
+                "1220000000000000000000000000000000000000000000000000000000000000",
+                "1320000000000000000000000000000000000000000000000000000000000000",
+            ],
+            "1200000000000000000000000000000000000000000000000000000000000000",
+        );
+    }
+
+    #[test]
+    fn arbitrary_storage_root() {
+        proptest!(ProptestConfig::with_cases(10), |(item in arb::<(Address, std::collections::BTreeMap<B256, U256>)>())| {
+            let (address, storage) = item;
+
+            let hashed_address = keccak256(address);
+            let factory = create_test_provider_factory();
+            let tx = factory.provider_rw().unwrap();
+            for (key, value) in &storage {
+                tx.tx_ref().put::<tables::HashedStorages>(
+                    hashed_address,
+                    StorageEntry { key: keccak256(key), value: *value },
+                )
+                .unwrap();
+            }
+            tx.commit().unwrap();
+
+            let tx = factory.provider_rw().unwrap();
+            let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap();
+            let expected = storage_root(storage.into_iter());
+            assert_eq!(expected, got);
+        });
+    }
+
+    #[test]
+    // This ensures we don't add empty accounts to the trie
+    fn test_empty_account() {
+        let state: State = BTreeMap::from([
+            (
+                Address::random(),
+                (
+                    Account { nonce: 0, balance: U256::from(0), bytecode_hash: None },
+                    BTreeMap::from([(B256::with_last_byte(0x4), U256::from(12))]),
+                ),
+            ),
+            (
+                Address::random(),
+                (
+                    Account { nonce: 0, balance: U256::from(0), bytecode_hash: None },
+                    BTreeMap::default(),
+                ),
+            ),
+            (
+                Address::random(),
+                (
+                    Account {
+                        nonce: 155,
+                        balance: U256::from(414241124u32),
+                        bytecode_hash: Some(keccak256("test")),
+                    },
+                    BTreeMap::from([
+                        (B256::ZERO, U256::from(3)),
+                        (B256::with_last_byte(2), U256::from(1)),
+                    ]),
+                ),
+            ),
+        ]);
+        test_state_root_with_state(state);
+    }
+
+    #[test]
+    // This ensures we return an empty root when there are no storage entries
+    fn test_empty_storage_root() {
+        let factory = create_test_provider_factory();
+        let tx = factory.provider_rw().unwrap();
+
+        let address = Address::random();
+        let code = "el buen fla";
+        let account = Account {
+            nonce: 155,
+            balance: U256::from(414241124u32),
+            bytecode_hash: Some(keccak256(code)),
+        };
+        insert_account(tx.tx_ref(), address, account, &Default::default());
+        tx.commit().unwrap();
+
+        let tx = factory.provider_rw().unwrap();
+        let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap();
+        assert_eq!(got, EMPTY_ROOT_HASH);
+    }
+
+    #[test]
+    // This ensures that the walker goes over all the storage slots
+    fn test_storage_root() {
+        let factory = create_test_provider_factory();
+        let tx = factory.provider_rw().unwrap();
+
+        let address = Address::random();
+        let storage =
+            BTreeMap::from([(B256::ZERO, U256::from(3)), (B256::with_last_byte(2), U256::from(1))]);
+
+        let code = "el buen fla";
+        let account = Account {
+            nonce: 155,
+            balance: U256::from(414241124u32),
+            bytecode_hash: Some(keccak256(code)),
+        };
+
+        insert_account(tx.tx_ref(),
address, account, &storage); + tx.commit().unwrap(); + + let tx = factory.provider_rw().unwrap(); + let got = StorageRoot::from_tx(tx.tx_ref(), address).root().unwrap(); + + assert_eq!(storage_root(storage.into_iter()), got); + } + + type State = BTreeMap)>; + + #[test] + fn arbitrary_state_root() { + proptest!( + ProptestConfig::with_cases(10), | (state in arb::()) | { + test_state_root_with_state(state); + } + ); + } + + #[test] + fn arbitrary_state_root_with_progress() { + proptest!( + ProptestConfig::with_cases(10), | (state in arb::()) | { + let hashed_entries_total = state.len() + + state.values().map(|(_, slots)| slots.len()).sum::(); + + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + for (address, (account, storage)) in &state { + insert_account(tx.tx_ref(), *address, *account, storage) + } + tx.commit().unwrap(); + let tx = factory.provider_rw().unwrap(); + + let expected = state_root(state); + + let threshold = 10; + let mut got = None; + let mut hashed_entries_walked = 0; + + let mut intermediate_state: Option> = None; + while got.is_none() { + let calculator = StateRoot::from_tx(tx.tx_ref()) + .with_threshold(threshold) + .with_intermediate_state(intermediate_state.take().map(|state| *state)); + match calculator.root_with_progress().unwrap() { + StateRootProgress::Progress(state, walked, _) => { + intermediate_state = Some(state); + hashed_entries_walked += walked; + }, + StateRootProgress::Complete(root, walked, _) => { + got = Some(root); + hashed_entries_walked += walked; + }, + }; + } + assert_eq!(expected, got.unwrap()); + assert_eq!(hashed_entries_total, hashed_entries_walked) + } + ); + } + + fn test_state_root_with_state(state: State) { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + for (address, (account, storage)) in &state { + insert_account(tx.tx_ref(), *address, *account, storage) + } + tx.commit().unwrap(); + let expected = state_root(state); + + let tx = factory.provider_rw().unwrap(); + let got = StateRoot::from_tx(tx.tx_ref()).root().unwrap(); + assert_eq!(expected, got); + } + + fn encode_account(account: Account, storage_root: Option) -> Vec { + let account = TrieAccount::from((account, storage_root.unwrap_or(EMPTY_ROOT_HASH))); + let mut account_rlp = Vec::with_capacity(account.length()); + account.encode(&mut account_rlp); + account_rlp + } + + #[test] + fn storage_root_regression() { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + // Some address whose hash starts with 0xB041 + let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); + let key3 = keccak256(address3); + assert_eq!(key3[0], 0xB0); + assert_eq!(key3[1], 0x41); + + let storage = BTreeMap::from( + [ + ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), + ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), + ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), + ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), + ] + .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), + ); + + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::().unwrap(); + for (hashed_slot, value) in storage.clone() { + hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); + } + tx.commit().unwrap(); + let tx = factory.provider_rw().unwrap(); + + let account3_storage_root = 
StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); + let expected_root = storage_root_prehashed(storage); + assert_eq!(expected_root, account3_storage_root); + } + + #[test] + fn account_and_storage_trie() { + let ether = U256::from(1e18); + let storage = BTreeMap::from( + [ + ("1200000000000000000000000000000000000000000000000000000000000000", 0x42), + ("1400000000000000000000000000000000000000000000000000000000000000", 0x01), + ("3000000000000000000000000000000000000000000000000000000000E00000", 0x127a89), + ("3000000000000000000000000000000000000000000000000000000000E00001", 0x05), + ] + .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), + ); + + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::().unwrap(); + let mut hashed_storage_cursor = + tx.tx_ref().cursor_dup_write::().unwrap(); + + let mut hash_builder = HashBuilder::default(); + + // Insert first account + let key1 = + B256::from_str("b000000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None }; + hashed_account_cursor.upsert(key1, account1).unwrap(); + hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None)); + + // Some address whose hash starts with 0xB040 + let address2 = Address::from_str("7db3e81b72d2695e19764583f6d219dbee0f35ca").unwrap(); + let key2 = keccak256(address2); + assert_eq!(key2[0], 0xB0); + assert_eq!(key2[1], 0x40); + let account2 = Account { nonce: 0, balance: ether, ..Default::default() }; + hashed_account_cursor.upsert(key2, account2).unwrap(); + hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None)); + + // Some address whose hash starts with 0xB041 + let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); + let key3 = keccak256(address3); + assert_eq!(key3[0], 0xB0); + assert_eq!(key3[1], 0x41); + let code_hash = + B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd") + .unwrap(); + let account3 = + Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) }; + hashed_account_cursor.upsert(key3, account3).unwrap(); + for (hashed_slot, value) in storage { + if hashed_storage_cursor + .seek_by_key_subkey(key3, hashed_slot) + .unwrap() + .filter(|e| e.key == hashed_slot) + .is_some() + { + hashed_storage_cursor.delete_current().unwrap(); + } + hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); + } + let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); + hash_builder.add_leaf( + Nibbles::unpack(key3), + &encode_account(account3, Some(account3_storage_root)), + ); + + let key4a = + B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let account4a = + Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() }; + hashed_account_cursor.upsert(key4a, account4a).unwrap(); + hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None)); + + let key5 = + B256::from_str("B310000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let account5 = + Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() }; + hashed_account_cursor.upsert(key5, account5).unwrap(); + hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None)); + + let key6 
= + B256::from_str("B340000000000000000000000000000000000000000000000000000000000000") + .unwrap(); + let account6 = + Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() }; + hashed_account_cursor.upsert(key6, account6).unwrap(); + hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None)); + + // Populate account & storage trie DB tables + let expected_root = + B256::from_str("72861041bc90cd2f93777956f058a545412b56de79af5eb6b8075fe2eabbe015") + .unwrap(); + let computed_expected_root: B256 = triehash::trie_root::([ + (key1, encode_account(account1, None)), + (key2, encode_account(account2, None)), + (key3, encode_account(account3, Some(account3_storage_root))), + (key4a, encode_account(account4a, None)), + (key5, encode_account(account5, None)), + (key6, encode_account(account6, None)), + ]); + // Check computed trie root to ensure correctness + assert_eq!(computed_expected_root, expected_root); + + // Check hash builder root + assert_eq!(hash_builder.root(), computed_expected_root); + + // Check state root calculation from scratch + let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); + assert_eq!(root, computed_expected_root); + + // Check account trie + let account_updates = trie_updates.clone().into_sorted().account_nodes; + assert_eq!(account_updates.len(), 2); + + let (nibbles1a, node1a) = account_updates.first().unwrap(); + assert_eq!(nibbles1a[..], [0xB]); + assert_eq!(node1a.state_mask, TrieMask::new(0b1011)); + assert_eq!(node1a.tree_mask, TrieMask::new(0b0001)); + assert_eq!(node1a.hash_mask, TrieMask::new(0b1001)); + assert_eq!(node1a.root_hash, None); + assert_eq!(node1a.hashes.len(), 2); + + let (nibbles2a, node2a) = account_updates.last().unwrap(); + assert_eq!(nibbles2a[..], [0xB, 0x0]); + assert_eq!(node2a.state_mask, TrieMask::new(0b10001)); + assert_eq!(node2a.tree_mask, TrieMask::new(0b00000)); + assert_eq!(node2a.hash_mask, TrieMask::new(0b10000)); + assert_eq!(node2a.root_hash, None); + assert_eq!(node2a.hashes.len(), 1); + + // Check storage trie + let mut updated_storage_trie = + trie_updates.storage_tries.iter().filter(|(_, u)| !u.storage_nodes.is_empty()); + assert_eq!(updated_storage_trie.clone().count(), 1); + let (_, storage_trie_updates) = updated_storage_trie.next().unwrap(); + assert_eq!(storage_trie_updates.storage_nodes.len(), 1); + + let (nibbles3, node3) = storage_trie_updates.storage_nodes.iter().next().unwrap(); + assert!(nibbles3.is_empty()); + assert_eq!(node3.state_mask, TrieMask::new(0b1010)); + assert_eq!(node3.tree_mask, TrieMask::new(0b0000)); + assert_eq!(node3.hash_mask, TrieMask::new(0b0010)); + + assert_eq!(node3.hashes.len(), 1); + assert_eq!(node3.root_hash, Some(account3_storage_root)); + + // Add an account + // Some address whose hash starts with 0xB1 + let address4b = Address::from_str("4f61f2d5ebd991b85aa1677db97307caf5215c91").unwrap(); + let key4b = keccak256(address4b); + assert_eq!(key4b.0[0], key4a.0[0]); + let account4b = + Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None }; + hashed_account_cursor.upsert(key4b, account4b).unwrap(); + + let mut prefix_set = PrefixSetMut::default(); + prefix_set.insert(Nibbles::unpack(key4b)); + + let expected_state_root = + B256::from_str("8e263cd4eefb0c3cbbb14e5541a66a755cad25bcfab1e10dd9d706263e811b28") + .unwrap(); + + let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) + .with_prefix_sets(TriePrefixSets { + account_prefix_set: prefix_set.freeze(), + ..Default::default() + }) + 
.root_with_updates() + .unwrap(); + assert_eq!(root, expected_state_root); + + let account_updates = trie_updates.into_sorted().account_nodes; + assert_eq!(account_updates.len(), 2); + + let (nibbles1b, node1b) = account_updates.first().unwrap(); + assert_eq!(nibbles1b[..], [0xB]); + assert_eq!(node1b.state_mask, TrieMask::new(0b1011)); + assert_eq!(node1b.tree_mask, TrieMask::new(0b0001)); + assert_eq!(node1b.hash_mask, TrieMask::new(0b1011)); + assert_eq!(node1b.root_hash, None); + assert_eq!(node1b.hashes.len(), 3); + assert_eq!(node1a.hashes[0], node1b.hashes[0]); + assert_eq!(node1a.hashes[1], node1b.hashes[2]); + + let (nibbles2b, node2b) = account_updates.last().unwrap(); + assert_eq!(nibbles2b[..], [0xB, 0x0]); + assert_eq!(node2a, node2b); + tx.commit().unwrap(); + + { + let tx = factory.provider_rw().unwrap(); + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::().unwrap(); + + let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); + hashed_account_cursor.delete_current().unwrap(); + + let mut account_prefix_set = PrefixSetMut::default(); + account_prefix_set.insert(Nibbles::unpack(account.0)); + + let computed_expected_root: B256 = triehash::trie_root::([ + (key1, encode_account(account1, None)), + // DELETED: (key2, encode_account(account2, None)), + (key3, encode_account(account3, Some(account3_storage_root))), + (key4a, encode_account(account4a, None)), + (key4b, encode_account(account4b, None)), + (key5, encode_account(account5, None)), + (key6, encode_account(account6, None)), + ]); + + let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) + .with_prefix_sets(TriePrefixSets { + account_prefix_set: account_prefix_set.freeze(), + ..Default::default() + }) + .root_with_updates() + .unwrap(); + assert_eq!(root, computed_expected_root); + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); + + assert_eq!(trie_updates.account_nodes.len(), 1); + + let (nibbles1c, node1c) = trie_updates.account_nodes.iter().next().unwrap(); + assert_eq!(nibbles1c[..], [0xB]); + + assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); + assert_eq!(node1c.tree_mask, TrieMask::new(0b0000)); + assert_eq!(node1c.hash_mask, TrieMask::new(0b1011)); + + assert_eq!(node1c.root_hash, None); + + assert_eq!(node1c.hashes.len(), 3); + assert_ne!(node1c.hashes[0], node1b.hashes[0]); + assert_eq!(node1c.hashes[1], node1b.hashes[1]); + assert_eq!(node1c.hashes[2], node1b.hashes[2]); + } + + { + let tx = factory.provider_rw().unwrap(); + let mut hashed_account_cursor = + tx.tx_ref().cursor_write::().unwrap(); + + let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); + hashed_account_cursor.delete_current().unwrap(); + let account3 = hashed_account_cursor.seek_exact(key3).unwrap().unwrap(); + hashed_account_cursor.delete_current().unwrap(); + + let mut account_prefix_set = PrefixSetMut::default(); + account_prefix_set.insert(Nibbles::unpack(account2.0)); + account_prefix_set.insert(Nibbles::unpack(account3.0)); + + let computed_expected_root: B256 = triehash::trie_root::([ + (key1, encode_account(account1, None)), + // DELETED: (key2, encode_account(account2, None)), + // DELETED: (key3, encode_account(account3, Some(account3_storage_root))), + (key4a, encode_account(account4a, None)), + (key4b, encode_account(account4b, None)), + (key5, encode_account(account5, None)), + (key6, encode_account(account6, None)), + ]); + + let (root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) + .with_prefix_sets(TriePrefixSets { + 
account_prefix_set: account_prefix_set.freeze(), + ..Default::default() + }) + .root_with_updates() + .unwrap(); + assert_eq!(root, computed_expected_root); + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); + assert!(!trie_updates + .storage_tries + .iter() + .any(|(_, u)| !u.storage_nodes.is_empty() || !u.removed_nodes.is_empty())); // no storage root update + + assert_eq!(trie_updates.account_nodes.len(), 1); + + let (nibbles1d, node1d) = trie_updates.account_nodes.iter().next().unwrap(); + assert_eq!(nibbles1d[..], [0xB]); + + assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); + assert_eq!(node1d.tree_mask, TrieMask::new(0b0000)); + assert_eq!(node1d.hash_mask, TrieMask::new(0b1010)); + + assert_eq!(node1d.root_hash, None); + + assert_eq!(node1d.hashes.len(), 2); + assert_eq!(node1d.hashes[0], node1b.hashes[1]); + assert_eq!(node1d.hashes[1], node1b.hashes[2]); + } + } + + #[test] + fn account_trie_around_extension_node() { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + let expected = extension_node_trie(&tx); + + let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); + assert_eq!(expected, got); + assert_trie_updates(&updates.account_nodes); + } + + #[test] + fn account_trie_around_extension_node_with_dbtrie() { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + let expected = extension_node_trie(&tx); + + let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); + assert_eq!(expected, got); + updates.write_to_database(tx.tx_ref()).unwrap(); + + // read the account updates from the db + let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); + let walker = accounts_trie.walk(None).unwrap(); + let account_updates = walker + .into_iter() + .map(|item| { + let (key, node) = item.unwrap(); + (key.0, node) + }) + .collect(); + assert_trie_updates(&account_updates); + } + + proptest! 
{ + #![proptest_config(ProptestConfig { + cases: 128, ..ProptestConfig::default() + })] + + #[test] + fn fuzz_state_root_incremental(account_changes: [BTreeMap; 5]) { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); + + let mut state = BTreeMap::default(); + for accounts in account_changes { + let should_generate_changeset = !state.is_empty(); + let mut changes = PrefixSetMut::default(); + for (hashed_address, balance) in accounts.clone() { + hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); + if should_generate_changeset { + changes.insert(Nibbles::unpack(hashed_address)); + } + } + + let (state_root, trie_updates) = StateRoot::from_tx(tx.tx_ref()) + .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) + .root_with_updates() + .unwrap(); + + state.append(&mut accounts.clone()); + let expected_root = state_root_prehashed( + state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) + ); + assert_eq!(expected_root, state_root); + trie_updates.write_to_database(tx.tx_ref()).unwrap(); + } + } + } + + #[test] + fn storage_trie_around_extension_node() { + let factory = create_test_provider_factory(); + let tx = factory.provider_rw().unwrap(); + + let hashed_address = B256::random(); + let (expected_root, expected_updates) = extension_node_storage_trie(&tx, hashed_address); + + let (got, _, updates) = + StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); + assert_eq!(expected_root, got); + assert_eq!(expected_updates, updates); + assert_trie_updates(&updates.storage_nodes); + } + + fn extension_node_storage_trie( + tx: &DatabaseProviderRW>>, + hashed_address: B256, + ) -> (B256, StorageTrieUpdates) { + let value = U256::from(1); + + let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); + + let mut hb = HashBuilder::default().with_updates(true); + + for key in [ + hex!("30af561000000000000000000000000000000000000000000000000000000000"), + hex!("30af569000000000000000000000000000000000000000000000000000000000"), + hex!("30af650000000000000000000000000000000000000000000000000000000000"), + hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), + hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), + hex!("3100000000000000000000000000000000000000000000000000000000000000"), + ] { + hashed_storage + .upsert(hashed_address, StorageEntry { key: B256::new(key), value }) + .unwrap(); + hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value)); + } + + let root = hb.root(); + let (_, updates) = hb.split(); + let trie_updates = StorageTrieUpdates { storage_nodes: updates, ..Default::default() }; + (root, trie_updates) + } + + fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { + let a = + Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; + let val = encode_account(a, None); + + let mut hashed_accounts = tx.tx_ref().cursor_write::().unwrap(); + let mut hb = HashBuilder::default(); + + for key in [ + hex!("30af561000000000000000000000000000000000000000000000000000000000"), + hex!("30af569000000000000000000000000000000000000000000000000000000000"), + hex!("30af650000000000000000000000000000000000000000000000000000000000"), + hex!("30af6f0000000000000000000000000000000000000000000000000000000000"), + 
hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), + hex!("3100000000000000000000000000000000000000000000000000000000000000"), + ] { + hashed_accounts.upsert(B256::new(key), a).unwrap(); + hb.add_leaf(Nibbles::unpack(key), &val); + } + + hb.root() + } + + fn assert_trie_updates(account_updates: &HashMap) { + assert_eq!(account_updates.len(), 2); + + let node = account_updates.get(&[0x3][..]).unwrap(); + let expected = BranchNodeCompact::new(0b0011, 0b0001, 0b0000, vec![], None); + assert_eq!(node, &expected); + + let node = account_updates.get(&[0x3, 0x0, 0xA, 0xF][..]).unwrap(); + assert_eq!(node.state_mask, TrieMask::new(0b101100000)); + assert_eq!(node.tree_mask, TrieMask::new(0b000000000)); + assert_eq!(node.hash_mask, TrieMask::new(0b001000000)); + + assert_eq!(node.root_hash, None); + assert_eq!(node.hashes.len(), 1); + } +} diff --git a/crates/trie/trie/src/trie_cursor/database_cursors.rs b/crates/trie/trie/src/trie_cursor/database_cursors.rs index 7149c53c0e5a..4c9e5e6a73b2 100644 --- a/crates/trie/trie/src/trie_cursor/database_cursors.rs +++ b/crates/trie/trie/src/trie_cursor/database_cursors.rs @@ -1,17 +1,11 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{ - updates::StorageTrieUpdates, BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey, -}; -use reth_db::{ - cursor::{DbCursorRW, DbDupCursorRW}, - tables, DatabaseError, -}; +use crate::{BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; +use reth_db::{tables, DatabaseError}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, transaction::DbTx, }; use reth_primitives::B256; -use reth_trie_common::StorageTrieEntry; /// Implementation of the trie cursor factory for a database transaction. impl<'a, TX: DbTx> TrieCursorFactory for &'a TX { @@ -92,62 +86,6 @@ impl DatabaseStorageTrieCursor { } } -impl DatabaseStorageTrieCursor -where - C: DbCursorRO - + DbCursorRW - + DbDupCursorRO - + DbDupCursorRW, -{ - /// Writes storage updates - pub fn write_storage_trie_updates( - &mut self, - updates: &StorageTrieUpdates, - ) -> Result { - // The storage trie for this account has to be deleted. - if updates.is_deleted && self.cursor.seek_exact(self.hashed_address)?.is_some() { - self.cursor.delete_current_duplicates()?; - } - - // Merge updated and removed nodes. Updated nodes must take precedence. - let mut storage_updates = updates - .removed_nodes - .iter() - .filter_map(|n| (!updates.storage_nodes.contains_key(n)).then_some((n, None))) - .collect::>(); - storage_updates - .extend(updates.storage_nodes.iter().map(|(nibbles, node)| (nibbles, Some(node)))); - - // Sort trie node updates. - storage_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); - - let mut num_entries = 0; - for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { - num_entries += 1; - let nibbles = StoredNibblesSubKey(nibbles.clone()); - // Delete the old entry if it exists. - if self - .cursor - .seek_by_key_subkey(self.hashed_address, nibbles.clone())? - .filter(|e| e.nibbles == nibbles) - .is_some() - { - self.cursor.delete_current()?; - } - - // There is an updated version of this node, insert new entry. 
- if let Some(node) = maybe_updated { - self.cursor.upsert( - self.hashed_address, - StorageTrieEntry { nibbles, node: node.clone() }, - )?; - } - } - - Ok(num_entries) - } -} - impl TrieCursor for DatabaseStorageTrieCursor where C: DbCursorRO + DbDupCursorRO + Send + Sync, diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index d02940e20181..c74ee0eaf3b3 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -296,3 +296,65 @@ fn compare_trie_node_entries( db_item.or(in_memory_item) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + test_utils::state_root_prehashed, + StateRoot, + }; + use proptest::prelude::*; + use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; + use reth_primitives::{Account, U256}; + use reth_provider::test_utils::create_test_provider_factory; + use std::collections::BTreeMap; + + proptest! { + #![proptest_config(ProptestConfig { + cases: 128, ..ProptestConfig::default() + })] + + #[test] + fn fuzz_in_memory_nodes(mut init_state: BTreeMap, mut updated_state: BTreeMap) { + let factory = create_test_provider_factory(); + let provider = factory.provider_rw().unwrap(); + let mut hashed_account_cursor = provider.tx_ref().cursor_write::().unwrap(); + + // Insert init state into database + for (hashed_address, balance) in init_state.clone() { + hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); + } + + // Compute initial root and updates + let (_, trie_updates) = StateRoot::from_tx(provider.tx_ref()) + .root_with_updates() + .unwrap(); + + // Insert state updates into database + let mut changes = PrefixSetMut::default(); + for (hashed_address, balance) in updated_state.clone() { + hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); + changes.insert(Nibbles::unpack(hashed_address)); + } + + // Compute root with in-memory trie nodes overlay + let (state_root, _) = StateRoot::from_tx(provider.tx_ref()) + .with_prefix_sets(TriePrefixSets { account_prefix_set: changes.freeze(), ..Default::default() }) + .with_trie_cursor_factory(InMemoryTrieCursorFactory::new(provider.tx_ref(), &trie_updates.into_sorted())) + .root_with_updates() + .unwrap(); + + // Verify the result + let mut state = BTreeMap::default(); + state.append(&mut init_state); + state.append(&mut updated_state); + let expected_root = state_root_prehashed( + state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) + ); + assert_eq!(expected_root, state_root); + + } + } +} diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index 2d35dbf4809f..a1f7767e6806 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -1,4 +1,12 @@ -use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles}; +use crate::{ + walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles, StorageTrieEntry, StoredNibbles, + StoredNibblesSubKey, +}; +use reth_db::tables; +use reth_db_api::{ + cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, + transaction::{DbTx, DbTxMut}, +}; use reth_primitives::B256; use std::collections::{HashMap, HashSet}; @@ -76,6 +84,64 @@ impl TrieUpdates { .collect(); TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } + + /// Flush updates all aggregated updates to the database. 
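+    ///
+    /// Account trie nodes are written first, then the storage trie nodes of each
+    /// account; when the same path appears as both updated and removed, the update
+    /// takes precedence.
+    ///
+    /// A hedged usage sketch (assuming a read-write transaction `tx`):
+    ///
+    /// ```ignore
+    /// let (root, updates) = StateRoot::from_tx(&tx).root_with_updates()?;
+    /// let num_entries = updates.write_to_database(&tx)?;
+    /// ```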
+ /// + /// # Returns + /// + /// The number of storage trie entries updated in the database. + pub fn write_to_database(self, tx: &TX) -> Result + where + TX: DbTx + DbTxMut, + { + if self.is_empty() { + return Ok(0) + } + + // Track the number of inserted entries. + let mut num_entries = 0; + + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut account_updates = self + .removed_nodes + .into_iter() + .filter_map(|n| (!self.account_nodes.contains_key(&n)).then_some((n, None))) + .collect::>(); + account_updates + .extend(self.account_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); + // Sort trie node updates. + account_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut account_trie_cursor = tx.cursor_write::()?; + for (key, updated_node) in account_updates { + let nibbles = StoredNibbles(key); + match updated_node { + Some(node) => { + if !nibbles.0.is_empty() { + num_entries += 1; + account_trie_cursor.upsert(nibbles, node)?; + } + } + None => { + num_entries += 1; + if account_trie_cursor.seek_exact(nibbles)?.is_some() { + account_trie_cursor.delete_current()?; + } + } + } + } + + let mut storage_tries = Vec::from_iter(self.storage_tries); + storage_tries.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + let mut storage_trie_cursor = tx.cursor_dup_write::()?; + for (hashed_address, storage_trie_updates) in storage_tries { + let updated_storage_entries = + storage_trie_updates.write_with_cursor(&mut storage_trie_cursor, hashed_address)?; + num_entries += updated_storage_entries; + } + + Ok(num_entries) + } } /// Trie updates for storage trie of a single account. @@ -90,14 +156,6 @@ pub struct StorageTrieUpdates { pub(crate) removed_nodes: HashSet, } -#[cfg(feature = "test-utils")] -impl StorageTrieUpdates { - /// Creates a new storage trie updates that are not marked as deleted. - pub fn new(updates: HashMap) -> Self { - Self { storage_nodes: updates, ..Default::default() } - } -} - impl StorageTrieUpdates { /// Returns empty storage trie updates with `deleted` set to `true`. pub fn deleted() -> Self { @@ -159,6 +217,77 @@ impl StorageTrieUpdates { storage_nodes, } } + + /// Initializes a storage trie cursor and writes updates to database. + pub fn write_to_database( + self, + tx: &TX, + hashed_address: B256, + ) -> Result + where + TX: DbTx + DbTxMut, + { + if self.is_empty() { + return Ok(0) + } + + let mut cursor = tx.cursor_dup_write::()?; + self.write_with_cursor(&mut cursor, hashed_address) + } + + /// Writes updates to database. + /// + /// # Returns + /// + /// The number of storage trie entries updated in the database. + fn write_with_cursor( + self, + cursor: &mut C, + hashed_address: B256, + ) -> Result + where + C: DbCursorRO + + DbCursorRW + + DbDupCursorRO + + DbDupCursorRW, + { + // The storage trie for this account has to be deleted. + if self.is_deleted && cursor.seek_exact(hashed_address)?.is_some() { + cursor.delete_current_duplicates()?; + } + + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut storage_updates = self + .removed_nodes + .into_iter() + .filter_map(|n| (!self.storage_nodes.contains_key(&n)).then_some((n, None))) + .collect::>(); + storage_updates + .extend(self.storage_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); + // Sort trie node updates. 
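+        // `Nibbles` compares lexicographically, so a parent path sorts before any
+        // of its extensions and nodes are written parent-first.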
+ storage_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut num_entries = 0; + for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) { + num_entries += 1; + let nibbles = StoredNibblesSubKey(nibbles); + // Delete the old entry if it exists. + if cursor + .seek_by_key_subkey(hashed_address, nibbles.clone())? + .filter(|e| e.nibbles == nibbles) + .is_some() + { + cursor.delete_current()?; + } + + // There is an updated version of this node, insert new entry. + if let Some(node) = maybe_updated { + cursor.upsert(hashed_address, StorageTrieEntry { nibbles, node })?; + } + } + + Ok(num_entries) + } } /// Sorted trie updates used for lookups and insertions. diff --git a/docs/crates/network.md b/docs/crates/network.md index a6ac24305658..9e381877f606 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -649,7 +649,7 @@ fn on_bodies_request( ## Transactions Task The transactions task listens for, requests, and propagates transactions both from the node's peers, and those that are added locally (e.g., submitted via RPC). Note that this task focuses solely on the network communication involved with Ethereum transactions, we will talk more about the structure of the transaction pool itself -in the [transaction-pool](https://reth.rs/docs/reth_transaction_pool/index.html) chapter. +in the [transaction-pool](../../../ethereum/transaction-pool/README.md) chapter. Again, like the network management and ETH requests tasks, the transactions task is implemented as an endless future that runs as a background task on a standalone `tokio::task`. It's represented by the `TransactionsManager` struct: diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index c31153be135c..f3c7a843a396 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -28,7 +28,7 @@ pub mod mined_sidecar; fn main() { Cli::::parse() - .run(|builder, beacon_config| async move { + .run(|builder, args| async move { // launch the node let NodeHandle { node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; @@ -38,30 +38,27 @@ fn main() { let pool = node.pool.clone(); - node.task_executor.spawn(async move { - let mut sidecar_stream = MinedSidecarStream { - events: notifications, - pool, - beacon_config, - client: reqwest::Client::new(), - pending_requests: FuturesUnordered::new(), - queued_actions: VecDeque::new(), - }; + let mut sidecar_stream = MinedSidecarStream { + events: notifications, + pool, + beacon_config: args, + client: reqwest::Client::new(), + pending_requests: FuturesUnordered::new(), + queued_actions: VecDeque::new(), + }; - while let Some(result) = sidecar_stream.next().await { - match result { - Ok(blob_transaction) => { - // Handle successful transaction - println!("Processed BlobTransaction: {:?}", blob_transaction); - } - Err(e) => { - // Handle errors specifically - eprintln!("Failed to process transaction: {:?}", e); - } + while let Some(result) = sidecar_stream.next().await { + match result { + Ok(blob_transaction) => { + // Handle successful transaction + println!("Processed BlobTransaction: {:?}", blob_transaction); + } + Err(e) => { + // Handle errors specifically + eprintln!("Failed to process transaction: {:?}", e); } } - }); - + } node_exit_future.await }) .unwrap(); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 4c2f3b712a14..6e58acdca169 100644 --- 
a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -109,7 +109,7 @@ impl ConfigureEvmEnv for MyEvmConfig { impl ConfigureEvm for MyEvmConfig { type DefaultExternalContext<'a> = (); - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { EvmBuilder::default() .with_db(db) // add additional precompiles @@ -117,9 +117,9 @@ impl ConfigureEvm for MyEvmConfig { .build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> where - DB: Database, + DB: Database + 'a, I: GetInspector, { EvmBuilder::default() diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 4c36c3348184..27047fd3f8ec 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,4 +1,5 @@ use reth_chainspec::ChainSpecBuilder; +use reth_db::open_db_read_only; use reth_primitives::{Address, B256}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, @@ -15,18 +16,20 @@ use std::path::Path; // Other parts of the code which include caching are parts of the `EthApi` abstraction. fn main() -> eyre::Result<()> { // Opens a RO handle to the database file. + // TODO: Should be able to do `ProviderFactory::new_with_db_path_ro(...)` instead of + // doing in 2 steps. let db_path = std::env::var("RETH_DB_PATH")?; let db_path = Path::new(&db_path); + let db = open_db_read_only(db_path.join("db").as_path(), Default::default())?; // Instantiate a provider factory for Ethereum mainnet using the provided DB. // TODO: Should the DB version include the spec so that you do not need to specify it here? let spec = ChainSpecBuilder::mainnet().build(); - let factory = ProviderFactory::new_with_database_path( - db_path, + let factory = ProviderFactory::new( + db, spec.into(), - Default::default(), StaticFileProvider::read_only(db_path.join("static_files"))?, - )?; + ); // This call opens a RO transaction on the database. To write to the DB you'd need to call // the `provider_rw` function and look for the `Writer` variants of the traits. diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index dd598a801212..d73e613d66a9 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -166,7 +166,7 @@ impl ConfigureEvmEnv for MyEvmConfig { impl ConfigureEvm for MyEvmConfig { type DefaultExternalContext<'a> = (); - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm<'a, DB: Database + 'a>(&self, db: DB) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { let new_cache = self.precompile_cache.clone(); EvmBuilder::default() .with_db(db) @@ -177,9 +177,9 @@ impl ConfigureEvm for MyEvmConfig { .build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_inspector<'a, DB, I>(&self, db: DB, inspector: I) -> Evm<'a, I, DB> where - DB: Database, + DB: Database + 'a, I: GetInspector, { let new_cache = self.precompile_cache.clone(); diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index e7556ec0d47e..2c580dc54ea8 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -41,7 +41,7 @@ pub struct BlockchainTest { } /// A block header in an Ethereum blockchain test. 
-#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Default)]
+#[derive(Debug, PartialEq, Eq, Clone, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Header {
     /// Bloom filter.
@@ -120,7 +120,7 @@ impl From<Header> 
for SealedHeader { } /// A block in an Ethereum blockchain test. -#[derive(Debug, PartialEq, Eq, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Block { /// Block header. @@ -138,7 +138,7 @@ pub struct Block { } /// Transaction sequence in block -#[derive(Debug, PartialEq, Eq, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Deserialize)] #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub struct TransactionSequence { @@ -148,7 +148,7 @@ pub struct TransactionSequence { } /// Ethereum blockchain test data state. -#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Default)] +#[derive(Clone, Debug, Eq, PartialEq, Deserialize)] pub struct State(BTreeMap); impl State { @@ -194,7 +194,7 @@ impl Deref for State { } /// An account. -#[derive(Debug, PartialEq, Eq, Deserialize, Clone, Default)] +#[derive(Debug, PartialEq, Eq, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct Account { /// Balance. diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 62923b1acd2b..4ef65043f602 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -245,7 +245,7 @@ where let mut old_entries: Vec<_> = new_entries .into_iter() .filter_map(|entry| { - let old = if !entry.value.is_zero() { + let old = if entry.value != U256::ZERO { storage.insert(entry.key, entry.value) } else { let old = storage.remove(&entry.key); From 11698960d8d3976ac8b30994c302d365a163b789 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:15:16 +0200 Subject: [PATCH 21/40] rm optimism flag from crates/optimism --- crates/optimism/cli/Cargo.toml | 3 +-- crates/optimism/cli/src/lib.rs | 9 +-------- crates/optimism/consensus/Cargo.toml | 2 +- crates/optimism/evm/Cargo.toml | 3 +-- crates/optimism/node/Cargo.toml | 5 +---- crates/optimism/payload/Cargo.toml | 3 +-- crates/optimism/payload/src/lib.rs | 2 +- crates/optimism/rpc/Cargo.toml | 5 ++--- crates/optimism/rpc/src/lib.rs | 4 +--- 9 files changed, 10 insertions(+), 26 deletions(-) diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 453062e488a7..78de5f5a1f9a 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -70,8 +70,7 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true [features] - optimism = [ +default = [ "reth-primitives/optimism", - "reth-evm-optimism/optimism", "reth-provider/optimism", ] diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b22840bdc459..39ea39ebc18f 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -5,7 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] /// Optimism chain specification parser. @@ -45,15 +45,8 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_tracing::FileWorkerGuard; -use std::{ffi::OsString, fmt, sync::Arc}; use tracing::info; -/// Optimism chain specification parser. -pub mod chainspec; -/// Optimism CLI commands. 
-pub mod commands; -pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; - /// The main reth cli interface. /// /// This is the entrypoint to the executable. diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 6be3d67fb4d5..ce33ef718bbc 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -21,4 +21,4 @@ reth-consensus.workspace = true tracing.workspace = true [features] -optimism = ["reth-primitives/optimism"] +default = ["reth-primitives/optimism"] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f53293edeebf..3b39182c3b98 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -37,9 +37,8 @@ tracing.workspace = true reth-revm = { workspace = true, features = ["test-utils"] } [features] -optimism = [ +default = [ "reth-primitives/optimism", "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 0b163a571bfb..78190b4c48e9 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -69,17 +69,14 @@ alloy-primitives.workspace = true alloy-genesis.workspace = true [features] -optimism = [ +default = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc/optimism", - "reth-evm-optimism/optimism", - "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", "reth-revm/optimism", "reth-auto-seal-consensus/optimism", - "reth-optimism-rpc/optimism" ] test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 6aaec1076779..b7591baaf80f 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -37,11 +37,10 @@ thiserror.workspace = true sha2.workspace = true [features] -optimism = [ +default = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-evm-optimism/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 645b997f6fd9..4be22346ebb5 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -5,7 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 1f0b15b6e38f..95b617f6eaeb 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -54,11 +54,10 @@ client = [ "reth-rpc-eth-api/client" ] -optimism = [ +default = [ "reth-chainspec/optimism", - "reth-evm-optimism/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", "revm/optimism" -] \ No newline at end of file +] diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index a6ee9e33d01b..424f8afbb2cc 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -5,10 +5,8 @@ html_favicon_url = 
"https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// The `optimism` feature must be enabled to use this crate. -#![cfg(feature = "optimism")] pub mod api; pub mod error; From bf735a3f9713f10e7b1d9c4a41b4538205e66dac Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:21:38 +0200 Subject: [PATCH 22/40] node-optimism does not have feature flag anymore --- .github/workflows/integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 103a87706bca..5f61ae6b8539 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -47,7 +47,7 @@ jobs: name: Run tests run: | cargo nextest run \ - --locked -p reth-node-optimism --features "optimism" + --locked -p reth-node-optimism integration-success: name: integration success From 9ddebb0c8fc2d079ea3936fcaa0a4eeadd7f788e Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:23:23 +0200 Subject: [PATCH 23/40] rm optimism feature from node e2e --- crates/optimism/node/tests/e2e/main.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs index 5e0d022d22a0..e8c378b9e521 100644 --- a/crates/optimism/node/tests/e2e/main.rs +++ b/crates/optimism/node/tests/e2e/main.rs @@ -1,7 +1,5 @@ -#[cfg(feature = "optimism")] mod p2p; -#[cfg(feature = "optimism")] mod utils; const fn main() {} From 8e44890cecf4b797e3796c79edbc049c215c47d6 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:32:36 +0200 Subject: [PATCH 24/40] Revert "rm optimsm feature from node e2e" This reverts commit 9ddebb0c8fc2d079ea3936fcaa0a4eeadd7f788e. --- crates/optimism/node/tests/e2e/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs index e8c378b9e521..5e0d022d22a0 100644 --- a/crates/optimism/node/tests/e2e/main.rs +++ b/crates/optimism/node/tests/e2e/main.rs @@ -1,5 +1,7 @@ +#[cfg(feature = "optimism")] mod p2p; +#[cfg(feature = "optimism")] mod utils; const fn main() {} From 9e441cd9df5edfd40b29993ed8e72ebc75015b21 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:32:59 +0200 Subject: [PATCH 25/40] Revert "node-optimism does not have feature flag anymore" This reverts commit bf735a3f9713f10e7b1d9c4a41b4538205e66dac. 
--- .github/workflows/integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 5f61ae6b8539..103a87706bca 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -47,7 +47,7 @@ jobs: name: Run tests run: | cargo nextest run \ - --locked -p reth-node-optimism + --locked -p reth-node-optimism --features "optimism" integration-success: name: integration success From 80a2a0cb5a12f788da7f7795b63fadaef076fdda Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:33:01 +0200 Subject: [PATCH 26/40] Revert "rm optimism flag from crates/optimism" This reverts commit 11698960d8d3976ac8b30994c302d365a163b789. --- crates/optimism/cli/Cargo.toml | 3 ++- crates/optimism/cli/src/lib.rs | 9 ++++++++- crates/optimism/consensus/Cargo.toml | 2 +- crates/optimism/evm/Cargo.toml | 3 ++- crates/optimism/node/Cargo.toml | 5 ++++- crates/optimism/payload/Cargo.toml | 3 ++- crates/optimism/payload/src/lib.rs | 2 +- crates/optimism/rpc/Cargo.toml | 5 +++-- crates/optimism/rpc/src/lib.rs | 4 +++- 9 files changed, 26 insertions(+), 10 deletions(-) diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 78de5f5a1f9a..453062e488a7 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -70,7 +70,8 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true [features] -default = [ + optimism = [ "reth-primitives/optimism", + "reth-evm-optimism/optimism", "reth-provider/optimism", ] diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 39ea39ebc18f..b22840bdc459 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -5,7 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] /// Optimism chain specification parser. @@ -45,8 +45,15 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_tracing::FileWorkerGuard; +use std::{ffi::OsString, fmt, sync::Arc}; use tracing::info; +/// Optimism chain specification parser. +pub mod chainspec; +/// Optimism CLI commands. +pub mod commands; +pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; + /// The main reth cli interface. /// /// This is the entrypoint to the executable. 
diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index ce33ef718bbc..6be3d67fb4d5 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -21,4 +21,4 @@ reth-consensus.workspace = true tracing.workspace = true [features] -default = ["reth-primitives/optimism"] +optimism = ["reth-primitives/optimism"] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 3b39182c3b98..f53293edeebf 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -37,8 +37,9 @@ tracing.workspace = true reth-revm = { workspace = true, features = ["test-utils"] } [features] -default = [ +optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 78190b4c48e9..0b163a571bfb 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -69,14 +69,17 @@ alloy-primitives.workspace = true alloy-genesis.workspace = true [features] -default = [ +optimism = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-rpc/optimism", + "reth-evm-optimism/optimism", + "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", "reth-revm/optimism", "reth-auto-seal-consensus/optimism", + "reth-optimism-rpc/optimism" ] test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index b7591baaf80f..6aaec1076779 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -37,10 +37,11 @@ thiserror.workspace = true sha2.workspace = true [features] -default = [ +optimism = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", + "reth-evm-optimism/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 4be22346ebb5..645b997f6fd9 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -5,7 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 95b617f6eaeb..1f0b15b6e38f 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -54,10 +54,11 @@ client = [ "reth-rpc-eth-api/client" ] -default = [ +optimism = [ "reth-chainspec/optimism", + "reth-evm-optimism/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", "revm/optimism" -] +] \ No newline at end of file diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 424f8afbb2cc..a6ee9e33d01b 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -5,8 +5,10 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(all(not(test), 
feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] pub mod api; pub mod error; From 92048f911e6debc8c3110c3907332a214618ff42 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:37:29 +0200 Subject: [PATCH 27/40] add optimism flags to optimism/bin --- crates/optimism/bin/Cargo.toml | 5 ++++- crates/optimism/cli/src/lib.rs | 7 ------- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 10af13c447f4..75182d2e8b45 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -34,4 +34,7 @@ default = ["jemalloc"] jemalloc = ["dep:tikv-jemallocator"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] - +optimism = [ + "reth-optimism-cli/optimism", + "reth-node-optimism/optimism", +] diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b22840bdc459..9199d8795303 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -45,15 +45,8 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_tracing::FileWorkerGuard; -use std::{ffi::OsString, fmt, sync::Arc}; use tracing::info; -/// Optimism chain specification parser. -pub mod chainspec; -/// Optimism CLI commands. -pub mod commands; -pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; - /// The main reth cli interface. /// /// This is the entrypoint to the executable. From c826ca7b86db70efad4fb70c831bae78f5684132 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 12:57:38 +0200 Subject: [PATCH 28/40] force optimism feature on op crates --- Cargo.toml | 3 ++- crates/optimism/cli/src/lib.rs | 2 ++ crates/optimism/consensus/Cargo.toml | 2 +- crates/optimism/consensus/src/lib.rs | 2 ++ crates/optimism/evm/src/lib.rs | 2 ++ crates/optimism/node/src/lib.rs | 2 ++ crates/optimism/payload/src/lib.rs | 2 ++ 7 files changed, 13 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2e519aa35b96..420442717cae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ members = [ "crates/node/builder/", "crates/node/events/", "crates/node/metrics", + "crates/optimism/bin", "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", @@ -140,7 +141,7 @@ members = [ "examples/txpool-tracing/", "examples/custom-rlpx-subprotocol", "testing/ef-tests/", - "testing/testing-utils", "crates/optimism/bin", + "testing/testing-utils", ] default-members = ["bin/reth"] diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 9199d8795303..8bb90cea43f5 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -7,6 +7,8 @@ )] #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] /// Optimism chain specification parser. 
pub mod chainspec; diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 6be3d67fb4d5..bd538a167f10 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-consensus-common.workspace = true reth-chainspec.workspace = true -reth-primitives = { workspace = true, features = [ "optimism" ]} +reth-primitives.workspace = true reth-consensus.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index d6eb28dbb90e..61aa23bde15f 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -6,6 +6,8 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f2b4a2b83cb6..1d7f22f3c7af 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -6,6 +6,8 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] use reth_chainspec::ChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index b6f1ce7fc35e..68aebd0835fb 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -6,6 +6,8 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] /// CLI argument parsing for the optimism node. pub mod args; diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index 645b997f6fd9..2bb60594287a 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] pub mod builder; pub use builder::OptimismPayloadBuilder; From ecf534f55914c02ea6f47e00c30fb31480a5777b Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:02:50 +0200 Subject: [PATCH 29/40] rm op-reth doctest --- crates/optimism/cli/src/lib.rs | 40 +--------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 8bb90cea43f5..7599d21313ca 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -49,7 +49,7 @@ use reth_node_core::{ use reth_tracing::FileWorkerGuard; use tracing::info; -/// The main reth cli interface. +/// The main op-reth cli interface. /// /// This is the entrypoint to the executable. 
#[derive(Debug, Parser)] @@ -113,44 +113,6 @@ impl Cli { /// /// This accepts a closure that is used to launch the node via the /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). - /// - /// - /// # Example - /// - /// ```no_run - /// use reth::cli::Cli; - /// use reth_node_optimism::OptimismNode; - /// - /// Cli::parse_args() - /// .run(|builder, _| async move { - /// let handle = builder.launch_node(OptimismNode::default()).await?; - /// - /// handle.wait_for_node_exit().await - /// }) - /// .unwrap(); - /// ``` - /// - /// # Example - /// - /// Parse additional CLI arguments for the node command and use it to configure the node. - /// - /// ```no_run - /// use clap::Parser; - /// use reth::cli::Cli; - /// - /// #[derive(Debug, Parser)] - /// pub struct MyArgs { - /// pub enable: bool, - /// } - /// - /// Cli::parse() - /// .run(|builder, my_args: MyArgs| async move { - /// // launch the node - /// - /// Ok(()) - /// }) - /// .unwrap(); - /// ```` pub fn run(mut self, launcher: L) -> eyre::Result<()> where L: FnOnce(WithLaunchContext>>, Ext) -> Fut, From 14512dd830fe16e24bb83f6c66098eaa9d236d00 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:07:16 +0200 Subject: [PATCH 30/40] fix test compilation --- crates/optimism/bin/src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 29e297544413..df7de34d6fc9 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -1,4 +1,6 @@ #![allow(missing_docs, rustdoc::missing_crate_level_docs)] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] use clap::Parser; use reth_node_optimism::{args::RollupArgs, rpc::SequencerClient, OptimismNode}; From 88d7506830da8a73f42eab58b7b724849ed7844d Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:12:36 +0200 Subject: [PATCH 31/40] add asm-keccak to op-reth --- crates/optimism/bin/Cargo.toml | 2 ++ crates/optimism/node/Cargo.toml | 1 + 2 files changed, 3 insertions(+) diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 75182d2e8b45..31be158c042a 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -34,6 +34,8 @@ default = ["jemalloc"] jemalloc = ["dep:tikv-jemallocator"] jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] +asm-keccak = ["reth-node-optimism/asm-keccak"] + optimism = [ "reth-optimism-cli/optimism", "reth-node-optimism/optimism", diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 0b163a571bfb..53e0e56e5c6d 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -82,4 +82,5 @@ optimism = [ "reth-auto-seal-consensus/optimism", "reth-optimism-rpc/optimism" ] +asm-keccak = ["reth-primitives/asm-keccak"] test-utils = ["reth-node-builder/test-utils"] From 93a10d87fadd8ab559e8264879c170c07cf154bd Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:26:15 +0200 Subject: [PATCH 32/40] add manifest path on Makefile for op builds --- Cargo.toml | 1 + Makefile | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 420442717cae..66fa42d0497a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,6 +338,7 @@ reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = 
"crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-bin = { path = "crates/optimism/bin" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } diff --git a/Makefile b/Makefile index d42c426671d4..d01a707f2b6b 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ install: ## Build and install the reth binary under `~/.cargo/bin`. .PHONY: install-op install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. - cargo install --path bin/reth --bin op-reth --force --locked \ + cargo install --path crates/optimism/bin --bin op-reth --force --locked \ --features "optimism,$(FEATURES)" \ --profile "$(PROFILE)" \ $(CARGO_INSTALL_EXTRA_FLAGS) @@ -63,14 +63,14 @@ build: ## Build the reth binary into `target` directory. .PHONY: build-op build-op: ## Build the op-reth binary into `target` directory. - cargo build --bin op-reth --features "optimism,$(FEATURES)" --profile "$(PROFILE)" + cargo build --bin op-reth --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # Builds the reth binary natively. build-native-%: cargo build --bin reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)" op-build-native-%: - cargo build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" + cargo build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # The following commands use `cross` to build a cross-compile. # @@ -105,7 +105,7 @@ build-%: op-build-%: RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \ - cross build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" + cross build --bin op-reth --target $* --features "optimism,$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml # Unfortunately we can't easily use cross to build for Darwin because of licensing issues. # If we wanted to, we would need to build a custom Docker image with the SDK available. @@ -313,7 +313,7 @@ maxperf: ## Builds `reth` with the most aggressive optimisations. .PHONY: maxperf-op maxperf-op: ## Builds `op-reth` with the most aggressive optimisations. - RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak,optimism --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml .PHONY: maxperf-no-asm maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature. 
From c5261091821d13ad102acc9ef877ab3c60c94b24 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:38:46 +0200 Subject: [PATCH 33/40] exclude reth-optimism-bin from hack check --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b108ddb96b48..f183404866f0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -70,7 +70,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo hack check + - run: cargo hack check --workspace --exclude reth-optimism-bin msrv: name: MSRV / ${{ matrix.network }} From 05f74203f830b8c27f522acd6eea744a3b73d59e Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:51:05 +0200 Subject: [PATCH 34/40] exclude op-reth from hack check --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f183404866f0..b4a927621173 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -70,7 +70,7 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo hack check --workspace --exclude reth-optimism-bin + - run: cargo hack check --workspace --exclude op-reth msrv: name: MSRV / ${{ matrix.network }} From 46308402e92cfb1a17c40269ec8f595a28ab723e Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 13:59:08 +0200 Subject: [PATCH 35/40] update op-sync ci --- .github/workflows/op-sync.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml index 73303b032d05..bf4f5fda480e 100644 --- a/.github/workflows/op-sync.yml +++ b/.github/workflows/op-sync.yml @@ -33,8 +33,7 @@ jobs: with: cache-on-failure: true - name: Build op-reth - run: | - cargo install --features asm-keccak,jemalloc,optimism --bin op-reth --path bin/reth + run: make install-op - name: Run sync # https://basescan.org/block/10000 run: | From 565d9d7102ad0f285ade5b0e9941d7db92d85c4d Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:18:30 +0200 Subject: [PATCH 36/40] add test ci --- .github/workflows/release-test.yml | 79 ++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 .github/workflows/release-test.yml diff --git a/.github/workflows/release-test.yml b/.github/workflows/release-test.yml new file mode 100644 index 000000000000..20250f5bed5e --- /dev/null +++ b/.github/workflows/release-test.yml @@ -0,0 +1,79 @@ +# This workflow is modified from Lighthouse: +# https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/.github/workflows/release.yml + +name: release-test + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + REPO_NAME: ${{ github.repository_owner }}/reth + OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth + IMAGE_NAME: ${{ github.repository_owner }}/reth + CARGO_TERM_COLOR: always + +jobs: + extract-version: + name: extract version + runs-on: ubuntu-latest + steps: + - name: Extract version + run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT + id: extract_version + outputs: + VERSION: ${{ steps.extract_version.outputs.VERSION }} + + build: + name: build release + runs-on: ${{ matrix.configs.os }} + needs: 
extract-version + strategy: + matrix: + configs: + - target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: aarch64-unknown-linux-gnu + os: ubuntu-20.04 + profile: maxperf + - target: x86_64-apple-darwin + os: macos-13 + profile: maxperf + - target: aarch64-apple-darwin + os: macos-14 + profile: maxperf + - target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: maxperf + build: + - command: build + binary: reth + - command: op-build + binary: op-reth + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: ${{ matrix.configs.target }} + - uses: taiki-e/install-action@cross + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + + - name: Apple M1 setup + if: matrix.configs.target == 'aarch64-apple-darwin' + run: | + echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV + + - name: Build Reth + run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} + - name: Move binary + run: | + mkdir artifacts + [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts + \ No newline at end of file From 8e40c3c4e1954c34bdfd010347c8846690a1150a Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 29 Jul 2024 15:29:52 +0200 Subject: [PATCH 37/40] rm tmp ci test --- .github/workflows/release-test.yml | 79 ------------------------------ 1 file changed, 79 deletions(-) delete mode 100644 .github/workflows/release-test.yml diff --git a/.github/workflows/release-test.yml b/.github/workflows/release-test.yml deleted file mode 100644 index 20250f5bed5e..000000000000 --- a/.github/workflows/release-test.yml +++ /dev/null @@ -1,79 +0,0 @@ -# This workflow is modified from Lighthouse: -# https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/.github/workflows/release.yml - -name: release-test - -on: - pull_request: - merge_group: - push: - branches: [main] - -env: - REPO_NAME: ${{ github.repository_owner }}/reth - OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth - IMAGE_NAME: ${{ github.repository_owner }}/reth - CARGO_TERM_COLOR: always - -jobs: - extract-version: - name: extract version - runs-on: ubuntu-latest - steps: - - name: Extract version - run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT - id: extract_version - outputs: - VERSION: ${{ steps.extract_version.outputs.VERSION }} - - build: - name: build release - runs-on: ${{ matrix.configs.os }} - needs: extract-version - strategy: - matrix: - configs: - - target: x86_64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: aarch64-unknown-linux-gnu - os: ubuntu-20.04 - profile: maxperf - - target: x86_64-apple-darwin - os: macos-13 - profile: maxperf - - target: aarch64-apple-darwin - os: macos-14 - profile: maxperf - - target: x86_64-pc-windows-gnu - os: ubuntu-20.04 - profile: maxperf - build: - - command: build - binary: reth - - command: op-build - binary: op-reth - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - with: - target: ${{ matrix.configs.target }} - - uses: taiki-e/install-action@cross - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - - name: Apple M1 setup - if: matrix.configs.target == 'aarch64-apple-darwin' - run: | - 
echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV - echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - - - name: Build Reth - run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - - name: Move binary - run: | - mkdir artifacts - [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - \ No newline at end of file From 29b77db43639fa27d42d22688731cd769a230cf8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sun, 1 Sep 2024 02:49:28 +0400 Subject: [PATCH 38/40] fixes --- Cargo.lock | 15 +--- Cargo.toml | 2 +- bin/reth/Cargo.toml | 18 +---- bin/reth/src/lib.rs | 1 + crates/optimism/bin/Cargo.toml | 12 ++-- crates/optimism/bin/src/main.rs | 103 +++++++++++++-------------- crates/optimism/chainspec/src/lib.rs | 2 +- crates/optimism/cli/src/chainspec.rs | 7 +- 8 files changed, 64 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e4490af8420..6c79e201a862 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5102,7 +5102,8 @@ dependencies = [ "reth-node-builder", "reth-node-optimism", "reth-optimism-cli", - "reth-tracing", + "reth-optimism-rpc", + "reth-provider", "tikv-jemallocator", ] @@ -6042,13 +6043,8 @@ dependencies = [ "aquamarine", "backon", "clap", - "discv5", "eyre", - "fdlimit", "futures", - "itertools 0.13.0", - "libc", - "metrics-process", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", @@ -6062,7 +6058,6 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-db-api", - "reth-db-common", "reth-discv4", "reth-downloaders", "reth-engine-util", @@ -6081,8 +6076,6 @@ dependencies = [ "reth-node-ethereum", "reth-node-events", "reth-node-metrics", - "reth-optimism-cli", - "reth-optimism-rpc", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", @@ -6098,21 +6091,17 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", - "reth-stages-api", "reth-static-file", - "reth-static-file-types", "reth-tasks", "reth-tracing", "reth-transaction-pool", "reth-trie", "reth-trie-db", - "serde", "serde_json", "similar-asserts", "tempfile", "tikv-jemallocator", "tokio", - "toml", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index d9f0e90fdada..0d5682ab604d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -564,4 +564,4 @@ serial_test = "3" similar-asserts = "1.5.0" tempfile = "3.8" test-fuzz = "5" -tikv-jemallocator = { version = "0.5.0" } \ No newline at end of file +tikv-jemallocator = { version = "0.5.0" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index c7afc472a451..55f45214a1e1 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -53,13 +53,11 @@ reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-static-file.workspace = true -reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true -reth-db-common.workspace = true reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true @@ -67,9 +65,6 @@ reth-node-metrics.workspace = true reth-consensus.workspace = true reth-engine-util.workspace = 
true reth-prune.workspace = true -reth-stages-api.workspace = true -reth-optimism-cli = { workspace = true, optional = true } -reth-optimism-rpc.workspace = true # crypto alloy-rlp.workspace = true @@ -78,13 +73,7 @@ alloy-rlp.workspace = true tracing.workspace = true # io -fdlimit.workspace = true -serde.workspace = true serde_json.workspace = true -toml = { workspace = true, features = ["display"] } - -# metrics -metrics-process.workspace = true # async tokio = { workspace = true, features = [ @@ -99,20 +88,15 @@ futures.workspace = true aquamarine.workspace = true eyre.workspace = true clap = { workspace = true, features = ["derive", "env"] } -tempfile.workspace = true backon.workspace = true similar-asserts.workspace = true -itertools.workspace = true - -# p2p -discv5.workspace = true [target.'cfg(unix)'.dependencies] tikv-jemallocator = { workspace = true, optional = true } -libc = "0.2" [dev-dependencies] reth-discv4.workspace = true +tempfile.workspace = true [features] default = ["jemalloc"] diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 56ff58362410..654ac1201a94 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -27,6 +27,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod cli; diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 31be158c042a..97c001f74451 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -12,7 +12,8 @@ exclude.workspace = true reth-node-builder.workspace = true reth-cli-util.workspace = true reth-optimism-cli.workspace = true -reth-tracing.workspace = true +reth-provider.workspace = true +reth-optimism-rpc.workspace = true reth-node-optimism.workspace = true clap = { workspace = true, features = ["derive", "env"] } @@ -23,11 +24,6 @@ tikv-jemallocator = { workspace = true, optional = true } [lints] workspace = true -[[bin]] -name = "op-reth" -path = "src/main.rs" - - [features] default = ["jemalloc"] @@ -40,3 +36,7 @@ optimism = [ "reth-optimism-cli/optimism", "reth-node-optimism/optimism", ] + +[[bin]] +name = "op-reth" +path = "src/main.rs" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index b4741ee47c27..7b91bca6a51c 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -1,16 +1,14 @@ +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![allow(missing_docs, rustdoc::missing_crate_level_docs)] // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] use clap::Parser; -use reth::cli::Cli; use reth_node_builder::EngineNodeLauncher; -use reth_node_optimism::{ - args::RollupArgs, node::OptimismAddOns, rpc::SequencerClient, OptimismNode, -}; +use reth_node_optimism::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; +use reth_optimism_cli::Cli; use reth_optimism_rpc::eth::rpc::SequencerClient; use reth_provider::providers::BlockchainProvider2; -use std::sync::Arc; // We use jemalloc for performance reasons #[cfg(all(feature = "jemalloc", unix))] @@ -18,9 +16,6 @@ use std::sync::Arc; static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; fn main() { - use reth::args::utils::DefaultChainSpecParser; - use reth_optimism_cli::Cli; - reth_cli_util::sigsegv_handler::install(); // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. @@ -28,58 +23,56 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = - Cli::::parse().run(|builder, rollup_args| async move { - let enable_engine2 = rollup_args.experimental; - let sequencer_http_arg = rollup_args.sequencer_http.clone(); - match enable_engine2 { - true => { - let handle = builder - .with_types_and_provider::>() - .with_components(OptimismNode::components(rollup_args)) - .with_add_ons::() - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http)); - } + if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { + let enable_engine2 = rollup_args.experimental; + let sequencer_http_arg = rollup_args.sequencer_http.clone(); + match enable_engine2 { + true => { + let handle = builder + .with_types_and_provider::>() + .with_components(OptimismNode::components(rollup_args)) + .with_add_ons::() + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = sequencer_http_arg { + ctx.registry + .eth_api() + .set_sequencer_client(SequencerClient::new(sequencer_http)); + } - Ok(()) - }) - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - ); - builder.launch_with(launcher) - }) - .await?; + Ok(()) + }) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + ); + builder.launch_with(launcher) + }) + .await?; - handle.node_exit_future.await - } - false => { - let handle = builder - .node(OptimismNode::new(rollup_args.clone())) - .extend_rpc_modules(move |ctx| { - // register sequencer tx forwarder - if let Some(sequencer_http) = sequencer_http_arg { - ctx.registry - .eth_api() - .set_sequencer_client(SequencerClient::new(sequencer_http)); - } + handle.node_exit_future.await + } + false => { + let handle = builder + .node(OptimismNode::new(rollup_args.clone())) + .extend_rpc_modules(move |ctx| { + // register sequencer tx forwarder + if let Some(sequencer_http) = sequencer_http_arg { + ctx.registry + .eth_api() + .set_sequencer_client(SequencerClient::new(sequencer_http)); + } - Ok(()) - }) - .launch() - .await?; + Ok(()) + }) + .launch() + .await?; - handle.node_exit_future.await - } + handle.node_exit_future.await } - }) - { + } + }) { eprintln!("Error: {err:?}"); std::process::exit(1); } diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index fd290448d18b..952fee7625dc 100644 --- 
a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -28,7 +28,7 @@ use derive_more::{Constructor, Deref, Into}; use reth_chainspec::ChainSpec; /// OP stack chain spec type. -#[derive(Debug, Deref, Into, Constructor)] +#[derive(Debug, Clone, Deref, Into, Constructor)] pub struct OpChainSpec { /// [`ChainSpec`]. pub inner: ChainSpec, diff --git a/crates/optimism/cli/src/chainspec.rs b/crates/optimism/cli/src/chainspec.rs index af4cbc8956e1..03d78cba0a3c 100644 --- a/crates/optimism/cli/src/chainspec.rs +++ b/crates/optimism/cli/src/chainspec.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_node_core::args::utils::parse_custom_chain_spec; use reth_optimism_chainspec::{ @@ -26,7 +27,7 @@ fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { pub struct OpChainSpecParser; impl ChainSpecParser for OpChainSpecParser { - type ChainSpec = OpChainSpec; + type ChainSpec = ChainSpec; const SUPPORTED_CHAINS: &'static [&'static str] = &[ "dev", @@ -38,8 +39,8 @@ impl ChainSpecParser for OpChainSpecParser { "base-sepolia", ]; - fn parse(s: &str) -> eyre::Result> { - chain_value_parser(s) + fn parse(s: &str) -> eyre::Result> { + chain_value_parser(s).map(|s| Arc::new(Arc::unwrap_or_clone(s).inner)) } } From 2b7adcc3767aff01b9bf04c24802ff608104d125 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sun, 1 Sep 2024 03:01:18 +0400 Subject: [PATCH 39/40] rename to op-reth --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0d5682ab604d..96974a9ab88f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } reth-node-optimism = { path = "crates/optimism/node" } -reth-optimism-bin = { path = "crates/optimism/bin" } +op-reth = { path = "crates/optimism/bin" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } From ab626d5ca6f1a885164c5c39cf2eddb440535969 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 2 Sep 2024 17:15:13 +0400 Subject: [PATCH 40/40] add step to crate-checks --- .github/workflows/lint.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index f5879297c21d..6e944eb8f077 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -72,6 +72,7 @@ jobs: with: cache-on-failure: true - run: cargo hack check --workspace --exclude op-reth + - run: cargo check -p op-reth --features "optimism" msrv: name: MSRV / ${{ matrix.network }}
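One detail from patch 38 worth unpacking: `OpChainSpecParser` now sets `type ChainSpec = ChainSpec` and converts the parsed `Arc<OpChainSpec>` via `Arc::new(Arc::unwrap_or_clone(s).inner)`, which is also why the same patch derives `Clone` on `OpChainSpec` — `Arc::unwrap_or_clone` requires `T: Clone` so it can fall back to a clone when the `Arc` is shared. Below is a minimal, self-contained sketch of that conversion; the two structs are illustrative stand-ins, not the real reth types:

    use std::sync::Arc;

    // Stand-in for reth_chainspec::ChainSpec.
    #[derive(Clone, Debug, PartialEq)]
    struct ChainSpec {
        chain_id: u64,
    }

    // Stand-in for the OP wrapper type: a newtype around `ChainSpec`.
    // `Clone` is required by `Arc::unwrap_or_clone` below.
    #[derive(Clone, Debug)]
    struct OpChainSpec {
        inner: ChainSpec,
    }

    /// What the parser's `.map(...)` does: take ownership of the wrapper
    /// if this is the only `Arc` handle (clone it otherwise) and re-wrap
    /// the inner spec in a fresh `Arc`.
    fn into_inner_spec(op: Arc<OpChainSpec>) -> Arc<ChainSpec> {
        Arc::new(Arc::unwrap_or_clone(op).inner)
    }

    fn main() {
        let op = Arc::new(OpChainSpec { inner: ChainSpec { chain_id: 10 } });
        let eth = into_inner_spec(op);
        assert_eq!(*eth, ChainSpec { chain_id: 10 });
    }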