diff --git a/Cargo.lock b/Cargo.lock
index 91fd00c99c..19e1816ce9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6647,6 +6647,18 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "quick_cache"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec932c60e6faf77dc6601ea149a23d821598b019b450bb1d98fe89c0301c0b61"
+dependencies = [
+ "ahash",
+ "equivalent",
+ "hashbrown 0.14.5",
+ "parking_lot 0.12.3",
+]
+
[[package]]
name = "quinn"
version = "0.11.2"
@@ -7269,9 +7281,11 @@ dependencies = [
"alloy-genesis",
"aquamarine",
"assert_matches",
+ "lazy_static",
"linked_hash_set",
"metrics",
"parking_lot 0.12.3",
+ "quick_cache",
"reth-blockchain-tree-api",
"reth-chainspec",
"reth-consensus",
@@ -8718,6 +8732,7 @@ dependencies = [
"reth-auto-seal-consensus",
"reth-basic-payload-builder",
"reth-beacon-consensus",
+ "reth-blockchain-tree",
"reth-chainspec",
"reth-consensus",
"reth-db",
diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml
index b3679677a1..99c0e8fe75 100644
--- a/crates/blockchain-tree/Cargo.toml
+++ b/crates/blockchain-tree/Cargo.toml
@@ -38,6 +38,10 @@ tokio = { workspace = true, features = ["macros", "sync"] }
reth-metrics = { workspace = true, features = ["common"] }
metrics.workspace = true
+# cache
+quick_cache = "0.6.2"
+lazy_static = "1.4.0"
+
# misc
aquamarine.workspace = true
linked_hash_set.workspace = true
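
The two new dependencies carry the feature: `quick_cache` supplies the sharded, thread-safe in-memory cache, and `lazy_static` hosts the caches as process-wide statics. A minimal sketch (not part of the patch) of the `quick_cache::sync::Cache` surface the new module relies on:

```rust
use quick_cache::sync::Cache;

fn main() {
    // capacity is an item count, not a byte budget
    let cache: Cache<u64, String> = Cache::new(100);

    cache.insert(1, "one".to_string());
    // `get` hands back a clone of the value, so `Val: Clone` is required
    assert_eq!(cache.get(&1), Some("one".to_string()));

    cache.remove(&1);
    cache.clear(); // the storage-wipe path below relies on this
}
```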
diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs
index 501ce43817..d265fdc8df 100644
--- a/crates/blockchain-tree/src/blockchain_tree.rs
+++ b/crates/blockchain-tree/src/blockchain_tree.rs
@@ -1,6 +1,7 @@
//! Implementation of [`BlockchainTree`]
use crate::{
+ canonical_cache::apply_bundle_state,
metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics},
state::{BlockchainId, TreeState},
AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals,
@@ -1261,6 +1262,7 @@ where
};
recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates);
+ let cloned_bundle = state.bundle.clone();
let provider_rw = self.externals.provider_factory.provider_rw()?;
provider_rw
.append_blocks_with_state(
@@ -1274,6 +1276,9 @@ where
provider_rw.commit()?;
recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase);
+ // update the global canonical cache with the just-committed state
+ apply_bundle_state(cloned_bundle);
+
Ok(())
}
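
The ordering in this hunk is the point: the bundle is cloned before `append_blocks_with_state` consumes the execution state, and `apply_bundle_state` runs only after `commit()` returns, so a failed write leaves the cache merely stale, never ahead of the database. A hedged sketch of the pattern (all names hypothetical):

```rust
// Clone what the write will consume, commit first, publish to the cache last:
// an error from `commit` returns early and never touches the cache.
fn commit_then_publish<S: Clone, E>(
    state: S,
    commit: impl FnOnce(S) -> Result<(), E>, // consumes the state, like the DB write
    publish: impl FnOnce(S),                 // infallible cache update
) -> Result<(), E> {
    let for_cache = state.clone(); // mirrors `let cloned_bundle = state.bundle.clone();`
    commit(state)?;                // mirrors `append_blocks_with_state(..)` + `commit()?`
    publish(for_cache);            // mirrors `apply_bundle_state(cloned_bundle)`
    Ok(())
}
```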
diff --git a/crates/blockchain-tree/src/canonical_cache.rs b/crates/blockchain-tree/src/canonical_cache.rs
new file mode 100644
index 0000000000..33c297efe2
--- /dev/null
+++ b/crates/blockchain-tree/src/canonical_cache.rs
@@ -0,0 +1,397 @@
+use lazy_static::lazy_static;
+use quick_cache::sync::Cache;
+use reth_primitives::{Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256};
+use reth_provider::{
+ AccountReader, BlockHashReader, ExecutionDataProvider, StateProofProvider, StateProvider,
+ StateRootProvider,
+};
+use reth_revm::db::BundleState;
+use reth_storage_errors::provider::ProviderResult;
+use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState};
+
+/// The cache capacity, measured in number of accounts.
+const CACHE_SIZE: usize = 1_000_000;
+
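+/// Key type for the storage cache: an account address paired with a storage slot key.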
+type AddressStorageKey = (Address, StorageKey);
+
+lazy_static! {
+    /// Account cache
+    pub static ref ACCOUNT_CACHE: Cache<Address, Account> = Cache::new(CACHE_SIZE);
+
+    /// Contract cache
+    /// Contract bytecode is large, so only a smaller set of hot contracts is kept.
+    static ref CONTRACT_CACHE: Cache<B256, Bytecode> = Cache::new(CACHE_SIZE / 10);
+
+    /// Storage cache
+    static ref STORAGE_CACHE: Cache<AddressStorageKey, StorageValue> = Cache::new(CACHE_SIZE * 10);
+
+    /// Block hash cache
+    static ref BLOCK_HASH_CACHE: Cache<u64, B256> = Cache::new(CACHE_SIZE / 10);
+}
+
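
These statics are read-mostly: lookups are expected to hit the cache first and fall back to the underlying `StateProvider`, populating on a miss. A hypothetical read-through helper (the name `cached_basic_account` is illustrative, not from the patch):

```rust
/// Illustrative read-through lookup against the global account cache.
fn cached_basic_account<SP: StateProvider>(
    provider: &SP,
    address: Address,
) -> ProviderResult<Option<Account>> {
    // fast path: serve from the canonical cache
    if let Some(account) = ACCOUNT_CACHE.get(&address) {
        return Ok(Some(account));
    }
    // slow path: ask the inner provider and populate the cache on a hit
    let account = provider.basic_account(address)?;
    if let Some(account) = account {
        ACCOUNT_CACHE.insert(address, account);
    }
    Ok(account)
}
```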
+/// Apply committed state to canonical cache.
+pub(crate) fn apply_bundle_state(bundle: BundleState) {
+    // flatten the bundle into plain per-account and per-slot changes
+    let change_set = bundle.into_plain_state(reth_provider::OriginalValuesKnown::Yes);
+
+    for (address, account_info) in &change_set.accounts {
+        match account_info {
+            None => {
+                ACCOUNT_CACHE.remove(address);
+            }
+            Some(acc) => {
+                ACCOUNT_CACHE.insert(
+                    *address,
+                    Account {
+                        nonce: acc.nonce,
+                        balance: acc.balance,
+                        bytecode_hash: Some(acc.code_hash),
+                    },
+                );
+            }
+        }
+    }
+
+    let mut to_wipe = false;
+    for storage in &change_set.storage {
+        if storage.wipe_storage {
+            to_wipe = true;
+            break;
+        } else {
+            for (k, v) in storage.storage.clone() {
+                STORAGE_CACHE.insert((storage.address, StorageKey::from(k)), v);
+            }
+        }
+    }
+    // a wiped account invalidates an unknown set of cached slots, so drop the
+    // whole storage cache rather than tracking per-account removals
+    if to_wipe {
+        STORAGE_CACHE.clear();
+    }
+}
+
+/// Clear cached accounts and storages.
+pub fn clear_accounts_and_storages() {
+    ACCOUNT_CACHE.clear();
+    STORAGE_CACHE.clear();
+}
+
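
`clear_accounts_and_storages` is the coarse invalidation hook for cases the incremental write path cannot describe, e.g. when canonical blocks are unwound during a reorg. A hypothetical call site (not part of the patch):

```rust
// After unwinding canonical blocks, the cached canonical state is stale in
// ways `apply_bundle_state` cannot express, so drop it wholesale.
fn on_canonical_unwind() {
    clear_accounts_and_storages();
}
```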
+#[derive(Debug)]
+pub(crate) struct CachedBundleStateProvider<SP: StateProvider, EDP: ExecutionDataProvider> {
+    /// The inner state provider.
+    pub(crate) state_provider: SP,
+    /// Block execution data.
+    pub(crate) block_execution_data_provider: EDP,
+}
+
+impl<SP: StateProvider, EDP: ExecutionDataProvider> CachedBundleStateProvider<SP, EDP> {
+    /// Create a new cached bundle state provider.
+    pub(crate) const fn new(state_provider: SP, block_execution_data_provider: EDP) -> Self {
+        Self { state_provider, block_execution_data_provider }
+    }
+}
+
+impl<SP: StateProvider, EDP: ExecutionDataProvider> BlockHashReader
+    for CachedBundleStateProvider<SP, EDP>
+{
+    fn block_hash(&self, block_number: BlockNumber) -> ProviderResult<Option<B256>> {