Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: implement anvil_zks_{prove,execute}Batch #586

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions crates/api_decl/src/namespaces/anvil_zks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,10 @@ use zksync_types::{L1BatchNumber, H256};
pub trait AnvilZksNamespace {
    /// Commit the given L1 batch to L1. Returns the hash of the L1 commit transaction.
    #[method(name = "commitBatch")]
    async fn commit_batch(&self, batch_number: L1BatchNumber) -> RpcResult<H256>;

    /// Prove the given L1 batch on L1. Returns the hash of the L1 prove transaction.
    /// NOTE(review): presumably the batch must have been committed first — confirm ordering requirements.
    #[method(name = "proveBatch")]
    async fn prove_batch(&self, batch_number: L1BatchNumber) -> RpcResult<H256>;

    /// Execute the given L1 batch on L1. Returns the hash of the L1 execute transaction.
    #[method(name = "executeBatch")]
    async fn execute_batch(&self, batch_number: L1BatchNumber) -> RpcResult<H256>;
Comment on lines +10 to +15
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we be documenting these API endpoints in https://github.com/matter-labs/anvil-zksync/blob/main/SUPPORTED_APIS.md?

}
16 changes: 16 additions & 0 deletions crates/api_server/src/impls/anvil_zks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,4 +23,20 @@ impl AnvilZksNamespaceServer for AnvilZksNamespace {
.await
.map_err(RpcError::from)?)
}

async fn prove_batch(&self, batch_number: L1BatchNumber) -> RpcResult<H256> {
Ok(self
.l1_sidecar
.prove_batch(batch_number)
.await
.map_err(RpcError::from)?)
}

async fn execute_batch(&self, batch_number: L1BatchNumber) -> RpcResult<H256> {
Ok(self
.l1_sidecar
.execute_batch(batch_number)
.await
.map_err(RpcError::from)?)
}
}
21 changes: 11 additions & 10 deletions crates/cli/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -189,16 +189,6 @@ async fn main() -> anyhow::Result<()> {
None
};

let mut node_service_tasks: Vec<Pin<Box<dyn Future<Output = anyhow::Result<()>>>>> = Vec::new();
let l1_sidecar = match config.l1_config.as_ref() {
Some(l1_config) => {
let (l1_sidecar, l1_sidecar_runner) = L1Sidecar::builtin(l1_config.port).await?;
node_service_tasks.push(Box::pin(l1_sidecar_runner.run()));
l1_sidecar
}
None => L1Sidecar::none(),
};

let impersonation = ImpersonationManager::default();
if config.enable_auto_impersonate {
// Enable auto impersonation if configured
Expand Down Expand Up @@ -230,6 +220,17 @@ async fn main() -> anyhow::Result<()> {
storage_key_layout,
);

let mut node_service_tasks: Vec<Pin<Box<dyn Future<Output = anyhow::Result<()>>>>> = Vec::new();
let l1_sidecar = match config.l1_config.as_ref() {
Some(l1_config) => {
let (l1_sidecar, l1_sidecar_runner) =
L1Sidecar::builtin(l1_config.port, blockchain.clone()).await?;
node_service_tasks.push(Box::pin(l1_sidecar_runner.run()));
l1_sidecar
}
None => L1Sidecar::none(),
};

let (node_executor, node_handle) = NodeExecutor::new(
node_inner.clone(),
system_contracts.clone(),
Expand Down
35 changes: 26 additions & 9 deletions crates/core/src/node/in_memory.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ use std::collections::{HashMap, HashSet};
use std::io::{Read, Write};
use std::sync::Arc;
use tokio::sync::RwLock;
use zksync_contracts::BaseSystemContracts;
use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes};
use zksync_multivm::interface::storage::{ReadStorage, StoragePtr};
use zksync_multivm::interface::VmFactory;
use zksync_multivm::interface::{
Expand All @@ -48,7 +48,7 @@ use crate::node::keys::StorageKeyLayout;
use zksync_multivm::vm_latest::{HistoryDisabled, ToTracerPointer};
use zksync_multivm::VmVersion;
use zksync_types::api::{Block, DebugCall, TransactionReceipt, TransactionVariant};
use zksync_types::block::unpack_block_info;
use zksync_types::block::{unpack_block_info, L1BatchHeader};
use zksync_types::fee_model::BatchFeeInput;
use zksync_types::l2::L2Tx;
use zksync_types::storage::{
Expand All @@ -57,7 +57,8 @@ use zksync_types::storage::{
use zksync_types::web3::{keccak256, Bytes};
use zksync_types::{
h256_to_u256, AccountTreeId, Address, Bloom, L1BatchNumber, L2BlockNumber, L2ChainId,
PackedEthSignature, StorageKey, StorageValue, Transaction, H160, H256, H64, U256, U64,
PackedEthSignature, ProtocolVersionId, StorageKey, StorageValue, Transaction, H160, H256, H64,
U256, U64,
};

/// Max possible size of an ABI encoded tx (in bytes).
Expand All @@ -82,7 +83,7 @@ pub fn compute_hash<'a>(block_number: u64, tx_hashes: impl IntoIterator<Item = &
pub fn create_genesis_from_json(
genesis: &Genesis,
timestamp: Option<u64>,
) -> Block<TransactionVariant> {
) -> (Block<TransactionVariant>, L1BatchHeader) {
let hash = genesis.hash.unwrap_or_else(|| compute_hash(0, []));
let timestamp = timestamp
.or(genesis.timestamp)
Expand All @@ -103,7 +104,7 @@ pub fn create_genesis_from_json(
},
});

create_block(
let genesis_block = create_block(
&l1_batch_env,
hash,
genesis.parent_hash.unwrap_or_else(H256::zero),
Expand All @@ -112,10 +113,18 @@ pub fn create_genesis_from_json(
genesis.transactions.clone().unwrap_or_default(),
genesis.gas_used.unwrap_or_else(U256::zero),
genesis.logs_bloom.unwrap_or_else(Bloom::zero),
)
);
let genesis_batch_header = L1BatchHeader::new(
L1BatchNumber(0),
timestamp,
BaseSystemContractsHashes::default(),
ProtocolVersionId::latest(),
);

(genesis_block, genesis_batch_header)
}

pub fn create_genesis<TX>(timestamp: Option<u64>) -> Block<TX> {
pub fn create_genesis<TX>(timestamp: Option<u64>) -> (Block<TX>, L1BatchHeader) {
let hash = compute_hash(0, []);
let timestamp = timestamp.unwrap_or(NON_FORK_FIRST_BLOCK_TIMESTAMP);
let batch_env = L1BatchEnv {
Expand All @@ -132,7 +141,7 @@ pub fn create_genesis<TX>(timestamp: Option<u64>) -> Block<TX> {
max_virtual_blocks_to_create: 0,
},
};
create_block(
let genesis_block = create_block(
&batch_env,
hash,
H256::zero(),
Expand All @@ -141,7 +150,15 @@ pub fn create_genesis<TX>(timestamp: Option<u64>) -> Block<TX> {
vec![],
U256::zero(),
Bloom::zero(),
)
);
let genesis_batch_header = L1BatchHeader::new(
L1BatchNumber(0),
timestamp,
BaseSystemContractsHashes::default(),
ProtocolVersionId::latest(),
);

(genesis_block, genesis_batch_header)
}

#[allow(clippy::too_many_arguments)]
Expand Down
47 changes: 41 additions & 6 deletions crates/core/src/node/inner/blockchain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,14 @@ use anyhow::Context;
use async_trait::async_trait;
use itertools::Itertools;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::Arc;
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use zksync_contracts::BaseSystemContractsHashes;
use zksync_multivm::interface::storage::{ReadStorage, StoragePtr};
use zksync_multivm::interface::L2Block;
use zksync_multivm::vm_latest::utils::l2_blocks::load_last_l2_block;
use zksync_types::block::{unpack_block_info, L2BlockHasher};
use zksync_types::block::{unpack_block_info, L1BatchHeader, L2BlockHasher};
use zksync_types::{
api, h256_to_u256, AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber,
L2BlockNumber, ProtocolVersionId, StorageKey, H256, SYSTEM_CONTEXT_ADDRESS,
Expand All @@ -24,7 +25,7 @@ use zksync_types::{

/// Read-only view on blockchain state.
#[async_trait]
pub trait ReadBlockchain: Send + Sync {
pub trait ReadBlockchain: Send + Sync + Debug {
/// Alternative for [`Clone::clone`] that is object safe.
fn dyn_cloned(&self) -> Box<dyn ReadBlockchain>;

Expand Down Expand Up @@ -143,6 +144,9 @@ pub trait ReadBlockchain: Send + Sync {

/// Retrieve all logs matching given filter. Does not return matching logs from pre-fork blocks.
async fn get_filter_logs(&self, log_filter: &LogFilter) -> Vec<api::Log>;

/// Retrieve batch header by its number.
async fn get_batch_header(&self, batch_number: L1BatchNumber) -> Option<L1BatchHeader>;
}

impl Clone for Box<dyn ReadBlockchain> {
Expand All @@ -151,7 +155,7 @@ impl Clone for Box<dyn ReadBlockchain> {
}
}

#[derive(Clone)]
#[derive(Debug, Clone)]
pub(super) struct Blockchain {
inner: Arc<RwLock<BlockchainState>>,
}
Expand Down Expand Up @@ -468,6 +472,11 @@ impl ReadBlockchain for Blockchain {
})
.await
}

async fn get_batch_header(&self, batch_number: L1BatchNumber) -> Option<L1BatchHeader> {
let storage = self.inner.read().await;
storage.batches.get(&batch_number).cloned()
}
}

impl Blockchain {
Expand All @@ -487,11 +496,12 @@ impl Blockchain {
fork_details.api_block.clone(),
)]),
hashes: HashMap::from_iter([(fork_details.block_number, fork_details.block_hash)]),
// Batches are not being used when running in forking mode
batches: HashMap::from_iter([]),
Comment on lines +499 to +500
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why would batches not be used during forking?

}
} else {
let block_hash = compute_hash(0, []);
let genesis_block: api::Block<api::TransactionVariant> = if let Some(genesis) = genesis
{
let (genesis_block, genesis_batch_header) = if let Some(genesis) = genesis {
create_genesis_from_json(genesis, genesis_timestamp)
} else {
create_genesis(genesis_timestamp)
Expand All @@ -504,6 +514,7 @@ impl Blockchain {
tx_results: Default::default(),
blocks: HashMap::from_iter([(block_hash, genesis_block)]),
hashes: HashMap::from_iter([(L2BlockNumber(0), block_hash)]),
batches: HashMap::from_iter([(L1BatchNumber(0), genesis_batch_header)]),
}
};
let inner = Arc::new(RwLock::new(state));
Expand All @@ -522,7 +533,7 @@ impl Blockchain {
}

/// Stores the blockchain data (blocks, transactions)
#[derive(Clone)]
#[derive(Debug, Clone)]
pub(super) struct BlockchainState {
/// The latest batch number that was already generated.
/// Next block will go to the batch `current_batch + 1`.
Expand All @@ -538,6 +549,10 @@ pub(super) struct BlockchainState {
pub(super) blocks: HashMap<H256, api::Block<api::TransactionVariant>>,
/// Map from block number to a block hash.
pub(super) hashes: HashMap<L2BlockNumber, H256>,
/// Map from batch number to batch header. Hash is not used as the key because it is not
/// necessarily computed by the time this entry is inserted (i.e. it is not an inherent property
/// of a batch).
pub(super) batches: HashMap<L1BatchNumber, L1BatchHeader>,
}

impl BlockchainState {
Expand Down Expand Up @@ -620,6 +635,26 @@ impl BlockchainState {
self.blocks.insert(block.hash, block);
}

pub(super) fn apply_batch(&mut self, tx_results: impl IntoIterator<Item = TransactionResult>) {
self.current_batch += 1;
// Our version of contracts has system log and data availability verification disabled.
// Hence, we are free to create a zeroed out dummy header without any logs whatsoever.
let header = L1BatchHeader::new(
self.current_batch,
0,
// We could use contract hashes that were actually used, but they might differ from what
// L1 expects due to impersonation
Comment on lines +640 to +646
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If a user checkouts different system contracts that has system log and data availability verification enabled will that be a problem? Just thinking about our own protocol devs usage.

BaseSystemContractsHashes::default(),
ProtocolVersionId::latest(),
);
self.batches.insert(self.current_batch, header);
self.tx_results.extend(
tx_results
.into_iter()
.map(|r| (r.receipt.transaction_hash, r)),
);
}

pub(super) fn load_blocks(
&mut self,
time: &mut Time,
Expand Down
11 changes: 4 additions & 7 deletions crates/core/src/node/inner/in_memory_inner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -230,12 +230,7 @@ impl InMemoryNodeInner {
}

let mut storage = self.blockchain.write().await;
storage.current_batch += 1;
storage.tx_results.extend(
tx_results
.into_iter()
.map(|r| (r.receipt.transaction_hash, r)),
);
storage.apply_batch(tx_results);
for (index, block) in blocks.into_iter().enumerate() {
// archive current state before we produce new batch/blocks
archive_state(
Expand Down Expand Up @@ -1697,10 +1692,12 @@ mod tests {

#[tokio::test]
async fn test_create_genesis_creates_block_with_hash_and_zero_parent_hash() {
    // Genesis created with an explicit timestamp must yield block 0 with the
    // deterministic genesis hash, a zero parent hash, and a batch header numbered 0.
    let (first_block, first_batch) = create_genesis::<TransactionVariant>(Some(1000));

    assert_eq!(first_block.hash, compute_hash(0, []));
    assert_eq!(first_block.parent_hash, H256::zero());

    assert_eq!(first_batch.number, L1BatchNumber(0));
}

#[tokio::test]
Expand Down
4 changes: 3 additions & 1 deletion crates/l1_sidecar/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ keywords.workspace = true
categories.workspace = true

[dependencies]
anvil_zksync_config.workspace = true
anvil_zksync_core.workspace = true
anvil_zksync_types.workspace = true

zksync_contracts.workspace = true
zksync_types.workspace = true
Expand All @@ -28,3 +29,4 @@ tokio.workspace = true
tracing.workspace = true

[dev-dependencies]
async-trait.workspace = true
26 changes: 16 additions & 10 deletions crates/l1_sidecar/src/anvil.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use crate::zkstack_config::ZkstackConfig;
use alloy::network::EthereumWallet;
use alloy::providers::{Provider, ProviderBuilder};
use anyhow::Context;
use foundry_anvil::{NodeConfig, NodeHandle};
use foundry_common::Shell;
use std::time::Duration;
Expand Down Expand Up @@ -92,18 +93,23 @@ async fn setup_provider(port: u16, config: &ZkstackConfig) -> anyhow::Result<Box
.await?;

// Wait for anvil to be up
loop {
match provider.get_accounts().await {
Ok(_) => {
break;
tokio::time::timeout(Duration::from_secs(60), async {
loop {
match provider.get_accounts().await {
Ok(_) => {
return anyhow::Ok(());
}
Err(err) if err.is_transport_error() => {
tracing::debug!(?err, "L1 Anvil is not up yet; sleeping");
tokio::time::sleep(Duration::from_millis(100)).await;
}
Err(err) => return Err(err.into()),
}
Err(err) if err.is_transport_error() => {
tracing::debug!(?err, "L1 Anvil is not up yet; sleeping");
tokio::time::sleep(Duration::from_millis(100)).await;
}
Err(err) => return Err(err.into()),
}
}
})
.await
.context("L1 anvil failed to start")?
.context("unexpected response from L1 anvil")?;

Ok(Box::new(provider))
}
Loading