
Commit

fix(commitment-generator): compatibility with older protocol versions (#2963)

## What ❔

- fixes compatibility with older protocol versions (see the sketch below)
- refactors code
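
The core of the compatibility fix is a protocol-version gate when computing the L2→L1 logs Merkle root. A minimal sketch of that pattern (the free function below is illustrative; in the actual change this branch lives inside `L1BatchAuxiliaryOutput`, and the types and `is_pre_gateway` check are taken from the diff):

```rust
use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher};
use zksync_types::{ProtocolVersionId, H256};

// Sketch: pre-gateway batches expose only the batch-local L2->L1 logs root,
// while newer protocol versions fold in the aggregated (cross-chain) root.
fn l2_l1_logs_merkle_root(
    protocol_version: ProtocolVersionId,
    local_root: H256,
    aggregated_root: H256,
) -> H256 {
    if protocol_version.is_pre_gateway() {
        local_root
    } else {
        KeccakHasher.compress(&local_root, &aggregated_root)
    }
}
```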

## Why ❔

<!-- Why are these changes done? What goal do they contribute to? What
are the principles behind them? -->
<!-- Example: PR templates ensure PR reviewers, observers, and future
iterators are in context about the evolution of repos. -->

## Checklist

<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->

- [ ] PR title corresponds to the body of PR (we generate changelog
entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

perekopskiy committed Sep 27, 2024
1 parent cf814d1 commit 0c9b1e3
Showing 3 changed files with 129 additions and 107 deletions.
52 changes: 34 additions & 18 deletions core/lib/types/src/commitment/mod.rs
@@ -15,14 +15,18 @@ use zksync_contracts::BaseSystemContractsHashes;
use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher};
use zksync_mini_merkle_tree::MiniMerkleTree;
use zksync_system_constants::{
KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, ZKPORTER_IS_AVAILABLE,
KNOWN_CODES_STORAGE_ADDRESS, L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY_PRE_GATEWAY,
ZKPORTER_IS_AVAILABLE,
};
use zksync_utils::u256_to_h256;

use crate::{
blob::num_blobs_required,
block::{L1BatchHeader, L1BatchTreeData},
l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log},
l2_to_l1_log::{
l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log,
UserL2ToL1Log,
},
web3::keccak256,
writes::{
compress_state_diffs, InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord,
@@ -383,7 +387,11 @@ impl L1BatchAuxiliaryOutput {
)
.merkle_root();

let l2_l1_logs_merkle_root = KeccakHasher.compress(&local_root, &aggregated_root);
let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() {
local_root
} else {
KeccakHasher.compress(&local_root, &aggregated_root)
};

let common_output = L1BatchAuxiliaryCommonOutput {
l2_l1_logs_merkle_root,
@@ -399,18 +407,28 @@ impl L1BatchAuxiliaryOutput {

// Sanity checks. System logs are empty for the genesis batch, so we can't do checks for it.
if !system_logs.is_empty() {
// FIXME: maybe track for older versions
// let state_diff_hash_from_logs = system_logs
// .iter()
// .find_map(|log| {
// (log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into()))
// .then_some(log.0.value)
// })
// .expect("Failed to find state diff hash in system logs");
// assert_eq!(
// state_diffs_hash, state_diff_hash_from_logs,
// "State diff hash mismatch"
// );
if common_input.protocol_version.is_pre_gateway() {
let state_diff_hash_from_logs = system_logs
.iter()
.find_map(|log| {
(log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY_PRE_GATEWAY.into()))
.then_some(log.0.value)
})
.expect("Failed to find state diff hash in system logs");
assert_eq!(
state_diffs_hash, state_diff_hash_from_logs,
"State diff hash mismatch"
);

let blob_linear_hashes_from_logs = parse_system_logs_for_blob_hashes(
&common_input.protocol_version,
&system_logs,
);
assert_eq!(
blob_linear_hashes, blob_linear_hashes_from_logs,
"Blob linear hashes mismatch"
);
}

let l2_to_l1_logs_tree_root_from_logs = system_logs
.iter()
@@ -448,8 +466,7 @@ impl L1BatchAuxiliaryOutput {

pub fn get_local_root(&self) -> H256 {
match self {
// FIXME for pre boojum this is incorrect
Self::PreBoojum { .. } => H256::zero(),
Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root,
Self::PostBoojum { local_root, .. } => *local_root,
}
}
@@ -737,7 +754,6 @@ pub enum CommitmentInput {
state_diffs: Vec<StateDiffRecord>,
aux_commitments: AuxCommitments,
blob_commitments: Vec<H256>,
// FIXME: figure out whether it will work for the old server
blob_linear_hashes: Vec<H256>,
aggregated_root: H256,
},
99 changes: 12 additions & 87 deletions core/node/commitment_generator/src/lib.rs
@@ -5,25 +5,25 @@ use itertools::Itertools;
use tokio::{sync::watch, task::JoinHandle};
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck};
use zksync_l1_contract_interface::i_executor::commit::kzg::{
pubdata_to_blob_commitments, ZK_SYNC_BYTES_PER_BLOB,
};
use zksync_l1_contract_interface::i_executor::commit::kzg::pubdata_to_blob_commitments;
use zksync_multivm::zk_evm_latest::ethereum_types::U256;
use zksync_types::{
blob::num_blobs_required,
commitment::{
AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput,
L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode,
},
web3::keccak256,
writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord},
AccountTreeId, L1BatchNumber, ProtocolVersionId, StorageKey, H256, L2_MESSAGE_ROOT_ADDRESS,
L1BatchNumber, ProtocolVersionId, StorageKey, H256,
};
use zksync_utils::{h256_to_u256, u256_to_h256};
use zksync_utils::h256_to_u256;

use crate::{
metrics::{CommitmentStage, METRICS},
utils::{convert_vm_events_to_log_queries, CommitmentComputer, RealCommitmentComputer},
utils::{
convert_vm_events_to_log_queries, pubdata_to_blob_linear_hashes, read_aggregation_root,
CommitmentComputer, RealCommitmentComputer,
},
};

mod metrics;
@@ -196,6 +196,7 @@ impl CommitmentGenerator {
.storage_logs_dal()
.get_l1_batches_and_indices_for_initial_writes(&touched_hashed_keys)
.await?;
drop(connection);

let mut input = if protocol_version.is_pre_boojum() {
let mut initial_writes = Vec::new();
@@ -287,65 +288,11 @@ impl CommitmentGenerator {
)
};

let right_block = connection
.blocks_dal()
.get_l2_block_range_of_l1_batch(l1_batch_number)
.await?
.expect("No range for batch")
.1;

let message_root_addr = L2_MESSAGE_ROOT_ADDRESS;

println!("message_root_addr = {:#?}", message_root_addr);

const FULL_TREE_SLOT: usize = 3;
const NODES_SLOT: usize = 5;

let agg_tree_height_slot = StorageKey::new(
AccountTreeId::new(message_root_addr),
u256_to_h256(FULL_TREE_SLOT.into()),
);

let agg_tree_height = connection
.storage_web3_dal()
.get_historical_value_unchecked(agg_tree_height_slot.hashed_key(), right_block)
let mut connection = self
.connection_pool
.connection_tagged("commitment_generator")
.await?;
let agg_tree_height = h256_to_u256(agg_tree_height);

println!("Agg tree height: {}", agg_tree_height);

let nodes_slot_position_enoded = u256_to_h256(U256::from(NODES_SLOT));

println!(
"nodes_slot_position_enoded: {:#?}",
nodes_slot_position_enoded
);
let nodes_slot_position_enoded = H256(keccak256(&nodes_slot_position_enoded.0));
println!(
"nodes_slot_position_enoded2: {:#?}",
nodes_slot_position_enoded
);

let nodes_slot_position_enoded =
u256_to_h256(h256_to_u256(nodes_slot_position_enoded) + agg_tree_height);
println!(
"nodes_slot_position_enoded3: {:#?}",
nodes_slot_position_enoded
);

let root_slot_offset = H256(keccak256(&nodes_slot_position_enoded.0));
println!("root_slot_offset: {:#?}", nodes_slot_position_enoded);

let root_slot =
StorageKey::new(AccountTreeId::new(message_root_addr), root_slot_offset);
let aggregated_root = connection
.storage_web3_dal()
.get_historical_value_unchecked(root_slot.hashed_key(), right_block)
.await?;

println!("aggregated_root: {:#?}", aggregated_root);

// let root_slot_offset = H256(keccak256(&nodes_slot_position_enoded.0));
let aggregated_root = read_aggregation_root(&mut connection, l1_batch_number).await?;

CommitmentInput::PostBoojum {
common,
@@ -433,7 +380,6 @@ impl CommitmentGenerator {
(L1BatchCommitmentMode::Rollup, _) => {
// Do nothing
}

(
L1BatchCommitmentMode::Validium,
CommitmentInput::PostBoojum {
@@ -534,24 +480,3 @@ impl CommitmentGenerator {
Ok(())
}
}

fn pubdata_to_blob_linear_hashes(blobs_required: usize, mut pubdata_input: Vec<u8>) -> Vec<H256> {
// Now, we need to calculate the linear hashes of the blobs.
// Firstly, let's pad the pubdata to the size of the blob.
if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 {
let padding =
vec![0u8; ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB];
pubdata_input.extend(padding);
}

let mut result = vec![H256::zero(); blobs_required];

pubdata_input
.chunks(ZK_SYNC_BYTES_PER_BLOB)
.enumerate()
.for_each(|(i, chunk)| {
result[i] = H256(keccak256(chunk));
});

result
}
85 changes: 83 additions & 2 deletions core/node/commitment_generator/src/utils.rs
@@ -2,6 +2,7 @@

use std::fmt;

use anyhow::Context;
use itertools::Itertools;
use zk_evm_1_3_3::{
aux_structures::Timestamp as Timestamp_1_3_3,
@@ -15,13 +16,17 @@ use zk_evm_1_5_0::{
aux_structures::Timestamp as Timestamp_1_5_0,
zk_evm_abstractions::queries::LogQuery as LogQuery_1_5_0,
};
use zksync_dal::{Connection, Core, CoreDal};
use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB;
use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes};
use zksync_types::{
vm::VmVersion,
web3::keccak256,
zk_evm_types::{LogQuery, Timestamp},
ProtocolVersionId, EVENT_WRITER_ADDRESS, H256, U256,
AccountTreeId, L1BatchNumber, ProtocolVersionId, StorageKey, EVENT_WRITER_ADDRESS, H256,
L2_MESSAGE_ROOT_ADDRESS, U256,
};
use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256};
use zksync_utils::{address_to_u256, expand_memory_contents, h256_to_u256, u256_to_h256};

/// Encapsulates computations of commitment components.
///
@@ -236,3 +241,79 @@ pub(crate) fn convert_vm_events_to_log_queries(events: &[VmEvent]) -> Vec<LogQuery> {
})
.collect()
}

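/// Computes the per-blob "linear" hashes of `pubdata_input`: the pubdata is zero-padded to a
/// multiple of `ZK_SYNC_BYTES_PER_BLOB`, each chunk is hashed with keccak256, and unused blobs
/// (up to `blobs_required`) keep an all-zero hash.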
pub(crate) fn pubdata_to_blob_linear_hashes(
blobs_required: usize,
mut pubdata_input: Vec<u8>,
) -> Vec<H256> {
// Now, we need to calculate the linear hashes of the blobs.
// Firstly, let's pad the pubdata to the size of the blob.
if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 {
let padding =
vec![0u8; ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB];
pubdata_input.extend(padding);
}

let mut result = vec![H256::zero(); blobs_required];

pubdata_input
.chunks(ZK_SYNC_BYTES_PER_BLOB)
.enumerate()
.for_each(|(i, chunk)| {
result[i] = H256(keccak256(chunk));
});

result
}

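/// Reads the aggregated message root for `l1_batch_number` from the storage of the `MessageRoot`
/// contract at `L2_MESSAGE_ROOT_ADDRESS`, as of the last L2 block of the batch.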
pub(crate) async fn read_aggregation_root(
connection: &mut Connection<'_, Core>,
l1_batch_number: L1BatchNumber,
) -> anyhow::Result<H256> {
// Position of `FullTree::_height` in `MessageRoot`'s storage layout.
const AGG_TREE_HEIGHT_KEY: usize = 3;

// Position of `FullTree::nodes` in `MessageRoot`'s storage layout.
const AGG_TREE_NODES_KEY: usize = 5;

let (_, last_l2_block) = connection
.blocks_dal()
.get_l2_block_range_of_l1_batch(l1_batch_number)
.await?
.context("No range for batch")?;

let agg_tree_height_slot = StorageKey::new(
AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS),
u256_to_h256(AGG_TREE_HEIGHT_KEY.into()),
);

let agg_tree_height = connection
.storage_web3_dal()
.get_historical_value_unchecked(agg_tree_height_slot.hashed_key(), last_l2_block)
.await?;
let agg_tree_height = h256_to_u256(agg_tree_height);

// `nodes[height][0]`
let agg_tree_root_hash_key =
n_dim_array_key_in_layout(AGG_TREE_NODES_KEY, &[agg_tree_height, U256::zero()]);
let agg_tree_root_hash_slot = StorageKey::new(
AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS),
agg_tree_root_hash_key,
);

Ok(connection
.storage_web3_dal()
.get_historical_value_unchecked(agg_tree_root_hash_slot.hashed_key(), last_l2_block)
.await?)
}

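/// Computes the storage key of an element in an n-dimensional dynamic array, following Solidity's
/// storage layout: the data of an array at slot `p` starts at `keccak256(p)`, and element `i`
/// lives at `keccak256(p) + i`; nesting repeats this per dimension.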
fn n_dim_array_key_in_layout(array_key: usize, indices: &[U256]) -> H256 {
let mut key: H256 = u256_to_h256(array_key.into());

for index in indices {
key = H256(keccak256(key.as_bytes()));
key = u256_to_h256(h256_to_u256(key).overflowing_add(*index).0);
}

key
}
