diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e1bdcd37f..f049b4a30 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -152,7 +152,7 @@ jobs: uses: actions/checkout@v3 - name: Run the script - run: ./scripts/prove_stdio.sh artifacts/witness_b19807080.json + run: ./scripts/prove_stdio.sh artifacts/witness_b19807080.json true simple_proof_witness_only: name: Execute bash script to generate the proof witness for a small block. @@ -163,7 +163,7 @@ jobs: uses: actions/checkout@v3 - name: Run the script - run: ./scripts/prove_stdio.sh artifacts/witness_b19807080.json test_only + run: ./scripts/prove_stdio.sh artifacts/witness_b19807080.json false test_only multi_blocks_proof_regular: name: Execute bash script to generate and verify a proof for multiple blocks using parallel proving. @@ -174,7 +174,7 @@ jobs: uses: actions/checkout@v3 - name: Run the script - run: ./scripts/prove_stdio.sh artifacts/witness_b3_b6.json + run: ./scripts/prove_stdio.sh artifacts/witness_b3_b6.json true lints: name: Rustdoc, Formatting and Clippy diff --git a/evm_arithmetization/src/fixed_recursive_verifier.rs b/evm_arithmetization/src/fixed_recursive_verifier.rs index 3c1f0b1e1..6b020dc06 100644 --- a/evm_arithmetization/src/fixed_recursive_verifier.rs +++ b/evm_arithmetization/src/fixed_recursive_verifier.rs @@ -108,6 +108,108 @@ where pub by_table: [RecursiveCircuitsForTable; NUM_TABLES], } +// TODO(Robin): This should be refactored in two distinct states, chain-specific +// (i.e. current `AllRecursiveCircuits` up to block circuit), and cross-chain +// specific (the 2-to-1 aggregation piece). +// cf: https://github.com/0xPolygonZero/zk_evm/issues/622 +/// While the prover state [`AllRecursiveCircuits`] can also verify proofs, this +/// [`AllVerifierData`] is much lighter, allowing anyone to verify block proofs, +/// wrapped block proofs and independently aggregated block proofs regardless of +/// the underlying hardware. 
+pub struct AllVerifierData +where + F: RichField + Extendable, + C: GenericConfig, + C::Hasher: AlgebraicHasher, +{ + /// Verifier data used to verify block proofs. + block_data: VerifierCircuitData, + /// Verifier data used to verify wrapped block proofs. + wrapped_block_data: VerifierCircuitData, + /// Verifier data used to verify aggregated block proofs. + aggregated_block_data: VerifierCircuitData, +} + +impl AllVerifierData +where + F: RichField + Extendable, + C: GenericConfig, + C::Hasher: AlgebraicHasher, +{ + /// Verifies a chain's block proof generated with the + /// [`AllRecursiveCircuits`] counterpart of this [`AllVerifierData`]. + /// + /// Block proofs can represent an arbitrarily long sequence of contiguous + /// blocks from a known checkpoint block height. + pub fn verify_block(&self, proof: ProofWithPublicInputs) -> anyhow::Result<()> { + // Verifier data verification + check_cyclic_proof_verifier_data( + &proof, + &self.block_data.verifier_only, + &self.block_data.common, + )?; + + // Proof verification + self.block_data.verify(proof) + } + + /// Verifies a chain's wrapped block proof generated with the + /// [`AllRecursiveCircuits`] counterpart of this [`AllVerifierData`]. + /// + /// Wrapped block proofs are the final version of block proofs, in that they + /// are more succinct and only exposes minimal public inputs related to + /// the chain and its latest state. + pub fn verify_block_wrapper( + &self, + proof: ProofWithPublicInputs, + ) -> anyhow::Result<()> { + // Proof verification + self.wrapped_block_data.verify(proof) + } + + /// Verifies an aggregated block proof generated with the + /// [`AllRecursiveCircuits`] counterpart of this [`AllVerifierData`]. + /// + /// Aggregated block proofs consist in an aggregation of arbitrarily many, + /// independent wrapped block proofs generated by arbitrarily many distinct + /// chains. 
+ pub fn verify_block_aggreg(&self, proof: ProofWithPublicInputs) -> anyhow::Result<()> { + // Verifier data verification + check_cyclic_proof_verifier_data( + &proof, + &self.aggregated_block_data.verifier_only, + &self.aggregated_block_data.common, + )?; + + // Proof verification + self.aggregated_block_data.verify(proof) + } + + pub fn to_bytes(&self, gate_serializer: &dyn GateSerializer) -> IoResult> { + let mut buffer = Vec::new(); + buffer.write_verifier_circuit_data(&self.block_data, gate_serializer)?; + buffer.write_verifier_circuit_data(&self.wrapped_block_data, gate_serializer)?; + buffer.write_verifier_circuit_data(&self.aggregated_block_data, gate_serializer)?; + Ok(buffer) + } + + pub fn from_bytes( + bytes: Vec, + gate_serializer: &dyn GateSerializer, + ) -> IoResult { + let mut buffer = Buffer::new(&bytes); + let block_data = buffer.read_verifier_circuit_data(gate_serializer)?; + let wrapped_block_data = buffer.read_verifier_circuit_data(gate_serializer)?; + let aggregated_block_data = buffer.read_verifier_circuit_data(gate_serializer)?; + + Ok(Self { + block_data, + wrapped_block_data, + aggregated_block_data, + }) + } +} + /// Data for the EVM root circuit, which is used to combine each STARK's shrunk /// wrapper proof into a single proof. #[derive(Eq, PartialEq, Debug)] @@ -755,7 +857,8 @@ where } } - /// Outputs the `VerifierCircuitData` needed to verify any block proof + /// Outputs the `AllVerifierData` needed to verify any upper level proofs + /// (i.e. block, wrapped block, and block aggregation proofs) /// generated by an honest prover. /// While the [`AllRecursiveCircuits`] prover state can also verify proofs, /// verifiers only need a fraction of the state to verify proofs. 
This @@ -769,10 +872,14 @@ where /// let verifier_state = prover_state.final_verifier_data(); /// /// // Verify a provided block proof - /// assert!(verifier_state.verify(&block_proof).is_ok()); + /// assert!(verifier_state.verify_block(&block_proof).is_ok()); /// ``` - pub fn final_verifier_data(&self) -> VerifierCircuitData { - self.block.circuit.verifier_data() + pub fn final_verifier_data(&self) -> AllVerifierData { + AllVerifierData { + block_data: self.block.circuit.verifier_data(), + wrapped_block_data: self.block_wrapper.circuit.verifier_data(), + aggregated_block_data: self.two_to_one_block.circuit.verifier_data(), + } } fn create_segment_circuit( diff --git a/evm_arithmetization/src/lib.rs b/evm_arithmetization/src/lib.rs index 1a6a4a38e..5eae872c7 100644 --- a/evm_arithmetization/src/lib.rs +++ b/evm_arithmetization/src/lib.rs @@ -218,6 +218,7 @@ pub mod util; // Public definitions and re-exports mod public_types; +pub use proof::FinalPublicValues; pub use public_types::*; pub use starky::config::StarkConfig; diff --git a/evm_arithmetization/src/proof.rs b/evm_arithmetization/src/proof.rs index d4a04deff..39554d5c2 100644 --- a/evm_arithmetization/src/proof.rs +++ b/evm_arithmetization/src/proof.rs @@ -14,7 +14,7 @@ use starky::lookup::GrandProductChallengeSet; use starky::proof::{MultiProof, StarkProofChallenges}; use crate::all_stark::NUM_TABLES; -use crate::util::{get_h160, get_h256, get_u256, h256_limbs, h2u}; +use crate::util::{get_h160, get_h256, get_u256, h256_limbs, h2u, u256_to_u32}; use crate::witness::state::RegistersState; /// The default cap height used for our zkEVM STARK proofs. 
@@ -152,6 +152,18 @@ pub struct FinalPublicValues> { } impl> FinalPublicValues { + fn to_field_elements(&self) -> Vec { + let mut out = Vec::with_capacity(FinalPublicValuesTarget::SIZE); + + out.push(u256_to_u32(self.chain_id).expect("Chain ID should fit in a u32")); + out.extend(&h256_limbs(self.checkpoint_state_trie_root)); + out.extend(&h256_limbs(self.new_state_trie_root)); + out.extend(&self.checkpoint_consolidated_hash); + out.extend(&self.new_consolidated_hash); + + out + } + /// Extracts final public values from the given public inputs of a proof. /// Public values are always the first public inputs added to the circuit, /// so we can start extracting at index 0. @@ -293,6 +305,65 @@ impl FinalPublicValuesTarget { } } +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(bound = "")] +pub enum HashOrPV> { + /// Some `PublicValues` associated to a proof. + Val(FinalPublicValues), + + /// The hash of some `PublicValues`. + Hash(H::Hash), + + /// An arbitrary sequence of `HashorPV` values, useful for nested sequences. + Sequence(Vec>), +} + +impl> From> for HashOrPV { + fn from(values: FinalPublicValues) -> Self { + Self::Val(values) + } +} + +impl> HashOrPV { + pub fn hash(&self) -> H::Hash { + match self { + // Do nothing and just extract the underlying value + Self::Hash(h) => *h, + + // Flatten these public values into field elements and hash them + Self::Val(pvs) => H::hash_no_pad(&pvs.to_field_elements()), + + // Flatten this sequence first, and then hash and compress its + // public values using a foldleft approach. 
+ Self::Sequence(seq) => { + if seq.is_empty() { + panic!("Sequence should not be empty"); + } + + if seq.len() == 1 { + return seq[0].hash(); + } + + let mut seq_hash = seq[0].hash(); + + for item in seq.iter().skip(1) { + let next_hash = match item { + HashOrPV::Val(pvs) => H::hash_no_pad(&pvs.to_field_elements()), + HashOrPV::Hash(h) => *h, + HashOrPV::Sequence(sub_seq) => { + Self::hash(&HashOrPV::Sequence(sub_seq.to_vec())) + } + }; + + seq_hash = H::two_to_one(seq_hash, next_hash); + } + + seq_hash + } + } + } +} + /// Trie hashes. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct TrieRoots { @@ -320,17 +391,6 @@ impl TrieRoots { } } -// There should be 256 previous hashes stored, so the default should also -// contain 256 values. -impl Default for BlockHashes { - fn default() -> Self { - Self { - prev_hashes: vec![H256::default(); 256], - cur_hash: H256::default(), - } - } -} - /// User-provided helper values to compute the `BLOCKHASH` opcode. /// The proofs across consecutive blocks ensure that these values /// are consistent (i.e. shifted by one to the left). @@ -347,6 +407,17 @@ pub struct BlockHashes { pub cur_hash: H256, } +/// There should be 256 previous hashes stored, so the default should also +/// contain 256 values. 
+impl Default for BlockHashes { + fn default() -> Self { + Self { + prev_hashes: vec![H256::default(); 256], + cur_hash: H256::default(), + } + } +} + impl BlockHashes { pub fn from_public_inputs(pis: &[F]) -> Self { assert!(pis.len() == BlockHashesTarget::SIZE); diff --git a/evm_arithmetization/src/public_types.rs b/evm_arithmetization/src/public_types.rs index 0b917317d..492c15238 100644 --- a/evm_arithmetization/src/public_types.rs +++ b/evm_arithmetization/src/public_types.rs @@ -5,6 +5,7 @@ use crate::{generation::segments::SegmentError, GenerationSegmentData}; pub type Node = mpt_trie::partial_trie::Node; pub type BlockHeight = u64; +pub type ChainID = u64; use plonky2::{ field::goldilocks_field::GoldilocksField, hash::poseidon::PoseidonHash, @@ -31,6 +32,8 @@ pub type Hash = >::Hash; pub type ConsolidatedHash = [Field; NUM_HASH_OUT_ELTS]; pub use crate::proof::EMPTY_CONSOLIDATED_BLOCKHASH; +pub type HashOrPV = crate::proof::HashOrPV; + /// A type alias for recursive proofs generated by the zkEVM. pub type ProofWithPublicInputs = plonky2::plonk::proof::ProofWithPublicInputs; @@ -64,7 +67,7 @@ pub type RecursiveCircuitsForTableSize = /// A type alias for the verifier data necessary to verify succinct block /// proofs. /// While the prover state [`AllRecursiveCircuits`] can also verify proofs, this -/// [`VerifierData`] is much lighter, allowing anyone to verify block proofs, +/// [`VerifierData`] is much lighter, allowing anyone to verify final proofs, /// regardless of the underlying hardware. pub type VerifierData = - plonky2::plonk::circuit_data::VerifierCircuitData; + crate::fixed_recursive_verifier::AllVerifierData; diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 736b9a943..dcfe6cab7 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -47,6 +47,7 @@ NODE_RPC_TYPE=$4 IGNORE_PREVIOUS_PROOFS=$5 BACKOFF=${6:-0} RETRIES=${7:-0} +TEST_ONLY=$8 # Sometimes we need to override file logging, e.g. 
in the CI run OUTPUT_TO_TERMINAL="${OUTPUT_TO_TERMINAL:-false}" @@ -106,7 +107,7 @@ fi # If we set test_only flag, we'll generate a dummy # proof. This is useful for quickly testing decoding and all of the # other non-proving code. -if [[ $8 == "test_only" ]]; then +if [[ $TEST_ONLY == "test_only" ]]; then # test only run echo "Proving blocks ${BLOCK_INTERVAL} in a test_only mode now... (Total: ${TOT_BLOCKS})" command='cargo r --release --package zero --bin leader -- --test-only --runtime in-memory --load-strategy on-demand --proof-output-dir $PROOF_OUTPUT_DIR --block-batch-size $BLOCK_BATCH_SIZE rpc --rpc-type "$NODE_RPC_TYPE" --rpc-url "$NODE_RPC_URL" --block-interval $BLOCK_INTERVAL $PREV_PROOF_EXTRA_ARG --backoff "$BACKOFF" --max-retries "$RETRIES" ' @@ -160,7 +161,7 @@ if [ "$RUN_VERIFICATION" = true ]; then proof_file_name=$PROOF_OUTPUT_DIR/b$END_BLOCK.zkproof echo "Verifying the proof of the latest block in the interval:" $proof_file_name - cargo r --release --package zero --bin verifier -- -f $proof_file_name > $PROOF_OUTPUT_DIR/verify.out 2>&1 + cargo r --release --package zero --bin verifier -- -f $proof_file_name block > $PROOF_OUTPUT_DIR/verify.out 2>&1 if grep -q 'All proofs verified successfully!' 
$PROOF_OUTPUT_DIR/verify.out; then echo "$proof_file_name verified successfully!"; diff --git a/scripts/prove_stdio.sh b/scripts/prove_stdio.sh index 7249dce92..2aeb7a5d9 100755 --- a/scripts/prove_stdio.sh +++ b/scripts/prove_stdio.sh @@ -8,7 +8,8 @@ set -exo pipefail # Args: # 1 --> Input witness json file -# 2 --> Test run only flag `test_only` (optional) +# 2 --> Wrapping flag for the final block proof (boolean) +# 3 --> Test run only flag `test_only` (optional) # We're going to set the parallelism in line with the total cpu count if [[ "$OSTYPE" == "darwin"* ]]; then @@ -40,7 +41,8 @@ export RUST_LOG=info export RUSTFLAGS='-C target-cpu=native -Zlinker-features=-lld' INPUT_FILE=$1 -TEST_ONLY=$2 +WRAP_PROOF=$2 +TEST_ONLY=$3 if [[ $INPUT_FILE == "" ]]; then echo "Please provide witness json input file, e.g. artifacts/witness_b19240705.json" @@ -125,10 +127,30 @@ cat $PROOFS_FILE_LIST | while read proof_file; do echo "Verifying proof file $proof_file" verify_file=$PROOF_OUTPUT_DIR/verify_$(basename $proof_file).out - "${REPO_ROOT}/target/release/verifier" -f $proof_file | tee $verify_file + "${REPO_ROOT}/target/release/verifier" -f $proof_file block | tee $verify_file if grep -q 'All proofs verified successfully!' $verify_file; then - echo "Proof verification for file $proof_file successful"; - rm $verify_file # we keep the generated proof for potential reuse + echo "Proof verification for file $proof_file successful"; + rm $verify_file # we keep the generated proof for potential reuse + + if $WRAP_PROOF ; then + "${REPO_ROOT}/target/release/aggregator" --runtime in-memory --load-strategy on-demand --wrap stdio < $proof_file &> $OUTPUT_LOG + cat $OUTPUT_LOG | grep "Successfully wrote to disk proof file " | awk '{print $NF}' | tee $PROOFS_FILE_LIST + if [ ! -s "$PROOFS_FILE_LIST" ]; then + echo "Proof list not generated, some error happened. 
For more details check the log file $OUTPUT_LOG" + exit 1 + fi + + cat $PROOFS_FILE_LIST | while read proof_file; + do + echo "Verifying wrapped proof file $proof_file" + verify_file=$PROOF_OUTPUT_DIR/verify_$(basename $proof_file).out + "${REPO_ROOT}/target/release/verifier" -f $proof_file wrapped-block | tee $verify_file + if grep -q 'All proofs verified successfully!' $verify_file; then + echo "Wrapper proof verification for file $proof_file successful"; + rm $verify_file # we keep the generated proof for potential reuse + fi + done + fi else echo "there was an issue with proof verification"; exit 1 diff --git a/zero/src/bin/aggregator.rs b/zero/src/bin/aggregator.rs new file mode 100644 index 000000000..29c6289d8 --- /dev/null +++ b/zero/src/bin/aggregator.rs @@ -0,0 +1,96 @@ +zk_evm_common::check_chain_features!(); + +use std::sync::Arc; + +use anyhow::Result; +use clap::Parser; +use cli::Command; +use paladin::directive::{Directive, IndexedStream}; +use paladin::runtime::Runtime; +use tracing::info; +use zero::env::load_dotenvy_vars_if_present; +use zero::ops::{register, BlockAggProof, WrappedBlockProof}; +use zero::proof_types::{ + AggregatableBlockProof, GeneratedAggBlockProof, GeneratedBlockProof, GeneratedWrappedBlockProof, +}; +use zero::prover::ProverConfig; +use zero::prover_state::persistence::set_circuit_cache_dir_env_if_not_set; + +use self::aggregator::*; +mod aggregator { + pub mod cli; + pub mod stdio; +} + +pub async fn wrap( + proof: GeneratedBlockProof, + runtime: Arc, + prover_config: Arc, +) -> Result { + let block_number = proof.b_height; + info!("Wrapping block proof at height {block_number}"); + + let block_proof = paladin::directive::Literal(proof) + .map(&WrappedBlockProof { + save_inputs_on_error: prover_config.save_inputs_on_error, + }) + .run(&runtime) + .await?; + + info!("Successfully wrapped proof for block {block_number}"); + Ok(block_proof.0) +} + +pub async fn aggregate( + proofs: Vec, + runtime: Arc, + prover_config: Arc, +) 
-> Result { + info!("Aggregating wrapped block proofs"); + + let agg_proof = IndexedStream::from(proofs) + .fold(&BlockAggProof { + save_inputs_on_error: prover_config.save_inputs_on_error, + }) + .run(&runtime) + .await?; + + info!("Successfully aggregated block proofs"); + + Ok(agg_proof.into()) +} + +#[tokio::main] +async fn main() -> Result<()> { + load_dotenvy_vars_if_present(); + set_circuit_cache_dir_env_if_not_set()?; + zero::tracing::init(); + + let args = cli::Cli::parse(); + + if let paladin::config::Runtime::InMemory = args.paladin.runtime { + args.prover_state_config + .into_prover_state_manager() + .initialize()?; + } + + let runtime = Arc::new(Runtime::from_config(&args.paladin, register()).await?); + let prover_config: ProverConfig = args.prover_config.into(); + if prover_config.block_pool_size == 0 { + panic!("block-pool-size must be greater than 0"); + } + + match args.command { + Command::Stdio {} => { + if args.wrap { + stdio::stdio_wrap(runtime, Arc::new(prover_config)).await? + } else { + stdio::stdio_aggregate(runtime, Arc::new(prover_config)).await? + } + } + Command::Rpc {} => todo!(), // TODO(Robin): Do we want to support RPC input source? + Command::Http {} => todo!(), // TODO(Robin): Do we want to support HTTP input source? + } + + Ok(()) +} diff --git a/zero/src/bin/aggregator/cli.rs b/zero/src/bin/aggregator/cli.rs new file mode 100644 index 000000000..7e008b912 --- /dev/null +++ b/zero/src/bin/aggregator/cli.rs @@ -0,0 +1,36 @@ +use clap::{Parser, Subcommand}; +use zero::{prover::cli::CliProverConfig, prover_state::cli::CliProverStateConfig}; + +/// zero-bin leader config +#[derive(Parser)] +#[command(version = zero::version(), propagate_version = true)] +pub(crate) struct Cli { + #[command(subcommand)] + pub(crate) command: Command, + + /// Boolean indicating whether to wrap or aggregate block proofs. + /// Defaults to `false = aggregate`. 
+ #[arg(short, long, default_value_t = false)] + pub(crate) wrap: bool, + + #[clap(flatten)] + pub(crate) paladin: paladin::config::Config, + + #[clap(flatten)] + pub(crate) prover_config: CliProverConfig, + + // Note this is only relevant for the aggregator when running in in-memory + // mode. + #[clap(flatten)] + pub(crate) prover_state_config: CliProverStateConfig, +} + +#[derive(Subcommand)] +pub(crate) enum Command { + /// Reads input from stdin and writes output to stdout. + Stdio, + /// Reads input from a node rpc and writes output to stdout. + Rpc, + /// Reads input from HTTP and writes output to a directory. + Http, +} diff --git a/zero/src/bin/aggregator/stdio.rs b/zero/src/bin/aggregator/stdio.rs new file mode 100644 index 000000000..b11d09604 --- /dev/null +++ b/zero/src/bin/aggregator/stdio.rs @@ -0,0 +1,111 @@ +use std::io::Read; +use std::sync::Arc; + +use anyhow::{ensure, Result}; +use paladin::runtime::Runtime; +use tracing::{error, info}; +use zero::{ + fs::write_proof_to_dir, + proof_types::{AggregatableBlockProof, GeneratedBlockProof}, + prover::ProverConfig, +}; + +// TODO(Robin): This should probably live in `leader` binary +/// Wrapping function for the stdio mode. +pub(crate) async fn stdio_wrap( + runtime: Arc, + prover_config: Arc, +) -> Result<()> { + let mut buffer = String::new(); + std::io::stdin().read_to_string(&mut buffer)?; + + let des = &mut serde_json::Deserializer::from_str(&buffer); + let mut block_proof = serde_path_to_error::deserialize::<_, Vec>(des)? 
+ .into_iter() + .collect::>(); + + ensure!( + block_proof.len() == 1, + "Expected only one block proof to be wrapped", + ); + let block_proof = block_proof.pop().expect("valid block proof"); + let block_number = block_proof.b_height; + + let proving_task = tokio::spawn(crate::wrap( + block_proof, + runtime.clone(), + prover_config.clone(), + )); + + let proof = match proving_task.await { + Ok(Ok(proof)) => { + info!("Proving task successfully finished"); + AggregatableBlockProof::Block(proof) + } + Ok(Err(e)) => { + anyhow::bail!("Proving task finished with error: {e:?}"); + } + Err(e) => { + anyhow::bail!("Unable to join proving task, error: {e:?}"); + } + }; + + runtime.close().await?; + + write_proof_to_dir( + &prover_config.proof_output_dir, + proof, + Some("_wrapper".to_string()), + ) + .await + .inspect_err(|e| { + error!("failed to output wrapped proof for block {block_number} to directory {e:?}") + })?; + + Ok(()) +} + +/// Aggregation function for the stdio mode. +pub(crate) async fn stdio_aggregate( + runtime: Arc, + prover_config: Arc, +) -> Result<()> { + let mut buffer = String::new(); + std::io::stdin().read_to_string(&mut buffer)?; + + let des = &mut serde_json::Deserializer::from_str(&buffer); + let block_proofs = serde_path_to_error::deserialize::<_, Vec>(des)? 
+ .into_iter() + .collect::>(); + + let proving_task = tokio::spawn(crate::aggregate( + block_proofs, + runtime.clone(), + prover_config.clone(), + )); + + let proof = match proving_task.await { + Ok(Ok(proof)) => { + info!("Proving task successfully finished"); + proof + } + Ok(Err(e)) => { + anyhow::bail!("Proving task finished with error: {e:?}"); + } + Err(e) => { + anyhow::bail!("Unable to join proving task, error: {e:?}"); + } + }; + + runtime.close().await?; + + write_proof_to_dir( + &prover_config.proof_output_dir, + proof, + Some("block_aggreg".to_string()), + ) + .await + .inspect_err(|e| error!("failed to output aggregated block proof to directory {e:?}"))?; + + Ok(()) +} diff --git a/zero/src/bin/verifier.rs b/zero/src/bin/verifier.rs index d306eed10..2ebdbc1f2 100644 --- a/zero/src/bin/verifier.rs +++ b/zero/src/bin/verifier.rs @@ -2,12 +2,13 @@ zk_evm_common::check_chain_features!(); use std::fs::File; -use anyhow::Result; +use anyhow::{Context, Result}; use clap::Parser; use dotenvy::dotenv; +use evm_arithmetization::fixed_recursive_verifier::extract_two_to_one_block_hash; use serde_json::Deserializer; -use tracing::info; -use zero::proof_types::GeneratedBlockProof; +use tracing::{error, info, warn}; +use zero::proof_types::{AggregatableBlockProof, GeneratedAggBlockProof, GeneratedBlockProof}; use zero::prover_state::persistence::set_circuit_cache_dir_env_if_not_set; use self::verifier::*; @@ -23,24 +24,66 @@ fn main() -> Result<()> { let args = cli::Cli::parse(); - let file = File::open(args.file_path)?; - let des = &mut Deserializer::from_reader(&file); - let input_proofs: Vec = serde_path_to_error::deserialize(des)?; - let verifier = args .prover_state_config .into_prover_state_manager() .verifier()?; - if input_proofs.into_iter().all(|block_proof| { - verifier - .verify(&block_proof.intern) - .map_err(|e| { - info!("Proof verification failed with error: {:?}", e); - }) - .is_ok() - }) { - info!("All proofs verified successfully!"); + let file = 
File::open(args.file_path)?; + let des = &mut Deserializer::from_reader(&file); + + match args.command { + cli::Command::Block => { + let input_proofs: Vec = serde_path_to_error::deserialize(des)?; + + if input_proofs.into_iter().all(|block_proof| { + verifier + .verify_block(&block_proof.intern) + .context("Failed to verify block proof") + .inspect_err(|e| error!("{e:?}")) + .is_ok() + }) { + info!("All proofs verified successfully!"); + }; + } + cli::Command::WrappedBlock => { + let input_proofs: Vec = serde_path_to_error::deserialize(des)?; + + if input_proofs.into_iter().all(|block_proof| { + verifier + .verify_block_wrapper(block_proof.intern()) + .context("Failed to verify wrapped block proof") + .inspect_err(|e| error!("{e:?}")) + .is_ok() + }) { + info!("All proofs verified successfully!"); + }; + } + cli::Command::AggBlock => { + let input_proofs: Vec = serde_path_to_error::deserialize(des)?; + + if input_proofs.into_iter().all(|wrapped_proof| { + // Assert consistency of the sequence of Public Values. + // + // This is not needed for proof verification, but allows to *trust* + // that the public info being attached in the clear are actually what + // was used internally when generating this proof, for external use. 
+ let pis_match = if extract_two_to_one_block_hash(&wrapped_proof.intern.public_inputs) != &wrapped_proof.p_vals.hash().elements { + warn!("The sequence of Public Values attached to this proof does not match the public inputs hash."); + false + } else { + true + }; + + verifier + .verify_block_aggreg(&wrapped_proof.intern) + .context("Failed to verify aggregated block proof") + .inspect_err(|e| error!("{e:?}")) + .is_ok() && pis_match + }) { + info!("All proofs verified successfully!"); + }; + } }; Ok(()) diff --git a/zero/src/bin/verifier/cli.rs b/zero/src/bin/verifier/cli.rs index 8c046d85c..fa9b03223 100644 --- a/zero/src/bin/verifier/cli.rs +++ b/zero/src/bin/verifier/cli.rs @@ -1,11 +1,14 @@ use std::path::PathBuf; -use clap::{Parser, ValueHint}; +use clap::{Parser, Subcommand, ValueHint}; use zero::prover_state::cli::CliProverStateConfig; #[derive(Parser)] #[command(version = zero::version(), propagate_version = true)] pub(crate) struct Cli { + #[command(subcommand)] + pub(crate) command: Command, + /// The file containing the proof to verify #[arg(short, long, value_hint = ValueHint::FilePath)] pub(crate) file_path: PathBuf, @@ -14,3 +17,13 @@ pub(crate) struct Cli { #[clap(flatten)] pub(crate) prover_state_config: CliProverStateConfig, } + +#[derive(Subcommand)] +pub(crate) enum Command { + /// Verifies a provided block proof. + Block, + /// Verifies a provided wrapped block proof. + WrappedBlock, + /// Verifies a provided aggregated block proof. 
+ AggBlock, +} diff --git a/zero/src/fs.rs b/zero/src/fs.rs index c53a676d0..85b4c55d7 100644 --- a/zero/src/fs.rs +++ b/zero/src/fs.rs @@ -1,13 +1,26 @@ use std::fs::File; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; +use tokio::io::AsyncWriteExt; -use crate::proof_types::GeneratedBlockProof; +use crate::proof_types::{GeneratedBlockProof, WritableProof}; -pub fn generate_block_proof_file_name(directory: &Option<&str>, block_height: u64) -> PathBuf { +pub fn generate_block_proof_file_name( + directory: &Option<&str>, + block_height: Option, + extra_info: &Option, +) -> PathBuf { let mut path = PathBuf::from(directory.unwrap_or("")); - path.push(format!("b{}.zkproof", block_height)); + let mut filename = if let Some(height) = block_height { + format!("b{}", height) + } else { + "".to_string() + }; + if let Some(info) = extra_info { + filename = filename + info; + } + path.push(filename + ".zkproof"); path } @@ -27,3 +40,37 @@ pub fn get_previous_proof(path: Option) -> anyhow::Result( + output_dir: &Path, + proof: P, + extra_info: Option, +) -> anyhow::Result<()> { + // Check if output directory exists, and create one if it doesn't. + if !output_dir.exists() { + tracing::info!("Created output directory {:?}", output_dir.display()); + std::fs::create_dir(output_dir)?; + } + + let block_proof_file_path = + generate_block_proof_file_name(&output_dir.to_str(), proof.block_height(), &extra_info); + + // Serialize as a single element array to match the expected format. 
+ let proof_serialized = serde_json::to_vec(&vec![proof])?; + + if let Some(parent) = block_proof_file_path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + + let mut f = tokio::fs::File::create(block_proof_file_path.clone()).await?; + f.write_all(&proof_serialized) + .await + .context("Failed to write proof to disk")?; + + tracing::info!( + "Successfully wrote to disk proof file {}", + block_proof_file_path.display() + ); + Ok(()) +} diff --git a/zero/src/ops.rs b/zero/src/ops.rs index cc180c7e5..be10a2e09 100644 --- a/zero/src/ops.rs +++ b/zero/src/ops.rs @@ -4,8 +4,9 @@ use std::time::Instant; use anyhow::anyhow; use evm_arithmetization::fixed_recursive_verifier::ProverOutputData; +use evm_arithmetization::proof::FinalPublicValues; use evm_arithmetization::{prover::testing::simulate_execution_all_segments, GenerationInputs}; -use evm_arithmetization::{Field, PublicValues, TrimmedGenerationInputs}; +use evm_arithmetization::{Field, HashOrPV, PublicValues, TrimmedGenerationInputs}; use paladin::{ operation::{FatalError, FatalStrategy, Monoid, Operation, Result}, registry, RemoteExecute, @@ -16,7 +17,8 @@ use tracing::{event, info_span, Level}; use crate::debug_utils::save_tries_to_disk; use crate::proof_types::{ - BatchAggregatableProof, GeneratedBlockProof, GeneratedSegmentAggProof, GeneratedTxnAggProof, + AggregatableBlockProof, BatchAggregatableProof, GeneratedAggBlockProof, GeneratedBatchAggProof, + GeneratedBlockProof, GeneratedSegmentAggProof, GeneratedWrappedBlockProof, SegmentAggregatableProof, }; use crate::prover_state::ProverState; @@ -302,6 +304,7 @@ impl Monoid for SegmentAggProof { pub struct BatchAggProof { pub save_inputs_on_error: bool, } + fn get_agg_proof_public_values(elem: BatchAggregatableProof) -> PublicValues { match elem { BatchAggregatableProof::Segment(info) => info.p_vals, @@ -370,7 +373,7 @@ impl Monoid for BatchAggProof { FatalError::from_str(&e.to_string(), FatalStrategy::Terminate) })?; - Ok(GeneratedTxnAggProof { + 
Ok(GeneratedBatchAggProof { p_vals, intern: proof, } @@ -390,7 +393,7 @@ pub struct BlockProof { } impl Operation for BlockProof { - type Input = GeneratedTxnAggProof; + type Input = GeneratedBatchAggProof; type Output = GeneratedBlockProof; fn execute(&self, input: Self::Input) -> Result { @@ -402,13 +405,9 @@ impl Operation for BlockProof { .prove_block(parent_intern, &input.intern, input.p_vals.clone()) .map_err(|e| { if self.save_inputs_on_error { - if let Err(write_err) = save_inputs_to_disk( - format!( - "b{}_block_input.json", - input.p_vals.block_metadata.block_number - ), - input.p_vals, - ) { + if let Err(write_err) = + save_inputs_to_disk(format!("b{}_block_input.json", b_height), input.p_vals) + { error!("Failed to save block proof input to disk: {:?}", write_err); } } @@ -422,3 +421,104 @@ impl Operation for BlockProof { }) } } + +#[derive(Deserialize, Serialize, RemoteExecute)] +pub struct WrappedBlockProof { + pub save_inputs_on_error: bool, +} + +impl Operation for WrappedBlockProof { + type Input = GeneratedBlockProof; + type Output = GeneratedWrappedBlockProof; + + fn execute(&self, input: Self::Input) -> Result { + let b_height = input.b_height; + let p_vals = PublicValues::from_public_inputs(&input.intern.public_inputs); + let chain_id = p_vals.block_metadata.block_chain_id.low_u64(); + + let (b_proof_intern, _) = p_state() + .state + .prove_block_wrapper(&input.intern, p_vals.clone()) + .map_err(|e| { + if self.save_inputs_on_error { + if let Err(write_err) = + save_inputs_to_disk(format!("b{}_block_input.json", b_height), p_vals) + { + error!( + "Failed to save wrapped block proof input to disk: {:?}", + write_err + ); + } + } + + FatalError::from_str(&e.to_string(), FatalStrategy::Terminate) + })?; + + Ok(GeneratedWrappedBlockProof { + b_height, + chain_id, + intern: b_proof_intern, + }) + } +} + +#[derive(Deserialize, Serialize, RemoteExecute)] +pub struct BlockAggProof { + pub save_inputs_on_error: bool, +} + +fn 
get_block_agg_proof_public_values(elem: AggregatableBlockProof) -> HashOrPV { + match elem { + AggregatableBlockProof::Block(info) => { + FinalPublicValues::from_public_inputs(&info.intern.public_inputs).into() + } + AggregatableBlockProof::Agg(info) => info.p_vals, + } +} + +impl Monoid for BlockAggProof { + type Elem = AggregatableBlockProof; + + fn combine(&self, a: Self::Elem, b: Self::Elem) -> Result { + let proof = p_state() + .state + .prove_two_to_one_block(a.intern(), a.is_agg(), b.intern(), b.is_agg()) + .map_err(|e| { + if self.save_inputs_on_error { + let pv = vec![ + get_block_agg_proof_public_values(a.clone()), + get_block_agg_proof_public_values(b.clone()), + ]; + + if let Err(write_err) = save_inputs_to_disk( + format!( + "block_agg_{:?}_{:?}_inputs.json", + pv[0].hash(), + pv[1].hash(), + ), + pv, + ) { + error!("Failed to save agg proof inputs to disk: {:?}", write_err); + } + } + + FatalError::from_str(&e.to_string(), FatalStrategy::Terminate) + })?; + + // Unhashed public values are 112 bytes long, i.e. small enough to be aggregated + // in the clear. This will allow the verifier to easily perform + // additional verification on the *claimed* chain data to be verified. + let p_vals = HashOrPV::Sequence(vec![a.public_values(), b.public_values()]); + + Ok(GeneratedAggBlockProof { + p_vals, + intern: proof, + } + .into()) + } + + fn empty(&self) -> Self::Elem { + // Expect that empty blocks are padded. + unimplemented!("empty agg proof") + } +} diff --git a/zero/src/proof_types.rs b/zero/src/proof_types.rs index 55ab492d0..c382cba9e 100644 --- a/zero/src/proof_types.rs +++ b/zero/src/proof_types.rs @@ -2,11 +2,9 @@ //! generation process. 
use evm_arithmetization::{ - fixed_recursive_verifier::{extract_block_final_public_values, extract_two_to_one_block_hash}, - BlockHeight, Hash, Hasher, ProofWithPublicInputs, PublicValues, + BlockHeight, ChainID, FinalPublicValues, HashOrPV, ProofWithPublicInputs, PublicValues, }; -use plonky2::plonk::config::Hasher as _; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// A transaction proof along with its public values, for proper connection with /// contiguous proofs. @@ -37,7 +35,7 @@ pub struct GeneratedSegmentAggProof { /// Transaction agregation proofs can represent any contiguous range of two or /// more transactions, up to an entire block. #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct GeneratedTxnAggProof { +pub struct GeneratedBatchAggProof { /// Public values of this transaction aggregation proof. pub p_vals: PublicValues, /// Underlying plonky2 proof. @@ -54,13 +52,27 @@ pub struct GeneratedBlockProof { pub intern: ProofWithPublicInputs, } -/// An aggregation block proof along with its hashed public values, for proper -/// connection with other proofs. +/// A wrapped block proof along with the block height against which this proof +/// ensures the validity since the last proof checkpoint. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct GeneratedWrappedBlockProof { + /// Associated block height. + pub b_height: BlockHeight, + /// Associated chain ID. + pub chain_id: ChainID, + /// Underlying plonky2 proof. + pub intern: ProofWithPublicInputs, +} + +/// An aggregation block proof along with its public values, for proper +/// verification by a third-party. /// /// Aggregation block proofs can represent any aggregation of independent /// blocks. #[derive(Clone, Debug, Deserialize, Serialize)] pub struct GeneratedAggBlockProof { + /// Public values of this aggregation proof. + pub p_vals: HashOrPV, /// Underlying plonky2 proof. 
pub intern: ProofWithPublicInputs, } @@ -87,7 +99,7 @@ pub enum BatchAggregatableProof { /// The underlying proof is a transaction proof. Txn(GeneratedSegmentAggProof), /// The underlying proof is an aggregation proof. - Agg(GeneratedTxnAggProof), + Agg(GeneratedBatchAggProof), } impl SegmentAggregatableProof { @@ -157,8 +169,8 @@ impl From for BatchAggregatableProof { } } -impl From for BatchAggregatableProof { - fn from(v: GeneratedTxnAggProof) -> Self { +impl From for BatchAggregatableProof { + fn from(v: GeneratedBatchAggProof) -> Self { Self::Agg(v) } } @@ -174,28 +186,22 @@ impl From for BatchAggregatableProof { #[derive(Clone, Debug, Deserialize, Serialize)] pub enum AggregatableBlockProof { - /// The underlying proof is a single block proof. - Block(GeneratedBlockProof), + /// The underlying proof is a single wrapped block proof. + Block(GeneratedWrappedBlockProof), /// The underlying proof is an aggregated proof. Agg(GeneratedAggBlockProof), } impl AggregatableBlockProof { - pub fn pv_hash(&self) -> Hash { + pub(crate) fn public_values(&self) -> HashOrPV { match self { - AggregatableBlockProof::Block(info) => { - let pv = extract_block_final_public_values(&info.intern.public_inputs); - Hasher::hash_no_pad(pv) - } - AggregatableBlockProof::Agg(info) => { - let hash = extract_two_to_one_block_hash(&info.intern.public_inputs); - Hash::from_partial(hash) - } + AggregatableBlockProof::Block(info) => HashOrPV::Val( + FinalPublicValues::from_public_inputs(&info.intern.public_inputs), + ), + AggregatableBlockProof::Agg(info) => info.p_vals.clone(), } } - // TODO(Robin): https://github.com/0xPolygonZero/zk_evm/issues/387 - #[allow(unused)] pub(crate) const fn is_agg(&self) -> bool { match self { AggregatableBlockProof::Block(_) => false, @@ -203,9 +209,7 @@ impl AggregatableBlockProof { } } - // TODO(Robin): https://github.com/0xPolygonZero/zk_evm/issues/387 - #[allow(unused)] - pub(crate) const fn intern(&self) -> &ProofWithPublicInputs { + pub const fn 
intern(&self) -> &ProofWithPublicInputs { match self { AggregatableBlockProof::Block(info) => &info.intern, AggregatableBlockProof::Agg(info) => &info.intern, @@ -213,14 +217,59 @@ impl AggregatableBlockProof { } } -impl From for AggregatableBlockProof { - fn from(v: GeneratedBlockProof) -> Self { +impl From for AggregatableBlockProof { + fn from(v: GeneratedWrappedBlockProof) -> Self { Self::Block(v) } } +impl From for GeneratedAggBlockProof { + fn from(v: AggregatableBlockProof) -> Self { + match v { + AggregatableBlockProof::Block(info) => GeneratedAggBlockProof { + p_vals: HashOrPV::Val(FinalPublicValues::from_public_inputs( + &info.intern.public_inputs, + )), + intern: info.intern, + }, + AggregatableBlockProof::Agg(info) => info, + } + } +} + impl From for AggregatableBlockProof { fn from(v: GeneratedAggBlockProof) -> Self { Self::Agg(v) } } + +pub trait WritableProof: Serialize + DeserializeOwned { + fn block_height(&self) -> Option; +} + +impl WritableProof for GeneratedBlockProof { + fn block_height(&self) -> Option { + Some(self.b_height) + } +} + +impl WritableProof for GeneratedWrappedBlockProof { + fn block_height(&self) -> Option { + Some(self.b_height) + } +} + +impl WritableProof for GeneratedAggBlockProof { + fn block_height(&self) -> Option { + None + } +} + +impl WritableProof for AggregatableBlockProof { + fn block_height(&self) -> Option { + match self { + AggregatableBlockProof::Block(block) => Some(block.b_height), + AggregatableBlockProof::Agg(_) => None, + } + } +} diff --git a/zero/src/prover.rs b/zero/src/prover.rs index 665d4f828..cd3df5dfd 100644 --- a/zero/src/prover.rs +++ b/zero/src/prover.rs @@ -3,11 +3,11 @@ zk_evm_common::check_chain_features!(); pub mod cli; use std::future::Future; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; use alloy::primitives::U256; -use anyhow::{Context, Result}; +use anyhow::Result; use evm_arithmetization::Field; use futures::{future::BoxFuture, FutureExt, TryFutureExt, 
TryStreamExt}; use hashbrown::HashMap; @@ -17,14 +17,13 @@ use plonky2::gates::noop::NoopGate; use plonky2::plonk::circuit_builder::CircuitBuilder; use plonky2::plonk::circuit_data::CircuitConfig; use serde::{Deserialize, Serialize}; -use tokio::io::AsyncWriteExt; use tokio::sync::mpsc::Receiver; use tokio::sync::{oneshot, Semaphore}; use trace_decoder::observer::DummyObserver; use trace_decoder::{BlockTrace, OtherBlockData}; use tracing::{error, info}; -use crate::fs::generate_block_proof_file_name; +use crate::fs::write_proof_to_dir; use crate::ops; use crate::proof_types::GeneratedBlockProof; @@ -310,7 +309,7 @@ pub async fn prove( || prover_config.keep_intermediate_proofs || is_block_batch_finished) { - write_proof_to_dir(&prover_config.proof_output_dir, proof.clone()) + write_proof_to_dir(&prover_config.proof_output_dir, proof.clone(), None) .await .inspect_err(|e| error!("failed to output proof for block {block_number} to directory {e:?}"))?; } @@ -339,33 +338,3 @@ pub async fn prove( } Ok(()) } - -/// Write the proof to the `output_dir` directory. -async fn write_proof_to_dir(output_dir: &Path, proof: GeneratedBlockProof) -> Result<()> { - // Check if output directory exists, and create one if it doesn't. - if !output_dir.exists() { - info!("Created output directory {:?}", output_dir.display()); - std::fs::create_dir(output_dir)?; - } - - let block_proof_file_path = - generate_block_proof_file_name(&output_dir.to_str(), proof.b_height); - - // Serialize as a single element array to match the expected format. 
- let proof_serialized = serde_json::to_vec(&vec![proof])?; - - if let Some(parent) = block_proof_file_path.parent() { - tokio::fs::create_dir_all(parent).await?; - } - - let mut f = tokio::fs::File::create(block_proof_file_path.clone()).await?; - f.write_all(&proof_serialized) - .await - .context("Failed to write proof to disk")?; - - info!( - "Successfully wrote to disk proof file {}", - block_proof_file_path.display() - ); - Ok(()) -} diff --git a/zero/src/prover_state/mod.rs b/zero/src/prover_state/mod.rs index 5dc34a53d..cce1b16d3 100644 --- a/zero/src/prover_state/mod.rs +++ b/zero/src/prover_state/mod.rs @@ -21,7 +21,6 @@ use evm_arithmetization::{ TrimmedGenerationInputs, }; use evm_arithmetization::{ProofWithPublicInputs, VerifierData}; -use plonky2::recursion::cyclic_recursion::check_cyclic_proof_verifier_data; use plonky2::util::timing::TimingTree; use tracing::info; @@ -63,12 +62,25 @@ impl> From for VerifierState { impl VerifierState { /// Verifies a `block_proof`. - pub fn verify(&self, block_proof: &ProofWithPublicInputs) -> anyhow::Result<()> { - // Proof verification - self.state.verify(block_proof.clone())?; + pub fn verify_block(&self, block_proof: &ProofWithPublicInputs) -> anyhow::Result<()> { + self.state.verify_block(block_proof.clone()) + } - // Verifier data verification - check_cyclic_proof_verifier_data(block_proof, &self.state.verifier_only, &self.state.common) + /// Verifies a `wrapped_block_proof`. + pub fn verify_block_wrapper( + &self, + wrapped_block_proof: &ProofWithPublicInputs, + ) -> anyhow::Result<()> { + self.state.verify_block_wrapper(wrapped_block_proof.clone()) + } + + /// Verifies an aggregation of independent `wrapped_block_proof` combined + /// into a single `agg_block_proof`. + pub fn verify_block_aggreg( + &self, + agg_block_proof: &ProofWithPublicInputs, + ) -> anyhow::Result<()> { + self.state.verify_block_aggreg(agg_block_proof.clone()) } }